diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 55a8968a75ad0..0000000000000 --- a/.gitignore +++ /dev/null @@ -1,101 +0,0 @@ -# -# NOTE! Don't add files that are generated in specific -# subdirectories here. Add them in the ".gitignore" file -# in that subdirectory instead. -# -# NOTE! Please use 'git ls-files -i --exclude-standard' -# command after changing this file, to see if there are -# any tracked files which get ignored after the change. -# -# Normal rules -# -.* -*.o -*.o.* -*.a -*.s -*.ko -*.so -*.so.dbg -*.mod.c -*.i -*.lst -*.symtypes -*.order -modules.builtin -*.elf -*.bin -*.gz -*.bz2 -*.lzma -*.xz -*.lz4 -*.lzo -*.patch -*.gcno - -# -# Top-level generic files -# -/tags -/TAGS -/linux -/vmlinux -/vmlinuz -/System.map -/Module.markers -/Module.symvers - -# -# Debian directory (make deb-pkg) -# -/debian/ - -# -# git files that we don't want to ignore even it they are dot-files -# -!.gitignore -!.mailmap - -# -# Generated include files -# -include/config -include/generated -arch/*/include/generated - -# stgit generated dirs -patches-* - -# quilt's files -patches -series - -# cscope files -cscope.* -ncscope.* - -# gnu global files -GPATH -GRTAGS -GSYMS -GTAGS - -*.orig -*~ -\#*# - -# -# Leavings from module signing -# -extra_certificates -signing_key.priv -signing_key.x509 -x509.genkey - -# Kconfig presets -all.config - -# customer folder -customer - diff --git a/Documentation/.gitignore b/Documentation/.gitignore deleted file mode 100644 index bcd907b4141f2..0000000000000 --- a/Documentation/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -filesystems/dnotify_test -laptops/dslm -timers/hpet_example -vm/hugepage-mmap -vm/hugepage-shm -vm/map_hugetlb - diff --git a/Documentation/DocBook/.gitignore b/Documentation/DocBook/.gitignore deleted file mode 100644 index 7ebd5465d9277..0000000000000 --- a/Documentation/DocBook/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -*.xml -*.ps -*.pdf -*.html -*.9.gz -*.9 -*.aux -*.dvi -*.log -*.out -*.png -*.gif 
-*.svg -media-indices.tmpl -media-entities.tmpl diff --git a/Documentation/DocBook/media/dvb/.gitignore b/Documentation/DocBook/media/dvb/.gitignore deleted file mode 100644 index d7ec32eafac93..0000000000000 --- a/Documentation/DocBook/media/dvb/.gitignore +++ /dev/null @@ -1 +0,0 @@ -!*.xml diff --git a/Documentation/DocBook/media/v4l/.gitignore b/Documentation/DocBook/media/v4l/.gitignore deleted file mode 100644 index d7ec32eafac93..0000000000000 --- a/Documentation/DocBook/media/v4l/.gitignore +++ /dev/null @@ -1 +0,0 @@ -!*.xml diff --git a/Documentation/accounting/.gitignore b/Documentation/accounting/.gitignore deleted file mode 100644 index 86485203c4aea..0000000000000 --- a/Documentation/accounting/.gitignore +++ /dev/null @@ -1 +0,0 @@ -getdelays diff --git a/Documentation/auxdisplay/.gitignore b/Documentation/auxdisplay/.gitignore deleted file mode 100644 index 7af222860a967..0000000000000 --- a/Documentation/auxdisplay/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cfag12864b-example diff --git a/Documentation/connector/.gitignore b/Documentation/connector/.gitignore deleted file mode 100644 index d2b9c32accd48..0000000000000 --- a/Documentation/connector/.gitignore +++ /dev/null @@ -1 +0,0 @@ -ucon diff --git a/Documentation/ia64/.gitignore b/Documentation/ia64/.gitignore deleted file mode 100644 index ab806edc87325..0000000000000 --- a/Documentation/ia64/.gitignore +++ /dev/null @@ -1 +0,0 @@ -aliasing-test diff --git a/Documentation/mic/mpssd/.gitignore b/Documentation/mic/mpssd/.gitignore deleted file mode 100644 index 8b7c72f07c92f..0000000000000 --- a/Documentation/mic/mpssd/.gitignore +++ /dev/null @@ -1 +0,0 @@ -mpssd diff --git a/Documentation/misc-devices/mei/.gitignore b/Documentation/misc-devices/mei/.gitignore deleted file mode 100644 index f356b81ca1ecb..0000000000000 --- a/Documentation/misc-devices/mei/.gitignore +++ /dev/null @@ -1 +0,0 @@ -mei-amt-version diff --git a/Documentation/networking/timestamping/.gitignore 
b/Documentation/networking/timestamping/.gitignore deleted file mode 100644 index a380159765ce6..0000000000000 --- a/Documentation/networking/timestamping/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -timestamping -hwtstamp_config diff --git a/Documentation/pcmcia/.gitignore b/Documentation/pcmcia/.gitignore deleted file mode 100644 index 53d0813367574..0000000000000 --- a/Documentation/pcmcia/.gitignore +++ /dev/null @@ -1 +0,0 @@ -crc32hash diff --git a/Documentation/spi/.gitignore b/Documentation/spi/.gitignore deleted file mode 100644 index 4280576397e87..0000000000000 --- a/Documentation/spi/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -spidev_fdx -spidev_test diff --git a/Documentation/video4linux/.gitignore b/Documentation/video4linux/.gitignore deleted file mode 100644 index 952703943e8e8..0000000000000 --- a/Documentation/video4linux/.gitignore +++ /dev/null @@ -1 +0,0 @@ -v4lgrab diff --git a/Documentation/vm/.gitignore b/Documentation/vm/.gitignore deleted file mode 100644 index 09b164a5700ff..0000000000000 --- a/Documentation/vm/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -page-types -slabinfo diff --git a/Documentation/watchdog/src/.gitignore b/Documentation/watchdog/src/.gitignore deleted file mode 100644 index ac90997dba937..0000000000000 --- a/Documentation/watchdog/src/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -watchdog-simple -watchdog-test diff --git a/Makefile b/Makefile index c607470e8eb39..9aee7b9194ed7 100644 --- a/Makefile +++ b/Makefile @@ -381,7 +381,8 @@ KBUILD_CPPFLAGS := -D__KERNEL__ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common \ -Wno-format-security \ - -fno-delete-null-pointer-checks + -fno-delete-null-pointer-checks \ + -mstrict-align KBUILD_CFLAGS += -Wno-error=cpp KBUILD_AFLAGS_KERNEL := KBUILD_CFLAGS_KERNEL := @@ -438,6 +439,9 @@ PHONY += scripts_basic scripts_basic: $(Q)$(MAKE) $(build)=scripts/basic $(Q)rm -f .tmp_quiet_recordmcount + $(Q)mkdir -p $(srctree)/.git/hooks + $(Q)cp 
$(srctree)/scripts/amlogic/pre-commit $(srctree)/.git/hooks/pre-commit + $(Q)chmod +x $(srctree)/.git/hooks/pre-commit # To avoid any implicit rule to kick in, define an empty command. scripts/basic/%: scripts_basic ; diff --git a/arch/.gitignore b/arch/.gitignore deleted file mode 100644 index 7414689203208..0000000000000 --- a/arch/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -i386 -x86_64 diff --git a/arch/alpha/kernel/.gitignore b/arch/alpha/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/alpha/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/arc/boot/.gitignore b/arch/arc/boot/.gitignore deleted file mode 100644 index 5d65b54bf17a8..0000000000000 --- a/arch/arc/boot/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.dtb* diff --git a/arch/arc/kernel/.gitignore b/arch/arc/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/arc/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore deleted file mode 100644 index 7fe069148d46c..0000000000000 --- a/arch/arm/boot/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -Image -zImage -xipImage -ccImage -bootpImage -uImage -*.dtb diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore deleted file mode 100644 index 0714e0334e33c..0000000000000 --- a/arch/arm/boot/compressed/.gitignore +++ /dev/null @@ -1,21 +0,0 @@ -ashldi3.S -bswapsdi2.S -font.c -lib1funcs.S -hyp-stub.S -piggy.gzip -piggy.lzo -piggy.lzma -piggy.xzkern -piggy.lz4 -vmlinux -vmlinux.lds - -# borrowed libfdt files -fdt.c -fdt.h -fdt_ro.c -fdt_rw.c -fdt_wip.c -libfdt.h -libfdt_internal.h diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore deleted file mode 100644 index 6231d36b36352..0000000000000 --- a/arch/arm/crypto/.gitignore +++ /dev/null @@ -1 +0,0 @@ -aesbs-core.S diff --git a/arch/arm/kernel/.gitignore b/arch/arm/kernel/.gitignore deleted file mode 100644 
index c5f676c3c224b..0000000000000 --- a/arch/arm/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/arm64/boot/dts/gxl_p212_2g.dts b/arch/arm64/boot/dts/gxl_p212_2g.dts index 014fa5f5ca5c6..35c848d0c3a78 100644 --- a/arch/arm64/boot/dts/gxl_p212_2g.dts +++ b/arch/arm64/boot/dts/gxl_p212_2g.dts @@ -564,6 +564,80 @@ status = "okay"; }; };*/ + partitions: partitions{ + parts = <11>; + part-0 = <&logo>; + part-1 = <&recovery>; + part-2 = <&rsv>; + part-3 = <&tee>; + part-4 = <&crypt>; + part-5 = <&misc>; + part-6 = <&instaboot>; + part-7 = <&boot>; + part-8 = <&system>; + part-9 = <&cache>; + part-10 = <&data>; + + logo:logo{ + pname = "logo"; + size = <0x0 0x2000000>; + mask = <1>; + }; + recovery:recovery{ + pname = "recovery"; + size = <0x0 0x2000000>; + mask = <1>; + }; + rsv:rsv{ + pname = "rsv"; + size = <0x0 0x800000>; + mask = <1>; + }; + tee:tee{ + pname = "tee"; + size = <0x0 0x800000>; + mask = <1>; + }; + crypt:crypt{ + pname = "crypt"; + size = <0x0 0x2000000>; + mask = <1>; + }; + misc:misc{ + pname = "misc"; + size = <0x0 0x2000000>; + mask = <1>; + }; + instaboot:instaboot{ + pname = "instaboot"; + size = <0x0 0x20000000>; + mask = <1>; + }; + boot:boot + { + pname = "boot"; + size = <0x0 0x2000000>; + mask = <1>; + }; + system:system + { + pname = "system"; + size = <0x0 0x40000000>; + mask = <1>; + }; + cache:cache + { + pname = "cache"; + size = <0x0 0x20000000>; + mask = <2>; + }; + data:data + { + pname = "data"; + size = <0xffffffff 0xffffffff>; + mask = <4>; + }; + }; unifykey{ compatible = "amlogic, unifykey"; status = "ok"; diff --git a/arch/arm64/boot/dts/gxm_q200_2g.dts b/arch/arm64/boot/dts/gxm_q200_2g.dts index a184246c3012d..b44f94362b5ad 100644 --- a/arch/arm64/boot/dts/gxm_q200_2g.dts +++ b/arch/arm64/boot/dts/gxm_q200_2g.dts @@ -601,6 +601,80 @@ status = "okay"; }; };*/ + partitions: partitions{ + parts = <11>; + part-0 = <&logo>; + part-1 = <&recovery>; + part-2 = <&rsv>; + part-3 = <&tee>; + part-4 = 
<&crypt>; + part-5 = <&misc>; + part-6 = <&instaboot>; + part-7 = <&boot>; + part-8 = <&system>; + part-9 = <&cache>; + part-10 = <&data>; + + logo:logo{ + pname = "logo"; + size = <0x0 0x2000000>; + mask = <1>; + }; + recovery:recovery{ + pname = "recovery"; + size = <0x0 0x2000000>; + mask = <1>; + }; + rsv:rsv{ + pname = "rsv"; + size = <0x0 0x800000>; + mask = <1>; + }; + tee:tee{ + pname = "tee"; + size = <0x0 0x800000>; + mask = <1>; + }; + crypt:crypt{ + pname = "crypt"; + size = <0x0 0x2000000>; + mask = <1>; + }; + misc:misc{ + pname = "misc"; + size = <0x0 0x2000000>; + mask = <1>; + }; + instaboot:instaboot{ + pname = "instaboot"; + size = <0x0 0x20000000>; + mask = <1>; + }; + boot:boot + { + pname = "boot"; + size = <0x0 0x2000000>; + mask = <1>; + }; + system:system + { + pname = "system"; + size = <0x0 0x40000000>; + mask = <1>; + }; + cache:cache + { + pname = "cache"; + size = <0x0 0x20000000>; + mask = <2>; + }; + data:data + { + pname = "data"; + size = <0xffffffff 0xffffffff>; + mask = <4>; + }; + }; unifykey{ compatible = "amlogic, unifykey"; status = "ok"; diff --git a/arch/arm64/kernel/.gitignore b/arch/arm64/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/arm64/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/arm64/kernel/vdso/.gitignore b/arch/arm64/kernel/vdso/.gitignore deleted file mode 100644 index b8cc94e9698b6..0000000000000 --- a/arch/arm64/kernel/vdso/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -vdso.lds -vdso-offsets.h diff --git a/arch/avr32/boot/images/.gitignore b/arch/avr32/boot/images/.gitignore deleted file mode 100644 index 64ea9d0141d2c..0000000000000 --- a/arch/avr32/boot/images/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -uImage -uImage.srec -vmlinux.cso -sfdwarf.log diff --git a/arch/avr32/kernel/.gitignore b/arch/avr32/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/avr32/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ 
-vmlinux.lds diff --git a/arch/blackfin/boot/.gitignore b/arch/blackfin/boot/.gitignore deleted file mode 100644 index 1287a5487e7d6..0000000000000 --- a/arch/blackfin/boot/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -vmImage* -vmlinux* -uImage* diff --git a/arch/blackfin/kernel/.gitignore b/arch/blackfin/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/blackfin/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/cris/boot/.gitignore b/arch/cris/boot/.gitignore deleted file mode 100644 index 171a0853caf8c..0000000000000 --- a/arch/cris/boot/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -Image -zImage diff --git a/arch/ia64/kernel/.gitignore b/arch/ia64/kernel/.gitignore deleted file mode 100644 index 21cb0da5ded8d..0000000000000 --- a/arch/ia64/kernel/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -gate.lds -vmlinux.lds diff --git a/arch/m32r/kernel/.gitignore b/arch/m32r/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/m32r/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/m68k/kernel/.gitignore b/arch/m68k/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/m68k/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/metag/boot/.gitignore b/arch/metag/boot/.gitignore deleted file mode 100644 index 2d6c0c1608843..0000000000000 --- a/arch/metag/boot/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -vmlinux* -uImage* -ramdisk.* -*.dtb* diff --git a/arch/metag/kernel/.gitignore b/arch/metag/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/metag/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/microblaze/boot/.gitignore b/arch/microblaze/boot/.gitignore deleted file mode 100644 index bf04591860277..0000000000000 --- a/arch/microblaze/boot/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.dtb -linux.bin* -simpleImage.* diff --git 
a/arch/microblaze/kernel/.gitignore b/arch/microblaze/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/microblaze/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/mips/boot/.gitignore b/arch/mips/boot/.gitignore deleted file mode 100644 index a73d6e2c4f64f..0000000000000 --- a/arch/mips/boot/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -mkboot -elf2ecoff -vmlinux.* -zImage -zImage.tmp -calc_vmlinuz_load_addr -uImage diff --git a/arch/mips/cavium-octeon/.gitignore b/arch/mips/cavium-octeon/.gitignore deleted file mode 100644 index 39c968605ff63..0000000000000 --- a/arch/mips/cavium-octeon/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.dtb.S -*.dtb diff --git a/arch/mips/kernel/.gitignore b/arch/mips/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/mips/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/mn10300/boot/.gitignore b/arch/mn10300/boot/.gitignore deleted file mode 100644 index b6718de236934..0000000000000 --- a/arch/mn10300/boot/.gitignore +++ /dev/null @@ -1 +0,0 @@ -zImage diff --git a/arch/parisc/kernel/.gitignore b/arch/parisc/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/parisc/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore deleted file mode 100644 index d61c03525777c..0000000000000 --- a/arch/powerpc/boot/.gitignore +++ /dev/null @@ -1,44 +0,0 @@ -addnote -empty.c -hack-coff -inffast.c -inffast.h -inffixed.h -inflate.c -inflate.h -inftrees.c -inftrees.h -infutil.c -infutil.h -kernel-vmlinux.strip.c -kernel-vmlinux.strip.gz -mktree -uImage -cuImage.* -dtbImage.* -*.dtb -treeImage.* -zImage -zImage.initrd -zImage.bin.* -zImage.chrp -zImage.coff -zImage.epapr -zImage.holly -zImage.*lds -zImage.miboot -zImage.pmac -zImage.pseries -zconf.h -zlib.h -zutil.h -fdt.c -fdt.h -fdt_ro.c -fdt_rw.c -fdt_strerror.c 
-fdt_sw.c -fdt_wip.c -libfdt.h -libfdt_internal.h - diff --git a/arch/powerpc/kernel/.gitignore b/arch/powerpc/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/powerpc/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/powerpc/kernel/vdso32/.gitignore b/arch/powerpc/kernel/vdso32/.gitignore deleted file mode 100644 index fea5809857a51..0000000000000 --- a/arch/powerpc/kernel/vdso32/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -vdso32.lds -vdso32.so.dbg diff --git a/arch/powerpc/kernel/vdso64/.gitignore b/arch/powerpc/kernel/vdso64/.gitignore deleted file mode 100644 index 77a0b423642cc..0000000000000 --- a/arch/powerpc/kernel/vdso64/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -vdso64.lds -vdso64.so.dbg diff --git a/arch/powerpc/platforms/cell/spufs/.gitignore b/arch/powerpc/platforms/cell/spufs/.gitignore deleted file mode 100644 index a09ee8d84d6c6..0000000000000 --- a/arch/powerpc/platforms/cell/spufs/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -spu_save_dump.h -spu_restore_dump.h diff --git a/arch/s390/boot/.gitignore b/arch/s390/boot/.gitignore deleted file mode 100644 index 017d5912ad2d5..0000000000000 --- a/arch/s390/boot/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -image -bzImage diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore deleted file mode 100644 index ae06b9b4c02f7..0000000000000 --- a/arch/s390/boot/compressed/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -sizes.h -vmlinux -vmlinux.lds diff --git a/arch/s390/kernel/.gitignore b/arch/s390/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/s390/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/s390/kernel/vdso32/.gitignore b/arch/s390/kernel/vdso32/.gitignore deleted file mode 100644 index e45fba9d0ced3..0000000000000 --- a/arch/s390/kernel/vdso32/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vdso32.lds diff --git a/arch/s390/kernel/vdso64/.gitignore 
b/arch/s390/kernel/vdso64/.gitignore deleted file mode 100644 index 3fd18cf9fec2b..0000000000000 --- a/arch/s390/kernel/vdso64/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vdso64.lds diff --git a/arch/sh/boot/.gitignore b/arch/sh/boot/.gitignore deleted file mode 100644 index 541087d2029c8..0000000000000 --- a/arch/sh/boot/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -zImage -vmlinux* -uImage* diff --git a/arch/sh/boot/compressed/.gitignore b/arch/sh/boot/compressed/.gitignore deleted file mode 100644 index 2374a83d87b26..0000000000000 --- a/arch/sh/boot/compressed/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.bin.* diff --git a/arch/sh/boot/compressed/vmlinux.scr b/arch/sh/boot/compressed/vmlinux.scr deleted file mode 100644 index 862d748082369..0000000000000 --- a/arch/sh/boot/compressed/vmlinux.scr +++ /dev/null @@ -1,10 +0,0 @@ -SECTIONS -{ - .rodata..compressed : { - input_len = .; - LONG(input_data_end - input_data) input_data = .; - *(.data) - output_len = . - 4; - input_data_end = .; - } -} diff --git a/arch/sh/boot/romimage/vmlinux.scr b/arch/sh/boot/romimage/vmlinux.scr deleted file mode 100644 index 590394e2f5f2e..0000000000000 --- a/arch/sh/boot/romimage/vmlinux.scr +++ /dev/null @@ -1,8 +0,0 @@ -SECTIONS -{ - .text : { - zero_page_pos = .; - *(.data) - end_data = .; - } -} diff --git a/arch/sh/kernel/.gitignore b/arch/sh/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/sh/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/sh/kernel/vsyscall/.gitignore b/arch/sh/kernel/vsyscall/.gitignore deleted file mode 100644 index 40836ad9079cf..0000000000000 --- a/arch/sh/kernel/vsyscall/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vsyscall.lds diff --git a/arch/sparc/boot/.gitignore b/arch/sparc/boot/.gitignore deleted file mode 100644 index fc6f3986c76c4..0000000000000 --- a/arch/sparc/boot/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -btfix.S -btfixupprep -image -zImage -tftpboot.img -vmlinux.aout -piggyback - 
diff --git a/arch/sparc/kernel/.gitignore b/arch/sparc/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/sparc/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/arch/um/.gitignore b/arch/um/.gitignore deleted file mode 100644 index a73d3a1cc7461..0000000000000 --- a/arch/um/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -kernel/config.c -kernel/config.tmp -kernel/vmlinux.lds diff --git a/arch/unicore32/.gitignore b/arch/unicore32/.gitignore deleted file mode 100644 index 947e99c2a9572..0000000000000 --- a/arch/unicore32/.gitignore +++ /dev/null @@ -1,21 +0,0 @@ -# -# Generated include files -# -include/generated -# -# Generated ld script file -# -kernel/vmlinux.lds -# -# Generated images in boot -# -boot/Image -boot/zImage -boot/uImage -# -# Generated files in boot/compressed -# -boot/compressed/piggy.S -boot/compressed/piggy.gzip -boot/compressed/vmlinux -boot/compressed/vmlinux.lds diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore deleted file mode 100644 index 7cab8c08e6d1d..0000000000000 --- a/arch/x86/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -boot/compressed/vmlinux -tools/test_get_len -tools/insn_sanity - diff --git a/arch/x86/boot/.gitignore b/arch/x86/boot/.gitignore deleted file mode 100644 index e3cf9f682be55..0000000000000 --- a/arch/x86/boot/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -bootsect -bzImage -cpustr.h -mkcpustr -voffset.h -zoffset.h -setup -setup.bin -setup.elf diff --git a/arch/x86/boot/compressed/.gitignore b/arch/x86/boot/compressed/.gitignore deleted file mode 100644 index 4a46fab7162e3..0000000000000 --- a/arch/x86/boot/compressed/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -relocs -vmlinux.bin.all -vmlinux.relocs -vmlinux.lds -mkpiggy -piggy.S diff --git a/arch/x86/boot/tools/.gitignore b/arch/x86/boot/tools/.gitignore deleted file mode 100644 index 378eac25d3117..0000000000000 --- a/arch/x86/boot/tools/.gitignore +++ /dev/null @@ -1 +0,0 @@ -build diff --git 
a/arch/x86/kernel/.gitignore b/arch/x86/kernel/.gitignore deleted file mode 100644 index 08f4fd7314696..0000000000000 --- a/arch/x86/kernel/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -vsyscall.lds -vsyscall_32.lds -vmlinux.lds diff --git a/arch/x86/kernel/cpu/.gitignore b/arch/x86/kernel/cpu/.gitignore deleted file mode 100644 index 667df55a43995..0000000000000 --- a/arch/x86/kernel/cpu/.gitignore +++ /dev/null @@ -1 +0,0 @@ -capflags.c diff --git a/arch/x86/lib/.gitignore b/arch/x86/lib/.gitignore deleted file mode 100644 index 8df89f0a3fe62..0000000000000 --- a/arch/x86/lib/.gitignore +++ /dev/null @@ -1 +0,0 @@ -inat-tables.c diff --git a/arch/x86/realmode/rm/.gitignore b/arch/x86/realmode/rm/.gitignore deleted file mode 100644 index b6ed3a2555cb0..0000000000000 --- a/arch/x86/realmode/rm/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -pasyms.h -realmode.lds -realmode.relocs diff --git a/arch/x86/tools/.gitignore b/arch/x86/tools/.gitignore deleted file mode 100644 index be0ed065249b7..0000000000000 --- a/arch/x86/tools/.gitignore +++ /dev/null @@ -1 +0,0 @@ -relocs diff --git a/arch/x86/um/vdso/.gitignore b/arch/x86/um/vdso/.gitignore deleted file mode 100644 index 9cac6d0721998..0000000000000 --- a/arch/x86/um/vdso/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -vdso-syms.lds -vdso.lds diff --git a/arch/x86/vdso/.gitignore b/arch/x86/vdso/.gitignore deleted file mode 100644 index 3282874bc61dd..0000000000000 --- a/arch/x86/vdso/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -vdso.lds -vdso-syms.lds -vdsox32.lds -vdsox32-syms.lds -vdso32-syms.lds -vdso32-syscall-syms.lds -vdso32-sysenter-syms.lds -vdso32-int80-syms.lds diff --git a/arch/x86/vdso/vdso32/.gitignore b/arch/x86/vdso/vdso32/.gitignore deleted file mode 100644 index e45fba9d0ced3..0000000000000 --- a/arch/x86/vdso/vdso32/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vdso32.lds diff --git a/arch/xtensa/boot/.gitignore b/arch/xtensa/boot/.gitignore deleted file mode 100644 index be7655998b26c..0000000000000 --- 
a/arch/xtensa/boot/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -uImage -zImage.redboot -*.dtb diff --git a/arch/xtensa/boot/boot-elf/.gitignore b/arch/xtensa/boot/boot-elf/.gitignore deleted file mode 100644 index 5ff8fbb8561b9..0000000000000 --- a/arch/xtensa/boot/boot-elf/.gitignore +++ /dev/null @@ -1 +0,0 @@ -boot.lds diff --git a/arch/xtensa/boot/lib/.gitignore b/arch/xtensa/boot/lib/.gitignore deleted file mode 100644 index 1629a61677558..0000000000000 --- a/arch/xtensa/boot/lib/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -inffast.c -inflate.c -inftrees.c diff --git a/arch/xtensa/kernel/.gitignore b/arch/xtensa/kernel/.gitignore deleted file mode 100644 index c5f676c3c224b..0000000000000 --- a/arch/xtensa/kernel/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vmlinux.lds diff --git a/crypto/asymmetric_keys/.gitignore b/crypto/asymmetric_keys/.gitignore deleted file mode 100644 index ee328374dba8f..0000000000000 --- a/crypto/asymmetric_keys/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*-asn1.[ch] diff --git a/drivers/amlogic/Makefile b/drivers/amlogic/Makefile index 247e5b246a860..7f9d137636779 100644 --- a/drivers/amlogic/Makefile +++ b/drivers/amlogic/Makefile @@ -32,7 +32,7 @@ obj-$(CONFIG_AM_IRBLASTER) += irblaster/ obj-$(CONFIG_I2C_AML) += i2c/ -obj-$(CONFIG_AMLOGIC_PWM) += pwm/ +obj-$(CONFIG_AML_PWM) += pwm/ obj-$(CONFIG_AML_POWER_SUPPORT) += power/ diff --git a/drivers/amlogic/display/vout/vout_notify.c b/drivers/amlogic/display/vout/vout_notify.c index 739075caf90d1..4d476e3ab6f4e 100644 --- a/drivers/amlogic/display/vout/vout_notify.c +++ b/drivers/amlogic/display/vout/vout_notify.c @@ -347,6 +347,23 @@ enum vmode_e validate_vmode(char *name) } EXPORT_SYMBOL(validate_vmode); +/* +*interface export to client who want to shutdown. 
+*/ +int vout_shutdown(void) +{ + int ret = -1; + struct vout_server_s *p_server; + + list_for_each_entry(p_server, &vout_module.vout_server_list, list) { + if (p_server->op.vout_shutdown) + ret = p_server->op.vout_shutdown(); + } + + return ret; +} +EXPORT_SYMBOL(vout_shutdown); + /* *here we offer two functions to get and register vout module server *vout module server will set and store tvmode attributes for vout encoder diff --git a/drivers/amlogic/display/vout/vout_serve.c b/drivers/amlogic/display/vout/vout_serve.c index e3623d46956a4..e70657620b34e 100644 --- a/drivers/amlogic/display/vout/vout_serve.c +++ b/drivers/amlogic/display/vout/vout_serve.c @@ -726,6 +726,12 @@ static int meson_vout_remove(struct platform_device *pdev) return 0; } +static void meson_vout_shutdown(struct platform_device *pdev) +{ + vout_log_info("%s\n", __func__); + vout_shutdown(); +} + static const struct of_device_id meson_vout_dt_match[] = { { .compatible = "amlogic, meson-vout",}, { }, @@ -745,6 +751,7 @@ static struct platform_driver vout_driver = { .probe = meson_vout_probe, .remove = meson_vout_remove, + .shutdown = meson_vout_shutdown, #ifdef CONFIG_PM .suspend = meson_vout_suspend, .resume = meson_vout_resume, @@ -761,7 +768,7 @@ static struct platform_driver static int __init vout_init_module(void) { int ret = 0; - vout_log_info("%s\n", __func__); + /*vout_log_info("%s\n", __func__);*/ if (platform_driver_register(&vout_driver)) { vout_log_err("failed to register VOUT driver\n"); @@ -773,7 +780,7 @@ static int __init vout_init_module(void) static __exit void vout_exit_module(void) { - vout_log_info("%s\n", __func__); + /*vout_log_info("%s\n", __func__);*/ platform_driver_unregister(&vout_driver); } diff --git a/drivers/amlogic/efuse/efuse64.c b/drivers/amlogic/efuse/efuse64.c index 4951f6a5eaa62..4a885e9ab6032 100644 --- a/drivers/amlogic/efuse/efuse64.c +++ b/drivers/amlogic/efuse/efuse64.c @@ -381,7 +381,7 @@ ssize_t efuse_user_attr_show(char *name, char *buf) return 
ret; } -char *efuse_get_mac(char *addr) +int efuse_get_mac(char *addr) { char buf[6]; int ret; @@ -391,11 +391,13 @@ char *efuse_get_mac(char *addr) */ ret = efuse_user_attr_show("mac", buf); if (ret < 0) { - pr_err("hwmac: error to read MAC address, use default address\n"); - memcpy(buf, "\xc0\xff\xee\x00\x01\x9f", 6); + pr_err("%s: failed to read MAC\n", __func__); + } else { + printk("%s: %pM\n", __func__, buf); + memcpy(addr, buf, 6); } - return memcpy(addr, buf, 6); + return ret; } EXPORT_SYMBOL(efuse_get_mac); diff --git a/drivers/amlogic/hdmi/hdmi_tx_20/hdmi_tx_main.c b/drivers/amlogic/hdmi/hdmi_tx_20/hdmi_tx_main.c index 5db8f7b3ec1b0..c0bb9a6376244 100644 --- a/drivers/amlogic/hdmi/hdmi_tx_20/hdmi_tx_main.c +++ b/drivers/amlogic/hdmi/hdmi_tx_20/hdmi_tx_main.c @@ -2171,6 +2171,20 @@ void hdmi_print(int dbg_lvl, const char *fmt, ...) } } +static ssize_t store_output_rgb(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + if (strncmp(buf, "1", 1) == 0) + hdmitx_output_rgb(); + + hdmitx_set_display(&hdmitx_device, hdmitx_device.cur_VIC); + + return count; +} + +static DEVICE_ATTR(output_rgb, S_IWUSR | S_IWGRP, + NULL, store_output_rgb); + static DEVICE_ATTR(disp_mode, S_IWUSR | S_IRUGO | S_IWGRP, show_disp_mode, store_disp_mode); static DEVICE_ATTR(attr, S_IWUSR | S_IRUGO | S_IWGRP, show_attr, store_attr); @@ -3026,6 +3040,7 @@ static int amhdmitx_probe(struct platform_device *pdev) ret = device_create_file(dev, &dev_attr_support_3d); ret = device_create_file(dev, &dev_attr_dc_cap); ret = device_create_file(dev, &dev_attr_valid_mode); + ret = device_create_file(dev, &dev_attr_output_rgb); #ifdef CONFIG_AM_TV_OUTPUT vout_register_client(&hdmitx_notifier_nb_v); @@ -3206,6 +3221,7 @@ static int amhdmitx_remove(struct platform_device *pdev) device_remove_file(dev, &dev_attr_hdcp_pwr); device_remove_file(dev, &dev_attr_aud_output_chs); device_remove_file(dev, &dev_attr_div40); + device_remove_file(dev, &dev_attr_output_rgb); 
cdev_del(&hdmitx_device.cdev); @@ -3241,6 +3257,9 @@ static int amhdmitx_suspend(struct platform_device *pdev, hdmitx_device.HWOp.CntlMisc(&hdmitx_device, MISC_TMDS_PHY_OP, TMDS_PHY_DISABLE); #endif + if (hdmitx_device.HWOp.Cntl) + hdmitx_device.HWOp.CntlMisc(&hdmitx_device, + MISC_TMDS_PHY_OP, TMDS_PHY_DISABLE); return 0; } diff --git a/drivers/amlogic/hdmi/hdmi_tx_20/hdmi_tx_video.c b/drivers/amlogic/hdmi/hdmi_tx_20/hdmi_tx_video.c index 20e8940c246d0..938173197837d 100644 --- a/drivers/amlogic/hdmi/hdmi_tx_20/hdmi_tx_video.c +++ b/drivers/amlogic/hdmi/hdmi_tx_20/hdmi_tx_video.c @@ -33,7 +33,7 @@ #include #include -static unsigned char hdmi_output_rgb; +unsigned char hdmi_output_rgb; static void hdmitx_set_spd_info(struct hdmitx_dev *hdev); static void hdmi_set_vend_spec_infofram(struct hdmitx_dev *hdev, enum hdmi_vic VideoCode); @@ -647,7 +647,8 @@ int hdmitx_set_display(struct hdmitx_dev *hdev, enum hdmi_vic VideoCode) break; } if (param->color == COLORSPACE_RGB444) { - hdev->para->cs = hdev->cur_video_param->color; + hdev->para->cs = COLORSPACE_RGB444; + hdev->para->cd = COLORDEPTH_24B; pr_info("hdmitx: rx edid only support RGB format\n"); } diff --git a/drivers/amlogic/hdmi/hdmi_tx_20/hw/hdmi_tx_hw.c b/drivers/amlogic/hdmi/hdmi_tx_20/hw/hdmi_tx_hw.c index 34a743fb8f9bd..476063ffdb14f 100644 --- a/drivers/amlogic/hdmi/hdmi_tx_20/hw/hdmi_tx_hw.c +++ b/drivers/amlogic/hdmi/hdmi_tx_20/hw/hdmi_tx_hw.c @@ -78,6 +78,8 @@ static void hdmitx_set_avi_colorimetry(struct hdmi_format_para *para); unsigned char hdmi_pll_mode = 0; /* 1, use external clk as hdmi pll source */ +extern unsigned char hdmi_output_rgb; + /* HSYNC polarity: active high */ #define HSYNC_POLARITY 1 /* VSYNC polarity: active high */ @@ -3781,7 +3783,7 @@ static int hdmitx_hdmi_dvi_config(struct hdmitx_dev *hdev, hdmitx_set_reg_bits(HDMITX_DWC_FC_AVICONF0, 0, 0, 2); #else hdmitx_csc_config(TX_INPUT_COLOR_FORMAT, - TX_OUTPUT_COLOR_FORMAT, TX_COLOR_DEPTH); + COLORSPACE_RGB444, TX_COLOR_DEPTH); #endif /* 
set dvi flag */ @@ -3792,7 +3794,11 @@ static int hdmitx_hdmi_dvi_config(struct hdmitx_dev *hdev, hdmitx_wr_reg(HDMITX_DWC_MC_FLOWCTRL, 0x0); /* set ycc indicator */ - if (hdev->para->cs == COLORSPACE_YUV420) + if (hdmi_output_rgb) { + hdmitx_csc_config(TX_INPUT_COLOR_FORMAT, + COLORSPACE_RGB444, TX_COLOR_DEPTH); + hdmitx_set_reg_bits(HDMITX_DWC_FC_INVIDCONF, 0, 3, 1); + } else if (hdev->para->cs == COLORSPACE_YUV420) hdmitx_set_reg_bits(HDMITX_DWC_FC_AVICONF0, 3, 0, 2); else hdmitx_set_reg_bits(HDMITX_DWC_FC_AVICONF0, 2, 0, 2); diff --git a/drivers/amlogic/led/led_sys.c b/drivers/amlogic/led/led_sys.c index d12c8b7eb33fe..121b5253cf0ec 100644 --- a/drivers/amlogic/led/led_sys.c +++ b/drivers/amlogic/led/led_sys.c @@ -33,8 +33,8 @@ #include "led_sys.h" -#define AML_DEV_NAME "leds" -#define AML_LED_NAME "red" +#define AML_DEV_NAME "sysled" +#define AML_LED_NAME "led-sys" static void aml_sysled_output_setup(struct aml_sysled_dev *ldev, diff --git a/drivers/amlogic/pwm/Kconfig b/drivers/amlogic/pwm/Kconfig index 9286289a9d8c1..3ae2385385992 100644 --- a/drivers/amlogic/pwm/Kconfig +++ b/drivers/amlogic/pwm/Kconfig @@ -3,7 +3,7 @@ # -menuconfig AMLOGIC_PWM +menuconfig AML_PWM boolean "Amlogic PWM driver" select PWM select PWM_SYSFS @@ -11,9 +11,9 @@ menuconfig AMLOGIC_PWM help say y to enable Amlogic PWM driver. -if AMLOGIC_PWM -config PWM_MESON - bool "Amlogic GXBB | GXTVBB | TXL PWM driver" +if AML_PWM +config GX_PWM + bool "Amlogic GXBB & GXTVBB PWM driver" default n help say y to enable Amlogic PWM driver. 
diff --git a/drivers/amlogic/pwm/Makefile b/drivers/amlogic/pwm/Makefile index 7017e2197f8f0..bfa425cd84ba9 100644 --- a/drivers/amlogic/pwm/Makefile +++ b/drivers/amlogic/pwm/Makefile @@ -2,4 +2,4 @@ # Makefile for PWM # -obj-$(CONFIG_PWM_MESON) += pwm_meson.o pwm_meson_sysfs.o +obj-$(CONFIG_GX_PWM) += pwm-gx.o diff --git a/drivers/amlogic/pwm/pwm-gx.c b/drivers/amlogic/pwm/pwm-gx.c new file mode 100644 index 0000000000000..4618a7a7afaa6 --- /dev/null +++ b/drivers/amlogic/pwm/pwm-gx.c @@ -0,0 +1,703 @@ +/* + * drivers/amlogic/pwm/pwm-gx.c + * + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define REG_PWM_A 0x0 +#define REG_PWM_B 0x4 +#define REG_MISC_AB 0x8 +#define REG_DS_A_B 0xc + +#define REG_PWM_C 0x100 +#define REG_PWM_D 0x104 +#define REG_MISC_CD 0x108 +#define REG_DS_C_D 0x10c + +#define REG_PWM_E 0x170 +#define REG_PWM_F 0x174 +#define REG_MISC_EF 0x178 +#define REG_DS_E_F 0x17c + +#define REG_PWM_AO_A 0x0 +#define REG_PWM_AO_B 0x4 +#define REG_MISC_AO_AB 0x8 +#define REG_DS_AO_A_B 0xc + +#define FIN_FREQ (24 * 1000) +#define DUTY_MAX 1024 + +#define AML_PWM_NUM 8 +#undef pr_fmt +#define pr_fmt(fmt) "gx-pwm : " fmt + + +enum pwm_channel { + PWM_A = 0, + PWM_B, + PWM_C, + PWM_D, + PWM_E, + PWM_F, + PWM_AO_A, + PWM_AO_B, +}; + +static DEFINE_SPINLOCK(aml_pwm_lock); + +/*pwm att*/ +struct aml_pwm_channel { + unsigned int pwm_hi; + unsigned int pwm_lo; + unsigned int pwm_pre_div; + + unsigned int period_ns; + unsigned int duty_ns; + unsigned int pwm_freq; +}; + +/*pwm regiset att*/ +struct aml_pwm_variant { + u8 output_mask; +}; + +struct aml_pwm_chip { + struct pwm_chip chip; + void __iomem *base; + void __iomem *ao_base; + struct aml_pwm_variant variant; + u8 inverter_mask; + + unsigned int clk_mask; + struct clk *xtal_clk; + struct clk *vid_pll_clk; + struct clk *fclk_div4_clk; + struct clk *fclk_div3_clk; + +}; + +static void pwm_set_reg_bits(void __iomem *reg, + unsigned int mask, + const unsigned int val) +{ + unsigned int tmp, orig; + orig = readl(reg); + tmp = orig & ~mask; + tmp |= val & mask; + writel(tmp, reg); +} + +static inline void pwm_write_reg(void __iomem *reg, + const unsigned int val) +{ + unsigned int tmp, orig; + orig = readl(reg); + tmp = orig & ~(0xffffffff); + tmp |= val; + writel(tmp, reg); +}; + +static inline +struct aml_pwm_chip *to_aml_pwm_chip(struct pwm_chip *chip) +{ + return container_of(chip, struct aml_pwm_chip, chip); +} + +static 
+struct aml_pwm_channel *pwm_aml_calc(struct aml_pwm_chip *chip, + struct pwm_device *pwm, + unsigned int duty_ns, + unsigned int period_ns, + struct clk *clk) +{ + struct aml_pwm_channel *our_chan = pwm_get_chip_data(pwm); + unsigned int fout_freq = 0, pwm_pre_div = 0; + unsigned int i = 0; + unsigned long temp = 0; + unsigned long pwm_cnt = 0; + unsigned long rate = 0; + unsigned int pwm_freq; + unsigned long freq_div; + + if ((duty_ns < 0) || (duty_ns > period_ns)) { + dev_err(chip->chip.dev, "Not available duty error!\n"); + return NULL; + } + + if (!IS_ERR(clk)) + rate = clk_get_rate(clk); + + pwm_freq = NSEC_PER_SEC / period_ns; + + fout_freq = ((pwm_freq >= ((rate/1000) * 500)) ? + ((rate/1000) * 500) : pwm_freq); + for (i = 0; i < 0x7f; i++) { + pwm_pre_div = i; + freq_div = rate / (pwm_pre_div + 1); + if (freq_div < pwm_freq) + continue; + pwm_cnt = freq_div / pwm_freq; + if (pwm_cnt <= 0xffff) + break; + } + + our_chan->pwm_pre_div = pwm_pre_div; + if (duty_ns == 0) { + our_chan->pwm_hi = 0; + our_chan->pwm_lo = pwm_cnt; + return our_chan; + } else if (duty_ns == period_ns) { + our_chan->pwm_hi = pwm_cnt; + our_chan->pwm_lo = 0; + return our_chan; + } + + temp = (unsigned long)(pwm_cnt * duty_ns); + temp /= period_ns; + + our_chan->pwm_hi = (unsigned int)temp-1; + our_chan->pwm_lo = pwm_cnt - (unsigned int)temp-1; + our_chan->pwm_freq = pwm_freq; + + return our_chan; + +} + +static int pwm_aml_request(struct pwm_chip *chip, + struct pwm_device *pwm) +{ + struct aml_pwm_chip *our_chip = to_aml_pwm_chip(chip); + struct aml_pwm_channel *our_chan; + + if (!(our_chip->variant.output_mask & BIT(pwm->hwpwm))) { + dev_warn(chip->dev, + "tried to request PWM channel %d without output\n", + pwm->hwpwm); + return -EINVAL; + } + + our_chan = devm_kzalloc(chip->dev, sizeof(*our_chan), GFP_KERNEL); + if (!our_chan) + return -ENOMEM; + + pwm_set_chip_data(pwm, our_chan); + + return 0; +} + +static void pwm_aml_free(struct pwm_chip *chip, + struct pwm_device *pwm) +{ + 
devm_kfree(chip->dev, pwm_get_chip_data(pwm)); + pwm_set_chip_data(pwm, NULL); +} + +static int pwm_aml_enable(struct pwm_chip *chip, + struct pwm_device *pwm) +{ + struct aml_pwm_chip *our_chip = to_aml_pwm_chip(chip); + unsigned int id = pwm->hwpwm; + unsigned long flags; + + spin_lock_irqsave(&aml_pwm_lock, flags); + if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXBB) { + switch (id) { + case PWM_A: + pwm_set_reg_bits(our_chip->base + REG_MISC_AB, + (1 << 0), (1 << 0)); + break; + case PWM_B: + pwm_set_reg_bits(our_chip->base + REG_MISC_AB, + (1 << 1), (1 << 1)); + break; + case PWM_C: + pwm_set_reg_bits(our_chip->base + REG_MISC_CD, + (1 << 0), (1 << 0)); + break; + case PWM_D: + pwm_set_reg_bits(our_chip->base + REG_MISC_CD, + (1 << 1), (1 << 1)); + break; + case PWM_E: + pwm_set_reg_bits(our_chip->base + REG_MISC_EF, + (1 << 0), (1 << 0)); + break; + case PWM_F: + pwm_set_reg_bits(our_chip->base + REG_MISC_EF, + (1 << 1), (1 << 1)); + break; + case PWM_AO_A: + pwm_set_reg_bits(our_chip->ao_base + REG_MISC_AO_AB, + (1 << 0), (1 << 0)); + break; + case PWM_AO_B: + pwm_set_reg_bits(our_chip->ao_base + REG_MISC_AO_AB, + (1 << 1), (1 << 1)); + break; + default: + break; + } + } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXTVBB) { + switch (id) { + case PWM_A: + case PWM_B: + pwm_set_reg_bits(our_chip->base + REG_MISC_AB, + (0x3 << 0), (0x3 << 0)); + break; + case PWM_C: + case PWM_D: + pwm_set_reg_bits(our_chip->base + REG_MISC_CD, + (0x3 << 0), (0x3 << 0)); + break; + case PWM_E: + case PWM_F: + pwm_set_reg_bits(our_chip->base + REG_MISC_EF, + (0x3 << 0), (0x3 << 0)); + break; + case PWM_AO_A: + case PWM_AO_B: + pwm_set_reg_bits(our_chip->ao_base + REG_MISC_AO_AB, + (0x3 << 0), (0x3 << 0)); + break; + default: + break; + } + } + spin_unlock_irqrestore(&aml_pwm_lock, flags); + + return 0; +} + +static void pwm_aml_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct aml_pwm_chip *our_chip = to_aml_pwm_chip(chip); + unsigned int id = pwm->hwpwm; + 
unsigned long flags; + + spin_lock_irqsave(&aml_pwm_lock, flags); + switch (id) { + case PWM_A: + pwm_set_reg_bits(our_chip->base + REG_MISC_AB, + (1 << 0), (0 << 0)); + break; + case PWM_B: + pwm_set_reg_bits(our_chip->base + REG_MISC_AB, + (1 << 1), (0 << 1)); + break; + case PWM_C: + pwm_set_reg_bits(our_chip->base + REG_MISC_CD, + (1 << 0), (0 << 0)); + break; + case PWM_D: + pwm_set_reg_bits(our_chip->base + REG_MISC_CD, + (1 << 1), (0 << 1)); + break; + case PWM_E: + pwm_set_reg_bits(our_chip->base + REG_MISC_EF, + (1 << 0), (0 << 0)); + break; + case PWM_F: + pwm_set_reg_bits(our_chip->base + REG_MISC_EF, + (1 << 1), (0 << 1)); + break; + case PWM_AO_A: + pwm_set_reg_bits(our_chip->ao_base + REG_MISC_AO_AB, + (1 << 0), (0 << 0)); + break; + case PWM_AO_B: + pwm_set_reg_bits(our_chip->ao_base + REG_MISC_AO_AB, + (1 << 1), (0 << 1)); + break; + default: + break; + } + spin_unlock_irqrestore(&aml_pwm_lock, flags); + +} + +static int pwm_aml_clk(struct aml_pwm_chip *our_chip, + struct pwm_device *pwm, + unsigned int duty_ns, + unsigned int period_ns, + unsigned int offset) +{ + struct aml_pwm_channel *our_chan = pwm_get_chip_data(pwm); + struct clk *clk; + + switch ((our_chip->clk_mask >> offset)&0x3) { + case 0x0: + clk = our_chip->xtal_clk; + break; + case 0x1: + clk = our_chip->vid_pll_clk; + break; + case 0x2: + clk = our_chip->fclk_div4_clk; + break; + case 0x3: + clk = our_chip->fclk_div3_clk; + break; + default: + clk = our_chip->xtal_clk; + break; + } + + our_chan = pwm_aml_calc(our_chip, pwm, duty_ns, period_ns, clk); + if (NULL == our_chan) + return -EINVAL; + + return 0; +} + + +static int pwm_aml_config(struct pwm_chip *chip, + struct pwm_device *pwm, + int duty_ns, + int period_ns) +{ + struct aml_pwm_chip *our_chip = to_aml_pwm_chip(chip); + struct aml_pwm_channel *our_chan = pwm_get_chip_data(pwm); + unsigned int id = pwm->hwpwm; + unsigned int offset; + int ret; + + if ((~(our_chip->inverter_mask >> id) & 0x1)) + duty_ns = period_ns - duty_ns; + 
+ if (period_ns > NSEC_PER_SEC) + return -ERANGE; + + if (period_ns == our_chan->period_ns && duty_ns == our_chan->duty_ns) + return 0; + + offset = id * 2; + ret = pwm_aml_clk(our_chip, pwm, duty_ns, period_ns, offset); + if (ret) { + dev_err(chip->dev, "tried to calc pwm freq err\n"); + return -EINVAL; + } + + switch (id) { + case PWM_A: + pwm_set_reg_bits(our_chip->base + REG_MISC_AB, + (0x3 << 4), + (((our_chip->clk_mask)&0x3) << 4)); + pwm_set_reg_bits(our_chip->base + REG_MISC_AB, + (0x7f << 8)|(1 << 15), + (our_chan->pwm_pre_div << 8)|(1 << 15)); + pwm_write_reg(our_chip->base + REG_PWM_A, + (our_chan->pwm_hi << 16) | (our_chan->pwm_lo)); + break; + case PWM_B: + pwm_set_reg_bits(our_chip->base + REG_MISC_AB, + (0x3 << 6), + (((our_chip->clk_mask >> 2)&0x3) << 6)); + pwm_set_reg_bits(our_chip->base + REG_MISC_AB, + (0x7f << 16)|(1 << 23), + (our_chan->pwm_pre_div << 16)|(1 << 23)); + pwm_write_reg(our_chip->base + REG_PWM_B, + (our_chan->pwm_hi << 16) | (our_chan->pwm_lo)); + break; + case PWM_C: + pwm_set_reg_bits(our_chip->base + REG_MISC_CD, + (0x3 << 4), + (((our_chip->clk_mask >> 4)&0x3) << 4)); + pwm_set_reg_bits(our_chip->base + REG_MISC_CD, + (0x7f << 8)|(1 << 15), + (our_chan->pwm_pre_div << 8)|(1 << 15)); + pwm_write_reg(our_chip->base + REG_PWM_C, + (our_chan->pwm_hi << 16) | (our_chan->pwm_lo)); + break; + case PWM_D: + pwm_set_reg_bits(our_chip->base + REG_MISC_CD, + (0x3 << 6), + (((our_chip->clk_mask >> 6)&0x3) << 6)); + pwm_set_reg_bits(our_chip->base + REG_MISC_CD, + (0x7f << 16)|(1 << 23), + (our_chan->pwm_pre_div << 16)|(1 << 23)); + pwm_write_reg(our_chip->base + REG_PWM_D, + (our_chan->pwm_hi << 16) | (our_chan->pwm_lo)); + break; + case PWM_E: + pwm_set_reg_bits(our_chip->base + REG_MISC_EF, + (0x3 << 4), + (((our_chip->clk_mask >> 8)&0x3) << 4)); + pwm_set_reg_bits(our_chip->base + REG_MISC_EF, + (0x7f << 8)|(1 << 15), + (our_chan->pwm_pre_div << 8)|(1 << 15)); + pwm_write_reg(our_chip->base + REG_PWM_E, + (our_chan->pwm_hi << 16) | 
(our_chan->pwm_lo)); + break; + case PWM_F: + pwm_set_reg_bits(our_chip->base + REG_MISC_EF, + (0x3 << 6), + (((our_chip->clk_mask >> 10)&0x3) << 6)); + pwm_set_reg_bits(our_chip->base + REG_MISC_EF, + (0x7f << 16)|(1 << 23), + (our_chan->pwm_pre_div << 16)|(1 << 23)); + pwm_write_reg(our_chip->base + REG_PWM_E, + (our_chan->pwm_hi << 16) | (our_chan->pwm_lo)); + break; + case PWM_AO_A: + pwm_set_reg_bits(our_chip->ao_base + REG_MISC_AO_AB, + (0x3 << 4), + (((our_chip->clk_mask >> 12)&0x3) << 4)); + pwm_set_reg_bits(our_chip->ao_base + REG_MISC_AO_AB, + (0x7f << 8)|(1 << 15), + (our_chan->pwm_pre_div << 8)|(1 << 15)); + pwm_write_reg(our_chip->ao_base + REG_PWM_AO_A, + (our_chan->pwm_hi << 16) | (our_chan->pwm_lo)); + break; + case PWM_AO_B: + pwm_set_reg_bits(our_chip->ao_base + REG_MISC_AO_AB, + (0x3 << 6), + (((our_chip->clk_mask >> 14)&0x3) << 6)); + pwm_set_reg_bits(our_chip->ao_base + REG_MISC_AO_AB, + (0x7f << 16)|(1 << 23), + (our_chan->pwm_pre_div << 16)|(1 << 23)); + pwm_write_reg(our_chip->ao_base + REG_PWM_AO_B, + (our_chan->pwm_hi << 16) | (our_chan->pwm_lo)); + break; + default: + break; + } + + our_chan->period_ns = period_ns; + our_chan->duty_ns = duty_ns; + + return 0; +} + +static void pwm_aml_set_invert(struct pwm_chip *chip, struct pwm_device *pwm, + unsigned int channel, bool invert) +{ + struct aml_pwm_chip *our_chip = to_aml_pwm_chip(chip); + unsigned long flags; + struct aml_pwm_channel *our_chan = pwm_get_chip_data(pwm); + + spin_lock_irqsave(&aml_pwm_lock, flags); + if (invert) { + our_chip->inverter_mask |= BIT(channel); + } else { + our_chip->inverter_mask &= ~BIT(channel); + } + pwm_aml_config(chip, pwm, our_chan->duty_ns, + our_chan->period_ns); + + spin_unlock_irqrestore(&aml_pwm_lock, flags); +} + + +static int pwm_aml_set_polarity(struct pwm_chip *chip, + struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + bool invert = (polarity == PWM_POLARITY_NORMAL); + + /* Inverted means normal in the hardware. 
*/ + pwm_aml_set_invert(chip, pwm, pwm->hwpwm, invert); + + return 0; +} + +static const struct pwm_ops pwm_aml_ops = { + .request = pwm_aml_request, + .free = pwm_aml_free, + .enable = pwm_aml_enable, + .disable = pwm_aml_disable, + .config = pwm_aml_config, + .set_polarity = pwm_aml_set_polarity, + .owner = THIS_MODULE, +}; + +#ifdef CONFIG_OF +static const struct of_device_id aml_pwm_matches[] = { + { .compatible = "amlogic, gx-pwm", }, + {}, +}; + +static int pwm_aml_parse_dt(struct aml_pwm_chip *chip) +{ + struct device_node *np = chip->chip.dev->of_node; + const struct of_device_id *match; + int i = 0; + struct property *prop; + const __be32 *cur; + u32 val; + + match = of_match_node(aml_pwm_matches, np); + if (!match) + return -ENODEV; + + chip->base = of_iomap(chip->chip.dev->of_node, 0); + if (IS_ERR(chip->base)) + return PTR_ERR(chip->base); + chip->ao_base = of_iomap(chip->chip.dev->of_node, 1); + if (IS_ERR(chip->ao_base)) + return PTR_ERR(chip->ao_base); + + of_property_for_each_u32(np, "pwm-outputs", prop, cur, val) { + if (val >= AML_PWM_NUM) { + dev_err(chip->chip.dev, + "%s: invalid channel index in pwm-outputs property\n", + __func__); + continue; + } + chip->variant.output_mask |= BIT(val); + } + + chip->xtal_clk = clk_get(chip->chip.dev, "xtal"); + chip->vid_pll_clk = clk_get(chip->chip.dev, "vid_pll_clk"); + chip->fclk_div4_clk = clk_get(chip->chip.dev, "fclk_div4"); + chip->fclk_div3_clk = clk_get(chip->chip.dev, "fclk_div3"); + + of_property_for_each_u32(np, "clock-select", prop, cur, val) { + if (val >= AML_PWM_NUM) { + dev_err(chip->chip.dev, + "%s: invalid channel index in pwm-outputs property\n", + __func__); + continue; + } + chip->clk_mask |= val<<(2 * i); + i++; + } + + return 0; +} +#else +static int pwm_aml_parse_dt(struct aml_pwm_chip *chip) +{ + return -ENODEV; +} +#endif + +static int pwm_aml_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct aml_pwm_chip *chip; + int ret; + + chip = 
devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (chip == NULL) { + dev_err(dev, "pwm alloc MEMORY err!!\n"); + return -ENOMEM; + } + + chip->chip.dev = &pdev->dev; + chip->chip.ops = &pwm_aml_ops; + chip->chip.base = -1; + chip->chip.npwm = AML_PWM_NUM; + chip->inverter_mask = BIT(AML_PWM_NUM) - 1; + + if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { + ret = pwm_aml_parse_dt(chip); + if (ret) + return ret; + } else { + if (!pdev->dev.platform_data) { + dev_err(&pdev->dev, "no platform data specified\n"); + return -EINVAL; + } + memcpy(&chip->variant, pdev->dev.platform_data, + sizeof(chip->variant)); + } + + platform_set_drvdata(pdev, chip); + + ret = pwmchip_add(&chip->chip); + if (ret < 0) { + dev_err(dev, "failed to register PWM chip\n"); + return ret; + } + + return 0; +} + +static int pwm_aml_remove(struct platform_device *pdev) +{ + struct aml_pwm_chip *chip = platform_get_drvdata(pdev); + int ret; + + ret = pwmchip_remove(&chip->chip); + if (ret < 0) + return ret; + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int pwm_aml_suspend(struct device *dev) +{ + struct aml_pwm_chip *chip = dev_get_drvdata(dev); + unsigned int i; + +/* + * No one preserves these values during suspend so reset them. + * Otherwise driver leaves PWM unconfigured if same values are + * passed to pwm_config() next time. 
+*/ + for (i = 0; i < AML_PWM_NUM; ++i) { + struct pwm_device *pwm = &chip->chip.pwms[i]; + struct aml_pwm_channel *chan = pwm_get_chip_data(pwm); + if (!chan) + continue; + + chan->period_ns = 0; + chan->duty_ns = 0; + } + + return 0; +} + +static int pwm_aml_resume(struct device *dev) +{ + return 0; +} +#endif + +static const struct dev_pm_ops pwm_aml_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pwm_aml_suspend, pwm_aml_resume) +}; + +static struct platform_driver pwm_aml_driver = { + .driver = { + .name = "aml-pwm", + .owner = THIS_MODULE, + .pm = &pwm_aml_pm_ops, + .of_match_table = of_match_ptr(aml_pwm_matches), + }, + .probe = pwm_aml_probe, + .remove = pwm_aml_remove, +}; +module_platform_driver(pwm_aml_driver); + +MODULE_ALIAS("platform:meson-pwm"); +MODULE_LICENSE("GPL"); diff --git a/drivers/amlogic/usb/dwc_otg/310/dwc_otg_hcd_intr.c b/drivers/amlogic/usb/dwc_otg/310/dwc_otg_hcd_intr.c index c888d0aa474f4..27c06935918f0 100644 --- a/drivers/amlogic/usb/dwc_otg/310/dwc_otg_hcd_intr.c +++ b/drivers/amlogic/usb/dwc_otg/310/dwc_otg_hcd_intr.c @@ -1302,7 +1302,7 @@ static int32_t handle_hc_nak_intr(dwc_otg_hcd_t *hcd, hcd->ssplit_lock = 0; qtd->complete_split = 0; - if (qtd->error_count > 200) { + if (qtd->error_count > 20000) { DWC_ERROR("Can not read device info from hub.We take it error\n"); halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); } else { diff --git a/drivers/amlogic/usb/dwc_otg/310/dwc_otg_hcd_queue.c b/drivers/amlogic/usb/dwc_otg/310/dwc_otg_hcd_queue.c index 3dd3ba7cacb63..6678a4cacc1b3 100644 --- a/drivers/amlogic/usb/dwc_otg/310/dwc_otg_hcd_queue.c +++ b/drivers/amlogic/usb/dwc_otg/310/dwc_otg_hcd_queue.c @@ -42,6 +42,18 @@ #include "dwc_otg_hcd.h" #include "dwc_otg_regs.h" +int usb_timing_tweak = 0; +static int __init setup_usbtweak(char *s) { + usb_timing_tweak = 0; + + if (!strcmp(s, "true")) + usb_timing_tweak = 1; + + return 0; +} +__setup("usbmulticam=", setup_usbtweak); + + /** * Free each QTD in the QH's QTD-list then free the QH. 
QH should already be * removed from a list. QTD list should already be empty if called from URB @@ -366,14 +378,19 @@ static int check_periodic_bandwidth(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) * High speed mode. * Max periodic usecs is 80% x 125 usec = 100 usec. */ - - max_claimed_usecs = 100 - qh->usecs; + if(usb_timing_tweak) + max_claimed_usecs = 125 - qh->usecs; + else + max_claimed_usecs = 100 - qh->usecs; else /* * Full speed mode. * Max periodic usecs is 90% x 1000 usec = 900 usec. */ - max_claimed_usecs = 900 - qh->usecs; + if(usb_timing_tweak) + max_claimed_usecs = 1000 - qh->usecs; + else + max_claimed_usecs = 900 - qh->usecs; if (hcd->periodic_usecs > max_claimed_usecs) { DWC_INFO("%s: already claimed usecs %d, required usecs %d\n", __func__, hcd->periodic_usecs, qh->usecs); @@ -724,4 +741,4 @@ int dwc_otg_hcd_qtd_add(dwc_otg_qtd_t *qtd, return retval; } -#endif /* DWC_DEVICE_ONLY */ \ No newline at end of file +#endif /* DWC_DEVICE_ONLY */ diff --git a/drivers/amlogic/wifi/Kconfig b/drivers/amlogic/wifi/Kconfig index ceff556db0bba..40677940f7357 100644 --- a/drivers/amlogic/wifi/Kconfig +++ b/drivers/amlogic/wifi/Kconfig @@ -11,4 +11,12 @@ config BCMDHD_USE_STATIC_BUF boolean "broadcom wifi static buff support" depends on AM_WIFI default n + +config AM_WIFI_DUMMY + tristate "Amlogic SDIO WiFi turn on and rescan module" + depends on AM_WIFI + default m + +source "drivers/amlogic/wifi/bcmdhd/Kconfig" + endif # AM_WIFI diff --git a/drivers/amlogic/wifi/Makefile b/drivers/amlogic/wifi/Makefile index bac3e1fac370c..adc7720571949 100644 --- a/drivers/amlogic/wifi/Makefile +++ b/drivers/amlogic/wifi/Makefile @@ -1,2 +1,5 @@ obj-$(CONFIG_AM_WIFI) += wifi_dt.o obj-$(CONFIG_BCMDHD_USE_STATIC_BUF) += dhd_static_buf.o +obj-$(CONFIG_AM_WIFI_DUMMY) += wifi_dummy.o +obj-$(CONFIG_AM_WIFI_USB) += wifi_power.o +obj-$(CONFIG_BCMDHD) += bcmdhd/ diff --git a/drivers/net/wireless/bcmdhd/Kconfig b/drivers/amlogic/wifi/bcmdhd/Kconfig similarity index 93% rename from 
drivers/net/wireless/bcmdhd/Kconfig rename to drivers/amlogic/wifi/bcmdhd/Kconfig index 631f8c3fc7c72..bba8b4f59ae25 100644 --- a/drivers/net/wireless/bcmdhd/Kconfig +++ b/drivers/amlogic/wifi/bcmdhd/Kconfig @@ -7,14 +7,14 @@ config BCMDHD config BCMDHD_FW_PATH depends on BCMDHD string "Firmware path" - default "/etc/firmware/fw_bcmdhd.bin" + default "/system/etc/firmware/fw_bcmdhd.bin" ---help--- Path to the firmware file. config BCMDHD_NVRAM_PATH depends on BCMDHD string "NVRAM path" - default "/etc/firmware/nvram.txt" + default "/system/etc/firmware/nvram.txt" ---help--- Path to the calibration file. diff --git a/drivers/net/wireless/bcmdhd/Makefile b/drivers/amlogic/wifi/bcmdhd/Makefile similarity index 69% rename from drivers/net/wireless/bcmdhd/Makefile rename to drivers/amlogic/wifi/bcmdhd/Makefile index 6afa27472adc6..d8b76862ec18a 100644 --- a/drivers/net/wireless/bcmdhd/Makefile +++ b/drivers/amlogic/wifi/bcmdhd/Makefile @@ -2,10 +2,15 @@ # 1. WL_IFACE_COMB_NUM_CHANNELS must be added if Android version is 4.4 with Kernel version 3.0~3.4, # otherwise please remove it. 
+# if not confiure pci mode, we use sdio mode as default +ifeq ($(CONFIG_BCMDHD_PCIE),) +$(info bcm SDIO driver configured) CONFIG_BCMDHD_SDIO := y -#CONFIG_BCMDHD_PCIE := y +CONFIG_DHD_USE_STATIC_BUF :=y +endif +CONFIG_BCMDHD_PROPTXSTATUS := y -#export CONFIG_BCMDHD = m +export CONFIG_BCMDHD = m export CONFIG_BCMDHD_OOB = y DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER -DSDTEST \ @@ -14,7 +19,8 @@ DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER -DSDTEST \ -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DSUPPORT_PM2_ONLY \ -DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DPNO_SUPPORT -DDHDTCPACK_SUPPRESS \ -DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT -DRXFRAME_THREAD \ - -DSWTXGLOM \ + -DTSQ_MULTIPLIER \ + -DBCMSDIOH_TXGLOM_EXT -DWL_EXT_IAPSTA \ -DENABLE_INSMOD_NO_FW_LOAD \ -I$(src) -I$(src)/include @@ -22,19 +28,23 @@ DHDOFILES = aiutils.o siutils.o sbutils.o bcmutils.o bcmwifi_channels.o \ dhd_linux.o dhd_linux_platdev.o dhd_linux_sched.o dhd_pno.o \ dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o \ bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o \ - hnd_pktq.o hnd_pktpool.o dhd_config.o + hnd_pktq.o hnd_pktpool.o dhd_config.o wl_android_ext.o ifneq ($(CONFIG_BCMDHD_SDIO),) DHDCFLAGS += \ -DBCMSDIO -DMMC_SDIO_ABORT -DBCMLXSDMMC -DUSE_SDIOFIFO_IOVAR \ - -DBDC -DPROP_TXSTATUS -DDHD_USE_IDLECOUNT -DBCMSDIOH_TXGLOM \ - -DCUSTOM_SDIO_F2_BLKSIZE=128 + -DBDC -DDHD_USE_IDLECOUNT -DBCMSDIOH_TXGLOM \ + -DCUSTOM_SDIO_F2_BLKSIZE=256 + +DHDCFLAGS += -DDCUSTOM_DPC_PRIO_SETTING=-1 +DHDCFLAGS += -DCUSTOM_RXF_CPUCORE=2 +DHDCFLAGS += -DDHD_FIRSTREAD=256 -DMAX_HDR_READ=256 DHDOFILES += bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o \ dhd_sdio.o dhd_cdc.o dhd_wlfc.o ifeq ($(CONFIG_BCMDHD_OOB),y) -DHDCFLAGS += -DOOB_INTR_ONLY -DHW_OOB -DCUSTOMER_OOB +DHDCFLAGS += -DOOB_INTR_ONLY -DCUSTOMER_OOB -DHW_OOB ifeq ($(CONFIG_BCMDHD_DISABLE_WOWLAN),y) DHDCFLAGS += -DDISABLE_WOWLAN endif @@ -43,10 +53,20 @@ DHDCFLAGS += -DSDIO_ISR_THREAD endif endif +ifeq 
($(CONFIG_BCMDHD_PROPTXSTATUS),y) +ifneq ($(CONFIG_BCMDHD_SDIO),) +DHDCFLAGS += -DPROP_TXSTATUS +endif +ifneq ($(CONFIG_CFG80211),) +DHDCFLAGS += -DPROP_TXSTATUS_VSDB +endif +endif + ifneq ($(CONFIG_BCMDHD_PCIE),) DHDCFLAGS += \ - -DPCIE_FULL_DONGLE -DBCMPCIE -DSHOW_LOGTRACE -DDPCIE_TX_DEFERRAL \ - -DCUSTOM_DPC_PRIO_SETTING=-1 + -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1 +DHDCFLAGS += -DDHD_PCIE_BAR1_WIN_BASE_FIX=0x200000 +DHDCFLAGS += -DDHD_USE_MSI DHDOFILES += dhd_pcie.o dhd_pcie_linux.o pcie_core.o dhd_flowring.o \ dhd_msgbuf.o @@ -55,9 +75,10 @@ endif obj-$(CONFIG_BCMDHD) += dhd.o dhd-objs += $(DHDOFILES) -#ifeq ($(CONFIG_MACH_ODROID_4210),y) +#ifeq ($(CONFIG_MACH_PLATFORM),y) DHDOFILES += dhd_gpio.o -DHDCFLAGS += -DCUSTOMER_HW -DDHD_OF_SUPPORT -DCUSTOMER_HW_AMLOGIC +DHDCFLAGS += -DCUSTOMER_HW -DDHD_OF_SUPPORT +DHDCFLAGS += -DCUSTOMER_HW_AMLOGIC #DHDCFLAGS += -DBCMWAPI_WPI -DBCMWAPI_WAI #endif @@ -73,11 +94,11 @@ DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT -DENHANCED_STATIC_BUF endif ifneq ($(CONFIG_WIRELESS_EXT),) -DHDOFILES += wl_iw.o -DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW +DHDOFILES += wl_iw.o wl_escan.o +DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW -DWL_ESCAN endif ifneq ($(CONFIG_CFG80211),) -DHDOFILES += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o wl_cfg_btcoex.o +DHDOFILES += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o wl_cfg_btcoex.o wl_cfgvendor.o DHDOFILES += dhd_cfg80211.o dhd_cfg_vendor.o DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT -DWL_ENABLE_P2P_IF DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS @@ -89,7 +110,7 @@ DHDCFLAGS += -DWL_SUPPORT_AUTO_CHANNEL DHDCFLAGS += -DWL_SUPPORT_BACKPORTED_KPATCHES DHDCFLAGS += -DESCAN_RESULT_PATCH DHDCFLAGS += -DVSDB -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST -DHDCFLAGS += -DWLTDLS -DMIRACAST_AMPDU_SIZE=8 -DPROP_TXSTATUS_VSDB +DHDCFLAGS += -DWLTDLS -DMIRACAST_AMPDU_SIZE=8 endif EXTRA_CFLAGS = $(DHDCFLAGS) ifeq ($(CONFIG_BCMDHD),m) @@ -101,7 +122,7 @@ endif ARCH ?= arm64 CROSS_COMPILE 
?=aarch64-linux-gnu- -KDIR ?=/mnt/fileroot/jiamin.miao/amlogic_kernel/common +KDIR ?=../../../../../../common dhd: $(MAKE) -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules diff --git a/drivers/net/wireless/bcmdhd/Module.symvers b/drivers/amlogic/wifi/bcmdhd/Module.symvers similarity index 100% rename from drivers/net/wireless/bcmdhd/Module.symvers rename to drivers/amlogic/wifi/bcmdhd/Module.symvers diff --git a/drivers/net/wireless/bcmdhd/aiutils.c b/drivers/amlogic/wifi/bcmdhd/aiutils.c similarity index 81% rename from drivers/net/wireless/bcmdhd/aiutils.c rename to drivers/amlogic/wifi/bcmdhd/aiutils.c index 03079b072c202..493a2e08ab9c3 100644 --- a/drivers/net/wireless/bcmdhd/aiutils.c +++ b/drivers/amlogic/wifi/bcmdhd/aiutils.c @@ -2,9 +2,30 @@ * Misc utility routines for accessing chip-specific features * of the SiliconBackplane-based Broadcom chips. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: aiutils.c 467150 2014-04-02 17:30:43Z $ + * + * <> + * + * $Id: aiutils.c 607900 2015-12-22 13:38:53Z $ */ #include #include @@ -20,8 +41,10 @@ #define BCM47162_DMP() (0) #define BCM5357_DMP() (0) +#define BCM53573_DMP() (0) #define BCM4707_DMP() (0) #define PMU_DMP() (0) +#define GCI_DMP() (0) #define remap_coreid(sih, coreid) (coreid) #define remap_corerev(sih, corerev) (corerev) @@ -32,6 +55,7 @@ get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match) { uint32 ent; uint inv = 0, nom = 0; + uint32 size = 0; while (TRUE) { ent = R_REG(si_osh(sih), *eromptr); @@ -51,6 +75,13 @@ get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match) if ((ent & mask) == match) break; + /* escape condition related EROM size if it has invalid values */ + size += sizeof(*eromptr); + if (size >= ER_SZ_MAX) { + SI_ERROR(("Failed to find end of EROM marker\n")); + break; + } + nom++; } @@ -288,6 +319,8 @@ ai_scan(si_t *sih, void *regs, uint devid) } if (i == 0) cores_info->wrapba[idx] = addrl; + else if (i == 1) + cores_info->wrapba2[idx] = addrl; } /* And finally slave wrappers */ @@ -295,6 +328,12 @@ ai_scan(si_t *sih, void *regs, uint devid) uint fwp = (nsp == 1) ? 
0 : 1; asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh, &sizel, &sizeh); + + /* cache APB bridge wrapper address for set/clear timeout */ + if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) { + ASSERT(sii->num_br < SI_MAXBR); + sii->br_wrapba[sii->num_br++] = addrl; + } if (asd == 0) { SI_ERROR(("Missing descriptor for SW %d\n", i)); goto error; @@ -305,6 +344,8 @@ ai_scan(si_t *sih, void *regs, uint devid) } if ((nmw == 0) && (i == 0)) cores_info->wrapba[idx] = addrl; + else if ((nmw == 0) && (i == 1)) + cores_info->wrapba2[idx] = addrl; } @@ -316,7 +357,7 @@ ai_scan(si_t *sih, void *regs, uint devid) sii->numcores++; } - SI_ERROR(("Reached end of erom without finding END")); + SI_ERROR(("Reached end of erom without finding END\n")); error: sii->numcores = 0; @@ -329,12 +370,12 @@ ai_scan(si_t *sih, void *regs, uint devid) /* This function changes the logical "focus" to the indicated core. * Return the current core's virtual address. */ -void * -ai_setcoreidx(si_t *sih, uint coreidx) +static void * +_ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrap2) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; - uint32 addr, wrap; + uint32 addr, wrap, wrap2; void *regs; if (coreidx >= MIN(sii->numcores, SI_MAXCORES)) @@ -342,6 +383,7 @@ ai_setcoreidx(si_t *sih, uint coreidx) addr = cores_info->coresba[coreidx]; wrap = cores_info->wrapba[coreidx]; + wrap2 = cores_info->wrapba2[coreidx]; /* * If the user has provided an interrupt mask enabled function, @@ -362,7 +404,14 @@ ai_setcoreidx(si_t *sih, uint coreidx) cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE); ASSERT(GOODREGS(cores_info->wrappers[coreidx])); } - sii->curwrap = cores_info->wrappers[coreidx]; + if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) { + cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->wrappers2[coreidx])); + } + if (use_wrap2) + sii->curwrap = 
cores_info->wrappers2[coreidx]; + else + sii->curwrap = cores_info->wrappers[coreidx]; break; case PCI_BUS: @@ -370,6 +419,8 @@ ai_setcoreidx(si_t *sih, uint coreidx) OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr); regs = sii->curmap; /* point bar0 2nd 4KB window to the primary wrapper */ + if (use_wrap2) + wrap = wrap2; if (PCIE_GEN2(sii)) OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap); else @@ -380,7 +431,10 @@ ai_setcoreidx(si_t *sih, uint coreidx) case SPI_BUS: case SDIO_BUS: sii->curmap = regs = (void *)((uintptr)addr); - sii->curwrap = (void *)((uintptr)wrap); + if (use_wrap2) + sii->curwrap = (void *)((uintptr)wrap2); + else + sii->curwrap = (void *)((uintptr)wrap); break; #endif /* BCMSDIO */ @@ -397,6 +451,17 @@ ai_setcoreidx(si_t *sih, uint coreidx) return regs; } +void * +ai_setcoreidx(si_t *sih, uint coreidx) +{ + return _ai_setcoreidx(sih, coreidx, 0); +} + +void * +ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx) +{ + return _ai_setcoreidx(sih, coreidx, 1); +} void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size) @@ -552,13 +617,25 @@ ai_flag(si_t *sih) __FUNCTION__)); return sii->curidx; } - + if (BCM53573_DMP()) { + SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__)); + return sii->curidx; + } #ifdef REROUTE_OOBINT if (PMU_DMP()) { SI_ERROR(("%s: Attempting to read PMU DMP registers\n", __FUNCTION__)); return PMU_OOB_BIT; } +#else + if (PMU_DMP()) { + uint idx, flag; + idx = sii->curidx; + ai_setcoreidx(sih, SI_CC_IDX); + flag = ai_flag_alt(sih); + ai_setcoreidx(sih, idx); + return flag; + } #endif /* REROUTE_OOBINT */ ai = sii->curwrap; @@ -806,8 +883,10 @@ ai_corereg_addr(si_t *sih, uint coreidx, uint regoff) } } - if (!fast) - return 0; + if (!fast) { + ASSERT(sii->curidx == coreidx); + r = (uint32*) ((uchar*)sii->curmap + regoff); + } return (r); } @@ -856,22 +935,31 @@ ai_core_disable(si_t *sih, uint32 bits) * bits - core specific bits that are set during and after reset sequence * 
resetbits - core specific bits that are set only during reset sequence */ -void -ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +static void +_ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) { si_info_t *sii = SI_INFO(sih); aidmp_t *ai; volatile uint32 dummy; uint loop_counter = 10; +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: bits: 0x%x, resetbits: 0x%x\n", __FUNCTION__, bits, resetbits); +#endif ASSERT(GOODREGS(sii->curwrap)); ai = sii->curwrap; /* ensure there are no pending backplane operations */ SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: resetstatus: %p dummy: %x\n", __FUNCTION__, &ai->resetstatus, dummy); +#endif /* put core into reset state */ +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: resetctrl: %p\n", __FUNCTION__, &ai->resetctrl); +#endif W_REG(sii->osh, &ai->resetctrl, AIRC_RESET); OSL_DELAY(10); @@ -880,6 +968,9 @@ ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN)); dummy = R_REG(sii->osh, &ai->ioctrl); +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: ioctrl: %p dummy: 0x%x\n", __FUNCTION__, &ai->ioctrl, dummy); +#endif BCM_REFERENCE(dummy); /* ensure there are no pending backplane operations */ @@ -893,6 +984,10 @@ ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) /* take core out of reset */ W_REG(sii->osh, &ai->resetctrl, 0); +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: loop_counter: %d resetstatus: %p resetctrl: %p\n", + __FUNCTION__, loop_counter, &ai->resetstatus, &ai->resetctrl); +#endif /* ensure there are no pending backplane operations */ SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300); @@ -901,10 +996,29 @@ ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN)); dummy = R_REG(sii->osh, &ai->ioctrl); +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: ioctl: %p dummy: 0x%x\n", __FUNCTION__, &ai->ioctrl, dummy); +#endif BCM_REFERENCE(dummy); 
OSL_DELAY(1); } +void +ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint idx = sii->curidx; + + if (cores_info->wrapba2[idx] != 0) { + ai_setcoreidx_2ndwrap(sih, idx); + _ai_core_reset(sih, bits, resetbits); + ai_setcoreidx(sih, idx); + } + + _ai_core_reset(sih, bits, resetbits); +} + void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) { @@ -1094,4 +1208,77 @@ ai_dumpregs(si_t *sih, struct bcmstrbuf *b) R_REG(osh, &ai->itcr)); } } -#endif +#endif + + +void +ai_enable_backplane_timeouts(si_t *sih) +{ +#ifdef AXI_TIMEOUTS + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + int i; + + for (i = 0; i < sii->num_br; ++i) { + ai = (aidmp_t *) sii->br_wrapba[i]; + W_REG(sii->osh, &ai->errlogctrl, (1 << AIELC_TO_ENAB_SHIFT) | + ((AXI_TO_VAL << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK)); + } +#endif /* AXI_TIMEOUTS */ +} + +void +ai_clear_backplane_to(si_t *sih) +{ +#ifdef AXI_TIMEOUTS + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + int i; + uint32 errlogstatus; + + for (i = 0; i < sii->num_br; ++i) { + ai = (aidmp_t *) sii->br_wrapba[i]; + /* check for backplane timeout & clear backplane hang */ + errlogstatus = R_REG(sii->osh, &ai->errlogstatus); + + if ((errlogstatus & AIELS_TIMEOUT_MASK) != 0) { + /* set ErrDone to clear the condition */ + W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK); + + /* SPINWAIT on errlogstatus timeout status bits */ + while (R_REG(sii->osh, &ai->errlogstatus) & AIELS_TIMEOUT_MASK) + ; + + /* only reset APB Bridge on timeout (not slave error, or dec error) */ + switch (errlogstatus & AIELS_TIMEOUT_MASK) { + case 0x1: + printf("AXI slave error"); + break; + case 0x2: + /* reset APB Bridge */ + OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + /* sync write */ + (void)R_REG(sii->osh, &ai->resetctrl); + /* clear Reset bit */ + AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET)); + /* sync write */ + (void)R_REG(sii->osh, 
&ai->resetctrl); + printf("AXI timeout"); + break; + case 0x3: + printf("AXI decode error"); + break; + default: + ; /* should be impossible */ + } + printf("; APB Bridge %d\n", i); + printf("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x", + R_REG(sii->osh, &ai->errlogaddrlo), + R_REG(sii->osh, &ai->errlogaddrhi), + R_REG(sii->osh, &ai->errlogid), + R_REG(sii->osh, &ai->errlogflags)); + printf(", status 0x%08x\n", errlogstatus); + } + } +#endif /* AXI_TIMEOUTS */ +} diff --git a/drivers/amlogic/wifi/bcmdhd/bcm_app_utils.c b/drivers/amlogic/wifi/bcmdhd/bcm_app_utils.c new file mode 100644 index 0000000000000..d138849d65c61 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/bcm_app_utils.c @@ -0,0 +1,1012 @@ +/* + * Misc utility routines used by kernel or app-level. + * Contents are wifi-specific, used by any kernel or app-level + * software that might want wifi things as it grows. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcm_app_utils.c 547371 2015-04-08 12:51:39Z $ + */ + +#include + +#ifdef BCMDRIVER +#include +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) +#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c)) +#else /* BCMDRIVER */ +#include +#include +#include +#include +#ifndef ASSERT +#define ASSERT(exp) +#endif +#endif /* BCMDRIVER */ +#include + +#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL)) +#include /* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */ +#endif + +#include +#include +#include + +#ifndef BCMDRIVER +/* Take an array of measurments representing a single channel over time and return + a summary. Currently implemented as a simple average but could easily evolve + into more cpomplex alogrithms. 
+*/ +cca_congest_channel_req_t * +cca_per_chan_summary(cca_congest_channel_req_t *input, cca_congest_channel_req_t *avg, bool percent) +{ + int sec; + cca_congest_t totals; + + totals.duration = 0; + totals.congest_ibss = 0; + totals.congest_obss = 0; + totals.interference = 0; + avg->num_secs = 0; + + for (sec = 0; sec < input->num_secs; sec++) { + if (input->secs[sec].duration) { + totals.duration += input->secs[sec].duration; + totals.congest_ibss += input->secs[sec].congest_ibss; + totals.congest_obss += input->secs[sec].congest_obss; + totals.interference += input->secs[sec].interference; + avg->num_secs++; + } + } + avg->chanspec = input->chanspec; + + if (!avg->num_secs || !totals.duration) + return (avg); + + if (percent) { + avg->secs[0].duration = totals.duration / avg->num_secs; + avg->secs[0].congest_ibss = totals.congest_ibss * 100/totals.duration; + avg->secs[0].congest_obss = totals.congest_obss * 100/totals.duration; + avg->secs[0].interference = totals.interference * 100/totals.duration; + } else { + avg->secs[0].duration = totals.duration / avg->num_secs; + avg->secs[0].congest_ibss = totals.congest_ibss / avg->num_secs; + avg->secs[0].congest_obss = totals.congest_obss / avg->num_secs; + avg->secs[0].interference = totals.interference / avg->num_secs; + } + + return (avg); +} + +static void +cca_info(uint8 *bitmap, int num_bits, int *left, int *bit_pos) +{ + int i; + for (*left = 0, i = 0; i < num_bits; i++) { + if (isset(bitmap, i)) { + (*left)++; + *bit_pos = i; + } + } +} + +static uint8 +spec_to_chan(chanspec_t chspec) +{ + uint8 center_ch, edge, primary, sb; + + center_ch = CHSPEC_CHANNEL(chspec); + + if (CHSPEC_IS20(chspec)) { + return center_ch; + } else { + /* the lower edge of the wide channel is half the bw from + * the center channel. 
+ */ + if (CHSPEC_IS40(chspec)) { + edge = center_ch - CH_20MHZ_APART; + } else { + /* must be 80MHz (until we support more) */ + ASSERT(CHSPEC_IS80(chspec)); + edge = center_ch - CH_40MHZ_APART; + } + + /* find the channel number of the lowest 20MHz primary channel */ + primary = edge + CH_10MHZ_APART; + + /* select the actual subband */ + sb = (chspec & WL_CHANSPEC_CTL_SB_MASK) >> WL_CHANSPEC_CTL_SB_SHIFT; + primary = primary + sb * CH_20MHZ_APART; + + return primary; + } +} + +/* + Take an array of measumrements representing summaries of different channels. + Return a recomended channel. + Interference is evil, get rid of that first. + Then hunt for lowest Other bss traffic. + Don't forget that channels with low duration times may not have accurate readings. + For the moment, do not overwrite input array. +*/ +int +cca_analyze(cca_congest_channel_req_t *input[], int num_chans, uint flags, chanspec_t *answer) +{ + uint8 *bitmap = NULL; /* 38 Max channels needs 5 bytes = 40 */ + int i, left, winner, ret_val = 0; + uint32 min_obss = 1 << 30; + uint bitmap_sz; + + bitmap_sz = CEIL(num_chans, NBBY); + bitmap = (uint8 *)malloc(bitmap_sz); + if (bitmap == NULL) { + printf("unable to allocate memory\n"); + return BCME_NOMEM; + } + + memset(bitmap, 0, bitmap_sz); + /* Initially, all channels are up for consideration */ + for (i = 0; i < num_chans; i++) { + if (input[i]->chanspec) + setbit(bitmap, i); + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_TOO_FEW; + goto f_exit; + } + + /* Filter for 2.4 GHz Band */ + if (flags & CCA_FLAG_2G_ONLY) { + for (i = 0; i < num_chans; i++) { + if (!CHSPEC_IS2G(input[i]->chanspec)) + clrbit(bitmap, i); + } + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_BAND; + goto f_exit; + } + + /* Filter for 5 GHz Band */ + if (flags & CCA_FLAG_5G_ONLY) { + for (i = 0; i < num_chans; i++) { + if (!CHSPEC_IS5G(input[i]->chanspec)) + clrbit(bitmap, i); + } + } + cca_info(bitmap, 
num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_BAND; + goto f_exit; + } + + /* Filter for Duration */ + if (!(flags & CCA_FLAG_IGNORE_DURATION)) { + for (i = 0; i < num_chans; i++) { + if (input[i]->secs[0].duration < CCA_THRESH_MILLI) + clrbit(bitmap, i); + } + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_DURATION; + goto f_exit; + } + + /* Filter for 1 6 11 on 2.4 Band */ + if (flags & CCA_FLAGS_PREFER_1_6_11) { + int tmp_channel = spec_to_chan(input[i]->chanspec); + int is2g = CHSPEC_IS2G(input[i]->chanspec); + for (i = 0; i < num_chans; i++) { + if (is2g && tmp_channel != 1 && tmp_channel != 6 && tmp_channel != 11) + clrbit(bitmap, i); + } + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_PREF_CHAN; + goto f_exit; + } + + /* Toss high interference interference */ + if (!(flags & CCA_FLAG_IGNORE_INTERFER)) { + for (i = 0; i < num_chans; i++) { + if (input[i]->secs[0].interference > CCA_THRESH_INTERFERE) + clrbit(bitmap, i); + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_INTERFER; + goto f_exit; + } + } + + /* Now find lowest obss */ + winner = 0; + for (i = 0; i < num_chans; i++) { + if (isset(bitmap, i) && input[i]->secs[0].congest_obss < min_obss) { + winner = i; + min_obss = input[i]->secs[0].congest_obss; + } + } + *answer = input[winner]->chanspec; + f_exit: + free(bitmap); /* free the allocated memory for bitmap */ + return ret_val; +} +#endif /* !BCMDRIVER */ + +/* offset of cntmember by sizeof(uint32) from the first cnt variable, txframe. 
*/ +#define IDX_IN_WL_CNT_VER_6_T(cntmember) \ + ((OFFSETOF(wl_cnt_ver_6_t, cntmember) - OFFSETOF(wl_cnt_ver_6_t, txframe)) / sizeof(uint32)) + +#define IDX_IN_WL_CNT_VER_11_T(cntmember) \ + ((OFFSETOF(wl_cnt_ver_11_t, cntmember) - OFFSETOF(wl_cnt_ver_11_t, txframe)) \ + / sizeof(uint32)) + +/* Exclude version and length fields */ +#define NUM_OF_CNT_IN_WL_CNT_VER_6_T \ + ((sizeof(wl_cnt_ver_6_t) - 2 * sizeof(uint16)) / sizeof(uint32)) +/* Exclude macstat cnt variables. wl_cnt_ver_6_t only has 62 macstat cnt variables. */ +#define NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T \ + (NUM_OF_CNT_IN_WL_CNT_VER_6_T - (WL_CNT_MCST_VAR_NUM - 2)) + +/* Exclude version and length fields */ +#define NUM_OF_CNT_IN_WL_CNT_VER_11_T \ + ((sizeof(wl_cnt_ver_11_t) - 2 * sizeof(uint16)) / sizeof(uint32)) +/* Exclude 64 macstat cnt variables. */ +#define NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T \ + (NUM_OF_CNT_IN_WL_CNT_VER_11_T - WL_CNT_MCST_VAR_NUM) + +/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_wlc_t */ +static const uint8 wlcntver6t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T] = { + IDX_IN_WL_CNT_VER_6_T(txframe), + IDX_IN_WL_CNT_VER_6_T(txbyte), + IDX_IN_WL_CNT_VER_6_T(txretrans), + IDX_IN_WL_CNT_VER_6_T(txerror), + IDX_IN_WL_CNT_VER_6_T(txctl), + IDX_IN_WL_CNT_VER_6_T(txprshort), + IDX_IN_WL_CNT_VER_6_T(txserr), + IDX_IN_WL_CNT_VER_6_T(txnobuf), + IDX_IN_WL_CNT_VER_6_T(txnoassoc), + IDX_IN_WL_CNT_VER_6_T(txrunt), + IDX_IN_WL_CNT_VER_6_T(txchit), + IDX_IN_WL_CNT_VER_6_T(txcmiss), + IDX_IN_WL_CNT_VER_6_T(txuflo), + IDX_IN_WL_CNT_VER_6_T(txphyerr), + IDX_IN_WL_CNT_VER_6_T(txphycrs), + IDX_IN_WL_CNT_VER_6_T(rxframe), + IDX_IN_WL_CNT_VER_6_T(rxbyte), + IDX_IN_WL_CNT_VER_6_T(rxerror), + IDX_IN_WL_CNT_VER_6_T(rxctl), + IDX_IN_WL_CNT_VER_6_T(rxnobuf), + IDX_IN_WL_CNT_VER_6_T(rxnondata), + IDX_IN_WL_CNT_VER_6_T(rxbadds), + IDX_IN_WL_CNT_VER_6_T(rxbadcm), + IDX_IN_WL_CNT_VER_6_T(rxfragerr), + IDX_IN_WL_CNT_VER_6_T(rxrunt), + IDX_IN_WL_CNT_VER_6_T(rxgiant), + IDX_IN_WL_CNT_VER_6_T(rxnoscb), + 
IDX_IN_WL_CNT_VER_6_T(rxbadproto), + IDX_IN_WL_CNT_VER_6_T(rxbadsrcmac), + IDX_IN_WL_CNT_VER_6_T(rxbadda), + IDX_IN_WL_CNT_VER_6_T(rxfilter), + IDX_IN_WL_CNT_VER_6_T(rxoflo), + IDX_IN_WL_CNT_VER_6_T(rxuflo), + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 1, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 2, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 3, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 4, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 5, + IDX_IN_WL_CNT_VER_6_T(d11cnt_txrts_off), + IDX_IN_WL_CNT_VER_6_T(d11cnt_rxcrc_off), + IDX_IN_WL_CNT_VER_6_T(d11cnt_txnocts_off), + IDX_IN_WL_CNT_VER_6_T(dmade), + IDX_IN_WL_CNT_VER_6_T(dmada), + IDX_IN_WL_CNT_VER_6_T(dmape), + IDX_IN_WL_CNT_VER_6_T(reset), + IDX_IN_WL_CNT_VER_6_T(tbtt), + IDX_IN_WL_CNT_VER_6_T(txdmawar), + IDX_IN_WL_CNT_VER_6_T(pkt_callback_reg_fail), + IDX_IN_WL_CNT_VER_6_T(txfrag), + IDX_IN_WL_CNT_VER_6_T(txmulti), + IDX_IN_WL_CNT_VER_6_T(txfail), + IDX_IN_WL_CNT_VER_6_T(txretry), + IDX_IN_WL_CNT_VER_6_T(txretrie), + IDX_IN_WL_CNT_VER_6_T(rxdup), + IDX_IN_WL_CNT_VER_6_T(txrts), + IDX_IN_WL_CNT_VER_6_T(txnocts), + IDX_IN_WL_CNT_VER_6_T(txnoack), + IDX_IN_WL_CNT_VER_6_T(rxfrag), + IDX_IN_WL_CNT_VER_6_T(rxmulti), + IDX_IN_WL_CNT_VER_6_T(rxcrc), + IDX_IN_WL_CNT_VER_6_T(txfrmsnt), + IDX_IN_WL_CNT_VER_6_T(rxundec), + IDX_IN_WL_CNT_VER_6_T(tkipmicfaill), + IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr), + IDX_IN_WL_CNT_VER_6_T(tkipreplay), + IDX_IN_WL_CNT_VER_6_T(ccmpfmterr), + IDX_IN_WL_CNT_VER_6_T(ccmpreplay), + IDX_IN_WL_CNT_VER_6_T(ccmpundec), + IDX_IN_WL_CNT_VER_6_T(fourwayfail), + IDX_IN_WL_CNT_VER_6_T(wepundec), + IDX_IN_WL_CNT_VER_6_T(wepicverr), + IDX_IN_WL_CNT_VER_6_T(decsuccess), + IDX_IN_WL_CNT_VER_6_T(tkipicverr), + IDX_IN_WL_CNT_VER_6_T(wepexcluded), + IDX_IN_WL_CNT_VER_6_T(txchanrej), + IDX_IN_WL_CNT_VER_6_T(psmwds), + IDX_IN_WL_CNT_VER_6_T(phywatchdog), + IDX_IN_WL_CNT_VER_6_T(prq_entries_handled), + IDX_IN_WL_CNT_VER_6_T(prq_undirected_entries), + IDX_IN_WL_CNT_VER_6_T(prq_bad_entries), + IDX_IN_WL_CNT_VER_6_T(atim_suppress_count), + 
IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready), + IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready_done), + IDX_IN_WL_CNT_VER_6_T(late_tbtt_dpc), + IDX_IN_WL_CNT_VER_6_T(rx1mbps), + IDX_IN_WL_CNT_VER_6_T(rx2mbps), + IDX_IN_WL_CNT_VER_6_T(rx5mbps5), + IDX_IN_WL_CNT_VER_6_T(rx6mbps), + IDX_IN_WL_CNT_VER_6_T(rx9mbps), + IDX_IN_WL_CNT_VER_6_T(rx11mbps), + IDX_IN_WL_CNT_VER_6_T(rx12mbps), + IDX_IN_WL_CNT_VER_6_T(rx18mbps), + IDX_IN_WL_CNT_VER_6_T(rx24mbps), + IDX_IN_WL_CNT_VER_6_T(rx36mbps), + IDX_IN_WL_CNT_VER_6_T(rx48mbps), + IDX_IN_WL_CNT_VER_6_T(rx54mbps), + IDX_IN_WL_CNT_VER_6_T(rx108mbps), + IDX_IN_WL_CNT_VER_6_T(rx162mbps), + IDX_IN_WL_CNT_VER_6_T(rx216mbps), + IDX_IN_WL_CNT_VER_6_T(rx270mbps), + IDX_IN_WL_CNT_VER_6_T(rx324mbps), + IDX_IN_WL_CNT_VER_6_T(rx378mbps), + IDX_IN_WL_CNT_VER_6_T(rx432mbps), + IDX_IN_WL_CNT_VER_6_T(rx486mbps), + IDX_IN_WL_CNT_VER_6_T(rx540mbps), + IDX_IN_WL_CNT_VER_6_T(rfdisable), + IDX_IN_WL_CNT_VER_6_T(txexptime), + IDX_IN_WL_CNT_VER_6_T(txmpdu_sgi), + IDX_IN_WL_CNT_VER_6_T(rxmpdu_sgi), + IDX_IN_WL_CNT_VER_6_T(txmpdu_stbc), + IDX_IN_WL_CNT_VER_6_T(rxmpdu_stbc), + IDX_IN_WL_CNT_VER_6_T(rxundec_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipmicfaill_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipreplay_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpfmterr_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpreplay_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpundec_mcst), + IDX_IN_WL_CNT_VER_6_T(fourwayfail_mcst), + IDX_IN_WL_CNT_VER_6_T(wepundec_mcst), + IDX_IN_WL_CNT_VER_6_T(wepicverr_mcst), + IDX_IN_WL_CNT_VER_6_T(decsuccess_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipicverr_mcst), + IDX_IN_WL_CNT_VER_6_T(wepexcluded_mcst) +}; + +/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_wlc_t */ +static const uint8 wlcntver11t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T] = { + IDX_IN_WL_CNT_VER_11_T(txframe), + IDX_IN_WL_CNT_VER_11_T(txbyte), + IDX_IN_WL_CNT_VER_11_T(txretrans), + IDX_IN_WL_CNT_VER_11_T(txerror), + IDX_IN_WL_CNT_VER_11_T(txctl), + 
IDX_IN_WL_CNT_VER_11_T(txprshort), + IDX_IN_WL_CNT_VER_11_T(txserr), + IDX_IN_WL_CNT_VER_11_T(txnobuf), + IDX_IN_WL_CNT_VER_11_T(txnoassoc), + IDX_IN_WL_CNT_VER_11_T(txrunt), + IDX_IN_WL_CNT_VER_11_T(txchit), + IDX_IN_WL_CNT_VER_11_T(txcmiss), + IDX_IN_WL_CNT_VER_11_T(txuflo), + IDX_IN_WL_CNT_VER_11_T(txphyerr), + IDX_IN_WL_CNT_VER_11_T(txphycrs), + IDX_IN_WL_CNT_VER_11_T(rxframe), + IDX_IN_WL_CNT_VER_11_T(rxbyte), + IDX_IN_WL_CNT_VER_11_T(rxerror), + IDX_IN_WL_CNT_VER_11_T(rxctl), + IDX_IN_WL_CNT_VER_11_T(rxnobuf), + IDX_IN_WL_CNT_VER_11_T(rxnondata), + IDX_IN_WL_CNT_VER_11_T(rxbadds), + IDX_IN_WL_CNT_VER_11_T(rxbadcm), + IDX_IN_WL_CNT_VER_11_T(rxfragerr), + IDX_IN_WL_CNT_VER_11_T(rxrunt), + IDX_IN_WL_CNT_VER_11_T(rxgiant), + IDX_IN_WL_CNT_VER_11_T(rxnoscb), + IDX_IN_WL_CNT_VER_11_T(rxbadproto), + IDX_IN_WL_CNT_VER_11_T(rxbadsrcmac), + IDX_IN_WL_CNT_VER_11_T(rxbadda), + IDX_IN_WL_CNT_VER_11_T(rxfilter), + IDX_IN_WL_CNT_VER_11_T(rxoflo), + IDX_IN_WL_CNT_VER_11_T(rxuflo), + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 1, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 2, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 3, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 4, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 5, + IDX_IN_WL_CNT_VER_11_T(d11cnt_txrts_off), + IDX_IN_WL_CNT_VER_11_T(d11cnt_rxcrc_off), + IDX_IN_WL_CNT_VER_11_T(d11cnt_txnocts_off), + IDX_IN_WL_CNT_VER_11_T(dmade), + IDX_IN_WL_CNT_VER_11_T(dmada), + IDX_IN_WL_CNT_VER_11_T(dmape), + IDX_IN_WL_CNT_VER_11_T(reset), + IDX_IN_WL_CNT_VER_11_T(tbtt), + IDX_IN_WL_CNT_VER_11_T(txdmawar), + IDX_IN_WL_CNT_VER_11_T(pkt_callback_reg_fail), + IDX_IN_WL_CNT_VER_11_T(txfrag), + IDX_IN_WL_CNT_VER_11_T(txmulti), + IDX_IN_WL_CNT_VER_11_T(txfail), + IDX_IN_WL_CNT_VER_11_T(txretry), + IDX_IN_WL_CNT_VER_11_T(txretrie), + IDX_IN_WL_CNT_VER_11_T(rxdup), + IDX_IN_WL_CNT_VER_11_T(txrts), + IDX_IN_WL_CNT_VER_11_T(txnocts), + IDX_IN_WL_CNT_VER_11_T(txnoack), + IDX_IN_WL_CNT_VER_11_T(rxfrag), + IDX_IN_WL_CNT_VER_11_T(rxmulti), + IDX_IN_WL_CNT_VER_11_T(rxcrc), + 
IDX_IN_WL_CNT_VER_11_T(txfrmsnt), + IDX_IN_WL_CNT_VER_11_T(rxundec), + IDX_IN_WL_CNT_VER_11_T(tkipmicfaill), + IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr), + IDX_IN_WL_CNT_VER_11_T(tkipreplay), + IDX_IN_WL_CNT_VER_11_T(ccmpfmterr), + IDX_IN_WL_CNT_VER_11_T(ccmpreplay), + IDX_IN_WL_CNT_VER_11_T(ccmpundec), + IDX_IN_WL_CNT_VER_11_T(fourwayfail), + IDX_IN_WL_CNT_VER_11_T(wepundec), + IDX_IN_WL_CNT_VER_11_T(wepicverr), + IDX_IN_WL_CNT_VER_11_T(decsuccess), + IDX_IN_WL_CNT_VER_11_T(tkipicverr), + IDX_IN_WL_CNT_VER_11_T(wepexcluded), + IDX_IN_WL_CNT_VER_11_T(txchanrej), + IDX_IN_WL_CNT_VER_11_T(psmwds), + IDX_IN_WL_CNT_VER_11_T(phywatchdog), + IDX_IN_WL_CNT_VER_11_T(prq_entries_handled), + IDX_IN_WL_CNT_VER_11_T(prq_undirected_entries), + IDX_IN_WL_CNT_VER_11_T(prq_bad_entries), + IDX_IN_WL_CNT_VER_11_T(atim_suppress_count), + IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready), + IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready_done), + IDX_IN_WL_CNT_VER_11_T(late_tbtt_dpc), + IDX_IN_WL_CNT_VER_11_T(rx1mbps), + IDX_IN_WL_CNT_VER_11_T(rx2mbps), + IDX_IN_WL_CNT_VER_11_T(rx5mbps5), + IDX_IN_WL_CNT_VER_11_T(rx6mbps), + IDX_IN_WL_CNT_VER_11_T(rx9mbps), + IDX_IN_WL_CNT_VER_11_T(rx11mbps), + IDX_IN_WL_CNT_VER_11_T(rx12mbps), + IDX_IN_WL_CNT_VER_11_T(rx18mbps), + IDX_IN_WL_CNT_VER_11_T(rx24mbps), + IDX_IN_WL_CNT_VER_11_T(rx36mbps), + IDX_IN_WL_CNT_VER_11_T(rx48mbps), + IDX_IN_WL_CNT_VER_11_T(rx54mbps), + IDX_IN_WL_CNT_VER_11_T(rx108mbps), + IDX_IN_WL_CNT_VER_11_T(rx162mbps), + IDX_IN_WL_CNT_VER_11_T(rx216mbps), + IDX_IN_WL_CNT_VER_11_T(rx270mbps), + IDX_IN_WL_CNT_VER_11_T(rx324mbps), + IDX_IN_WL_CNT_VER_11_T(rx378mbps), + IDX_IN_WL_CNT_VER_11_T(rx432mbps), + IDX_IN_WL_CNT_VER_11_T(rx486mbps), + IDX_IN_WL_CNT_VER_11_T(rx540mbps), + IDX_IN_WL_CNT_VER_11_T(rfdisable), + IDX_IN_WL_CNT_VER_11_T(txexptime), + IDX_IN_WL_CNT_VER_11_T(txmpdu_sgi), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_sgi), + IDX_IN_WL_CNT_VER_11_T(txmpdu_stbc), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_stbc), + IDX_IN_WL_CNT_VER_11_T(rxundec_mcst), + 
IDX_IN_WL_CNT_VER_11_T(tkipmicfaill_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipreplay_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpfmterr_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpreplay_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpundec_mcst), + IDX_IN_WL_CNT_VER_11_T(fourwayfail_mcst), + IDX_IN_WL_CNT_VER_11_T(wepundec_mcst), + IDX_IN_WL_CNT_VER_11_T(wepicverr_mcst), + IDX_IN_WL_CNT_VER_11_T(decsuccess_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipicverr_mcst), + IDX_IN_WL_CNT_VER_11_T(wepexcluded_mcst), + IDX_IN_WL_CNT_VER_11_T(dma_hang), + IDX_IN_WL_CNT_VER_11_T(reinit), + IDX_IN_WL_CNT_VER_11_T(pstatxucast), + IDX_IN_WL_CNT_VER_11_T(pstatxnoassoc), + IDX_IN_WL_CNT_VER_11_T(pstarxucast), + IDX_IN_WL_CNT_VER_11_T(pstarxbcmc), + IDX_IN_WL_CNT_VER_11_T(pstatxbcmc), + IDX_IN_WL_CNT_VER_11_T(cso_passthrough), + IDX_IN_WL_CNT_VER_11_T(cso_normal), + IDX_IN_WL_CNT_VER_11_T(chained), + IDX_IN_WL_CNT_VER_11_T(chainedsz1), + IDX_IN_WL_CNT_VER_11_T(unchained), + IDX_IN_WL_CNT_VER_11_T(maxchainsz), + IDX_IN_WL_CNT_VER_11_T(currchainsz), + IDX_IN_WL_CNT_VER_11_T(pciereset), + IDX_IN_WL_CNT_VER_11_T(cfgrestore), + IDX_IN_WL_CNT_VER_11_T(reinitreason), + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 1, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 2, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 3, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 4, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 5, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 6, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 7, + IDX_IN_WL_CNT_VER_11_T(rxrtry), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_mu), + IDX_IN_WL_CNT_VER_11_T(txbar), + IDX_IN_WL_CNT_VER_11_T(rxbar), + IDX_IN_WL_CNT_VER_11_T(txpspoll), + IDX_IN_WL_CNT_VER_11_T(rxpspoll), + IDX_IN_WL_CNT_VER_11_T(txnull), + IDX_IN_WL_CNT_VER_11_T(rxnull), + IDX_IN_WL_CNT_VER_11_T(txqosnull), + IDX_IN_WL_CNT_VER_11_T(rxqosnull), + IDX_IN_WL_CNT_VER_11_T(txassocreq), + IDX_IN_WL_CNT_VER_11_T(rxassocreq), + IDX_IN_WL_CNT_VER_11_T(txreassocreq), + IDX_IN_WL_CNT_VER_11_T(rxreassocreq), + 
IDX_IN_WL_CNT_VER_11_T(txdisassoc), + IDX_IN_WL_CNT_VER_11_T(rxdisassoc), + IDX_IN_WL_CNT_VER_11_T(txassocrsp), + IDX_IN_WL_CNT_VER_11_T(rxassocrsp), + IDX_IN_WL_CNT_VER_11_T(txreassocrsp), + IDX_IN_WL_CNT_VER_11_T(rxreassocrsp), + IDX_IN_WL_CNT_VER_11_T(txauth), + IDX_IN_WL_CNT_VER_11_T(rxauth), + IDX_IN_WL_CNT_VER_11_T(txdeauth), + IDX_IN_WL_CNT_VER_11_T(rxdeauth), + IDX_IN_WL_CNT_VER_11_T(txprobereq), + IDX_IN_WL_CNT_VER_11_T(rxprobereq), + IDX_IN_WL_CNT_VER_11_T(txprobersp), + IDX_IN_WL_CNT_VER_11_T(rxprobersp), + IDX_IN_WL_CNT_VER_11_T(txaction), + IDX_IN_WL_CNT_VER_11_T(rxaction) +}; + +/* Index conversion table from wl_cnt_ver_11_t to + * either wl_cnt_ge40mcst_v1_t or wl_cnt_lt40mcst_v1_t + */ +static const uint8 wlcntver11t_to_wlcntXX40mcstv1t[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_11_T(txallfrm), + IDX_IN_WL_CNT_VER_11_T(txrtsfrm), + IDX_IN_WL_CNT_VER_11_T(txctsfrm), + IDX_IN_WL_CNT_VER_11_T(txackfrm), + IDX_IN_WL_CNT_VER_11_T(txdnlfrm), + IDX_IN_WL_CNT_VER_11_T(txbcnfrm), + IDX_IN_WL_CNT_VER_11_T(txfunfl), + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_11_T(txfbw), + IDX_IN_WL_CNT_VER_11_T(txmpdu), + IDX_IN_WL_CNT_VER_11_T(txtplunfl), + IDX_IN_WL_CNT_VER_11_T(txphyerror), + IDX_IN_WL_CNT_VER_11_T(pktengrxducast), + IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_11_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_11_T(rxbadfcs), + IDX_IN_WL_CNT_VER_11_T(rxbadplcp), + IDX_IN_WL_CNT_VER_11_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxstrt), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_11_T(rxrtsucast), + IDX_IN_WL_CNT_VER_11_T(rxctsucast), + IDX_IN_WL_CNT_VER_11_T(rxackucast), + 
IDX_IN_WL_CNT_VER_11_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxrtsocast), + IDX_IN_WL_CNT_VER_11_T(rxctsocast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_11_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_11_T(rxrsptmout), + IDX_IN_WL_CNT_VER_11_T(bcntxcancl), + IDX_IN_WL_CNT_VER_11_T(rxnodelim), + IDX_IN_WL_CNT_VER_11_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_11_T(txsfovfl), + IDX_IN_WL_CNT_VER_11_T(pmqovfl), + IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_11_T(txcgprsfail), + IDX_IN_WL_CNT_VER_11_T(txcgprssuc), + IDX_IN_WL_CNT_VER_11_T(prs_timeout), + IDX_IN_WL_CNT_VER_11_T(rxnack), + IDX_IN_WL_CNT_VER_11_T(frmscons), + IDX_IN_WL_CNT_VER_11_T(txnack), + IDX_IN_WL_CNT_VER_11_T(rxback), + IDX_IN_WL_CNT_VER_11_T(txback), + IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxdrop20s), + IDX_IN_WL_CNT_VER_11_T(rxtoolate), + IDX_IN_WL_CNT_VER_11_T(bphy_badplcp) +}; + +/* For mcst offsets that were not used. 
(2 Pads) */ +#define INVALID_MCST_IDX ((uint8)(-1)) +/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_v_le10_mcst_t */ +static const uint8 wlcntver11t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_11_T(txallfrm), + IDX_IN_WL_CNT_VER_11_T(txrtsfrm), + IDX_IN_WL_CNT_VER_11_T(txctsfrm), + IDX_IN_WL_CNT_VER_11_T(txackfrm), + IDX_IN_WL_CNT_VER_11_T(txdnlfrm), + IDX_IN_WL_CNT_VER_11_T(txbcnfrm), + IDX_IN_WL_CNT_VER_11_T(txfunfl), + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_11_T(txfbw), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_11_T(txtplunfl), + IDX_IN_WL_CNT_VER_11_T(txphyerror), + IDX_IN_WL_CNT_VER_11_T(pktengrxducast), + IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_11_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_11_T(rxbadfcs), + IDX_IN_WL_CNT_VER_11_T(rxbadplcp), + IDX_IN_WL_CNT_VER_11_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxstrt), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_11_T(rxrtsucast), + IDX_IN_WL_CNT_VER_11_T(rxctsucast), + IDX_IN_WL_CNT_VER_11_T(rxackucast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxrtsocast), + IDX_IN_WL_CNT_VER_11_T(rxctsocast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_11_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_11_T(rxrsptmout), + IDX_IN_WL_CNT_VER_11_T(bcntxcancl), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_11_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf1ovfl), + 
IDX_IN_WL_CNT_VER_11_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_11_T(txsfovfl), + IDX_IN_WL_CNT_VER_11_T(pmqovfl), + IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_11_T(txcgprsfail), + IDX_IN_WL_CNT_VER_11_T(txcgprssuc), + IDX_IN_WL_CNT_VER_11_T(prs_timeout), + IDX_IN_WL_CNT_VER_11_T(rxnack), + IDX_IN_WL_CNT_VER_11_T(frmscons), + IDX_IN_WL_CNT_VER_11_T(txnack), + IDX_IN_WL_CNT_VER_11_T(rxback), + IDX_IN_WL_CNT_VER_11_T(txback), + IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxdrop20s), + IDX_IN_WL_CNT_VER_11_T(rxtoolate), + IDX_IN_WL_CNT_VER_11_T(bphy_badplcp) +}; + + +/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_v_le10_mcst_t */ +static const uint8 wlcntver6t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_6_T(txallfrm), + IDX_IN_WL_CNT_VER_6_T(txrtsfrm), + IDX_IN_WL_CNT_VER_6_T(txctsfrm), + IDX_IN_WL_CNT_VER_6_T(txackfrm), + IDX_IN_WL_CNT_VER_6_T(txdnlfrm), + IDX_IN_WL_CNT_VER_6_T(txbcnfrm), + IDX_IN_WL_CNT_VER_6_T(txfunfl), + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_6_T(txfbw), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_6_T(txtplunfl), + IDX_IN_WL_CNT_VER_6_T(txphyerror), + IDX_IN_WL_CNT_VER_6_T(pktengrxducast), + IDX_IN_WL_CNT_VER_6_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_6_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_6_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_6_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_6_T(rxbadfcs), + IDX_IN_WL_CNT_VER_6_T(rxbadplcp), + IDX_IN_WL_CNT_VER_6_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_6_T(rxstrt), + IDX_IN_WL_CNT_VER_6_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_6_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_6_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_6_T(rxrtsucast), + IDX_IN_WL_CNT_VER_6_T(rxctsucast), + IDX_IN_WL_CNT_VER_6_T(rxackucast), + IDX_IN_WL_CNT_VER_6_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_6_T(rxmfrmocast), + 
IDX_IN_WL_CNT_VER_6_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_6_T(rxrtsocast), + IDX_IN_WL_CNT_VER_6_T(rxctsocast), + IDX_IN_WL_CNT_VER_6_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_6_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_6_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_6_T(rxrsptmout), + IDX_IN_WL_CNT_VER_6_T(bcntxcancl), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_6_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_6_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_6_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_6_T(txsfovfl), + IDX_IN_WL_CNT_VER_6_T(pmqovfl), + IDX_IN_WL_CNT_VER_6_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_6_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_6_T(txcgprsfail), + IDX_IN_WL_CNT_VER_6_T(txcgprssuc), + IDX_IN_WL_CNT_VER_6_T(prs_timeout), + IDX_IN_WL_CNT_VER_6_T(rxnack), + IDX_IN_WL_CNT_VER_6_T(frmscons), + IDX_IN_WL_CNT_VER_6_T(txnack), + IDX_IN_WL_CNT_VER_6_T(rxback), + IDX_IN_WL_CNT_VER_6_T(txback), + IDX_IN_WL_CNT_VER_6_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_6_T(rxdrop20s), + IDX_IN_WL_CNT_VER_6_T(rxtoolate), + IDX_IN_WL_CNT_VER_6_T(bphy_badplcp) +}; + +/* copy wlc layer counters from old type cntbuf to wl_cnt_wlc_t type. */ +static int +wl_copy_wlccnt(uint16 cntver, uint32 *dst, uint32 *src, uint8 src_max_idx) +{ + uint i; + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + /* Init wlccnt with invalid value. 
Unchanged value will not be printed out */ + for (i = 0; i < (sizeof(wl_cnt_wlc_t) / sizeof(uint32)); i++) { + dst[i] = INVALID_CNT_VAL; + } + + if (cntver == WL_CNT_VERSION_6) { + for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T; i++) { + if (wlcntver6t_to_wlcntwlct[i] >= src_max_idx) { + /* src buffer does not have counters from here */ + break; + } + dst[i] = src[wlcntver6t_to_wlcntwlct[i]]; + } + } else { + for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T; i++) { + if (wlcntver11t_to_wlcntwlct[i] >= src_max_idx) { + /* src buffer does not have counters from here */ + break; + } + dst[i] = src[wlcntver11t_to_wlcntwlct[i]]; + } + } + return BCME_OK; +} + +/* copy macstat counters from old type cntbuf to wl_cnt_v_le10_mcst_t type. */ +static int +wl_copy_macstat_upto_ver10(uint16 cntver, uint32 *dst, uint32 *src) +{ + uint i; + + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + if (cntver == WL_CNT_VERSION_6) { + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + if (wlcntver6t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) { + /* This mcst counter does not exist in wl_cnt_ver_6_t */ + dst[i] = INVALID_CNT_VAL; + } else { + dst[i] = src[wlcntver6t_to_wlcntvle10mcstt[i]]; + } + } + } else { + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + if (wlcntver11t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) { + /* This mcst counter does not exist in wl_cnt_ver_11_t */ + dst[i] = INVALID_CNT_VAL; + } else { + dst[i] = src[wlcntver11t_to_wlcntvle10mcstt[i]]; + } + } + } + return BCME_OK; +} + +static int +wl_copy_macstat_ver11(uint32 *dst, uint32 *src) +{ + uint i; + + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + dst[i] = src[wlcntver11t_to_wlcntXX40mcstv1t[i]]; + } + return BCME_OK; +} + +/** + * Translate non-xtlv 'wl counters' IOVar buffer received by old driver/FW to xtlv format. + * Parameters: + * cntbuf: pointer to non-xtlv 'wl counters' IOVar buffer received by old driver/FW. 
+ * Newly translated xtlv format is written to this pointer. + * buflen: length of the "cntbuf" without any padding. + * corerev: chip core revision of the driver/FW. + */ +int +wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev) +{ + wl_cnt_wlc_t *wlccnt = NULL; + uint32 *macstat = NULL; + xtlv_desc_t xtlv_desc[3]; + uint16 mcst_xtlv_id; + int res = BCME_OK; + wl_cnt_info_t *cntinfo = cntbuf; + void *xtlvbuf_p = cntinfo->data; + uint16 ver = cntinfo->version; + uint16 xtlvbuflen = (uint16)buflen; + uint16 src_max_idx; +#ifdef BCMDRIVER + osl_t *osh = ctx; +#else + BCM_REFERENCE(ctx); +#endif + + if (ver == WL_CNT_T_VERSION) { + /* Already in xtlv format. */ + goto exit; + } + +#ifdef BCMDRIVER + wlccnt = MALLOC(osh, sizeof(*wlccnt)); + macstat = MALLOC(osh, WL_CNT_MCST_STRUCT_SZ); +#else + wlccnt = (wl_cnt_wlc_t *)malloc(sizeof(*wlccnt)); + macstat = (uint32 *)malloc(WL_CNT_MCST_STRUCT_SZ); +#endif + if (!wlccnt) { + printf("wl_cntbuf_to_xtlv_format malloc fail!\n"); + res = BCME_NOMEM; + goto exit; + } + + /* Check if the max idx in the struct exceeds the boundary of uint8 */ + if (NUM_OF_CNT_IN_WL_CNT_VER_6_T > ((uint8)(-1) + 1) || + NUM_OF_CNT_IN_WL_CNT_VER_11_T > ((uint8)(-1) + 1)) { + printf("wlcntverXXt_to_wlcntwlct and src_max_idx need" + " to be of uint16 instead of uint8\n"); + res = BCME_ERROR; + goto exit; + } + + /* Exclude version and length fields in either wlc_cnt_ver_6_t or wlc_cnt_ver_11_t */ + src_max_idx = (cntinfo->datalen - OFFSETOF(wl_cnt_info_t, data)) / sizeof(uint32); + + if (src_max_idx > (uint8)(-1)) { + printf("wlcntverXXt_to_wlcntwlct and src_max_idx need" + " to be of uint16 instead of uint8\n" + "Try updating wl utility to the latest.\n"); + res = BCME_ERROR; + } + + /* Copy wlc layer counters to wl_cnt_wlc_t */ + res = wl_copy_wlccnt(ver, (uint32 *)wlccnt, (uint32 *)cntinfo->data, (uint8)src_max_idx); + if (res != BCME_OK) { + printf("wl_copy_wlccnt fail!\n"); + goto exit; + } + + /* Copy macstat counters 
to wl_cnt_wlc_t */ + if (ver == WL_CNT_VERSION_11) { + res = wl_copy_macstat_ver11(macstat, (uint32 *)cntinfo->data); + if (res != BCME_OK) { + printf("wl_copy_macstat_ver11 fail!\n"); + goto exit; + } + if (corerev >= 40) { + mcst_xtlv_id = WL_CNT_XTLV_GE40_UCODE_V1; + } else { + mcst_xtlv_id = WL_CNT_XTLV_LT40_UCODE_V1; + } + } else { + res = wl_copy_macstat_upto_ver10(ver, macstat, (uint32 *)cntinfo->data); + if (res != BCME_OK) { + printf("wl_copy_macstat_upto_ver10 fail!\n"); + goto exit; + } + mcst_xtlv_id = WL_CNT_XTLV_CNTV_LE10_UCODE; + } + + xtlv_desc[0].type = WL_CNT_XTLV_WLC; + xtlv_desc[0].len = sizeof(*wlccnt); + xtlv_desc[0].ptr = wlccnt; + + xtlv_desc[1].type = mcst_xtlv_id; + xtlv_desc[1].len = WL_CNT_MCST_STRUCT_SZ; + xtlv_desc[1].ptr = macstat; + + xtlv_desc[2].type = 0; + xtlv_desc[2].len = 0; + xtlv_desc[2].ptr = NULL; + + memset(cntbuf, 0, WL_CNTBUF_MAX_SIZE); + + res = bcm_pack_xtlv_buf_from_mem(&xtlvbuf_p, &xtlvbuflen, + xtlv_desc, BCM_XTLV_OPTION_ALIGN32); + cntinfo->datalen = (buflen - xtlvbuflen); +exit: +#ifdef BCMDRIVER + if (wlccnt) { + MFREE(osh, wlccnt, sizeof(*wlccnt)); + } + if (macstat) { + MFREE(osh, macstat, WL_CNT_MCST_STRUCT_SZ); + } +#else + if (wlccnt) { + free(wlccnt); + } + if (macstat) { + free(macstat); + } +#endif + return res; +} diff --git a/drivers/net/wireless/bcmdhd/bcmevent.c b/drivers/amlogic/wifi/bcmdhd/bcmevent.c similarity index 64% rename from drivers/net/wireless/bcmdhd/bcmevent.c rename to drivers/amlogic/wifi/bcmdhd/bcmevent.c index dc308d71028b4..1746f47fd613b 100644 --- a/drivers/net/wireless/bcmdhd/bcmevent.c +++ b/drivers/amlogic/wifi/bcmdhd/bcmevent.c @@ -1,12 +1,35 @@ /* * bcmevent read-only data shared by kernel or app layers * - * $Copyright Open Broadcom Corporation$ - * $Id: bcmevent.c 492377 2014-07-21 19:54:06Z $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software 
is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmevent.c 530174 2015-01-29 09:47:55Z $ */ #include #include +#include #include #include #include @@ -56,16 +79,13 @@ static const bcmevent_name_str_t bcmevent_names[] = { BCMEVENT_NAME(WLC_E_BCNLOST_MSG), BCMEVENT_NAME(WLC_E_ROAM_PREP), BCMEVENT_NAME(WLC_E_PFN_NET_FOUND), + BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE), BCMEVENT_NAME(WLC_E_PFN_NET_LOST), #if defined(IBSS_PEER_DISCOVERY_EVENT) BCMEVENT_NAME(WLC_E_IBSS_ASSOC), #endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */ BCMEVENT_NAME(WLC_E_RADIO), BCMEVENT_NAME(WLC_E_PSM_WATCHDOG), -#if defined(BCMCCX) && defined(CCX_SDK) - BCMEVENT_NAME(WLC_E_CCX_ASSOC_START), - BCMEVENT_NAME(WLC_E_CCX_ASSOC_ABORT), -#endif /* BCMCCX && CCX_SDK */ BCMEVENT_NAME(WLC_E_PROBREQ_MSG), BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND), BCMEVENT_NAME(WLC_E_PSK_SUP), @@ -75,25 +95,15 @@ static const bcmevent_name_str_t bcmevent_names[] = { BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR), BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR), BCMEVENT_NAME(WLC_E_TRACE), -#ifdef WLBTAMP - 
BCMEVENT_NAME(WLC_E_BTA_HCI_EVENT), -#endif BCMEVENT_NAME(WLC_E_IF), #ifdef WLP2P BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE), #endif BCMEVENT_NAME(WLC_E_RSSI), - BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE), BCMEVENT_NAME(WLC_E_EXTLOG_MSG), -#ifdef WIFI_ACT_FRAME BCMEVENT_NAME(WLC_E_ACTION_FRAME), BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX), BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE), -#endif -#ifdef BCMWAPI_WAI - BCMEVENT_NAME(WLC_E_WAI_STA_EVENT), - BCMEVENT_NAME(WLC_E_WAI_MSG), -#endif /* BCMWAPI_WAI */ BCMEVENT_NAME(WLC_E_ESCAN_RESULT), BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE), #ifdef WLP2P @@ -143,20 +153,25 @@ static const bcmevent_name_str_t bcmevent_names[] = { #ifdef PROP_TXSTATUS BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT), #endif + BCMEVENT_NAME(WLC_E_PSTA_PRIMARY_INTF_IND), BCMEVENT_NAME(WLC_E_TXFAIL_THRESH), -#ifdef WLAIBSS - BCMEVENT_NAME(WLC_E_AIBSS_TXFAIL), -#endif /* WLAIBSS */ +#ifdef GSCAN_SUPPORT + BCMEVENT_NAME(WLC_E_PFN_GSCAN_FULL_RESULT), + BCMEVENT_NAME(WLC_E_PFN_SWC), +#endif /* GSCAN_SUPPORT */ #ifdef WLBSSLOAD_REPORT BCMEVENT_NAME(WLC_E_BSS_LOAD), #endif #if defined(BT_WIFI_HANDOVER) || defined(WL_TBOW) BCMEVENT_NAME(WLC_E_BT_WIFI_HANDOVER_REQ), #endif -#ifdef WLFBT - BCMEVENT_NAME(WLC_E_FBT_AUTH_REQ_IND), -#endif /* WLFBT */ + BCMEVENT_NAME(WLC_E_AUTHORIZED), + BCMEVENT_NAME(WLC_E_PROBREQ_MSG_RX), + BCMEVENT_NAME(WLC_E_CSA_START_IND), + BCMEVENT_NAME(WLC_E_CSA_DONE_IND), + BCMEVENT_NAME(WLC_E_CSA_FAILURE_IND), BCMEVENT_NAME(WLC_E_RMC_EVENT), + BCMEVENT_NAME(WLC_E_DPSTA_INTF_IND), }; @@ -183,3 +198,33 @@ const char *bcmevent_get_name(uint event_type) */ return ((event_name) ? event_name : "Unknown Event"); } + +void +wl_event_to_host_order(wl_event_msg_t * evt) +{ + /* Event struct members passed from dongle to host are stored in network + * byte order. Convert all members to host-order. 
+ */ + evt->event_type = ntoh32(evt->event_type); + evt->flags = ntoh16(evt->flags); + evt->status = ntoh32(evt->status); + evt->reason = ntoh32(evt->reason); + evt->auth_type = ntoh32(evt->auth_type); + evt->datalen = ntoh32(evt->datalen); + evt->version = ntoh16(evt->version); +} + +void +wl_event_to_network_order(wl_event_msg_t * evt) +{ + /* Event struct members passed from dongle to host are stored in network + * byte order. Convert all members to host-order. + */ + evt->event_type = hton32(evt->event_type); + evt->flags = hton16(evt->flags); + evt->status = hton32(evt->status); + evt->reason = hton32(evt->reason); + evt->auth_type = hton32(evt->auth_type); + evt->datalen = hton32(evt->datalen); + evt->version = hton16(evt->version); +} diff --git a/drivers/net/wireless/bcmdhd/bcmsdh.c b/drivers/amlogic/wifi/bcmdhd/bcmsdh.c similarity index 93% rename from drivers/net/wireless/bcmdhd/bcmsdh.c rename to drivers/amlogic/wifi/bcmdhd/bcmsdh.c index 98ce4e6138667..400f441cc58d3 100644 --- a/drivers/net/wireless/bcmdhd/bcmsdh.c +++ b/drivers/amlogic/wifi/bcmdhd/bcmsdh.c @@ -2,9 +2,30 @@ * BCMSDH interface glue * implement bcmsdh API for SDIOH driver * - * $ Copyright Open Broadcom Corporation $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: bcmsdh.c 450676 2014-01-22 22:45:13Z $ + * + * <> + * + * $Id: bcmsdh.c 514727 2014-11-12 03:02:48Z $ */ /** @@ -32,9 +53,6 @@ const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL; /* local copy of bcm sd handler */ bcmsdh_info_t * l_bcmsdh = NULL; -#if 0 && (NDISVER < 0x0630) -extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd); -#endif #if defined(OOB_INTR_ONLY) && defined(HW_OOB) || defined(FORCE_WOWLAN) extern int @@ -86,10 +104,6 @@ bcmsdh_detach(osl_t *osh, void *sdh) bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; if (bcmsdh != NULL) { -#if 0 && (NDISVER < 0x0630) - if (bcmsdh->sdioh) - sdioh_detach(osh, bcmsdh->sdioh); -#endif MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t)); } @@ -378,7 +392,7 @@ bcmsdh_reg_read(void *sdh, uint32 addr, uint size) SDIOH_API_RC status; uint32 word = 0; - BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr)); + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x\n", __FUNCTION__, addr)); if (!bcmsdh) bcmsdh = l_bcmsdh; diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c b/drivers/amlogic/wifi/bcmdhd/bcmsdh_linux.c similarity index 88% rename from drivers/net/wireless/bcmdhd/bcmsdh_linux.c rename to drivers/amlogic/wifi/bcmdhd/bcmsdh_linux.c index 54ca0ed22d26c..5087eb4eccf60 100644 --- a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c +++ b/drivers/amlogic/wifi/bcmdhd/bcmsdh_linux.c @@ -1,9 +1,30 @@ /* * SDIO access interface for drivers - linux specific (pci only) * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * 
agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: bcmsdh_linux.c 461444 2014-03-12 02:55:28Z $ + * + * <> + * + * $Id: bcmsdh_linux.c 514727 2014-11-12 03:02:48Z $ */ /** @@ -31,6 +52,8 @@ extern void dhdsdio_isr(void * args); #endif /* defined(CONFIG_ARCH_ODIN) */ #include +#include + /* driver info, initialized when bcmsdh_register is called */ static bcmsdh_driver_t drvinfo = {NULL, NULL, NULL, NULL}; @@ -324,8 +347,13 @@ int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handl #else printf("%s: SW_OOB enabled\n", __FUNCTION__); #endif - SDLX_MSG(("%s OOB irq=%d flags=%X\n", __FUNCTION__, + SDLX_MSG(("%s OOB irq=%d flags=0x%X\n", __FUNCTION__, (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags)); + // anthony: test begin + bcmsdh_osinfo->oob_irq_flags &= IRQF_TRIGGER_MASK; + printf("%s change flags to 0x%X\n", __FUNCTION__, (int)bcmsdh_osinfo->oob_irq_flags); + // anthony: test end + bcmsdh_osinfo->oob_irq_handler = oob_irq_handler; bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context; bcmsdh_osinfo->oob_irq_enabled = TRUE; @@ -344,6 +372,10 @@ int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handl return err; } +#ifdef CUSTOM_DPC_CPUCORE + irq_set_affinity(bcmsdh_osinfo->oob_irq_num, cpumask_of(CUSTOM_DPC_CPUCORE)); +#endif + #if defined(DISABLE_WOWLAN) SDLX_MSG(("%s: disable_irq_wake\n", __FUNCTION__)); bcmsdh_osinfo->oob_irq_wake_enabled = FALSE; @@ -381,7 +413,7 @@ void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh) free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh); bcmsdh_osinfo->oob_irq_registered = FALSE; } -#endif +#endif /* Module parameters specific to each host-controller driver */ diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c b/drivers/amlogic/wifi/bcmdhd/bcmsdh_sdmmc.c similarity index 96% rename from drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c rename to drivers/amlogic/wifi/bcmdhd/bcmsdh_sdmmc.c index 0805697c0084e..58be7ff6aa76d 100644 --- a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c +++ 
b/drivers/amlogic/wifi/bcmdhd/bcmsdh_sdmmc.c @@ -1,14 +1,14 @@ /* * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel * - * Copyright (C) 1999-2014, Broadcom Corporation - * + * Copyright (C) 1999-2016, Broadcom Corporation + * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: - * + * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that @@ -16,12 +16,15 @@ * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. - * + * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: bcmsdh_sdmmc.c 459285 2014-03-03 02:54:39Z $ + * + * <> + * + * $Id: bcmsdh_sdmmc.c 591104 2015-10-07 04:45:18Z $ */ #include @@ -60,7 +63,7 @@ static void IRQHandler(struct sdio_func *func); static void IRQHandlerF2(struct sdio_func *func); #endif /* !defined(OOB_INTR_ONLY) */ static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr); -#if defined(ENABLE_INSMOD_NO_FW_LOAD) +#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE) extern int sdio_reset_comm(struct mmc_card *card); #else int sdio_reset_comm(struct mmc_card *card) @@ -543,6 +546,17 @@ sdioh_iovar_op(sdioh_info_t *si, const char *name, /* Now set it */ si->client_block_size[func] = blksize; + if (si->func[func] == NULL) { + sd_err(("%s: SDIO Device not present\n", __FUNCTION__)); + bcmerror = BCME_NORESOURCE; + break; + } + sdio_claim_host(si->func[func]); + bcmerror = sdio_set_block_size(si->func[func], blksize); + if (bcmerror) + sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n", + __FUNCTION__, func, blksize, bcmerror)); + sdio_release_host(si->func[func]); break; } @@ -706,7 +720,9 @@ sdioh_iovar_op(sdioh_info_t *si, const char *name, } #if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN) +#ifdef CUSTOMER_HW_AMLOGIC extern int wifi_irq_trigger_level(void); +#endif SDIOH_API_RC sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable) { @@ -807,14 +823,14 @@ sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *by #endif struct timespec now, before; - if (sd_msglevel && SDH_COST_VAL) + if (sd_msglevel & SDH_COST_VAL) getnstimeofday(&before); sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr)); DHD_PM_RESUME_WAIT(sdioh_request_byte_wait); DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); - if (rw) { /* CMD52 Write */ + if(rw) { /* CMD52 Write */ if (func == 0) { /* Can only directly write to some F0 registers. Handle F2 enable * as a special case. 
@@ -901,10 +917,11 @@ sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *by } } - if (sd_msglevel && SDH_COST_VAL) + if (sd_msglevel & SDH_COST_VAL) { getnstimeofday(&now); - sd_cost(("%s: len=1 cost=%lds %luus\n", __FUNCTION__, - now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000)); +// sd_cost(("%s: rw=%d len=1 cost=%lds %luus\n", __FUNCTION__, +// rw, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000)); + } return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); } @@ -1314,7 +1331,7 @@ sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint add #endif struct timespec now, before; - if (sd_msglevel && SDH_COST_VAL) + if (sd_msglevel & SDH_COST_VAL) getnstimeofday(&before); if (func == 0) { @@ -1330,7 +1347,7 @@ sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint add /* Claim host controller */ sdio_claim_host(sd->func[func]); - if (rw) { /* CMD52 Write */ + if(rw) { /* CMD52 Write */ if (nbytes == 4) { sdio_writel(sd->func[func], *word, addr, &err_ret); } else if (nbytes == 2) { @@ -1377,10 +1394,11 @@ sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint add } } - if (sd_msglevel && SDH_COST_VAL) + if (sd_msglevel & SDH_COST_VAL) { getnstimeofday(&now); - sd_cost(("%s: len=%d cost=%lds %luus\n", __FUNCTION__, - nbytes, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000)); +// sd_cost(("%s: rw=%d, len=%d cost=%lds %luus\n", __FUNCTION__, +// rw, nbytes, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000)); + } return (((err_ret == 0)&&(err_ret2 == 0)) ? 
SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); } @@ -1415,7 +1433,7 @@ sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func DHD_PM_RESUME_WAIT(sdioh_request_packet_wait); DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); - if (sd_msglevel && SDH_COST_VAL) + if (sd_msglevel & SDH_COST_VAL) getnstimeofday(&before); blk_size = sd->client_block_size[func]; @@ -1457,7 +1475,7 @@ sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func * a restriction on max tx/glom count (based on host->max_segs). */ if (sg_count >= ARRAYSIZE(sd->sg_list)) { - sd_err(("%s: sg list entries exceed limit\n", __FUNCTION__)); +// sd_err(("%s: sg list entries exceed limit, sg_count=%d(%lu)\n", __FUNCTION__, sg_count, ARRAYSIZE(sd->sg_list))); return (SDIOH_API_RC_FAIL); } pdata += pkt_offset; @@ -1593,10 +1611,11 @@ sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func MFREE(sd->osh, localbuf, ttl_len); #endif /* BCMSDIOH_TXGLOM */ - if (sd_msglevel && SDH_COST_VAL) + if (sd_msglevel & SDH_COST_VAL) { getnstimeofday(&now); - sd_cost(("%s: cost=%lds %luus\n", __FUNCTION__, - now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000)); +// sd_cost(("%s: rw=%d, cost=%lds %luus\n", __FUNCTION__, +// write, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000)); + } sd_trace(("%s: Exit\n", __FUNCTION__)); return SDIOH_API_RC_SUCCESS; @@ -1613,7 +1632,7 @@ sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func, sd_trace(("%s: Enter\n", __FUNCTION__)); ASSERT(buf); - if (sd_msglevel && SDH_COST_VAL) + if (sd_msglevel & SDH_COST_VAL) getnstimeofday(&before); /* NOTE: @@ -1647,10 +1666,12 @@ sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func, sd_trace(("%s: Exit\n", __FUNCTION__)); - if (sd_msglevel && SDH_COST_VAL) + if (sd_msglevel & SDH_COST_VAL) { getnstimeofday(&now); - sd_cost(("%s: len=%d cost=%lds %luus\n", __FUNCTION__, - len, now.tv_sec-before.tv_sec, 
now.tv_nsec/1000 - before.tv_nsec/1000)); +// sd_cost(("%s: rw=%d, len=%d cost=%lds %luus\n", __FUNCTION__, +// write, len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000)); + } + return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); } @@ -1678,7 +1699,7 @@ sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, u DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait); DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); - if (sd_msglevel && SDH_COST_VAL) + if (sd_msglevel & SDH_COST_VAL) getnstimeofday(&before); if (pkt) { @@ -1722,10 +1743,11 @@ sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, u PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE); - if (sd_msglevel && SDH_COST_VAL) + if (sd_msglevel & SDH_COST_VAL) { getnstimeofday(&now); - sd_cost(("%s: len=%d cost=%lds %luus\n", __FUNCTION__, - buf_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000)); +// sd_cost(("%s: len=%d cost=%lds %luus\n", __FUNCTION__, +// buf_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000)); + } return status; } diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c b/drivers/amlogic/wifi/bcmdhd/bcmsdh_sdmmc_linux.c similarity index 96% rename from drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c rename to drivers/amlogic/wifi/bcmdhd/bcmsdh_sdmmc_linux.c index 19e0018bf9110..741c50892102b 100644 --- a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c +++ b/drivers/amlogic/wifi/bcmdhd/bcmsdh_sdmmc_linux.c @@ -1,14 +1,14 @@ /* * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel * - * Copyright (C) 1999-2014, Broadcom Corporation - * + * Copyright (C) 1999-2016, Broadcom Corporation + * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at 
http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: - * + * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that @@ -16,12 +16,15 @@ * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. - * + * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * $Id: bcmsdh_sdmmc_linux.c 434777 2013-11-07 09:30:27Z $ + * + * <> + * + * $Id: bcmsdh_sdmmc_linux.c 591173 2015-10-07 06:24:22Z $ */ #include @@ -110,9 +113,12 @@ static int sdioh_probe(struct sdio_func *func) sd_err(("bus num (host idx)=%d, slot num (rca)=%d\n", host_idx, rca)); adapter = dhd_wifi_platform_get_adapter(SDIO_BUS, host_idx, rca); - if (adapter != NULL) + if (adapter != NULL) { sd_err(("found adapter info '%s'\n", adapter->name)); - else +#ifdef BUS_POWER_RESTORE + adapter->sdio_func = func; +#endif + } else sd_err(("can't find adapter info for this chip\n")); #ifdef WL_CFG80211 @@ -237,16 +243,19 @@ static int bcmsdh_sdmmc_suspend(struct device *pdev) if (func->num != 2) return 0; + dhd_mmc_suspend = TRUE; sdioh = sdio_get_drvdata(func); err = bcmsdh_suspend(sdioh->bcmsdh); if (err) { printf("%s bcmsdh_suspend err=%d\n", __FUNCTION__, err); + dhd_mmc_suspend = FALSE; return err; } sdio_flags = sdio_get_host_pm_caps(func); if (!(sdio_flags & MMC_PM_KEEP_POWER)) { sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__)); + dhd_mmc_suspend = FALSE; return -EINVAL; } @@ -254,12 +263,12 @@ static int bcmsdh_sdmmc_suspend(struct device *pdev) err = sdio_set_host_pm_flags(func, 
MMC_PM_KEEP_POWER); if (err) { sd_err(("%s: error while trying to keep power\n", __FUNCTION__)); + dhd_mmc_suspend = FALSE; return err; } #if defined(OOB_INTR_ONLY) bcmsdh_oob_intr_set(sdioh->bcmsdh, FALSE); #endif - dhd_mmc_suspend = TRUE; smp_mb(); printf("%s Exit\n", __FUNCTION__); @@ -292,7 +301,7 @@ static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = { .suspend = bcmsdh_sdmmc_suspend, .resume = bcmsdh_sdmmc_resume, }; -#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */ +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */ #if defined(BCMLXSDMMC) static struct semaphore *notify_semaphore = NULL; diff --git a/drivers/amlogic/wifi/bcmdhd/bcmsdspi_linux.c b/drivers/amlogic/wifi/bcmdhd/bcmsdspi_linux.c new file mode 100644 index 0000000000000..139288e73ad82 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/bcmsdspi_linux.c @@ -0,0 +1,252 @@ +/* + * Broadcom SPI Host Controller Driver - Linux Per-port + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdspi_linux.c 514727 2014-11-12 03:02:48Z $ + */ + +#include +#include + +#include /* bcmsdh to/from specific controller APIs */ +#include /* to get msglevel bit values */ + +#include +#include /* SDIO Device and Protocol Specs */ +#include /* request_irq(), free_irq() */ +#include +#include + +extern uint sd_crc; +module_param(sd_crc, uint, 0); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define KERNEL26 +#endif + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; + wait_queue_head_t intr_wait_queue; +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define BLOCKABLE() (!in_atomic()) +#else +#define BLOCKABLE() (!in_interrupt()) +#endif + +/* Interrupt handler */ +static irqreturn_t +sdspi_isr(int irq, void *dev_id +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) +, struct pt_regs *ptregs +#endif +) +{ + sdioh_info_t *sd; + struct sdos_info *sdos; + bool ours; + + sd = (sdioh_info_t *)dev_id; + sd->local_intrcount++; + + if (!sd->card_init_done) { + sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq)); + return IRQ_RETVAL(FALSE); + } else { + ours = spi_check_client_intr(sd, NULL); + + /* For local interrupts, wake the waiting process */ + if (ours && sd->got_hcint) { + sdos = (struct sdos_info *)sd->sdos_info; + wake_up_interruptible(&sdos->intr_wait_queue); + } + + return IRQ_RETVAL(ours); + } +} + + +/* Register with Linux for interrupts */ +int +spi_register_irq(sdioh_info_t *sd, uint irq) +{ + sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq)); + if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) { + sd_err(("%s: request_irq() failed\n", __FUNCTION__)); + return ERROR; + } + return SUCCESS; +} + +/* Free Linux irq */ 
+void +spi_free_irq(uint irq, sdioh_info_t *sd) +{ + free_irq(irq, sd); +} + +/* Map Host controller registers */ +uint32 * +spi_reg_map(osl_t *osh, uintptr addr, int size) +{ + return (uint32 *)REG_MAP(addr, size); +} + +void +spi_reg_unmap(osl_t *osh, uintptr addr, int size) +{ + REG_UNMAP((void*)(uintptr)addr); +} + +int +spi_osinit(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + + sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info)); + sd->sdos_info = (void*)sdos; + if (sdos == NULL) + return BCME_NOMEM; + + sdos->sd = sd; + spin_lock_init(&sdos->lock); + init_waitqueue_head(&sdos->intr_wait_queue); + return BCME_OK; +} + +void +spi_osfree(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + ASSERT(sd && sd->sdos_info); + + sdos = (struct sdos_info *)sd->sdos_info; + MFREE(sd->osh, sdos, sizeof(struct sdos_info)); +} + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? 
"Enabling" : "Disabling")); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + if (!(sd->host_init_done && sd->card_init_done)) { + sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + if (enable && !(sd->intr_handler && sd->intr_handler_arg)) { + sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + /* Ensure atomicity for enable/disable calls */ + spin_lock_irqsave(&sdos->lock, flags); + + sd->client_intr_enabled = enable; + if (enable && !sd->lockcount) + spi_devintr_on(sd); + else + spi_devintr_off(sd); + + spin_unlock_irqrestore(&sdos->lock, flags); + + return SDIOH_API_RC_SUCCESS; +} + +/* Protect against reentrancy (disable device interrupts while executing) */ +void +spi_lock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount)); + + spin_lock_irqsave(&sdos->lock, flags); + if (sd->lockcount) { + sd_err(("%s: Already locked!\n", __FUNCTION__)); + ASSERT(sd->lockcount == 0); + } + spi_devintr_off(sd); + sd->lockcount++; + spin_unlock_irqrestore(&sdos->lock, flags); +} + +/* Enable client interrupt */ +void +spi_unlock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled)); + ASSERT(sd->lockcount > 0); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + spin_lock_irqsave(&sdos->lock, flags); + if (--sd->lockcount == 0 && sd->client_intr_enabled) { + spi_devintr_on(sd); + } + spin_unlock_irqrestore(&sdos->lock, flags); +} + +void spi_waitbits(sdioh_info_t *sd, bool yield) +{ +#ifndef BCMSDYIELD + ASSERT(!yield); +#endif + sd_trace(("%s: yield %d canblock %d\n", + __FUNCTION__, yield, BLOCKABLE())); + + /* Clear the "interrupt happened" flag and last intrstatus */ + sd->got_hcint = FALSE; + +#ifdef BCMSDYIELD + 
if (yield && BLOCKABLE()) { + struct sdos_info *sdos; + sdos = (struct sdos_info *)sd->sdos_info; + /* Wait for the indication, the interrupt will be masked when the ISR fires. */ + wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint)); + } else +#endif /* BCMSDYIELD */ + { + spi_spinbits(sd); + } + +} diff --git a/drivers/amlogic/wifi/bcmdhd/bcmspibrcm.c b/drivers/amlogic/wifi/bcmdhd/bcmspibrcm.c new file mode 100644 index 0000000000000..10d982e0f8d84 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/bcmspibrcm.c @@ -0,0 +1,1819 @@ +/* + * Broadcom BCMSDH to gSPI Protocol Conversion Layer + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmspibrcm.c 591086 2015-10-07 02:51:01Z $ + */ + +#define HSMODE + +#include + +#include +#include +#include +#include +#include +#include +#include +#include /* SDIO device core hardware definitions. 
*/ +#include + +#include /* bcmsdh to/from specific controller APIs */ +#include /* ioctl/iovars */ +#include /* SDIO Device and Protocol Specs */ + +#include + + +#include +#include + +/* these are for the older cores... for newer cores we have control for each of them */ +#define F0_RESPONSE_DELAY 16 +#define F1_RESPONSE_DELAY 16 +#define F2_RESPONSE_DELAY F0_RESPONSE_DELAY + + +#define GSPI_F0_RESP_DELAY 0 +#define GSPI_F1_RESP_DELAY F1_RESPONSE_DELAY +#define GSPI_F2_RESP_DELAY 0 +#define GSPI_F3_RESP_DELAY 0 + +#define CMDLEN 4 + +#define DWORDMODE_ON (sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 2) && (sd->dwordmode == TRUE) + +/* Globals */ +#if defined(DHD_DEBUG) +uint sd_msglevel = SDH_ERROR_VAL; +#else +uint sd_msglevel = 0; +#endif + +uint sd_hiok = FALSE; /* Use hi-speed mode if available? */ +uint sd_sdmode = SDIOH_MODE_SPI; /* Use SD4 mode by default */ +uint sd_f2_blocksize = 64; /* Default blocksize */ + + +uint sd_divisor = 2; +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_crc = 0; /* Default to SPI CRC Check turned OFF */ +uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */ + +uint8 spi_outbuf[SPI_MAX_PKT_LEN]; +uint8 spi_inbuf[SPI_MAX_PKT_LEN]; + +/* 128bytes buffer is enough to clear data-not-available and program response-delay F0 bits + * assuming we will not exceed F0 response delay > 100 bytes at 48MHz. 
+ */ +#define BUF2_PKT_LEN 128 +uint8 spi_outbuf2[BUF2_PKT_LEN]; +uint8 spi_inbuf2[BUF2_PKT_LEN]; + +#define SPISWAP_WD4(x) bcmswap32(x); +#define SPISWAP_WD2(x) (bcmswap16(x & 0xffff)) | \ + (bcmswap16((x & 0xffff0000) >> 16) << 16); + +/* Prototypes */ +static bool bcmspi_test_card(sdioh_info_t *sd); +static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd); +static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode); +static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, + uint32 *data, uint32 datalen); +static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 *data); +static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 data); +static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, + uint8 *data); +static int bcmspi_driver_init(sdioh_info_t *sd); +static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data); +static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, + uint32 *data); +static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer); +static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg); + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * +sdioh_attach(osl_t *osh, void *bar0, uint irq) +{ + sdioh_info_t *sd; + + sd_trace(("%s\n", __FUNCTION__)); + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh))); + return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + sd->osh = osh; + if (spi_osinit(sd) != 0) { + sd_err(("%s: spi_osinit() failed\n", __FUNCTION__)); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; + } + + sd->bar0 = bar0; + sd->irq = irq; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; + sd->intr_handler_valid = FALSE; + + /* Set defaults */ + 
sd->use_client_ints = TRUE; + sd->sd_use_dma = FALSE; /* DMA Not supported */ + + /* Spi device default is 16bit mode, change to 4 when device is changed to 32bit + * mode + */ + sd->wordlen = 2; + + + if (!spi_hw_attach(sd)) { + sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__)); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + if (bcmspi_driver_init(sd) != SUCCESS) { + sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__)); + spi_hw_detach(sd); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + if (spi_register_irq(sd, irq) != SUCCESS) { + sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq)); + spi_hw_detach(sd); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + sd_trace(("%s: Done\n", __FUNCTION__)); + + return sd; +} + +extern SDIOH_API_RC +sdioh_detach(osl_t *osh, sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + if (sd) { + sd_err(("%s: detaching from hardware\n", __FUNCTION__)); + spi_free_irq(sd->irq, sd); + spi_hw_detach(sd); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + } + return SDIOH_API_RC_SUCCESS; +} + +/* Configure callback to client when we recieve client interrupt */ +extern SDIOH_API_RC +sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); +#if !defined(OOB_INTR_ONLY) + sd->intr_handler = fn; + sd->intr_handler_arg = argh; + sd->intr_handler_valid = TRUE; +#endif /* !defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_deregister(sdioh_info_t *sd) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); +#if !defined(OOB_INTR_ONLY) + sd->intr_handler_valid = FALSE; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; +#endif /* !defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) +{ 
+ sd_trace(("%s: Entering\n", __FUNCTION__)); + *onoff = sd->client_intr_enabled; + return SDIOH_API_RC_SUCCESS; +} + +#if defined(DHD_DEBUG) +extern bool +sdioh_interrupt_pending(sdioh_info_t *sd) +{ + return 0; +} +#endif + +extern SDIOH_API_RC +sdioh_query_device(sdioh_info_t *sd) +{ + /* Return a BRCM ID appropriate to the dongle class */ + return (sd->num_funcs > 1) ? BCM4329_D11N_ID : BCM4318_D11G_ID; +} + +/* Provide dstatus bits of spi-transaction for dhd layers. */ +extern uint32 +sdioh_get_dstatus(sdioh_info_t *sd) +{ + return sd->card_dstatus; +} + +extern void +sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev) +{ + sd->chip = chip; + sd->chiprev = chiprev; +} + +extern void +sdioh_dwordmode(sdioh_info_t *sd, bool set) +{ + uint8 reg = 0; + int status; + + if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, ®)) != + SUCCESS) { + sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__)); + return; + } + + if (set) { + reg |= DWORD_PKT_LEN_EN; + sd->dwordmode = TRUE; + sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */ + } else { + reg &= ~DWORD_PKT_LEN_EN; + sd->dwordmode = FALSE; + sd->client_block_size[SPI_FUNC_2] = 2048; + } + + if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, ®)) != + SUCCESS) { + sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__)); + return; + } +} + + +uint +sdioh_query_iofnum(sdioh_info_t *sd) +{ + return sd->num_funcs; +} + +/* IOVar table */ +enum { + IOV_MSGLEVEL = 1, + IOV_BLOCKMODE, + IOV_BLOCKSIZE, + IOV_DMA, + IOV_USEINTS, + IOV_NUMINTS, + IOV_NUMLOCALINTS, + IOV_HOSTREG, + IOV_DEVREG, + IOV_DIVISOR, + IOV_SDMODE, + IOV_HISPEED, + IOV_HCIREGS, + IOV_POWER, + IOV_CLOCK, + IOV_SPIERRSTATS, + IOV_RESP_DELAY_ALL +}; + +const bcm_iovar_t sdioh_iovars[] = { + {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, + {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", 
IOV_DMA, 0, IOVT_BOOL, 0 }, + {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, + {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0}, + {"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) }, + {"spi_respdelay", IOV_RESP_DELAY_ALL, 0, IOVT_BOOL, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; +/* + sdioh_regs_t *regs; +*/ + + ASSERT(name); + ASSERT(len >= 0); + + /* Get must have return space; Set does not take qualifiers */ + ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_use_dma; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_use_dma = (bool)int_val; + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DIVISOR): + sd_divisor = int_val; + if (!spi_start_clock(si, (uint16)sd_divisor)) { + sd_err(("%s: set clock failed\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + } + break; + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + + if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) { + sd_err(("%s: Failed changing highspeed mode to %d.\n", + __FUNCTION__, sd_hiok)); + bcmerror = BCME_ERROR; + return ERROR; + } + break; + + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + 
bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)si->local_intrcount; + bcopy(&int_val, arg, val_size); + break; + case IOV_GVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data; + + if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + + int_val = (int)data; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = (uint8)sd_ptr->value; + + if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + } + + + case IOV_GVAL(IOV_SPIERRSTATS): + { + bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t)); + break; + } + + case IOV_SVAL(IOV_SPIERRSTATS): + { + bzero(&si->spierrstats, sizeof(struct spierrstats_t)); + break; + } + + case IOV_GVAL(IOV_RESP_DELAY_ALL): + int_val = (int32)si->resp_delay_all; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RESP_DELAY_ALL): + si->resp_delay_all = (bool)int_val; + int_val = STATUS_ENABLE|INTR_WITH_STATUS; + if (si->resp_delay_all) + int_val |= RESP_DELAY_ALL; + else { + if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1, + F1_RESPONSE_DELAY) != SUCCESS) { + sd_err(("%s: Unable to set response delay.\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + break; + } + } + + if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val) + != SUCCESS) { + sd_err(("%s: Unable to set response delay.\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + return bcmerror; +} + +extern SDIOH_API_RC +sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + SDIOH_API_RC status; + /* No lock needed since sdioh_request_byte does locking */ + status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); + return status; +} + +extern 
SDIOH_API_RC +sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + /* No lock needed since sdioh_request_byte does locking */ + SDIOH_API_RC status; + + if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) { + uint8 dummy_data; + status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data); + if (status) { + sd_err(("sdioh_cfg_read() failed.\n")); + return status; + } + } + + status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) +{ + uint32 count; + int offset; + uint32 cis_byte; + uint16 *cis = (uint16 *)cisd; + uint bar0 = SI_ENUM_BASE; + int status; + uint8 data; + + sd_trace(("%s: Func %d\n", __FUNCTION__, func)); + + spi_lock(sd); + + /* Set sb window address to 0x18000000 */ + data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data); + if (status == SUCCESS) { + data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data); + } else { + sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + if (status == SUCCESS) { + data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data); + } else { + sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + + offset = CC_SROM_OTP; /* OTP offset in chipcommon. 
*/ + for (count = 0; count < length/2; count++) { + if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + + *cis = (uint16)cis_byte; + cis++; + offset += 2; + } + + spi_unlock(sd); + + return (BCME_OK); +} + +extern SDIOH_API_RC +sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 dstatus; + uint32 data = (uint32)(*byte); + + spi_lock(sd); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 0 : 1); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); + + if (rw == SDIOH_READ) { + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr)); + } else { + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, data)); + } + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) { + spi_unlock(sd); + return status; + } + + if (rw == SDIOH_READ) { + *byte = (uint8)data; + sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *byte)); + } + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus=0x%x\n", dstatus)); + + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, + uint32 *word, uint nbytes) +{ + int status; + + spi_lock(sd); + + if (rw == SDIOH_READ) + status = bcmspi_card_regread(sd, func, addr, nbytes, word); + else + status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word); + + spi_unlock(sd); + return (status == SUCCESS ? 
SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func, + uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) +{ + int len; + int buflen = (int)buflen_u; + bool fifo = (fix_inc == SDIOH_DATA_FIX); + + spi_lock(sd); + + ASSERT(reg_width == 4); + ASSERT(buflen_u < (1 << 30)); + ASSERT(sd->client_block_size[func]); + + sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n", + __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W', + buflen_u, sd->r_cnt, sd->t_cnt, pkt)); + + /* Break buffer down into blocksize chunks. */ + while (buflen > 0) { + len = MIN(sd->client_block_size[func], buflen); + if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) { + sd_err(("%s: bcmspi_card_buf %s failed\n", + __FUNCTION__, rw == SDIOH_READ ? "Read" : "Write")); + spi_unlock(sd); + return SDIOH_API_RC_FAIL; + } + buffer += len; + buflen -= len; + if (!fifo) + addr += len; + } + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +/* This function allows write to gspi bus when another rd/wr function is deep down the call stack. + * Its main aim is to have simpler spi writes rather than recursive writes. + * e.g. When there is a need to program response delay on the fly after detecting the SPI-func + * this call will allow to program the response delay. + */ +static int +bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte) +{ + uint32 cmd_arg; + uint32 datalen = 1; + uint32 hostlen; + + cmd_arg = 0; + + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen); + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + + + /* Set up and issue the SPI command. MSByte goes out on bus first. 
Increase datalen + * according to the wordlen mode(16/32bit) the device is in. + */ + ASSERT(sd->wordlen == 4 || sd->wordlen == 2); + datalen = ROUNDUP(datalen, sd->wordlen); + + /* Start by copying command in the spi-outbuffer */ + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)spi_outbuf2 = SPISWAP_WD4(cmd_arg); + if (datalen & 0x3) + datalen += (4 - (datalen & 0x3)); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)spi_outbuf2 = SPISWAP_WD2(cmd_arg); + if (datalen & 0x1) + datalen++; + } else { + sd_err(("%s: Host is %d bit spid, could not create SPI command.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + /* for Write, put the data into the output buffer */ + if (datalen != 0) { + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD4(byte); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD2(byte); + } + } + + /* +4 for cmd, +4 for dstatus */ + hostlen = datalen + 8; + hostlen += (4 - (hostlen & 0x3)); + spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen); + + /* Last 4bytes are dstatus. Device is configured to return status bits. 
*/ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else { + sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + if (sd->card_dstatus) + sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus)); + + return (BCME_OK); +} + +/* Program the response delay corresponding to the spi function */ +static int +bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay) +{ + if (sd->resp_delay_all == FALSE) + return (BCME_OK); + + if (sd->prev_fun == func) + return (BCME_OK); + + if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY) + return (BCME_OK); + + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay); + + /* Remember function for which to avoid reprogramming resp-delay in next iteration */ + sd->prev_fun = func; + + return (BCME_OK); + +} + +#define GSPI_RESYNC_PATTERN 0x0 + +/* A resync pattern is a 32bit MOSI line with all zeros. Its a special command in gSPI. + * It resets the spi-bkplane logic so that all F1 related ping-pong buffer logic is + * synchronised and all queued resuests are cancelled. + */ +static int +bcmspi_resync_f1(sdioh_info_t *sd) +{ + uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0; + + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. 
+ */ + ASSERT(sd->wordlen == 4 || sd->wordlen == 2); + datalen = ROUNDUP(datalen, sd->wordlen); + + /* Start by copying command in the spi-outbuffer */ + *(uint32 *)spi_outbuf2 = cmd_arg; + + /* for Write, put the data into the output buffer */ + *(uint32 *)&spi_outbuf2[CMDLEN] = data; + + /* +4 for cmd, +4 for dstatus */ + spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8); + + /* Last 4bytes are dstatus. Device is configured to return status bits. */ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else { + sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + if (sd->card_dstatus) + sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus)); + + return (BCME_OK); +} + +uint32 dstatus_count = 0; + +static int +bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg) +{ + uint32 dstatus = sd->card_dstatus; + struct spierrstats_t *spierrstats = &sd->spierrstats; + int err = SUCCESS; + + sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus)); + + /* Store dstatus of last few gSPI transactions */ + spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus; + spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg; + dstatus_count++; + + if (sd->card_init_done == FALSE) + return err; + + if (dstatus & STATUS_DATA_NOT_AVAILABLE) { + spierrstats->dna++; + sd_trace(("Read data not available on F1 addr = 0x%x\n", + GFIELD(cmd_arg, SPI_REG_ADDR))); + /* Clear dna bit */ + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE); + } + + if (dstatus & STATUS_UNDERFLOW) { + spierrstats->rdunderflow++; + sd_err(("FIFO underflow happened due to current F2 read command.\n")); + } + + if (dstatus & STATUS_OVERFLOW) { + 
spierrstats->wroverflow++; + sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n")); + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW); + bcmspi_resync_f1(sd); + sd_err(("Recovering from F1 FIFO overflow.\n")); + } + + if (dstatus & STATUS_F2_INTR) { + spierrstats->f2interrupt++; + sd_trace(("Interrupt from F2. SW should clear corresponding IntStatus bits\n")); + } + + if (dstatus & STATUS_F3_INTR) { + spierrstats->f3interrupt++; + sd_err(("Interrupt from F3. SW should clear corresponding IntStatus bits\n")); + } + + if (dstatus & STATUS_HOST_CMD_DATA_ERR) { + spierrstats->hostcmddataerr++; + sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n")); + } + + if (dstatus & STATUS_F2_PKT_AVAILABLE) { + spierrstats->f2pktavailable++; + sd_trace(("Packet is available/ready in F2 TX FIFO\n")); + sd_trace(("Packet length = %d\n", sd->dwordmode ? + ((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) : + ((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT))); + } + + if (dstatus & STATUS_F3_PKT_AVAILABLE) { + spierrstats->f3pktavailable++; + sd_err(("Packet is available/ready in F3 TX FIFO\n")); + sd_err(("Packet length = %d\n", + (dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT)); + } + + return err; +} + +extern int +sdioh_abort(sdioh_info_t *sd, uint func) +{ + return 0; +} + +int +sdioh_start(sdioh_info_t *sd, int stage) +{ + return SUCCESS; +} + +int +sdioh_stop(sdioh_info_t *sd) +{ + return SUCCESS; +} + +int +sdioh_waitlockfree(sdioh_info_t *sd) +{ + return SUCCESS; +} + + +/* + * Private/Static work routines + */ +static int +bcmspi_host_init(sdioh_info_t *sd) +{ + + /* Default power on mode */ + sd->sd_mode = SDIOH_MODE_SPI; + sd->polled_mode = TRUE; + sd->host_init_done = TRUE; + sd->card_init_done = FALSE; + sd->adapter_slot = 1; + + return (SUCCESS); +} + +static int +get_client_blocksize(sdioh_info_t *sd) +{ + uint32 regdata[2]; + int status; + + /* Find 
F1/F2/F3 max packet size */ + if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG, + 8, regdata)) != SUCCESS) { + return status; + } + + sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n", + regdata[0], regdata[1])); + + sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2; + sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1])); + ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1); + + sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2; + sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2])); + ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2); + + sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2; + sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3])); + ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3); + + return 0; +} + +static int +bcmspi_client_init(sdioh_info_t *sd) +{ + uint32 status_en_reg = 0; + sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot)); + +#ifdef HSMODE + if (!spi_start_clock(sd, (uint16)sd_divisor)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#else + /* Start at ~400KHz clock rate for initialization */ + if (!spi_start_clock(sd, 128)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#endif /* HSMODE */ + + if (!bcmspi_host_device_init_adapt(sd)) { + sd_err(("bcmspi_host_device_init_adapt failed\n")); + return ERROR; + } + + if (!bcmspi_test_card(sd)) { + sd_err(("bcmspi_test_card failed\n")); + return ERROR; + } + + sd->num_funcs = SPI_MAX_IOFUNCS; + + get_client_blocksize(sd); + + /* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */ + bcmspi_resync_f1(sd); + + sd->dwordmode = FALSE; + + bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg); + + sd_trace(("%s: Enabling interrupt with dstatus \n", __FUNCTION__)); + status_en_reg |= INTR_WITH_STATUS; + + if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, + status_en_reg & 0xff) != SUCCESS) { + sd_err(("%s: 
Unable to set response delay for all fun's.\n", __FUNCTION__)); + return ERROR; + } + +#ifndef HSMODE + /* After configuring for High-Speed mode, set the desired clock rate. */ + if (!spi_start_clock(sd, 4)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#endif /* HSMODE */ + + /* check to see if the response delay needs to be programmed properly */ + { + uint32 f1_respdelay = 0; + bcmspi_card_regread(sd, 0, SPID_RESP_DELAY_F1, 1, &f1_respdelay); + if ((f1_respdelay == 0) || (f1_respdelay == 0xFF)) { + /* older sdiodevice core and has no separte resp delay for each of */ + sd_err(("older corerev < 4 so use the same resp delay for all funcs\n")); + sd->resp_delay_new = FALSE; + } + else { + /* older sdiodevice core and has no separte resp delay for each of */ + int ret_val; + sd->resp_delay_new = TRUE; + sd_err(("new corerev >= 4 so set the resp delay for each of the funcs\n")); + sd_trace(("resp delay for funcs f0(%d), f1(%d), f2(%d), f3(%d)\n", + GSPI_F0_RESP_DELAY, GSPI_F1_RESP_DELAY, + GSPI_F2_RESP_DELAY, GSPI_F3_RESP_DELAY)); + ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F0, 1, + GSPI_F0_RESP_DELAY); + if (ret_val != SUCCESS) { + sd_err(("%s: Unable to set response delay for F0\n", __FUNCTION__)); + return ERROR; + } + ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F1, 1, + GSPI_F1_RESP_DELAY); + if (ret_val != SUCCESS) { + sd_err(("%s: Unable to set response delay for F1\n", __FUNCTION__)); + return ERROR; + } + ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F2, 1, + GSPI_F2_RESP_DELAY); + if (ret_val != SUCCESS) { + sd_err(("%s: Unable to set response delay for F2\n", __FUNCTION__)); + return ERROR; + } + ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F3, 1, + GSPI_F3_RESP_DELAY); + if (ret_val != SUCCESS) { + sd_err(("%s: Unable to set response delay for F2\n", __FUNCTION__)); + return ERROR; + } + } + } + + + sd->card_init_done = TRUE; + + /* get the device rev to program the 
prop respdelays */ + + return SUCCESS; +} + +static int +bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode) +{ + uint32 regdata; + int status; + + if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG, + 4, ®data)) != SUCCESS) + return status; + + sd_trace(("In %s spih-ctrl = 0x%x \n", __FUNCTION__, regdata)); + + + if (hsmode == TRUE) { + sd_trace(("Attempting to enable High-Speed mode.\n")); + + if (regdata & HIGH_SPEED_MODE) { + sd_trace(("Device is already in High-Speed mode.\n")); + return status; + } else { + regdata |= HIGH_SPEED_MODE; + sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG)); + if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG, + 4, regdata)) != SUCCESS) { + return status; + } + } + } else { + sd_trace(("Attempting to disable High-Speed mode.\n")); + + if (regdata & HIGH_SPEED_MODE) { + regdata &= ~HIGH_SPEED_MODE; + sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG)); + if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG, + 4, regdata)) != SUCCESS) + return status; + } + else { + sd_trace(("Device is already in Low-Speed mode.\n")); + return status; + } + } + spi_controller_highspeed_mode(sd, hsmode); + + return TRUE; +} + +#define bcmspi_find_curr_mode(sd) { \ + sd->wordlen = 2; \ + status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, ®data); \ + regdata &= 0xff; \ + if ((regdata == 0xad) || (regdata == 0x5b) || \ + (regdata == 0x5d) || (regdata == 0x5a)) \ + break; \ + sd->wordlen = 4; \ + status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, ®data); \ + regdata &= 0xff; \ + if ((regdata == 0xad) || (regdata == 0x5b) || \ + (regdata == 0x5d) || (regdata == 0x5a)) \ + break; \ + sd_trace(("Silicon testability issue: regdata = 0x%x." 
\ + " Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata)); \ + OSL_DELAY(100000); \ +} + +#define INIT_ADAPT_LOOP 100 + +/* Adapt clock-phase-speed-bitwidth between host and device */ +static bool +bcmspi_host_device_init_adapt(sdioh_info_t *sd) +{ + uint32 wrregdata, regdata = 0; + int status; + int i; + + /* Due to a silicon testability issue, the first command from the Host + * to the device will get corrupted (first bit will be lost). So the + * Host should poll the device with a safe read request. ie: The Host + * should try to read F0 addr 0x14 using the Fixed address mode + * (This will prevent a unintended write command to be detected by device) + */ + for (i = 0; i < INIT_ADAPT_LOOP; i++) { + /* If device was not power-cycled it will stay in 32bit mode with + * response-delay-all bit set. Alternate the iteration so that + * read either with or without response-delay for F0 to succeed. + */ + bcmspi_find_curr_mode(sd); + sd->resp_delay_all = (i & 0x1) ? TRUE : FALSE; + + bcmspi_find_curr_mode(sd); + sd->dwordmode = TRUE; + + bcmspi_find_curr_mode(sd); + sd->dwordmode = FALSE; + } + + /* Bail out, device not detected */ + if (i == INIT_ADAPT_LOOP) + return FALSE; + + /* Softreset the spid logic */ + if ((sd->dwordmode) || (sd->wordlen == 4)) { + bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI); + bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, ®data); + sd_trace(("reset reg read = 0x%x\n", regdata)); + sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode, + sd->wordlen, sd->resp_delay_all)); + /* Restore default state after softreset */ + sd->wordlen = 2; + sd->dwordmode = FALSE; + } + + if (sd->wordlen == 4) { + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != + SUCCESS) + return FALSE; + if (regdata == TEST_RO_DATA_32BIT_LE) { + sd_trace(("Spid is already in 32bit LE mode. 
Value read = 0x%x\n", + regdata)); + sd_trace(("Spid power was left on.\n")); + } else { + sd_err(("Spid power was left on but signature read failed." + " Value read = 0x%x\n", regdata)); + return FALSE; + } + } else { + sd->wordlen = 2; + +#define CTRL_REG_DEFAULT 0x00010430 /* according to the host m/c */ + + wrregdata = (CTRL_REG_DEFAULT); + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata)); + +#ifndef HSMODE + wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY); + wrregdata &= ~HIGH_SPEED_MODE; + bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); +#endif /* HSMODE */ + + for (i = 0; i < INIT_ADAPT_LOOP; i++) { + if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) { + sd_trace(("0xfeedbead was leftshifted by 1-bit.\n")); + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, + ®data)) != SUCCESS) + return FALSE; + } + OSL_DELAY(1000); + } + +#if defined(CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH) + /* Change to host controller intr-polarity of active-high */ + wrregdata |= INTR_POLARITY; +#else + /* Change to host controller intr-polarity of active-low */ + wrregdata &= ~INTR_POLARITY; +#endif /* CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH */ + + sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n", + wrregdata)); + /* Change to 32bit mode */ + wrregdata |= WORD_LENGTH_32; + bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); + + /* Change command/data packaging in 32bit LE mode */ + sd->wordlen = 4; + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + + if (regdata == TEST_RO_DATA_32BIT_LE) { + sd_trace(("Read spid passed. Value read = 0x%x\n", regdata)); + sd_trace(("Spid had power-on cycle OR spi was soft-resetted \n")); + } else { + sd_err(("Stale spid reg values read as it was kept powered. 
Value read =" + "0x%x\n", regdata)); + return FALSE; + } + } + + + return TRUE; +} + +static bool +bcmspi_test_card(sdioh_info_t *sd) +{ + uint32 regdata; + int status; + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + + if (regdata == (TEST_RO_DATA_32BIT_LE)) + sd_trace(("32bit LE regdata = 0x%x\n", regdata)); + else { + sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata)); + return FALSE; + } + + +#define RW_PATTERN1 0xA0A1A2A3 +#define RW_PATTERN2 0x4B5B6B7B + + regdata = RW_PATTERN1; + if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) + return FALSE; + regdata = 0; + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, ®data)) != SUCCESS) + return FALSE; + if (regdata != RW_PATTERN1) { + sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n", + RW_PATTERN1, regdata)); + return FALSE; + } else + sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata)); + + regdata = RW_PATTERN2; + if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) + return FALSE; + regdata = 0; + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, ®data)) != SUCCESS) + return FALSE; + if (regdata != RW_PATTERN2) { + sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n", + RW_PATTERN2, regdata)); + return FALSE; + } else + sd_trace(("R/W spid passed. 
Value read = 0x%x\n", regdata)); + + return TRUE; +} + +static int +bcmspi_driver_init(sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + if ((bcmspi_host_init(sd)) != SUCCESS) { + return ERROR; + } + + if (bcmspi_client_init(sd) != SUCCESS) { + return ERROR; + } + + return SUCCESS; +} + +/* Read device reg */ +static int +bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + int status; + uint32 cmd_arg, dstatus; + + ASSERT(regsize); + + if (func == 2) + sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize); + + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + return SUCCESS; +} + +static int +bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + + int status; + uint32 cmd_arg; + uint32 dstatus; + + ASSERT(regsize); + + if (func == 2) + sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); /* Fixed access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize); + + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, 
cmd_arg, data, regsize)) != SUCCESS) + return status; + + sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *data)); + + bcmspi_cmd_getdstatus(sd, &dstatus); + sd_trace(("dstatus =0x%x\n", dstatus)); + return SUCCESS; +} + +/* write a device register */ +static int +bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) +{ + int status; + uint32 cmd_arg, dstatus; + + ASSERT(regsize); + + cmd_arg = 0; + + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize); + + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize, data)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus=0x%x\n", dstatus)); + + return SUCCESS; +} + +/* write a device register - 1 byte */ +static int +bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 dstatus; + uint32 data = (uint32)(*byte); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); + + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, data)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + return SUCCESS; +} + +void +bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 
*dstatus_buffer) +{ + *dstatus_buffer = sd->card_dstatus; +} + +/* 'data' is of type uint32 whereas other buffers are of type uint8 */ +static int +bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, + uint32 *data, uint32 datalen) +{ + uint32 i, j; + uint8 resp_delay = 0; + int err = SUCCESS; + uint32 hostlen; + uint32 spilen = 0; + uint32 dstatus_idx = 0; + uint16 templen, buslen, len, *ptr = NULL; + + sd_trace(("spi cmd = 0x%x\n", cmd_arg)); + + if (DWORDMODE_ON) { + spilen = GFIELD(cmd_arg, SPI_LEN); + if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_0) || + (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_1)) + dstatus_idx = spilen * 3; + + if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) && + (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) { + spilen = spilen << 2; + dstatus_idx = (spilen % 16) ? (16 - (spilen % 16)) : 0; + /* convert len to mod16 size */ + spilen = ROUNDUP(spilen, 16); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2)); + } + } + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. 
+ */ + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)spi_outbuf = SPISWAP_WD4(cmd_arg); + if (datalen & 0x3) + datalen += (4 - (datalen & 0x3)); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)spi_outbuf = SPISWAP_WD2(cmd_arg); + if (datalen & 0x1) + datalen++; + if (datalen < 4) + datalen = ROUNDUP(datalen, 4); + } else { + sd_err(("Host is %d bit spid, could not create SPI command.\n", + 8 * sd->wordlen)); + return ERROR; + } + + /* for Write, put the data into the output buffer */ + if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) { + /* We send len field of hw-header always a mod16 size, both from host and dongle */ + if (DWORDMODE_ON) { + if (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) { + ptr = (uint16 *)&data[0]; + templen = *ptr; + /* ASSERT(*ptr == ~*(ptr + 1)); */ + templen = ROUNDUP(templen, 16); + *ptr = templen; + sd_trace(("actual tx len = %d\n", (uint16)(~*(ptr+1)))); + } + } + + if (datalen != 0) { + for (i = 0; i < datalen/4; i++) { + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] = + SPISWAP_WD4(data[i]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] = + SPISWAP_WD2(data[i]); + } + } + } + } + + /* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */ + if ((GFIELD(cmd_arg, SPI_RW_FLAG) == 0)) { + int func = GFIELD(cmd_arg, SPI_FUNCTION); + switch (func) { + case 0: + if (sd->resp_delay_new) + resp_delay = GSPI_F0_RESP_DELAY; + else + resp_delay = sd->resp_delay_all ? F0_RESPONSE_DELAY : 0; + break; + case 1: + if (sd->resp_delay_new) + resp_delay = GSPI_F1_RESP_DELAY; + else + resp_delay = F1_RESPONSE_DELAY; + break; + case 2: + if (sd->resp_delay_new) + resp_delay = GSPI_F2_RESP_DELAY; + else + resp_delay = sd->resp_delay_all ? 
F2_RESPONSE_DELAY : 0; + break; + default: + ASSERT(0); + break; + } + /* Program response delay */ + if (sd->resp_delay_new == FALSE) + bcmspi_prog_resp_delay(sd, func, resp_delay); + } + + /* +4 for cmd and +4 for dstatus */ + hostlen = datalen + 8 + resp_delay; + hostlen += dstatus_idx; + hostlen += (4 - (hostlen & 0x3)); + spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen); + + /* for Read, get the data into the input buffer */ + if (datalen != 0) { + if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */ + for (j = 0; j < datalen/4; j++) { + if (sd->wordlen == 4) { /* 32bit spid */ + data[j] = SPISWAP_WD4(*(uint32 *)&spi_inbuf[j * 4 + + CMDLEN + resp_delay]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + data[j] = SPISWAP_WD2(*(uint32 *)&spi_inbuf[j * 4 + + CMDLEN + resp_delay]); + } + } + + if ((DWORDMODE_ON) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { + ptr = (uint16 *)&data[0]; + templen = *ptr; + buslen = len = ~(*(ptr + 1)); + buslen = ROUNDUP(buslen, 16); + /* populate actual len in hw-header */ + if (templen == buslen) + *ptr = len; + } + } + } + + /* Restore back the len field of the hw header */ + if (DWORDMODE_ON) { + if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) && + (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) { + ptr = (uint16 *)&data[0]; + *ptr = (uint16)(~*(ptr+1)); + } + } + + dstatus_idx += (datalen + CMDLEN + resp_delay); + /* Last 4bytes are dstatus. Device is configured to return status bits. 
*/ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf[dstatus_idx]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf[dstatus_idx]); + } else { + sd_err(("Host is %d bit machine, could not read SPI dstatus.\n", + 8 * sd->wordlen)); + return ERROR; + } + if (sd->card_dstatus == 0xffffffff) { + sd_err(("looks like not a GSPI device or device is not powered.\n")); + } + + err = bcmspi_update_stats(sd, cmd_arg); + + return err; + +} + +static int +bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data) +{ + int status; + uint32 cmd_arg; + bool write = rw == SDIOH_READ ? 0 : 1; + uint retries = 0; + + bool enable; + uint32 spilen; + + cmd_arg = 0; + + ASSERT(nbytes); + ASSERT(nbytes <= sd->client_block_size[func]); + + if (write) sd->t_cnt++; else sd->r_cnt++; + + if (func == 2) { + /* Frame len check limited by gSPI. */ + if ((nbytes > 2000) && write) { + sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes)); + } + /* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. */ + /* If F2 fifo on device is not ready to receive data, don't do F2 transfer */ + if (write) { + uint32 dstatus; + /* check F2 ready with cached one */ + bcmspi_cmd_getdstatus(sd, &dstatus); + if ((dstatus & STATUS_F2_RX_READY) == 0) { + retries = WAIT_F2RXFIFORDY; + enable = 0; + while (retries-- && !enable) { + OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000); + bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4, + &dstatus); + if (dstatus & STATUS_F2_RX_READY) + enable = TRUE; + } + if (!enable) { + struct spierrstats_t *spierrstats = &sd->spierrstats; + spierrstats->f2rxnotready++; + sd_err(("F2 FIFO is not ready to receive data.\n")); + return ERROR; + } + sd_trace(("No of retries on F2 ready %d\n", + (WAIT_F2RXFIFORDY - retries))); + } + } + } + + /* F2 transfers happen on 0 addr */ + addr = (func == 2) ? 
0 : addr; + + /* In pio mode buffer is read using fixed address fifo in func 1 */ + if ((func == 1) && (fifo)) + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); + else + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); + + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write); + spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes); + if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { + /* convert len to mod4 size */ + spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2)); + } else + cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen); + + if ((func == 2) && (fifo == 1)) { + sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, write ? "Wr" : "Rd", func, "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + } + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, write ? "Wd" : "Rd", func, "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, nbytes)) != SUCCESS) { + sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, + (write ? 
"write" : "read"))); + return status; + } + + /* gSPI expects that hw-header-len is equal to spi-command-len */ + if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) { + ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff)); + ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16))); + } + + if ((nbytes > 2000) && !write) { + sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes)); + } + + return SUCCESS; +} + +/* Reset and re-initialize the device */ +int +sdioh_sdio_reset(sdioh_info_t *si) +{ + si->card_init_done = FALSE; + return bcmspi_client_init(si); +} + +SDIOH_API_RC +sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio) +{ + return SDIOH_API_RC_FAIL; +} + +SDIOH_API_RC +sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab) +{ + return SDIOH_API_RC_FAIL; +} + +bool +sdioh_gpioin(sdioh_info_t *sd, uint32 gpio) +{ + return FALSE; +} + +SDIOH_API_RC +sdioh_gpio_init(sdioh_info_t *sd) +{ + return SDIOH_API_RC_FAIL; +} diff --git a/drivers/net/wireless/bcmdhd/bcmutils.c b/drivers/amlogic/wifi/bcmdhd/bcmutils.c similarity index 82% rename from drivers/net/wireless/bcmdhd/bcmutils.c rename to drivers/amlogic/wifi/bcmdhd/bcmutils.c index 765a3ded6c984..badb223754522 100644 --- a/drivers/net/wireless/bcmdhd/bcmutils.c +++ b/drivers/amlogic/wifi/bcmdhd/bcmutils.c @@ -1,8 +1,30 @@ /* * Driver O/S-independent utility routines * - * $Copyright Open Broadcom Corporation$ - * $Id: bcmutils.c 496061 2014-08-11 06:14:48Z $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with 
independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmutils.c 591286 2015-10-07 11:59:26Z $ */ #include @@ -42,22 +64,6 @@ void *_bcmutils_dummy_fn = NULL; -#ifdef CUSTOM_DSCP_TO_PRIO_MAPPING -#define CUST_IPV4_TOS_PREC_MASK 0x3F -#define DCSP_MAX_VALUE 64 -/* 0:BE,1:BK,2:RESV(BK):,3:EE,:4:CL,5:VI,6:VO,7:NC */ -int dscp2priomap[DCSP_MAX_VALUE]= -{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, /* BK->BE */ - 2, 0, 0, 0, 0, 0, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 0, - 4, 0, 0, 0, 0, 0, 0, 0, - 5, 0, 0, 0, 0, 0, 0, 0, - 6, 0, 0, 0, 0, 0, 0, 0, - 7, 0, 0, 0, 0, 0, 0, 0 -}; -#endif /* CUSTOM_DSCP_TO_PRIO_MAPPING */ #ifdef BCMDRIVER @@ -735,7 +741,7 @@ prpkt(const char *msg, osl_t *osh, void *p0) for (p = p0; p; p = PKTNEXT(osh, p)) prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p)); } -#endif +#endif /* Takes an Ethernet frame and sets out-of-bound PKTPRIO. * Also updates the inplace vlan tag if requested. 
@@ -792,6 +798,11 @@ pktsetprio(void *pkt, bool update_vtag) evh->vlan_tag = hton16(vlan_tag); rc |= PKTPRIO_UPD; } +#ifdef DHD_LOSSLESS_ROAMING + } else if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { + priority = PRIO_8021D_NC; + rc = PKTPRIO_DSCP; +#endif /* DHD_LOSSLESS_ROAMING */ } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) || (eh->ether_type == hton16(ETHER_TYPE_IPV6))) { uint8 *ip_body = pktdata + sizeof(struct ether_header); @@ -815,12 +826,7 @@ pktsetprio(void *pkt, bool update_vtag) priority = PRIO_8021D_EE; break; default: -#ifndef CUSTOM_DSCP_TO_PRIO_MAPPING priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); -#else - priority = (int)dscp2priomap[((tos_tc >> IPV4_TOS_DSCP_SHIFT) - & CUST_IPV4_TOS_PREC_MASK)]; -#endif break; } @@ -832,6 +838,51 @@ pktsetprio(void *pkt, bool update_vtag) return (rc | priority); } +/* lookup user priority for specified DSCP */ +static uint8 +dscp2up(uint8 *up_table, uint8 dscp) +{ + uint8 user_priority = 255; + + /* lookup up from table if parameters valid */ + if (up_table != NULL && dscp < UP_TABLE_MAX) { + user_priority = up_table[dscp]; + } + + /* 255 is unused value so return up from dscp */ + if (user_priority == 255) { + user_priority = dscp >> (IPV4_TOS_PREC_SHIFT - IPV4_TOS_DSCP_SHIFT); + } + + return user_priority; +} + +/* set user priority by QoS Map Set table (UP table), table size is UP_TABLE_MAX */ +uint BCMFASTPATH +pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag) +{ + if (up_table) { + uint8 *pktdata; + uint pktlen; + uint8 dscp; + uint user_priority = 0; + uint rc = 0; + + pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt); + pktlen = PKTLEN(OSH_NULL, pkt); + + if (pktgetdscp(pktdata, pktlen, &dscp)) { + rc = PKTPRIO_DSCP; + user_priority = dscp2up(up_table, dscp); + PKTSETPRIO(pkt, user_priority); + } + + return (rc | user_priority); + } else { + return pktsetprio(pkt, update_vtag); + } +} + /* Returns TRUE and DSCP if IP header found, FALSE otherwise. 
*/ bool BCMFASTPATH @@ -983,6 +1034,415 @@ bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set) #endif /* BCMDRIVER */ +#ifdef BCM_OBJECT_TRACE + +#define BCM_OBJECT_MERGE_SAME_OBJ 0 + +/* some place may add / remove the object to trace list for Linux: */ +/* add: osl_alloc_skb dev_alloc_skb skb_realloc_headroom dhd_start_xmit */ +/* remove: osl_pktfree dev_kfree_skb netif_rx */ + +#define BCM_OBJDBG_COUNT (1024 * 100) +static spinlock_t dbgobj_lock; +#define BCM_OBJDBG_LOCK_INIT() spin_lock_init(&dbgobj_lock) +#define BCM_OBJDBG_LOCK_DESTROY() +#define BCM_OBJDBG_LOCK spin_lock_irqsave +#define BCM_OBJDBG_UNLOCK spin_unlock_irqrestore + +#define BCM_OBJDBG_ADDTOHEAD 0 +#define BCM_OBJDBG_ADDTOTAIL 1 + +#define BCM_OBJDBG_CALLER_LEN 32 +struct bcm_dbgobj { + struct bcm_dbgobj *prior; + struct bcm_dbgobj *next; + uint32 flag; + void *obj; + uint32 obj_sn; + uint32 obj_state; + uint32 line; + char caller[BCM_OBJDBG_CALLER_LEN]; +}; + +static struct bcm_dbgobj *dbgobj_freehead = NULL; +static struct bcm_dbgobj *dbgobj_freetail = NULL; +static struct bcm_dbgobj *dbgobj_objhead = NULL; +static struct bcm_dbgobj *dbgobj_objtail = NULL; + +static uint32 dbgobj_sn = 0; +static int dbgobj_count = 0; +static struct bcm_dbgobj bcm_dbg_objs[BCM_OBJDBG_COUNT]; + +void +bcm_object_trace_init(void) +{ + int i = 0; + BCM_OBJDBG_LOCK_INIT(); + memset(&bcm_dbg_objs, 0x00, sizeof(struct bcm_dbgobj) * BCM_OBJDBG_COUNT); + dbgobj_freehead = &bcm_dbg_objs[0]; + dbgobj_freetail = &bcm_dbg_objs[BCM_OBJDBG_COUNT - 1]; + + for (i = 0; i < BCM_OBJDBG_COUNT; ++i) { + bcm_dbg_objs[i].next = (i == (BCM_OBJDBG_COUNT - 1)) ? + dbgobj_freehead : &bcm_dbg_objs[i + 1]; + bcm_dbg_objs[i].prior = (i == 0) ? 
+ dbgobj_freetail : &bcm_dbg_objs[i - 1]; + } +} + +void +bcm_object_trace_deinit(void) +{ + if (dbgobj_objhead || dbgobj_objtail) { + printf("%s: not all objects are released\n", __FUNCTION__); + ASSERT(0); + } + BCM_OBJDBG_LOCK_DESTROY(); +} + +static void +bcm_object_rm_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj) +{ + if ((dbgobj == *head) && (dbgobj == *tail)) { + *head = NULL; + *tail = NULL; + } else if (dbgobj == *head) { + *head = (*head)->next; + } else if (dbgobj == *tail) { + *tail = (*tail)->prior; + } + dbgobj->next->prior = dbgobj->prior; + dbgobj->prior->next = dbgobj->next; +} + +static void +bcm_object_add_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj, int addtotail) +{ + if (!(*head) && !(*tail)) { + *head = dbgobj; + *tail = dbgobj; + dbgobj->next = dbgobj; + dbgobj->prior = dbgobj; + } else if ((*head) && (*tail)) { + (*tail)->next = dbgobj; + (*head)->prior = dbgobj; + dbgobj->next = *head; + dbgobj->prior = *tail; + if (addtotail == BCM_OBJDBG_ADDTOTAIL) + *tail = dbgobj; + else + *head = dbgobj; + } else { + ASSERT(0); /* can't be this case */ + } +} + +static INLINE void +bcm_object_movetoend(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj, int movetotail) +{ + if ((*head) && (*tail)) { + if (movetotail == BCM_OBJDBG_ADDTOTAIL) { + if (dbgobj != (*tail)) { + bcm_object_rm_list(head, tail, dbgobj); + bcm_object_add_list(head, tail, dbgobj, movetotail); + } + } else { + if (dbgobj != (*head)) { + bcm_object_rm_list(head, tail, dbgobj); + bcm_object_add_list(head, tail, dbgobj, movetotail); + } + } + } else { + ASSERT(0); /* can't be this case */ + } +} + +void +bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + if (opt == BCM_OBJDBG_ADD_PKT || + opt == BCM_OBJDBG_ADD) { + dbgobj = 
dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + printf("%s: obj %p allocated from %s(%d)," + " allocate again from %s(%d)\n", + __FUNCTION__, dbgobj->obj, + dbgobj->caller, dbgobj->line, + caller, line); + ASSERT(0); + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + +#if BCM_OBJECT_MERGE_SAME_OBJ + dbgobj = dbgobj_freetail; + while (dbgobj) { + if (dbgobj->obj == obj) { + goto FREED_ENTRY_FOUND; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_freetail) + break; + } +#endif /* BCM_OBJECT_MERGE_SAME_OBJ */ + + dbgobj = dbgobj_freehead; +#if BCM_OBJECT_MERGE_SAME_OBJ +FREED_ENTRY_FOUND: +#endif /* BCM_OBJECT_MERGE_SAME_OBJ */ + if (!dbgobj) { + printf("%s: already got %d objects ?????????????????????\n", + __FUNCTION__, BCM_OBJDBG_COUNT); + ASSERT(0); + goto EXIT; + } + + bcm_object_rm_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj); + dbgobj->obj = obj; + strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN); + dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0'; + dbgobj->line = line; + dbgobj->flag = 0; + if (opt == BCM_OBJDBG_ADD_PKT) { + dbgobj->obj_sn = dbgobj_sn++; + dbgobj->obj_state = 0; + /* first 4 bytes is pkt sn */ + if (((unsigned long)PKTTAG(obj)) & 0x3) + printf("pkt tag address not aligned by 4: %p\n", PKTTAG(obj)); + *(uint32*)PKTTAG(obj) = dbgobj->obj_sn; + } + bcm_object_add_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj, + BCM_OBJDBG_ADDTOTAIL); + + dbgobj_count++; + + } else if (opt == BCM_OBJDBG_REMOVE) { + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + if (dbgobj->flag) { + printf("%s: rm flagged obj %p flag 0x%08x from %s(%d)\n", + __FUNCTION__, obj, dbgobj->flag, caller, line); + } + bcm_object_rm_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj); + memset(dbgobj->caller, 0x00, BCM_OBJDBG_CALLER_LEN); + strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN); + dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0'; + dbgobj->line = line; + 
bcm_object_add_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj, + BCM_OBJDBG_ADDTOTAIL); + dbgobj_count--; + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + + dbgobj = dbgobj_freetail; + while (dbgobj && dbgobj->obj) { + if (dbgobj->obj == obj) { + printf("%s: obj %p already freed from from %s(%d)," + " try free again from %s(%d)\n", + __FUNCTION__, obj, + dbgobj->caller, dbgobj->line, + caller, line); + //ASSERT(0); /* release same obj more than one time? */ + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_freetail) + break; + } + + printf("%s: ################### release none-existing obj %p from %s(%d)\n", + __FUNCTION__, obj, caller, line); + //ASSERT(0); /* release same obj more than one time? */ + + } + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +void +bcm_object_trace_upd(void *obj, void *obj_new) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + dbgobj->obj = obj_new; + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +void +bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn, + const char *caller, int line) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if ((dbgobj->obj == obj) && + ((!chksn) || (dbgobj->obj_sn == sn))) { + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + + dbgobj = dbgobj_freetail; + while (dbgobj) { 
+ if ((dbgobj->obj == obj) && + ((!chksn) || (dbgobj->obj_sn == sn))) { + printf("%s: (%s:%d) obj %p (sn %d state %d) was freed from %s(%d)\n", + __FUNCTION__, caller, line, + dbgobj->obj, dbgobj->obj_sn, dbgobj->obj_state, + dbgobj->caller, dbgobj->line); + goto EXIT; + } + else if (dbgobj->obj == NULL) { + break; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_freetail) + break; + } + + printf("%s: obj %p not found, check from %s(%d), chksn %s, sn %d\n", + __FUNCTION__, obj, caller, line, chksn ? "yes" : "no", sn); + dbgobj = dbgobj_objtail; + while (dbgobj) { + printf("%s: (%s:%d) obj %p sn %d was allocated from %s(%d)\n", + __FUNCTION__, caller, line, + dbgobj->obj, dbgobj->obj_sn, dbgobj->caller, dbgobj->line); + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +void +bcm_object_feature_set(void *obj, uint32 type, uint32 value) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + if (type == BCM_OBJECT_FEATURE_FLAG) { + if (value & BCM_OBJECT_FEATURE_CLEAR) + dbgobj->flag &= ~(value); + else + dbgobj->flag |= (value); + } else if (type == BCM_OBJECT_FEATURE_PKT_STATE) { + dbgobj->obj_state = value; + } + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + + printf("%s: obj %p not found in active list\n", __FUNCTION__, obj); + ASSERT(0); + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +int +bcm_object_feature_get(void *obj, uint32 type, uint32 value) +{ + int rtn = 0; + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj 
== obj) { + if (type == BCM_OBJECT_FEATURE_FLAG) { + rtn = (dbgobj->flag & value) & (~BCM_OBJECT_FEATURE_CLEAR); + } + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + + printf("%s: obj %p not found in active list\n", __FUNCTION__, obj); + ASSERT(0); + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return rtn; +} + +#endif /* BCM_OBJECT_TRACE */ uint8 * bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst) @@ -1392,7 +1852,9 @@ bcm_parse_tlvs(void *buf, int buflen, uint key) bcm_tlv_t *elt; int totlen; - elt = (bcm_tlv_t*)buf; + if ((elt = (bcm_tlv_t*)buf) == NULL) { + return NULL; + } totlen = buflen; /* find tagged parameter */ @@ -1536,14 +1998,12 @@ bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len) /* indicate the str was too short */ if (flags != 0) { - if (len < 2) - p -= 2 - len; /* overwrite last char */ p += snprintf(p, 2, ">"); } return (int)(p - buf); } -#endif +#endif /* print bytes formatted as hex to a string. 
return the resulting string length */ int @@ -1575,7 +2035,7 @@ prhex(const char *msg, uchar *buf, uint nbytes) p = line; for (i = 0; i < nbytes; i++) { if (i % 16 == 0) { - nchar = snprintf(p, len, " %04d: ", i); /* line prefix */ + nchar = snprintf(p, len, " %04x: ", i); /* line prefix */ p += nchar; len -= nchar; } @@ -1605,18 +2065,11 @@ static const char *crypto_algo_names[] = { "AES_CCM", "AES_OCB_MSDU", "AES_OCB_MPDU", -#ifdef BCMCCX - "CKIP", - "CKIP_MMH", - "WEP_MMH", - "NALG", -#else "NALG", "UNDEF", "UNDEF", "UNDEF", -#endif /* BCMCCX */ - "WAPI", + "UNDEF" "PMK", "BIP", "AES_GCM", @@ -1713,7 +2166,7 @@ bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc } uint -bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen) +bcm_mkiovar(const char *name, char *data, uint datalen, char *buf, uint buflen) { uint len; @@ -1832,7 +2285,10 @@ bcm_bitcount(uint8 *bitmap, uint length) return bitcount; } -#ifdef BCMDRIVER +#if defined(BCMDRIVER) || defined(WL_UNITTEST) + +/* triggers bcm_bprintf to print to kernel log */ +bool bcm_bprintf_bypass = FALSE; /* Initialization of bcmstrbuf structure */ void @@ -1852,6 +2308,10 @@ bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) va_start(ap, fmt); r = vsnprintf(b->buf, b->size, fmt, ap); + if (bcm_bprintf_bypass == TRUE) { + printf("%s", b->buf); + goto exit; + } /* Non Ansi C99 compliant returns -1, * Ansi compliant return r >= b->size, @@ -1867,13 +2327,14 @@ bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) 
b->buf += r; } +exit: va_end(ap); return r; } void -bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len) +bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, const uint8 *buf, int len) { int i; @@ -1989,9 +2450,9 @@ bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len) return (int)(p - buf); } -#endif +#endif -#endif /* BCMDRIVER */ +#endif /* BCMDRIVER || WL_UNITTEST */ /* * ProcessVars:Takes a buffer of "=\n" lines read from a file and ending in a NUL. @@ -2179,11 +2640,11 @@ set_bitrange(void *array, uint start, uint end, uint maxbit) } void -bcm_bitprint32(const uint32 u32) +bcm_bitprint32(const uint32 u32arg) { int i; for (i = NBITS(uint32) - 1; i >= 0; i--) { - isbitset(u32, i) ? printf("1") : printf("0"); + isbitset(u32arg, i) ? printf("1") : printf("0"); if ((i % NBBY) == 0) printf(" "); } printf("\n"); @@ -2209,8 +2670,7 @@ bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum) return ((uint16)~sum); } - -#ifdef BCMDRIVER +#if defined(BCMDRIVER) && !defined(_CFEZ_) /* * Hierarchical Multiword bitmap based small id allocator. * @@ -2242,7 +2702,7 @@ bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum) * with savings in not having to use an indirect access, had it been dynamically * allocated. 
*/ -#define BCM_MWBMAP_ITEMS_MAX (4 * 1024) /* May increase to 16K */ +#define BCM_MWBMAP_ITEMS_MAX (64 * 1024) /* May increase to 64K */ #define BCM_MWBMAP_BITS_WORD (NBITS(uint32)) #define BCM_MWBMAP_WORDS_MAX (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD) @@ -2269,13 +2729,13 @@ bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum) #define BCM_MWBMAP_AUDIT(mwb) do {} while (0) #define MWBMAP_ASSERT(exp) do {} while (0) #define MWBMAP_DBG(x) -#endif /* !BCM_MWBMAP_DEBUG */ +#endif /* !BCM_MWBMAP_DEBUG */ typedef struct bcm_mwbmap { /* Hierarchical multiword bitmap allocator */ uint16 wmaps; /* Total number of words in free wd bitmap */ uint16 imaps; /* Total number of words in free id bitmap */ - int16 ifree; /* Count of free indices. Used only in audits */ + int32 ifree; /* Count of free indices. Used only in audits */ uint16 total; /* Total indices managed by multiword bitmap */ void * magic; /* Audit handle parameter from user */ @@ -2319,7 +2779,7 @@ bcm_mwbmap_init(osl_t *osh, uint32 items_max) /* Initialize runtime multiword bitmap state */ mwbmap_p->imaps = (uint16)words; - mwbmap_p->ifree = (int16)items_max; + mwbmap_p->ifree = (int32)items_max; mwbmap_p->total = (uint16)items_max; /* Setup magic, for use in audit of handle */ @@ -2672,10 +3132,10 @@ bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl) /* Simple 16bit Id allocator using a stack implementation. 
*/ typedef struct id16_map { - uint16 total; /* total number of ids managed by allocator */ - uint16 start; /* start value of 16bit ids to be managed */ uint32 failures; /* count of failures */ void *dbg; /* debug placeholder */ + uint16 total; /* total number of ids managed by allocator */ + uint16 start; /* start value of 16bit ids to be managed */ int stack_idx; /* index into stack of available ids */ uint16 stack[0]; /* stack of 16 bit ids */ } id16_map_t; @@ -2706,7 +3166,12 @@ id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16) id16_map_t * id16_map; ASSERT(total_ids > 0); - ASSERT((start_val16 + total_ids) < ID16_INVALID); + + /* A start_val16 of ID16_UNDEFINED, allows the caller to fill the id16 map + * with random values. + */ + ASSERT((start_val16 == ID16_UNDEFINED) || + (start_val16 + total_ids) < ID16_INVALID); id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids)); if (id16_map == NULL) { @@ -2718,24 +3183,32 @@ id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16) id16_map->failures = 0; id16_map->dbg = NULL; - /* Populate stack with 16bit id values, commencing with start_val16 */ - id16_map->stack_idx = 0; - val16 = start_val16; + /* + * Populate stack with 16bit id values, commencing with start_val16. + * if start_val16 is ID16_UNDEFINED, then do not populate the id16 map. 
+ */ + id16_map->stack_idx = -1; - for (idx = 0; idx < total_ids; idx++, val16++) { - id16_map->stack_idx = idx; - id16_map->stack[id16_map->stack_idx] = val16; + if (id16_map->start != ID16_UNDEFINED) { + val16 = start_val16; + + for (idx = 0; idx < total_ids; idx++, val16++) { + id16_map->stack_idx = idx; + id16_map->stack[id16_map->stack_idx] = val16; + } } #if defined(BCM_DBG) && defined(BCM_DBG_ID16) - id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids)); + if (id16_map->start != ID16_UNDEFINED) { + id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids)); - if (id16_map->dbg) { - id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + if (id16_map->dbg) { + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; - id16_map_dbg->total = total_ids; - for (idx = 0; idx < total_ids; idx++) { - id16_map_dbg->avail[idx] = TRUE; + id16_map_dbg->total = total_ids; + for (idx = 0; idx < total_ids; idx++) { + id16_map_dbg->avail[idx] = TRUE; + } } } #endif /* BCM_DBG && BCM_DBG_ID16 */ @@ -2777,7 +3250,11 @@ id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16) id16_map_t * id16_map; ASSERT(total_ids > 0); - ASSERT((start_val16 + total_ids) < ID16_INVALID); + /* A start_val16 of ID16_UNDEFINED, allows the caller to fill the id16 map + * with random values. 
+ */ + ASSERT((start_val16 == ID16_UNDEFINED) || + (start_val16 + total_ids) < ID16_INVALID); id16_map = (id16_map_t *)id16_map_hndl; if (id16_map == NULL) { @@ -2789,21 +3266,26 @@ id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16) id16_map->failures = 0; /* Populate stack with 16bit id values, commencing with start_val16 */ - id16_map->stack_idx = 0; - val16 = start_val16; + id16_map->stack_idx = -1; + + if (id16_map->start != ID16_UNDEFINED) { + val16 = start_val16; - for (idx = 0; idx < total_ids; idx++, val16++) { - id16_map->stack_idx = idx; - id16_map->stack[id16_map->stack_idx] = val16; + for (idx = 0; idx < total_ids; idx++, val16++) { + id16_map->stack_idx = idx; + id16_map->stack[id16_map->stack_idx] = val16; + } } #if defined(BCM_DBG) && defined(BCM_DBG_ID16) - if (id16_map->dbg) { - id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + if (id16_map->start != ID16_UNDEFINED) { + if (id16_map->dbg) { + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; - id16_map_dbg->total = total_ids; - for (idx = 0; idx < total_ids; idx++) { - id16_map_dbg->avail[idx] = TRUE; + id16_map_dbg->total = total_ids; + for (idx = 0; idx < total_ids; idx++) { + id16_map_dbg->avail[idx] = TRUE; + } } } #endif /* BCM_DBG && BCM_DBG_ID16 */ @@ -2830,8 +3312,8 @@ id16_map_alloc(void * id16_map_hndl) id16_map->stack_idx--; #if defined(BCM_DBG) && defined(BCM_DBG_ID16) - - ASSERT(val16 < (id16_map->start + id16_map->total)); + ASSERT((id16_map->start == ID16_UNDEFINED) || + (val16 < (id16_map->start + id16_map->total))); if (id16_map->dbg) { /* Validate val16 */ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; @@ -2855,8 +3337,8 @@ id16_map_free(void * id16_map_hndl, uint16 val16) id16_map = (id16_map_t *)id16_map_hndl; #if defined(BCM_DBG) && defined(BCM_DBG_ID16) - - ASSERT(val16 < (id16_map->start + id16_map->total)); + ASSERT((id16_map->start == ID16_UNDEFINED) || + (val16 < (id16_map->start + id16_map->total))); 
if (id16_map->dbg) { /* Validate val16 */ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; @@ -2888,7 +3370,12 @@ id16_map_audit(void * id16_map_hndl) id16_map = (id16_map_t *)id16_map_hndl; - ASSERT((id16_map->stack_idx > 0) && (id16_map->stack_idx < id16_map->total)); + ASSERT(id16_map->stack_idx >= -1); + ASSERT(id16_map->stack_idx < (int)id16_map->total); + + if (id16_map->start == ID16_UNDEFINED) + goto done; + for (idx = 0; idx <= id16_map->stack_idx; idx++) { ASSERT(id16_map->stack[idx] >= id16_map->start); ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total)); @@ -2899,7 +3386,7 @@ id16_map_audit(void * id16_map_hndl) if (((id16_map_dbg_t *)(id16_map->dbg))->avail[val16] != TRUE) { insane |= 1; ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n", - id16_map_hndl, idx, val16)); + id16_map_hndl, idx, val16)); } } #endif /* BCM_DBG && BCM_DBG_ID16 */ @@ -2915,17 +3402,19 @@ id16_map_audit(void * id16_map_hndl) if (avail && (avail != (id16_map->stack_idx + 1))) { insane |= 1; ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n", - id16_map_hndl, avail, id16_map->stack_idx)); + id16_map_hndl, avail, id16_map->stack_idx)); } } #endif /* BCM_DBG && BCM_DBG_ID16 */ +done: + /* invoke any other system audits */ return (!!insane); } /* END: Simple id16 allocator */ -#endif /* BCMDRIVER */ +#endif /* calculate a >> b; and returns only lower 32 bits */ void @@ -3004,7 +3493,7 @@ void counter_printlog(counter_tbl_t *ctr_tbl) #endif /* OSL_SYSUPTIME_SUPPORT == TRUE */ #endif /* DEBUG_COUNTER */ -#ifdef BCMDRIVER +#if defined(BCMDRIVER) && !defined(_CFEZ_) void dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size) { @@ -3024,15 +3513,13 @@ dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size) mem_size = sizeof(dll_pool_t) + (elems_max * elem_size); - if ((dll_pool_p = (dll_pool_t *)MALLOC(osh, mem_size)) == NULL) { + if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, mem_size)) == NULL) { 
printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n", elems_max, elem_size); ASSERT(0); return dll_pool_p; } - bzero(dll_pool_p, mem_size); - dll_init(&dll_pool_p->free_list); dll_pool_p->elems_max = elems_max; dll_pool_p->elem_size = elem_size; @@ -3083,4 +3570,4 @@ dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p) dll_pool_p->free_count += 1; } -#endif /* BCMDRIVER */ +#endif diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.c b/drivers/amlogic/wifi/bcmdhd/bcmwifi_channels.c similarity index 93% rename from drivers/net/wireless/bcmdhd/bcmwifi_channels.c rename to drivers/amlogic/wifi/bcmdhd/bcmwifi_channels.c index 4a848d2674de1..be884cc33cc12 100644 --- a/drivers/net/wireless/bcmdhd/bcmwifi_channels.c +++ b/drivers/amlogic/wifi/bcmdhd/bcmwifi_channels.c @@ -3,8 +3,30 @@ * Contents are wifi-specific, used by any kernel or app-level * software that might want wifi things as it grows. * - * $Copyright Open Broadcom Corporation$ - * $Id: bcmwifi_channels.c 309193 2012-01-19 00:03:57Z $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmwifi_channels.c 591285 2015-10-07 11:56:29Z $ */ #include @@ -107,7 +129,11 @@ static const char *wf_chspec_bw_str[] = "80", "160", "80+80", +#ifdef WL11ULB + "2.5" +#else /* WL11ULB */ "na" +#endif /* WL11ULB */ }; static const uint8 wf_chspec_bw_mhz[] = @@ -331,11 +357,10 @@ wf_chspec_aton(const char *a) /* parse channel num or band */ if (!read_uint(&a, &num)) return 0; - /* if we are looking at a 'g', then the first number was a band */ c = tolower((int)a[0]); if (c == 'g') { - a ++; /* consume the char */ + a++; /* consume the char */ /* band must be "2" or "5" */ if (num == 2) @@ -382,7 +407,13 @@ wf_chspec_aton(const char *a) return 0; /* convert to chspec value */ - if (bw == 20) { + if (bw == 2) { + chspec_bw = WL_CHANSPEC_BW_2P5; + } else if (bw == 5) { + chspec_bw = WL_CHANSPEC_BW_5; + } else if (bw == 10) { + chspec_bw = WL_CHANSPEC_BW_10; + } else if (bw == 20) { chspec_bw = WL_CHANSPEC_BW_20; } else if (bw == 40) { chspec_bw = WL_CHANSPEC_BW_40; @@ -396,7 +427,8 @@ wf_chspec_aton(const char *a) /* So far we have g/ * Can now be followed by u/l if bw = 40, - * or '+80' if bw = 80, to make '80+80' bw. + * or '+80' if bw = 80, to make '80+80' bw, + * or '.5' if bw = 2.5 to make '2.5' bw . */ c = tolower((int)a[0]); @@ -413,7 +445,7 @@ wf_chspec_aton(const char *a) /* check for 80+80 */ if (c == '+') { /* 80+80 */ - static const char *plus80 = "80/"; + const char plus80[] = "80/"; /* must be looking at '+80/' * check and consume this string. 
@@ -424,7 +456,7 @@ wf_chspec_aton(const char *a) /* consume the '80/' string */ for (i = 0; i < 3; i++) { - if (*a++ != *plus80++) { + if (*a++ != plus80[i]) { return 0; } } @@ -441,6 +473,19 @@ wf_chspec_aton(const char *a) /* read secondary 80MHz channel */ if (!read_uint(&a, &ch2)) return 0; + } else if (c == '.') { + /* 2.5 */ + /* must be looking at '.5' + * check and consume this string. + */ + chspec_bw = WL_CHANSPEC_BW_2P5; + + a ++; /* consume the char '.' */ + + /* consume the '5' string */ + if (*a++ != '5') { + return 0; + } } done_read: @@ -473,7 +518,7 @@ wf_chspec_aton(const char *a) } } /* if the bw is 20, center and sideband are trivial */ - else if (chspec_bw == WL_CHANSPEC_BW_20) { + else if (BW_LE20(chspec_bw)) { chspec_ch = ctl_ch; chspec_sb = WL_CHANSPEC_CTL_SB_NONE; } @@ -565,8 +610,7 @@ wf_chspec_malformed(chanspec_t chanspec) /* must be 2G or 5G band */ if (CHSPEC_IS2G(chanspec)) { /* must be valid bandwidth */ - if (chspec_bw != WL_CHANSPEC_BW_20 && - chspec_bw != WL_CHANSPEC_BW_40) { + if (!BW_LE40(chspec_bw)) { return TRUE; } } else if (CHSPEC_IS5G(chanspec)) { @@ -579,9 +623,7 @@ wf_chspec_malformed(chanspec_t chanspec) if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS) return TRUE; - } else if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40 || - chspec_bw == WL_CHANSPEC_BW_80 || chspec_bw == WL_CHANSPEC_BW_160) { - + } else if (BW_LE160(chspec_bw)) { if (chspec_ch > MAXCHANNEL) { return TRUE; } @@ -595,7 +637,7 @@ wf_chspec_malformed(chanspec_t chanspec) } /* side band needs to be consistent with bandwidth */ - if (chspec_bw == WL_CHANSPEC_BW_20) { + if (BW_LE20(chspec_bw)) { if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL) return TRUE; } else if (chspec_bw == WL_CHANSPEC_BW_40) { @@ -627,7 +669,7 @@ wf_chspec_valid(chanspec_t chanspec) if (CHSPEC_IS2G(chanspec)) { /* must be valid bandwidth and channel range */ - if (chspec_bw == WL_CHANSPEC_BW_20) { + if (BW_LE20(chspec_bw)) { if (chspec_ch 
>= 1 && chspec_ch <= 14) return TRUE; } else if (chspec_bw == WL_CHANSPEC_BW_40) { @@ -649,7 +691,7 @@ wf_chspec_valid(chanspec_t chanspec) const uint8 *center_ch; uint num_ch, i; - if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40) { + if (BW_LE40(chspec_bw)) { center_ch = wf_5g_40m_chans; num_ch = WF_NUM_5G_40M_CHANS; } else if (chspec_bw == WL_CHANSPEC_BW_80) { @@ -664,7 +706,7 @@ wf_chspec_valid(chanspec_t chanspec) } /* check for a valid center channel */ - if (chspec_bw == WL_CHANSPEC_BW_20) { + if (BW_LE20(chspec_bw)) { /* We don't have an array of legal 20MHz 5G channels, but they are * each side of the legal 40MHz channels. Check the chanspec * channel against either side of the 40MHz channels. @@ -720,7 +762,7 @@ wf_chspec_ctlchan(chanspec_t chspec) ASSERT(!wf_chspec_malformed(chspec)); /* Is there a sideband ? */ - if (CHSPEC_IS20(chspec)) { + if (CHSPEC_BW_LE20(chspec)) { return CHSPEC_CHANNEL(chspec); } else { sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT; @@ -749,7 +791,7 @@ wf_chspec_ctlchan(chanspec_t chspec) char * wf_chspec_to_bw_str(chanspec_t chspec) { - return (char *)wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)]; + return (char *)wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)]; } /* @@ -764,7 +806,7 @@ wf_chspec_ctlchspec(chanspec_t chspec) ASSERT(!wf_chspec_malformed(chspec)); /* Is there a sideband ? 
*/ - if (!CHSPEC_IS20(chspec)) { + if (!CHSPEC_BW_LE20(chspec)) { ctl_chan = wf_chspec_ctlchan(chspec); ctl_chspec = ctl_chan | WL_CHANSPEC_BW_20; ctl_chspec |= CHSPEC_BAND(chspec); @@ -800,7 +842,7 @@ wf_channel2chspec(uint ctl_ch, uint bw) center_ch = wf_5g_160m_chans; num_ch = WF_NUM_5G_160M_CHANS; bw = 160; - } else if (bw == WL_CHANSPEC_BW_20) { + } else if (BW_LE20(bw)) { chspec |= ctl_ch; return chspec; } else { diff --git a/drivers/net/wireless/bcmdhd/include/bcmwifi_channels.h b/drivers/amlogic/wifi/bcmdhd/bcmwifi_channels.h similarity index 77% rename from drivers/net/wireless/bcmdhd/include/bcmwifi_channels.h rename to drivers/amlogic/wifi/bcmdhd/bcmwifi_channels.h index b96aee6e1f8e9..186c0e18ee921 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmwifi_channels.h +++ b/drivers/amlogic/wifi/bcmdhd/bcmwifi_channels.h @@ -3,9 +3,30 @@ * This header file housing the define and function prototype use by * both the wl driver, tools & Apps. * - * $Copyright Open Broadcom Corporation$ - * - * $Id: bcmwifi_channels.h 309193 2012-01-19 00:03:57Z $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmwifi_channels.h 591285 2015-10-07 11:56:29Z $ */ #ifndef _bcmwifi_channels_h_ @@ -25,17 +46,24 @@ typedef uint16 chanspec_t; #define CH_10MHZ_APART 2 #define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */ #define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */ + +/* maximum # channels the s/w supports */ #define MAXCHANNEL 224 /* max # supported channels. The max channel no is above, * this is that + 1 rounded up to a multiple of NBBY (8). * DO NOT MAKE it > 255: channels are uint8's all over */ #define MAXCHANNEL_NUM (MAXCHANNEL - 1) /* max channel number */ +/* channel bitvec */ +typedef struct { + uint8 vec[MAXCHANNEL/8]; /* bitvec of channels */ +} chanvec_t; + /* make sure channel num is within valid range */ #define CH_NUM_VALID_RANGE(ch_num) ((ch_num) > 0 && (ch_num) <= MAXCHANNEL_NUM) -#define CHSPEC_CTLOVLP(sp1, sp2, sep) (ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < \ - (sep)) +#define CHSPEC_CTLOVLP(sp1, sp2, sep) \ + (ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < (sep)) /* All builds use the new 11ac ratespec/chanspec */ #undef D11AC_IOTYPES @@ -77,6 +105,7 @@ typedef uint16 chanspec_t; #define WL_CHANSPEC_BW_80 0x2000 #define WL_CHANSPEC_BW_160 0x2800 #define WL_CHANSPEC_BW_8080 0x3000 +#define WL_CHANSPEC_BW_2P5 0x3800 #define WL_CHANSPEC_BAND_MASK 0xc000 #define WL_CHANSPEC_BAND_SHIFT 14 @@ -85,6 +114,7 @@ typedef uint16 chanspec_t; #define WL_CHANSPEC_BAND_4G 0x8000 #define WL_CHANSPEC_BAND_5G 0xc000 #define INVCHANSPEC 255 +#define MAX_CHANSPEC 0xFFFF /* channel defines */ #define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? 
\ @@ -104,6 +134,15 @@ typedef uint16 chanspec_t; #define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \ (((channel) <= CH_MAX_2G_CHANNEL) ? \ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define CH2P5MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_2P5 | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define CH5MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_5 | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define CH10MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_10 | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) #define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \ ((channel) + CH_20MHZ_APART) : 0) #define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ @@ -116,6 +155,9 @@ typedef uint16 chanspec_t; #define CH160MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ ((channel) | (ctlsb) | \ WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G) +#define CHBW_CHSPEC(bw, channel) (chanspec_t)((chanspec_t)(channel) | (bw) | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? 
\ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) /* simple MACROs to get different fields of chanspec */ #ifdef WL11AC_80P80 @@ -131,6 +173,8 @@ typedef uint16 chanspec_t; #ifdef WL11N_20MHZONLY +#define CHSPEC_IS2P5(chspec) 0 +#define CHSPEC_IS5(chspec) 0 #define CHSPEC_IS10(chspec) 0 #define CHSPEC_IS20(chspec) 1 #ifndef CHSPEC_IS40 @@ -145,9 +189,12 @@ typedef uint16 chanspec_t; #ifndef CHSPEC_IS8080 #define CHSPEC_IS8080(chspec) 0 #endif - +#define BW_LE20(bw) TRUE +#define CHSPEC_ISLE20(chspec) TRUE #else /* !WL11N_20MHZONLY */ +#define CHSPEC_IS2P5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_2P5) +#define CHSPEC_IS5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_5) #define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10) #define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) #ifndef CHSPEC_IS40 @@ -163,8 +210,30 @@ typedef uint16 chanspec_t; #define CHSPEC_IS8080(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080) #endif +#ifdef WL11ULB +#define BW_LT20(bw) (((bw) == WL_CHANSPEC_BW_2P5) || \ + ((bw) == WL_CHANSPEC_BW_5) || \ + ((bw) == WL_CHANSPEC_BW_10)) +#define CHSPEC_BW_LT20(chspec) (BW_LT20(CHSPEC_BW(chspec))) +/* This MACRO is strictly to avoid abandons in existing code with ULB feature and is in no way + * optimial to use. 
Should be replaced with CHSPEC_BW_LE() instead + */ +#define BW_LE20(bw) (((bw) == WL_CHANSPEC_BW_2P5) || \ + ((bw) == WL_CHANSPEC_BW_5) || \ + ((bw) == WL_CHANSPEC_BW_10) || \ + ((bw) == WL_CHANSPEC_BW_20)) +#define CHSPEC_ISLE20(chspec) (BW_LE20(CHSPEC_BW(chspec))) + +#else /* WL11ULB */ +#define BW_LE20(bw) ((bw) == WL_CHANSPEC_BW_20) +#define CHSPEC_ISLE20(chspec) (CHSPEC_IS20(chspec)) +#endif /* WL11ULB */ #endif /* !WL11N_20MHZONLY */ +#define BW_LE40(bw) (BW_LE20(bw) || ((bw) == WL_CHANSPEC_BW_40)) +#define BW_LE80(bw) (BW_LE40(bw) || ((bw) == WL_CHANSPEC_BW_80)) +#define BW_LE160(bw) (BW_LE80(bw) || ((bw) == WL_CHANSPEC_BW_160)) +#define CHSPEC_BW_LE20(chspec) (BW_LE20(CHSPEC_BW(chspec))) #define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G) #define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G) #define CHSPEC_SB_UPPER(chspec) \ @@ -191,25 +260,57 @@ typedef uint16 chanspec_t; * The LT/LE/GT/GE macros check first checks whether both chspec bandwidth and bw are 160 wide. * If both chspec bandwidth and bw is not 160 wide, then the comparison is made. 
*/ +#ifdef WL11ULB +#define CHSPEC_BW_GE(chspec, bw) \ + (((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) >= (bw))) && \ + (!(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5 && (bw) != WL_CHANSPEC_BW_2P5))) +#else /* WL11ULB */ #define CHSPEC_BW_GE(chspec, bw) \ - ((CHSPEC_IS_BW_160_WIDE(chspec) &&\ - (bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) ||\ - (CHSPEC_BW(chspec) >= bw)) + ((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) >= (bw))) +#endif /* WL11ULB */ +#ifdef WL11ULB #define CHSPEC_BW_LE(chspec, bw) \ - ((CHSPEC_IS_BW_160_WIDE(chspec) &&\ - (bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) ||\ - (CHSPEC_BW(chspec) <= bw)) + (((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) <= (bw))) || \ + (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5)) +#else /* WL11ULB */ +#define CHSPEC_BW_LE(chspec, bw) \ + ((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) <= (bw))) +#endif /* WL11ULB */ +#ifdef WL11ULB +#define CHSPEC_BW_GT(chspec, bw) \ + ((!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) > (bw))) && \ + (CHSPEC_BW(chspec) != WL_CHANSPEC_BW_2P5)) +#else /* WL11ULB */ #define CHSPEC_BW_GT(chspec, bw) \ - (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ - (bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) &&\ - (CHSPEC_BW(chspec) > bw)) + (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) > (bw))) +#endif /* WL11ULB */ +#ifdef WL11ULB +#define CHSPEC_BW_LT(chspec, bw) \ + ((!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) < (bw))) || \ + 
((CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5 && (bw) != WL_CHANSPEC_BW_2P5))) +#else /* WL11ULB */ #define CHSPEC_BW_LT(chspec, bw) \ - (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ - (bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) &&\ - (CHSPEC_BW(chspec) < bw)) + (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) < (bw))) +#endif /* WL11ULB */ /* Legacy Chanspec defines * These are the defines for the previous format of the chanspec_t @@ -362,7 +463,7 @@ extern bool wf_chspec_valid(chanspec_t chanspec); */ extern uint8 wf_chspec_ctlchan(chanspec_t chspec); -/** +/* * Return the bandwidth string. * * This function returns the bandwidth string for the passed chanspec. diff --git a/drivers/amlogic/wifi/bcmdhd/bcmwifi_rates.h b/drivers/amlogic/wifi/bcmdhd/bcmwifi_rates.h new file mode 100644 index 0000000000000..1329e9bc80da7 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/bcmwifi_rates.h @@ -0,0 +1,793 @@ +/* + * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmwifi_rates.h 591285 2015-10-07 11:56:29Z $ + */ + +#ifndef _bcmwifi_rates_h_ +#define _bcmwifi_rates_h_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + +#define WL_RATESET_SZ_DSSS 4 +#define WL_RATESET_SZ_OFDM 8 +#define WL_RATESET_SZ_VHT_MCS 10 +#define WL_RATESET_SZ_VHT_MCS_P 12 + +#if defined(WLPROPRIETARY_11N_RATES) +#define WL_RATESET_SZ_HT_MCS WL_RATESET_SZ_VHT_MCS +#else +#define WL_RATESET_SZ_HT_MCS 8 +#endif + +#define WL_RATESET_SZ_HT_IOCTL 8 /* MAC histogram, compatibility with wl utility */ + +#define WL_TX_CHAINS_MAX 4 + +#define WL_RATE_DISABLED (-128) /* Power value corresponding to unsupported rate */ + +/* Transmit channel bandwidths */ +typedef enum wl_tx_bw { + WL_TX_BW_20, + WL_TX_BW_40, + WL_TX_BW_80, + WL_TX_BW_20IN40, + WL_TX_BW_20IN80, + WL_TX_BW_40IN80, + WL_TX_BW_160, + WL_TX_BW_20IN160, + WL_TX_BW_40IN160, + WL_TX_BW_80IN160, + WL_TX_BW_ALL, + WL_TX_BW_8080, + WL_TX_BW_8080CHAN2, + WL_TX_BW_20IN8080, + WL_TX_BW_40IN8080, + WL_TX_BW_80IN8080, + WL_TX_BW_2P5, + WL_TX_BW_5, + WL_TX_BW_10 +} wl_tx_bw_t; + + +/* + * Transmit modes. + * Not all modes are listed here, only those required for disambiguation. e.g. 
SPEXP is not listed + */ +typedef enum wl_tx_mode { + WL_TX_MODE_NONE, + WL_TX_MODE_STBC, + WL_TX_MODE_CDD, + WL_TX_MODE_TXBF, + WL_NUM_TX_MODES +} wl_tx_mode_t; + + +/* Number of transmit chains */ +typedef enum wl_tx_chains { + WL_TX_CHAINS_1 = 1, + WL_TX_CHAINS_2, + WL_TX_CHAINS_3, + WL_TX_CHAINS_4 +} wl_tx_chains_t; + + +/* Number of transmit streams */ +typedef enum wl_tx_nss { + WL_TX_NSS_1 = 1, + WL_TX_NSS_2, + WL_TX_NSS_3, + WL_TX_NSS_4 +} wl_tx_nss_t; + + +/* This enum maps each rate to a CLM index */ + +typedef enum clm_rates { + /************ + * 1 chain * + ************ + */ + + /* 1 Stream */ + WL_RATE_1X1_DSSS_1 = 0, + WL_RATE_1X1_DSSS_2 = 1, + WL_RATE_1X1_DSSS_5_5 = 2, + WL_RATE_1X1_DSSS_11 = 3, + + WL_RATE_1X1_OFDM_6 = 4, + WL_RATE_1X1_OFDM_9 = 5, + WL_RATE_1X1_OFDM_12 = 6, + WL_RATE_1X1_OFDM_18 = 7, + WL_RATE_1X1_OFDM_24 = 8, + WL_RATE_1X1_OFDM_36 = 9, + WL_RATE_1X1_OFDM_48 = 10, + WL_RATE_1X1_OFDM_54 = 11, + + WL_RATE_1X1_MCS0 = 12, + WL_RATE_1X1_MCS1 = 13, + WL_RATE_1X1_MCS2 = 14, + WL_RATE_1X1_MCS3 = 15, + WL_RATE_1X1_MCS4 = 16, + WL_RATE_1X1_MCS5 = 17, + WL_RATE_1X1_MCS6 = 18, + WL_RATE_1X1_MCS7 = 19, + WL_RATE_P_1X1_MCS87 = 20, + WL_RATE_P_1X1_MCS88 = 21, + + WL_RATE_1X1_VHT0SS1 = 12, + WL_RATE_1X1_VHT1SS1 = 13, + WL_RATE_1X1_VHT2SS1 = 14, + WL_RATE_1X1_VHT3SS1 = 15, + WL_RATE_1X1_VHT4SS1 = 16, + WL_RATE_1X1_VHT5SS1 = 17, + WL_RATE_1X1_VHT6SS1 = 18, + WL_RATE_1X1_VHT7SS1 = 19, + WL_RATE_1X1_VHT8SS1 = 20, + WL_RATE_1X1_VHT9SS1 = 21, + WL_RATE_P_1X1_VHT10SS1 = 22, + WL_RATE_P_1X1_VHT11SS1 = 23, + + + /************ + * 2 chains * + ************ + */ + + /* 1 Stream expanded + 1 */ + WL_RATE_1X2_DSSS_1 = 24, + WL_RATE_1X2_DSSS_2 = 25, + WL_RATE_1X2_DSSS_5_5 = 26, + WL_RATE_1X2_DSSS_11 = 27, + + WL_RATE_1X2_CDD_OFDM_6 = 28, + WL_RATE_1X2_CDD_OFDM_9 = 29, + WL_RATE_1X2_CDD_OFDM_12 = 30, + WL_RATE_1X2_CDD_OFDM_18 = 31, + WL_RATE_1X2_CDD_OFDM_24 = 32, + WL_RATE_1X2_CDD_OFDM_36 = 33, + WL_RATE_1X2_CDD_OFDM_48 = 34, + WL_RATE_1X2_CDD_OFDM_54 = 35, + + 
WL_RATE_1X2_CDD_MCS0 = 36, + WL_RATE_1X2_CDD_MCS1 = 37, + WL_RATE_1X2_CDD_MCS2 = 38, + WL_RATE_1X2_CDD_MCS3 = 39, + WL_RATE_1X2_CDD_MCS4 = 40, + WL_RATE_1X2_CDD_MCS5 = 41, + WL_RATE_1X2_CDD_MCS6 = 42, + WL_RATE_1X2_CDD_MCS7 = 43, + WL_RATE_P_1X2_CDD_MCS87 = 44, + WL_RATE_P_1X2_CDD_MCS88 = 45, + + WL_RATE_1X2_VHT0SS1 = 36, + WL_RATE_1X2_VHT1SS1 = 37, + WL_RATE_1X2_VHT2SS1 = 38, + WL_RATE_1X2_VHT3SS1 = 39, + WL_RATE_1X2_VHT4SS1 = 40, + WL_RATE_1X2_VHT5SS1 = 41, + WL_RATE_1X2_VHT6SS1 = 42, + WL_RATE_1X2_VHT7SS1 = 43, + WL_RATE_1X2_VHT8SS1 = 44, + WL_RATE_1X2_VHT9SS1 = 45, + WL_RATE_P_1X2_VHT10SS1 = 46, + WL_RATE_P_1X2_VHT11SS1 = 47, + + /* 2 Streams */ + WL_RATE_2X2_STBC_MCS0 = 48, + WL_RATE_2X2_STBC_MCS1 = 49, + WL_RATE_2X2_STBC_MCS2 = 50, + WL_RATE_2X2_STBC_MCS3 = 51, + WL_RATE_2X2_STBC_MCS4 = 52, + WL_RATE_2X2_STBC_MCS5 = 53, + WL_RATE_2X2_STBC_MCS6 = 54, + WL_RATE_2X2_STBC_MCS7 = 55, + WL_RATE_P_2X2_STBC_MCS87 = 56, + WL_RATE_P_2X2_STBC_MCS88 = 57, + + WL_RATE_2X2_STBC_VHT0SS1 = 48, + WL_RATE_2X2_STBC_VHT1SS1 = 49, + WL_RATE_2X2_STBC_VHT2SS1 = 50, + WL_RATE_2X2_STBC_VHT3SS1 = 51, + WL_RATE_2X2_STBC_VHT4SS1 = 52, + WL_RATE_2X2_STBC_VHT5SS1 = 53, + WL_RATE_2X2_STBC_VHT6SS1 = 54, + WL_RATE_2X2_STBC_VHT7SS1 = 55, + WL_RATE_2X2_STBC_VHT8SS1 = 56, + WL_RATE_2X2_STBC_VHT9SS1 = 57, + WL_RATE_P_2X2_STBC_VHT10SS1 = 58, + WL_RATE_P_2X2_STBC_VHT11SS1 = 59, + + WL_RATE_2X2_SDM_MCS8 = 60, + WL_RATE_2X2_SDM_MCS9 = 61, + WL_RATE_2X2_SDM_MCS10 = 62, + WL_RATE_2X2_SDM_MCS11 = 63, + WL_RATE_2X2_SDM_MCS12 = 64, + WL_RATE_2X2_SDM_MCS13 = 65, + WL_RATE_2X2_SDM_MCS14 = 66, + WL_RATE_2X2_SDM_MCS15 = 67, + WL_RATE_P_2X2_SDM_MCS99 = 68, + WL_RATE_P_2X2_SDM_MCS100 = 69, + + WL_RATE_2X2_VHT0SS2 = 60, + WL_RATE_2X2_VHT1SS2 = 61, + WL_RATE_2X2_VHT2SS2 = 62, + WL_RATE_2X2_VHT3SS2 = 63, + WL_RATE_2X2_VHT4SS2 = 64, + WL_RATE_2X2_VHT5SS2 = 65, + WL_RATE_2X2_VHT6SS2 = 66, + WL_RATE_2X2_VHT7SS2 = 67, + WL_RATE_2X2_VHT8SS2 = 68, + WL_RATE_2X2_VHT9SS2 = 69, + WL_RATE_P_2X2_VHT10SS2 = 70, + 
WL_RATE_P_2X2_VHT11SS2 = 71, + + /**************************** + * TX Beamforming, 2 chains * + **************************** + */ + + /* 1 Stream expanded + 1 */ + WL_RATE_1X2_TXBF_OFDM_6 = 72, + WL_RATE_1X2_TXBF_OFDM_9 = 73, + WL_RATE_1X2_TXBF_OFDM_12 = 74, + WL_RATE_1X2_TXBF_OFDM_18 = 75, + WL_RATE_1X2_TXBF_OFDM_24 = 76, + WL_RATE_1X2_TXBF_OFDM_36 = 77, + WL_RATE_1X2_TXBF_OFDM_48 = 78, + WL_RATE_1X2_TXBF_OFDM_54 = 79, + + WL_RATE_1X2_TXBF_MCS0 = 80, + WL_RATE_1X2_TXBF_MCS1 = 81, + WL_RATE_1X2_TXBF_MCS2 = 82, + WL_RATE_1X2_TXBF_MCS3 = 83, + WL_RATE_1X2_TXBF_MCS4 = 84, + WL_RATE_1X2_TXBF_MCS5 = 85, + WL_RATE_1X2_TXBF_MCS6 = 86, + WL_RATE_1X2_TXBF_MCS7 = 87, + WL_RATE_P_1X2_TXBF_MCS87 = 88, + WL_RATE_P_1X2_TXBF_MCS88 = 89, + + WL_RATE_1X2_TXBF_VHT0SS1 = 80, + WL_RATE_1X2_TXBF_VHT1SS1 = 81, + WL_RATE_1X2_TXBF_VHT2SS1 = 82, + WL_RATE_1X2_TXBF_VHT3SS1 = 83, + WL_RATE_1X2_TXBF_VHT4SS1 = 84, + WL_RATE_1X2_TXBF_VHT5SS1 = 85, + WL_RATE_1X2_TXBF_VHT6SS1 = 86, + WL_RATE_1X2_TXBF_VHT7SS1 = 87, + WL_RATE_1X2_TXBF_VHT8SS1 = 88, + WL_RATE_1X2_TXBF_VHT9SS1 = 89, + WL_RATE_P_1X2_TXBF_VHT10SS1 = 90, + WL_RATE_P_1X2_TXBF_VHT11SS1 = 91, + + /* 2 Streams */ + WL_RATE_2X2_TXBF_SDM_MCS8 = 92, + WL_RATE_2X2_TXBF_SDM_MCS9 = 93, + WL_RATE_2X2_TXBF_SDM_MCS10 = 94, + WL_RATE_2X2_TXBF_SDM_MCS11 = 95, + WL_RATE_2X2_TXBF_SDM_MCS12 = 96, + WL_RATE_2X2_TXBF_SDM_MCS13 = 97, + WL_RATE_2X2_TXBF_SDM_MCS14 = 98, + WL_RATE_2X2_TXBF_SDM_MCS15 = 99, + WL_RATE_P_2X2_TXBF_SDM_MCS99 = 100, + WL_RATE_P_2X2_TXBF_SDM_MCS100 = 101, + + WL_RATE_2X2_TXBF_VHT0SS2 = 92, + WL_RATE_2X2_TXBF_VHT1SS2 = 93, + WL_RATE_2X2_TXBF_VHT2SS2 = 94, + WL_RATE_2X2_TXBF_VHT3SS2 = 95, + WL_RATE_2X2_TXBF_VHT4SS2 = 96, + WL_RATE_2X2_TXBF_VHT5SS2 = 97, + WL_RATE_2X2_TXBF_VHT6SS2 = 98, + WL_RATE_2X2_TXBF_VHT7SS2 = 99, + WL_RATE_2X2_TXBF_VHT8SS2 = 100, + WL_RATE_2X2_TXBF_VHT9SS2 = 101, + WL_RATE_P_2X2_TXBF_VHT10SS2 = 102, + WL_RATE_P_2X2_TXBF_VHT11SS2 = 103, + + + /************ + * 3 chains * + ************ + */ + + /* 1 Stream expanded 
+ 2 */ + WL_RATE_1X3_DSSS_1 = 104, + WL_RATE_1X3_DSSS_2 = 105, + WL_RATE_1X3_DSSS_5_5 = 106, + WL_RATE_1X3_DSSS_11 = 107, + + WL_RATE_1X3_CDD_OFDM_6 = 108, + WL_RATE_1X3_CDD_OFDM_9 = 109, + WL_RATE_1X3_CDD_OFDM_12 = 110, + WL_RATE_1X3_CDD_OFDM_18 = 111, + WL_RATE_1X3_CDD_OFDM_24 = 112, + WL_RATE_1X3_CDD_OFDM_36 = 113, + WL_RATE_1X3_CDD_OFDM_48 = 114, + WL_RATE_1X3_CDD_OFDM_54 = 115, + + WL_RATE_1X3_CDD_MCS0 = 116, + WL_RATE_1X3_CDD_MCS1 = 117, + WL_RATE_1X3_CDD_MCS2 = 118, + WL_RATE_1X3_CDD_MCS3 = 119, + WL_RATE_1X3_CDD_MCS4 = 120, + WL_RATE_1X3_CDD_MCS5 = 121, + WL_RATE_1X3_CDD_MCS6 = 122, + WL_RATE_1X3_CDD_MCS7 = 123, + WL_RATE_P_1X3_CDD_MCS87 = 124, + WL_RATE_P_1X3_CDD_MCS88 = 125, + + WL_RATE_1X3_VHT0SS1 = 116, + WL_RATE_1X3_VHT1SS1 = 117, + WL_RATE_1X3_VHT2SS1 = 118, + WL_RATE_1X3_VHT3SS1 = 119, + WL_RATE_1X3_VHT4SS1 = 120, + WL_RATE_1X3_VHT5SS1 = 121, + WL_RATE_1X3_VHT6SS1 = 122, + WL_RATE_1X3_VHT7SS1 = 123, + WL_RATE_1X3_VHT8SS1 = 124, + WL_RATE_1X3_VHT9SS1 = 125, + WL_RATE_P_1X3_VHT10SS1 = 126, + WL_RATE_P_1X3_VHT11SS1 = 127, + + /* 2 Streams expanded + 1 */ + WL_RATE_2X3_STBC_MCS0 = 128, + WL_RATE_2X3_STBC_MCS1 = 129, + WL_RATE_2X3_STBC_MCS2 = 130, + WL_RATE_2X3_STBC_MCS3 = 131, + WL_RATE_2X3_STBC_MCS4 = 132, + WL_RATE_2X3_STBC_MCS5 = 133, + WL_RATE_2X3_STBC_MCS6 = 134, + WL_RATE_2X3_STBC_MCS7 = 135, + WL_RATE_P_2X3_STBC_MCS87 = 136, + WL_RATE_P_2X3_STBC_MCS88 = 137, + + WL_RATE_2X3_STBC_VHT0SS1 = 128, + WL_RATE_2X3_STBC_VHT1SS1 = 129, + WL_RATE_2X3_STBC_VHT2SS1 = 130, + WL_RATE_2X3_STBC_VHT3SS1 = 131, + WL_RATE_2X3_STBC_VHT4SS1 = 132, + WL_RATE_2X3_STBC_VHT5SS1 = 133, + WL_RATE_2X3_STBC_VHT6SS1 = 134, + WL_RATE_2X3_STBC_VHT7SS1 = 135, + WL_RATE_2X3_STBC_VHT8SS1 = 136, + WL_RATE_2X3_STBC_VHT9SS1 = 137, + WL_RATE_P_2X3_STBC_VHT10SS1 = 138, + WL_RATE_P_2X3_STBC_VHT11SS1 = 139, + + WL_RATE_2X3_SDM_MCS8 = 140, + WL_RATE_2X3_SDM_MCS9 = 141, + WL_RATE_2X3_SDM_MCS10 = 142, + WL_RATE_2X3_SDM_MCS11 = 143, + WL_RATE_2X3_SDM_MCS12 = 144, + WL_RATE_2X3_SDM_MCS13 = 
145, + WL_RATE_2X3_SDM_MCS14 = 146, + WL_RATE_2X3_SDM_MCS15 = 147, + WL_RATE_P_2X3_SDM_MCS99 = 148, + WL_RATE_P_2X3_SDM_MCS100 = 149, + + WL_RATE_2X3_VHT0SS2 = 140, + WL_RATE_2X3_VHT1SS2 = 141, + WL_RATE_2X3_VHT2SS2 = 142, + WL_RATE_2X3_VHT3SS2 = 143, + WL_RATE_2X3_VHT4SS2 = 144, + WL_RATE_2X3_VHT5SS2 = 145, + WL_RATE_2X3_VHT6SS2 = 146, + WL_RATE_2X3_VHT7SS2 = 147, + WL_RATE_2X3_VHT8SS2 = 148, + WL_RATE_2X3_VHT9SS2 = 149, + WL_RATE_P_2X3_VHT10SS2 = 150, + WL_RATE_P_2X3_VHT11SS2 = 151, + + /* 3 Streams */ + WL_RATE_3X3_SDM_MCS16 = 152, + WL_RATE_3X3_SDM_MCS17 = 153, + WL_RATE_3X3_SDM_MCS18 = 154, + WL_RATE_3X3_SDM_MCS19 = 155, + WL_RATE_3X3_SDM_MCS20 = 156, + WL_RATE_3X3_SDM_MCS21 = 157, + WL_RATE_3X3_SDM_MCS22 = 158, + WL_RATE_3X3_SDM_MCS23 = 159, + WL_RATE_P_3X3_SDM_MCS101 = 160, + WL_RATE_P_3X3_SDM_MCS102 = 161, + + WL_RATE_3X3_VHT0SS3 = 152, + WL_RATE_3X3_VHT1SS3 = 153, + WL_RATE_3X3_VHT2SS3 = 154, + WL_RATE_3X3_VHT3SS3 = 155, + WL_RATE_3X3_VHT4SS3 = 156, + WL_RATE_3X3_VHT5SS3 = 157, + WL_RATE_3X3_VHT6SS3 = 158, + WL_RATE_3X3_VHT7SS3 = 159, + WL_RATE_3X3_VHT8SS3 = 160, + WL_RATE_3X3_VHT9SS3 = 161, + WL_RATE_P_3X3_VHT10SS3 = 162, + WL_RATE_P_3X3_VHT11SS3 = 163, + + + /**************************** + * TX Beamforming, 3 chains * + **************************** + */ + + /* 1 Stream expanded + 2 */ + WL_RATE_1X3_TXBF_OFDM_6 = 164, + WL_RATE_1X3_TXBF_OFDM_9 = 165, + WL_RATE_1X3_TXBF_OFDM_12 = 166, + WL_RATE_1X3_TXBF_OFDM_18 = 167, + WL_RATE_1X3_TXBF_OFDM_24 = 168, + WL_RATE_1X3_TXBF_OFDM_36 = 169, + WL_RATE_1X3_TXBF_OFDM_48 = 170, + WL_RATE_1X3_TXBF_OFDM_54 = 171, + + WL_RATE_1X3_TXBF_MCS0 = 172, + WL_RATE_1X3_TXBF_MCS1 = 173, + WL_RATE_1X3_TXBF_MCS2 = 174, + WL_RATE_1X3_TXBF_MCS3 = 175, + WL_RATE_1X3_TXBF_MCS4 = 176, + WL_RATE_1X3_TXBF_MCS5 = 177, + WL_RATE_1X3_TXBF_MCS6 = 178, + WL_RATE_1X3_TXBF_MCS7 = 179, + WL_RATE_P_1X3_TXBF_MCS87 = 180, + WL_RATE_P_1X3_TXBF_MCS88 = 181, + + WL_RATE_1X3_TXBF_VHT0SS1 = 172, + WL_RATE_1X3_TXBF_VHT1SS1 = 173, + 
WL_RATE_1X3_TXBF_VHT2SS1 = 174, + WL_RATE_1X3_TXBF_VHT3SS1 = 175, + WL_RATE_1X3_TXBF_VHT4SS1 = 176, + WL_RATE_1X3_TXBF_VHT5SS1 = 177, + WL_RATE_1X3_TXBF_VHT6SS1 = 178, + WL_RATE_1X3_TXBF_VHT7SS1 = 179, + WL_RATE_1X3_TXBF_VHT8SS1 = 180, + WL_RATE_1X3_TXBF_VHT9SS1 = 181, + WL_RATE_P_1X3_TXBF_VHT10SS1 = 182, + WL_RATE_P_1X3_TXBF_VHT11SS1 = 183, + + /* 2 Streams expanded + 1 */ + WL_RATE_2X3_TXBF_SDM_MCS8 = 184, + WL_RATE_2X3_TXBF_SDM_MCS9 = 185, + WL_RATE_2X3_TXBF_SDM_MCS10 = 186, + WL_RATE_2X3_TXBF_SDM_MCS11 = 187, + WL_RATE_2X3_TXBF_SDM_MCS12 = 188, + WL_RATE_2X3_TXBF_SDM_MCS13 = 189, + WL_RATE_2X3_TXBF_SDM_MCS14 = 190, + WL_RATE_2X3_TXBF_SDM_MCS15 = 191, + WL_RATE_P_2X3_TXBF_SDM_MCS99 = 192, + WL_RATE_P_2X3_TXBF_SDM_MCS100 = 193, + + WL_RATE_2X3_TXBF_VHT0SS2 = 184, + WL_RATE_2X3_TXBF_VHT1SS2 = 185, + WL_RATE_2X3_TXBF_VHT2SS2 = 186, + WL_RATE_2X3_TXBF_VHT3SS2 = 187, + WL_RATE_2X3_TXBF_VHT4SS2 = 188, + WL_RATE_2X3_TXBF_VHT5SS2 = 189, + WL_RATE_2X3_TXBF_VHT6SS2 = 190, + WL_RATE_2X3_TXBF_VHT7SS2 = 191, + WL_RATE_2X3_TXBF_VHT8SS2 = 192, + WL_RATE_2X3_TXBF_VHT9SS2 = 193, + WL_RATE_P_2X3_TXBF_VHT10SS2 = 194, + WL_RATE_P_2X3_TXBF_VHT11SS2 = 195, + + /* 3 Streams */ + WL_RATE_3X3_TXBF_SDM_MCS16 = 196, + WL_RATE_3X3_TXBF_SDM_MCS17 = 197, + WL_RATE_3X3_TXBF_SDM_MCS18 = 198, + WL_RATE_3X3_TXBF_SDM_MCS19 = 199, + WL_RATE_3X3_TXBF_SDM_MCS20 = 200, + WL_RATE_3X3_TXBF_SDM_MCS21 = 201, + WL_RATE_3X3_TXBF_SDM_MCS22 = 202, + WL_RATE_3X3_TXBF_SDM_MCS23 = 203, + WL_RATE_P_3X3_TXBF_SDM_MCS101 = 204, + WL_RATE_P_3X3_TXBF_SDM_MCS102 = 205, + + WL_RATE_3X3_TXBF_VHT0SS3 = 196, + WL_RATE_3X3_TXBF_VHT1SS3 = 197, + WL_RATE_3X3_TXBF_VHT2SS3 = 198, + WL_RATE_3X3_TXBF_VHT3SS3 = 199, + WL_RATE_3X3_TXBF_VHT4SS3 = 200, + WL_RATE_3X3_TXBF_VHT5SS3 = 201, + WL_RATE_3X3_TXBF_VHT6SS3 = 202, + WL_RATE_3X3_TXBF_VHT7SS3 = 203, + WL_RATE_3X3_TXBF_VHT8SS3 = 204, + WL_RATE_3X3_TXBF_VHT9SS3 = 205, + WL_RATE_P_3X3_TXBF_VHT10SS3 = 206, + WL_RATE_P_3X3_TXBF_VHT11SS3 = 207, + + + /************ + * 4 chains * + 
************ + */ + + /* 1 Stream expanded + 3 */ + WL_RATE_1X4_DSSS_1 = 208, + WL_RATE_1X4_DSSS_2 = 209, + WL_RATE_1X4_DSSS_5_5 = 210, + WL_RATE_1X4_DSSS_11 = 211, + + WL_RATE_1X4_CDD_OFDM_6 = 212, + WL_RATE_1X4_CDD_OFDM_9 = 213, + WL_RATE_1X4_CDD_OFDM_12 = 214, + WL_RATE_1X4_CDD_OFDM_18 = 215, + WL_RATE_1X4_CDD_OFDM_24 = 216, + WL_RATE_1X4_CDD_OFDM_36 = 217, + WL_RATE_1X4_CDD_OFDM_48 = 218, + WL_RATE_1X4_CDD_OFDM_54 = 219, + + WL_RATE_1X4_CDD_MCS0 = 220, + WL_RATE_1X4_CDD_MCS1 = 221, + WL_RATE_1X4_CDD_MCS2 = 222, + WL_RATE_1X4_CDD_MCS3 = 223, + WL_RATE_1X4_CDD_MCS4 = 224, + WL_RATE_1X4_CDD_MCS5 = 225, + WL_RATE_1X4_CDD_MCS6 = 226, + WL_RATE_1X4_CDD_MCS7 = 227, + WL_RATE_P_1X4_CDD_MCS87 = 228, + WL_RATE_P_1X4_CDD_MCS88 = 229, + + WL_RATE_1X4_VHT0SS1 = 220, + WL_RATE_1X4_VHT1SS1 = 221, + WL_RATE_1X4_VHT2SS1 = 222, + WL_RATE_1X4_VHT3SS1 = 223, + WL_RATE_1X4_VHT4SS1 = 224, + WL_RATE_1X4_VHT5SS1 = 225, + WL_RATE_1X4_VHT6SS1 = 226, + WL_RATE_1X4_VHT7SS1 = 227, + WL_RATE_1X4_VHT8SS1 = 228, + WL_RATE_1X4_VHT9SS1 = 229, + WL_RATE_P_1X4_VHT10SS1 = 230, + WL_RATE_P_1X4_VHT11SS1 = 231, + + /* 2 Streams expanded + 2 */ + WL_RATE_2X4_STBC_MCS0 = 232, + WL_RATE_2X4_STBC_MCS1 = 233, + WL_RATE_2X4_STBC_MCS2 = 234, + WL_RATE_2X4_STBC_MCS3 = 235, + WL_RATE_2X4_STBC_MCS4 = 236, + WL_RATE_2X4_STBC_MCS5 = 237, + WL_RATE_2X4_STBC_MCS6 = 238, + WL_RATE_2X4_STBC_MCS7 = 239, + WL_RATE_P_2X4_STBC_MCS87 = 240, + WL_RATE_P_2X4_STBC_MCS88 = 241, + + WL_RATE_2X4_STBC_VHT0SS1 = 232, + WL_RATE_2X4_STBC_VHT1SS1 = 233, + WL_RATE_2X4_STBC_VHT2SS1 = 234, + WL_RATE_2X4_STBC_VHT3SS1 = 235, + WL_RATE_2X4_STBC_VHT4SS1 = 236, + WL_RATE_2X4_STBC_VHT5SS1 = 237, + WL_RATE_2X4_STBC_VHT6SS1 = 238, + WL_RATE_2X4_STBC_VHT7SS1 = 239, + WL_RATE_2X4_STBC_VHT8SS1 = 240, + WL_RATE_2X4_STBC_VHT9SS1 = 241, + WL_RATE_P_2X4_STBC_VHT10SS1 = 242, + WL_RATE_P_2X4_STBC_VHT11SS1 = 243, + + WL_RATE_2X4_SDM_MCS8 = 244, + WL_RATE_2X4_SDM_MCS9 = 245, + WL_RATE_2X4_SDM_MCS10 = 246, + WL_RATE_2X4_SDM_MCS11 = 247, + 
WL_RATE_2X4_SDM_MCS12 = 248, + WL_RATE_2X4_SDM_MCS13 = 249, + WL_RATE_2X4_SDM_MCS14 = 250, + WL_RATE_2X4_SDM_MCS15 = 251, + WL_RATE_P_2X4_SDM_MCS99 = 252, + WL_RATE_P_2X4_SDM_MCS100 = 253, + + WL_RATE_2X4_VHT0SS2 = 244, + WL_RATE_2X4_VHT1SS2 = 245, + WL_RATE_2X4_VHT2SS2 = 246, + WL_RATE_2X4_VHT3SS2 = 247, + WL_RATE_2X4_VHT4SS2 = 248, + WL_RATE_2X4_VHT5SS2 = 249, + WL_RATE_2X4_VHT6SS2 = 250, + WL_RATE_2X4_VHT7SS2 = 251, + WL_RATE_2X4_VHT8SS2 = 252, + WL_RATE_2X4_VHT9SS2 = 253, + WL_RATE_P_2X4_VHT10SS2 = 254, + WL_RATE_P_2X4_VHT11SS2 = 255, + + /* 3 Streams expanded + 1 */ + WL_RATE_3X4_SDM_MCS16 = 256, + WL_RATE_3X4_SDM_MCS17 = 257, + WL_RATE_3X4_SDM_MCS18 = 258, + WL_RATE_3X4_SDM_MCS19 = 259, + WL_RATE_3X4_SDM_MCS20 = 260, + WL_RATE_3X4_SDM_MCS21 = 261, + WL_RATE_3X4_SDM_MCS22 = 262, + WL_RATE_3X4_SDM_MCS23 = 263, + WL_RATE_P_3X4_SDM_MCS101 = 264, + WL_RATE_P_3X4_SDM_MCS102 = 265, + + WL_RATE_3X4_VHT0SS3 = 256, + WL_RATE_3X4_VHT1SS3 = 257, + WL_RATE_3X4_VHT2SS3 = 258, + WL_RATE_3X4_VHT3SS3 = 259, + WL_RATE_3X4_VHT4SS3 = 260, + WL_RATE_3X4_VHT5SS3 = 261, + WL_RATE_3X4_VHT6SS3 = 262, + WL_RATE_3X4_VHT7SS3 = 263, + WL_RATE_3X4_VHT8SS3 = 264, + WL_RATE_3X4_VHT9SS3 = 265, + WL_RATE_P_3X4_VHT10SS3 = 266, + WL_RATE_P_3X4_VHT11SS3 = 267, + + + /* 4 Streams */ + WL_RATE_4X4_SDM_MCS24 = 268, + WL_RATE_4X4_SDM_MCS25 = 269, + WL_RATE_4X4_SDM_MCS26 = 270, + WL_RATE_4X4_SDM_MCS27 = 271, + WL_RATE_4X4_SDM_MCS28 = 272, + WL_RATE_4X4_SDM_MCS29 = 273, + WL_RATE_4X4_SDM_MCS30 = 274, + WL_RATE_4X4_SDM_MCS31 = 275, + WL_RATE_P_4X4_SDM_MCS103 = 276, + WL_RATE_P_4X4_SDM_MCS104 = 277, + + WL_RATE_4X4_VHT0SS4 = 268, + WL_RATE_4X4_VHT1SS4 = 269, + WL_RATE_4X4_VHT2SS4 = 270, + WL_RATE_4X4_VHT3SS4 = 271, + WL_RATE_4X4_VHT4SS4 = 272, + WL_RATE_4X4_VHT5SS4 = 273, + WL_RATE_4X4_VHT6SS4 = 274, + WL_RATE_4X4_VHT7SS4 = 275, + WL_RATE_4X4_VHT8SS4 = 276, + WL_RATE_4X4_VHT9SS4 = 277, + WL_RATE_P_4X4_VHT10SS4 = 278, + WL_RATE_P_4X4_VHT11SS4 = 279, + + + /**************************** + * TX Beamforming, 
4 chains * + **************************** + */ + + /* 1 Stream expanded + 3 */ + WL_RATE_1X4_TXBF_OFDM_6 = 280, + WL_RATE_1X4_TXBF_OFDM_9 = 281, + WL_RATE_1X4_TXBF_OFDM_12 = 282, + WL_RATE_1X4_TXBF_OFDM_18 = 283, + WL_RATE_1X4_TXBF_OFDM_24 = 284, + WL_RATE_1X4_TXBF_OFDM_36 = 285, + WL_RATE_1X4_TXBF_OFDM_48 = 286, + WL_RATE_1X4_TXBF_OFDM_54 = 287, + + WL_RATE_1X4_TXBF_MCS0 = 288, + WL_RATE_1X4_TXBF_MCS1 = 289, + WL_RATE_1X4_TXBF_MCS2 = 290, + WL_RATE_1X4_TXBF_MCS3 = 291, + WL_RATE_1X4_TXBF_MCS4 = 292, + WL_RATE_1X4_TXBF_MCS5 = 293, + WL_RATE_1X4_TXBF_MCS6 = 294, + WL_RATE_1X4_TXBF_MCS7 = 295, + WL_RATE_P_1X4_TXBF_MCS87 = 296, + WL_RATE_P_1X4_TXBF_MCS88 = 297, + + WL_RATE_1X4_TXBF_VHT0SS1 = 288, + WL_RATE_1X4_TXBF_VHT1SS1 = 289, + WL_RATE_1X4_TXBF_VHT2SS1 = 290, + WL_RATE_1X4_TXBF_VHT3SS1 = 291, + WL_RATE_1X4_TXBF_VHT4SS1 = 292, + WL_RATE_1X4_TXBF_VHT5SS1 = 293, + WL_RATE_1X4_TXBF_VHT6SS1 = 294, + WL_RATE_1X4_TXBF_VHT7SS1 = 295, + WL_RATE_1X4_TXBF_VHT8SS1 = 296, + WL_RATE_1X4_TXBF_VHT9SS1 = 297, + WL_RATE_P_1X4_TXBF_VHT10SS1 = 298, + WL_RATE_P_1X4_TXBF_VHT11SS1 = 299, + + /* 2 Streams expanded + 2 */ + WL_RATE_2X4_TXBF_SDM_MCS8 = 300, + WL_RATE_2X4_TXBF_SDM_MCS9 = 301, + WL_RATE_2X4_TXBF_SDM_MCS10 = 302, + WL_RATE_2X4_TXBF_SDM_MCS11 = 303, + WL_RATE_2X4_TXBF_SDM_MCS12 = 304, + WL_RATE_2X4_TXBF_SDM_MCS13 = 305, + WL_RATE_2X4_TXBF_SDM_MCS14 = 306, + WL_RATE_2X4_TXBF_SDM_MCS15 = 307, + WL_RATE_P_2X4_TXBF_SDM_MCS99 = 308, + WL_RATE_P_2X4_TXBF_SDM_MCS100 = 309, + + WL_RATE_2X4_TXBF_VHT0SS2 = 300, + WL_RATE_2X4_TXBF_VHT1SS2 = 301, + WL_RATE_2X4_TXBF_VHT2SS2 = 302, + WL_RATE_2X4_TXBF_VHT3SS2 = 303, + WL_RATE_2X4_TXBF_VHT4SS2 = 304, + WL_RATE_2X4_TXBF_VHT5SS2 = 305, + WL_RATE_2X4_TXBF_VHT6SS2 = 306, + WL_RATE_2X4_TXBF_VHT7SS2 = 307, + WL_RATE_2X4_TXBF_VHT8SS2 = 308, + WL_RATE_2X4_TXBF_VHT9SS2 = 309, + WL_RATE_P_2X4_TXBF_VHT10SS2 = 310, + WL_RATE_P_2X4_TXBF_VHT11SS2 = 311, + + /* 3 Streams expanded + 1 */ + WL_RATE_3X4_TXBF_SDM_MCS16 = 312, + WL_RATE_3X4_TXBF_SDM_MCS17 = 313, 
+ WL_RATE_3X4_TXBF_SDM_MCS18 = 314, + WL_RATE_3X4_TXBF_SDM_MCS19 = 315, + WL_RATE_3X4_TXBF_SDM_MCS20 = 316, + WL_RATE_3X4_TXBF_SDM_MCS21 = 317, + WL_RATE_3X4_TXBF_SDM_MCS22 = 318, + WL_RATE_3X4_TXBF_SDM_MCS23 = 319, + WL_RATE_P_3X4_TXBF_SDM_MCS101 = 320, + WL_RATE_P_3X4_TXBF_SDM_MCS102 = 321, + + WL_RATE_3X4_TXBF_VHT0SS3 = 312, + WL_RATE_3X4_TXBF_VHT1SS3 = 313, + WL_RATE_3X4_TXBF_VHT2SS3 = 314, + WL_RATE_3X4_TXBF_VHT3SS3 = 315, + WL_RATE_3X4_TXBF_VHT4SS3 = 316, + WL_RATE_3X4_TXBF_VHT5SS3 = 317, + WL_RATE_3X4_TXBF_VHT6SS3 = 318, + WL_RATE_3X4_TXBF_VHT7SS3 = 319, + WL_RATE_P_3X4_TXBF_VHT8SS3 = 320, + WL_RATE_P_3X4_TXBF_VHT9SS3 = 321, + WL_RATE_P_3X4_TXBF_VHT10SS3 = 322, + WL_RATE_P_3X4_TXBF_VHT11SS3 = 323, + + /* 4 Streams */ + WL_RATE_4X4_TXBF_SDM_MCS24 = 324, + WL_RATE_4X4_TXBF_SDM_MCS25 = 325, + WL_RATE_4X4_TXBF_SDM_MCS26 = 326, + WL_RATE_4X4_TXBF_SDM_MCS27 = 327, + WL_RATE_4X4_TXBF_SDM_MCS28 = 328, + WL_RATE_4X4_TXBF_SDM_MCS29 = 329, + WL_RATE_4X4_TXBF_SDM_MCS30 = 330, + WL_RATE_4X4_TXBF_SDM_MCS31 = 331, + WL_RATE_P_4X4_TXBF_SDM_MCS103 = 332, + WL_RATE_P_4X4_TXBF_SDM_MCS104 = 333, + + WL_RATE_4X4_TXBF_VHT0SS4 = 324, + WL_RATE_4X4_TXBF_VHT1SS4 = 325, + WL_RATE_4X4_TXBF_VHT2SS4 = 326, + WL_RATE_4X4_TXBF_VHT3SS4 = 327, + WL_RATE_4X4_TXBF_VHT4SS4 = 328, + WL_RATE_4X4_TXBF_VHT5SS4 = 329, + WL_RATE_4X4_TXBF_VHT6SS4 = 330, + WL_RATE_4X4_TXBF_VHT7SS4 = 331, + WL_RATE_P_4X4_TXBF_VHT8SS4 = 332, + WL_RATE_P_4X4_TXBF_VHT9SS4 = 333, + WL_RATE_P_4X4_TXBF_VHT10SS4 = 334, + WL_RATE_P_4X4_TXBF_VHT11SS4 = 335 + +} clm_rates_t; + +/* Number of rate codes */ +#define WL_NUMRATES 336 + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _bcmwifi_rates_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/bcmxtlv.c b/drivers/amlogic/wifi/bcmdhd/bcmxtlv.c new file mode 100644 index 0000000000000..26cfb9ac264a1 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/bcmxtlv.c @@ -0,0 +1,457 @@ +/* + * Driver O/S-independent utility routines + * + * Copyright (C) 1999-2016, Broadcom 
Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmxtlv.c 527361 2015-01-17 01:48:34Z $ + */ + +#include + +#include +#include + +#include + +#ifdef BCMDRIVER +#include +#else /* !BCMDRIVER */ + #include /* AS!!! */ +#include +#include +#include +#ifndef ASSERT +#define ASSERT(exp) +#endif +INLINE void* MALLOCZ(void *o, size_t s) { BCM_REFERENCE(o); return calloc(1, s); } +INLINE void MFREE(void *o, void *p, size_t s) { BCM_REFERENCE(o); BCM_REFERENCE(s); free(p); } +#endif /* !BCMDRIVER */ + +#include +#include + +static INLINE int bcm_xtlv_size_for_data(int dlen, bcm_xtlv_opts_t opts) +{ + return ((opts & BCM_XTLV_OPTION_ALIGN32) ? 
ALIGN_SIZE(dlen + BCM_XTLV_HDR_SIZE, 4) + : (dlen + BCM_XTLV_HDR_SIZE)); +} + +bcm_xtlv_t * +bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts) +{ + int sz; + /* advance to next elt */ + sz = BCM_XTLV_SIZE(elt, opts); + elt = (bcm_xtlv_t*)((uint8 *)elt + sz); + *buflen -= sz; + + /* validate next elt */ + if (!bcm_valid_xtlv(elt, *buflen, opts)) + return NULL; + + return elt; +} + +int +bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len, bcm_xtlv_opts_t opts) +{ + if (!tlv_buf || !buf || !len) + return BCME_BADARG; + + tlv_buf->opts = opts; + tlv_buf->size = len; + tlv_buf->head = buf; + tlv_buf->buf = buf; + return BCME_OK; +} + +uint16 +bcm_xtlv_buf_len(bcm_xtlvbuf_t *tbuf) +{ + if (tbuf == NULL) return 0; + return (uint16)(tbuf->buf - tbuf->head); +} +uint16 +bcm_xtlv_buf_rlen(bcm_xtlvbuf_t *tbuf) +{ + if (tbuf == NULL) return 0; + return tbuf->size - bcm_xtlv_buf_len(tbuf); +} +uint8 * +bcm_xtlv_buf(bcm_xtlvbuf_t *tbuf) +{ + if (tbuf == NULL) return NULL; + return tbuf->buf; +} +uint8 * +bcm_xtlv_head(bcm_xtlvbuf_t *tbuf) +{ + if (tbuf == NULL) return NULL; + return tbuf->head; +} +int +bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const void *data, uint16 dlen) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + size = bcm_xtlv_size_for_data(dlen, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + xtlv->id = htol16(type); + xtlv->len = htol16(dlen); + memcpy(xtlv->data, data, dlen); + tbuf->buf += size; + return BCME_OK; +} +int +bcm_xtlv_put_8(bcm_xtlvbuf_t *tbuf, uint16 type, const int8 data) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + size = bcm_xtlv_size_for_data(1, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + xtlv->id = htol16(type); + xtlv->len = htol16(sizeof(data)); + xtlv->data[0] = data; + tbuf->buf += size; + 
return BCME_OK; +} +int +bcm_xtlv_put_16(bcm_xtlvbuf_t *tbuf, uint16 type, const int16 data) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + size = bcm_xtlv_size_for_data(2, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + xtlv->id = htol16(type); + xtlv->len = htol16(sizeof(data)); + htol16_ua_store(data, xtlv->data); + tbuf->buf += size; + return BCME_OK; +} +int +bcm_xtlv_put_32(bcm_xtlvbuf_t *tbuf, uint16 type, const int32 data) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + size = bcm_xtlv_size_for_data(4, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + xtlv->id = htol16(type); + xtlv->len = htol16(sizeof(data)); + htol32_ua_store(data, xtlv->data); + tbuf->buf += size; + return BCME_OK; +} + +/* + * upacks xtlv record from buf checks the type + * copies data to callers buffer + * advances tlv pointer to next record + * caller's resposible for dst space check + */ +int +bcm_unpack_xtlv_entry(uint8 **tlv_buf, uint16 xpct_type, uint16 xpct_len, void *dst, + bcm_xtlv_opts_t opts) +{ + bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf; + uint16 len; + uint16 type; + + ASSERT(ptlv); + /* tlv headr is always packed in LE order */ + len = ltoh16(ptlv->len); + type = ltoh16(ptlv->id); + if (len == 0) { + /* z-len tlv headers: allow, but don't process */ + printf("z-len, skip unpack\n"); + } else { + if ((type != xpct_type) || + (len > xpct_len)) { + printf("xtlv_unpack Error: found[type:%d,len:%d] != xpct[type:%d,len:%d]\n", + type, len, xpct_type, xpct_len); + return BCME_BADARG; + } + /* copy tlv record to caller's buffer */ + memcpy(dst, ptlv->data, ptlv->len); + } + *tlv_buf += BCM_XTLV_SIZE(ptlv, opts); + return BCME_OK; +} + +/* + * packs user data into tlv record + * advances tlv pointer to next xtlv slot + * buflen is used for tlv_buf space check + */ +int 
+bcm_pack_xtlv_entry(uint8 **tlv_buf, uint16 *buflen, uint16 type, uint16 len, void *src, + bcm_xtlv_opts_t opts) +{ + bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf; + int size; + + ASSERT(ptlv); + ASSERT(src); + + size = bcm_xtlv_size_for_data(len, opts); + + /* copy data from tlv buffer to dst provided by user */ + if (size > *buflen) { + printf("bcm_pack_xtlv_entry: no space tlv_buf: requested:%d, available:%d\n", + size, *buflen); + return BCME_BADLEN; + } + ptlv->id = htol16(type); + ptlv->len = htol16(len); + + /* copy callers data */ + memcpy(ptlv->data, src, len); + + /* advance callers pointer to tlv buff */ + *tlv_buf += size; + /* decrement the len */ + *buflen -= (uint16)size; + return BCME_OK; +} + +/* + * unpack all xtlv records from the issue a callback + * to set function one call per found tlv record + */ +int +bcm_unpack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts, + bcm_xtlv_unpack_cbfn_t *cbfn) +{ + uint16 len; + uint16 type; + int res = BCME_OK; + int size; + bcm_xtlv_t *ptlv; + int sbuflen = buflen; + + ASSERT(!buflen || tlv_buf); + ASSERT(!buflen || cbfn); + + while (sbuflen >= (int)BCM_XTLV_HDR_SIZE) { + ptlv = (bcm_xtlv_t *)tlv_buf; + + /* tlv header is always packed in LE order */ + len = ltoh16(ptlv->len); + type = ltoh16(ptlv->id); + + size = bcm_xtlv_size_for_data(len, opts); + + sbuflen -= size; + /* check for possible buffer overrun */ + if (sbuflen < 0) + break; + + if ((res = cbfn(ctx, ptlv->data, type, len)) != BCME_OK) + break; + tlv_buf += size; + } + return res; +} + +int +bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts, + bcm_pack_xtlv_next_info_cbfn_t get_next, bcm_pack_xtlv_pack_next_cbfn_t pack_next, + int *outlen) +{ + int res = BCME_OK; + uint16 tlv_id; + uint16 tlv_len; + uint8 *startp; + uint8 *endp; + uint8 *buf; + bool more; + int size; + + ASSERT(get_next && pack_next); + + buf = (uint8 *)tlv_buf; + startp = buf; + endp = (uint8 *)buf + buflen; + more = TRUE; + 
while (more && (buf < endp)) { + more = get_next(ctx, &tlv_id, &tlv_len); + size = bcm_xtlv_size_for_data(tlv_len, opts); + if ((buf + size) >= endp) { + res = BCME_BUFTOOSHORT; + goto done; + } + + htol16_ua_store(tlv_id, buf); + htol16_ua_store(tlv_len, buf + sizeof(tlv_id)); + pack_next(ctx, tlv_id, tlv_len, buf + BCM_XTLV_HDR_SIZE); + buf += size; + } + + if (more) + res = BCME_BUFTOOSHORT; + +done: + if (outlen) { + *outlen = (int)(buf - startp); + } + return res; +} + +/* + * pack xtlv buffer from memory according to xtlv_desc_t + */ +int +bcm_pack_xtlv_buf_from_mem(void **tlv_buf, uint16 *buflen, xtlv_desc_t *items, + bcm_xtlv_opts_t opts) +{ + int res = BCME_OK; + uint8 *ptlv = (uint8 *)*tlv_buf; + + while (items->type != 0) { + if ((res = bcm_pack_xtlv_entry(&ptlv, + buflen, items->type, + items->len, items->ptr, opts) != BCME_OK)) { + break; + } + items++; + } + *tlv_buf = ptlv; /* update the external pointer */ + return res; +} + +/* + * unpack xtlv buffer to memory according to xtlv_desc_t + * + */ +int +bcm_unpack_xtlv_buf_to_mem(void *tlv_buf, int *buflen, xtlv_desc_t *items, bcm_xtlv_opts_t opts) +{ + int res = BCME_OK; + bcm_xtlv_t *elt; + + elt = bcm_valid_xtlv((bcm_xtlv_t *)tlv_buf, *buflen, opts) ? (bcm_xtlv_t *)tlv_buf : NULL; + if (!elt || !items) { + res = BCME_BADARG; + return res; + } + + for (; elt != NULL && res == BCME_OK; elt = bcm_next_xtlv(elt, buflen, opts)) { + /* find matches in desc_t items */ + xtlv_desc_t *dst_desc = items; + uint16 len = ltoh16(elt->len); + + while (dst_desc->type != 0) { + if (ltoh16(elt->id) == dst_desc->type) { + if (len != dst_desc->len) { + res = BCME_BADLEN; + } else { + memcpy(dst_desc->ptr, elt->data, len); + } + break; + } + dst_desc++; + } + } + + if (res == BCME_OK && *buflen != 0) + res = BCME_BUFTOOSHORT; + + return res; +} + +/* + * return data pointer of a given ID from xtlv buffer. + * If the specified xTLV ID is found, on return *data_len_out will contain + * the the data length of the xTLV ID. 
+ */ +void * +bcm_get_data_from_xtlv_buf(uint8 *tlv_buf, uint16 buflen, uint16 id, + uint16 *datalen_out, bcm_xtlv_opts_t opts) +{ + void *retptr = NULL; + uint16 type, len; + int size; + bcm_xtlv_t *ptlv; + int sbuflen = buflen; + + while (sbuflen >= (int)BCM_XTLV_HDR_SIZE) { + ptlv = (bcm_xtlv_t *)tlv_buf; + + /* tlv header is always packed in LE order */ + type = ltoh16(ptlv->id); + len = ltoh16(ptlv->len); + size = bcm_xtlv_size_for_data(len, opts); + + sbuflen -= size; + /* check for possible buffer overrun */ + if (sbuflen < 0) { + printf("%s %d: Invalid sbuflen %d\n", + __FUNCTION__, __LINE__, sbuflen); + break; + } + + if (id == type) { + retptr = ptlv->data; + if (datalen_out) { + *datalen_out = len; + } + break; + } + tlv_buf += size; + } + + return retptr; +} + +int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts) +{ + int size; /* entire size of the XTLV including header, data, and optional padding */ + int len; /* XTLV's value real length wthout padding */ + + len = BCM_XTLV_LEN(elt); + + size = bcm_xtlv_size_for_data(len, opts); + + return size; +} diff --git a/drivers/net/wireless/bcmdhd/dhd.h b/drivers/amlogic/wifi/bcmdhd/dhd.h similarity index 57% rename from drivers/net/wireless/bcmdhd/dhd.h rename to drivers/amlogic/wifi/bcmdhd/dhd.h index 6ed6f2f31a8fb..4cbbbb338fc0c 100644 --- a/drivers/net/wireless/bcmdhd/dhd.h +++ b/drivers/amlogic/wifi/bcmdhd/dhd.h @@ -4,9 +4,30 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd.h 504503 2014-09-24 11:28:56Z $ + * + * <> + * + * $Id: dhd.h 610267 2016-01-06 16:03:53Z $ */ /**************** @@ -30,6 +51,7 @@ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK) #include #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */ +#include /* The kernel threading is sdio-specific */ struct task_struct; struct sched_param; @@ -48,15 +70,26 @@ int get_scheduler_policy(struct task_struct *p); #include #endif /* (BCMWDF) */ -#if defined(WL11U) && !defined(MFP) -#define MFP /* Applying interaction with MFP by spec HS2.0 REL2 */ -#endif /* WL11U */ +#ifdef DEBUG_DPC_THREAD_WATCHDOG +#define MAX_RESCHED_CNT 600 +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) && LINUX_VERSION_CODE < \ + KERNEL_VERSION(3, 18, 0) || defined(CONFIG_BCMDHD_VENDOR_EXT)) +//#define WL_VENDOR_EXT_SUPPORT +#endif /* 3.13.0 <= LINUX_KERNEL_VERSION < 3.18.0 || CONFIG_BCMDHD_VENDOR_EXT */ +#if defined(CONFIG_ANDROID) && defined(WL_VENDOR_EXT_SUPPORT) +#if !defined(GSCAN_SUPPORT) +#define GSCAN_SUPPORT +#endif +#endif /* CONFIG_ANDROID && WL_VENDOR_EXT_SUPPORT */ #if defined(KEEP_ALIVE) /* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */ #define KEEP_ALIVE_PERIOD 55000 #define NULL_PKT_STR "null_pkt" #endif /* KEEP_ALIVE */ + /* Forward decls */ struct dhd_bus; struct dhd_prot; @@ -69,23 +102,37 @@ enum dhd_bus_state { DHD_BUS_LOAD, /* Download access only (CPU reset) */ DHD_BUS_DATA, /* Ready for frame transfers */ DHD_BUS_SUSPEND, /* Bus has been suspended */ + DHD_BUS_DOWN_IN_PROGRESS, /* Bus going Down */ }; -#if defined(NDISVER) -#if (NDISVER >= 0x0600) -/* Firmware requested operation mode */ -#define STA_MASK 0x0001 -#define HOSTAPD_MASK 0x0002 -#define WFD_MASK 0x0004 -#define SOFTAP_FW_MASK 0x0008 -#define P2P_GO_ENABLED 0x0010 -#define P2P_GC_ENABLED 0x0020 -#define CONCURENT_MASK 0x00F0 -#endif /* 
(NDISVER >= 0x0600) */ -#endif /* #if defined(NDISVER) */ - -#define DHD_IF_ROLE_STA(role) (role == WLC_E_IF_ROLE_STA ||\ - role == WLC_E_IF_ROLE_P2P_CLIENT) +/* + * Bit fields to Indicate clean up process that wait till they are finished. + * Future synchronizable processes can add their bit filed below and update + * their functionalities accordingly + */ +#define DHD_BUS_BUSY_IN_TX 0x01 +#define DHD_BUS_BUSY_IN_SEND_PKT 0x02 +#define DHD_BUS_BUSY_IN_DPC 0x04 +#define DHD_BUS_BUSY_IN_WD 0x08 +#define DHD_BUS_BUSY_IN_IOVAR 0x10 +#define DHD_BUS_BUSY_IN_DHD_IOVAR 0x20 +#define DHD_BUS_BUSY_IN_SUSPEND 0x40 +#define DHD_BUS_BUSY_IN_RESUME 0x80 +#define DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS 0x100 +#define DHD_BUS_BUSY_RPM_SUSPEND_DONE 0x200 +#define DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS 0x400 +#define DHD_BUS_BUSY_RPM_ALL (DHD_BUS_BUSY_RPM_SUSPEND_DONE | \ + DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS | \ + DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) + +/* Download Types */ +typedef enum download_type { + FW, + NVRAM, + CLM_BLOB, + CLMINFO +} download_type_t; + /* For supporting multiple interfaces */ #define DHD_MAX_IFS 16 @@ -105,9 +152,14 @@ enum dhd_op_flags { DHD_FLAG_P2P_GO_MODE = (1 << (6)), DHD_FLAG_MBSS_MODE = (1 << (7)), /* MBSS in future */ DHD_FLAG_IBSS_MODE = (1 << (8)), - DHD_FLAG_MFG_MODE = (1 << (9)) + DHD_FLAG_MFG_MODE = (1 << (9)), + DHD_FLAG_RSDB_MODE = (1 << (10)), + DHD_FLAG_MP2P_MODE = (1 << (11)) }; +#define DHD_OPMODE_SUPPORTED(dhd, opmode_flag) \ + (dhd ? 
((((dhd_pub_t *)dhd)->op_mode) & opmode_flag) : -1) + /* Max sequential TX/RX Control timeouts to set HANG event */ #ifndef MAX_CNTL_TX_TIMEOUT #define MAX_CNTL_TX_TIMEOUT 2 @@ -126,6 +178,21 @@ enum dhd_op_flags { #ifndef POWERUP_WAIT_MS #define POWERUP_WAIT_MS 2000 /* ms: time out in waiting wifi to come up */ #endif +#define MAX_NVRAMBUF_SIZE (16 * 1024) /* max nvram buf size */ +#define MAX_CLMINFO_BUF_SIZE (4 * 1024) /* max clminfo buf size */ +#define MAX_CLM_BUF_SIZE (48 * 1024) /* max clm blob size */ +#ifdef DHD_DEBUG +#define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max time out for joining AP */ +#define DHD_SCAN_DEF_TIMEOUT 10000 /* ms: Max time out for scan in progress */ +#endif + +#ifndef CONFIG_BCMDHD_CLM_PATH +#define CONFIG_BCMDHD_CLM_PATH "/system/etc/wifi/bcmdhd_clm.blob" +#endif /* CONFIG_BCMDHD_CLM_PATH */ +#define WL_CCODE_NULL_COUNTRY "#n" + +#define FW_VER_STR_LEN 128 +#define CLM_VER_STR_LEN 128 enum dhd_bus_wake_state { WAKE_LOCK_OFF, @@ -154,7 +221,58 @@ enum dhd_prealloc_index { DHD_PREALLOC_DHD_INFO = 7, DHD_PREALLOC_DHD_WLFC_INFO = 8, DHD_PREALLOC_IF_FLOW_LKUP = 9, - DHD_PREALLOC_FLOWRING = 10 + DHD_PREALLOC_MEMDUMP_BUF = 10, + DHD_PREALLOC_MEMDUMP_RAM = 11, + DHD_PREALLOC_DHD_WLFC_HANGER = 12, + DHD_PREALLOC_PKTID_MAP = 13, + DHD_PREALLOC_PKTID_MAP_IOCTL = 14, + DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15 +}; + +enum dhd_dongledump_mode { + DUMP_DISABLED = 0, + DUMP_MEMONLY, + DUMP_MEMFILE, + DUMP_MEMFILE_BUGON, + DUMP_MEMFILE_MAX +}; + +enum dhd_dongledump_type { + DUMP_TYPE_RESUMED_ON_TIMEOUT = 1, + DUMP_TYPE_D3_ACK_TIMEOUT, + DUMP_TYPE_DONGLE_TRAP, + DUMP_TYPE_MEMORY_CORRUPTION, + DUMP_TYPE_PKTID_AUDIT_FAILURE, + DUMP_TYPE_SCAN_TIMEOUT, + DUMP_TYPE_SCAN_BUSY, + DUMP_TYPE_BY_SYSDUMP, + DUMP_TYPE_BY_LIVELOCK, + DUMP_TYPE_AP_LINKUP_FAILURE +}; + +enum dhd_hang_reason { + HANG_REASON_MASK = 0x8000, + HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001, + HANG_REASON_DONGLE_TRAP = 0x8002, + HANG_REASON_D3_ACK_TIMEOUT = 0x8003, + HANG_REASON_BUS_DOWN = 
0x8004, + HANG_REASON_PCIE_LINK_DOWN = 0x8005, + HANG_REASON_MSGBUF_LIVELOCK = 0x8006, + HANG_REASON_P2P_IFACE_DEL_FAILURE = 0x8007, + HANG_REASON_HT_AVAIL_ERROR = 0x8008, + HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009, + HANG_REASON_MAX = 0x800a +}; + +enum dhd_rsdb_scan_features { + /* Downgraded scan feature for AP active */ + RSDB_SCAN_DOWNGRADED_AP_SCAN = 0x01, + /* Downgraded scan feature for P2P Discovery */ + RSDB_SCAN_DOWNGRADED_P2P_DISC_SCAN = 0x02, + /* Enable channel pruning for ROAM SCAN */ + RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM = 0x10, + /* Enable channel pruning for any SCAN */ + RSDB_SCAN_DOWNGRADED_CH_PRUNE_ALL = 0x20 }; /* Packet alignment for most efficient SDIO (can change based on platform) */ @@ -166,6 +284,21 @@ enum dhd_prealloc_index { #endif #endif +/** + * DMA-able buffer parameters + * - dmaaddr_t is 32bits on a 32bit host. + * dhd_dma_buf::pa may not be used as a sh_addr_t, bcm_addr64_t or uintptr + * - dhd_dma_buf::_alloced is ONLY for freeing a DMA-able buffer. + */ +typedef struct dhd_dma_buf { + void *va; /* virtual address of buffer */ + uint32 len; /* user requested buffer length */ + dmaaddr_t pa; /* physical address of buffer */ + void *dmah; /* dma mapper handle */ + void *secdma; /* secure dma sec_cma_info handle */ + uint32 _alloced; /* actual size of buffer allocated with align and pad */ +} dhd_dma_buf_t; + /* host reordering packts logic */ /* followed the structure to hold the reorder buffers (void **p) */ typedef struct reorder_info { @@ -196,6 +329,38 @@ enum { #endif /* DHDTCPACK_SUPPRESS */ +/* + * Accumulating the queue lengths of all flowring queues in a parent object, + * to assert flow control, when the cummulative queue length crosses an upper + * threshold defined on a parent object. Upper threshold may be maintained + * at a station level, at an interface level, or at a dhd instance. 
+ * + * cumm_ctr_t abstraction: + * cumm_ctr_t abstraction may be enhanced to use an object with a hysterisis + * pause on/off threshold callback. + * All macros use the address of the cummulative length in the parent objects. + * + * BCM_GMAC3 builds use a single perimeter lock, as opposed to a per queue lock. + * Cummulative counters in parent objects may be updated without spinlocks. + * + * In non BCM_GMAC3, if a cummulative queue length is desired across all flows + * belonging to either of (a station, or an interface or a dhd instance), then + * an atomic operation is required using an atomic_t cummulative counters or + * using a spinlock. BCM_ROUTER_DHD uses the Linux atomic_t construct. + */ + +/* Cummulative length not supported. */ +typedef uint32 cumm_ctr_t; +#define DHD_CUMM_CTR_PTR(clen) ((cumm_ctr_t*)(clen)) +#define DHD_CUMM_CTR(clen) *(DHD_CUMM_CTR_PTR(clen)) /* accessor */ +#define DHD_CUMM_CTR_READ(clen) DHD_CUMM_CTR(clen) /* read access */ +#define DHD_CUMM_CTR_INIT(clen) \ + ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); +#define DHD_CUMM_CTR_INCR(clen) \ + ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); +#define DHD_CUMM_CTR_DECR(clen) \ + ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); + /* DMA'ing r/w indices for rings supported */ #ifdef BCM_INDX_TCM /* FW gets r/w indices in TCM */ #define DMA_INDX_ENAB(dma_indxsup) 0 @@ -217,6 +382,27 @@ typedef struct { } tdls_peer_tbl_t; #endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */ +#ifdef DHD_LOG_DUMP +/* below structure describe ring buffer. 
*/ +struct dhd_log_dump_buf +{ + spinlock_t lock; + unsigned int wraparound; + unsigned long max; + unsigned int remain; + char* present; + char* front; + char* buffer; +}; + +#define DHD_LOG_DUMP_BUFFER_SIZE (1024 * 1024) +#define DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE 256 + +extern void dhd_log_dump_print(const char *fmt, ...); +extern char *dhd_log_dump_get_timestamp(void); +#endif /* DHD_LOG_DUMP */ +#define DHD_COMMON_DUMP_PATH "/data/misc/wifi/log/" + /* Common structure for module and instance linkage */ typedef struct dhd_pub { /* Linkage ponters */ @@ -235,6 +421,7 @@ typedef struct dhd_pub { bool txoff; /* Transmit flow-controlled */ bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */ enum dhd_bus_state busstate; + uint dhd_bus_busy_state; /* Bus busy state */ uint hdrlen; /* Total DHD header length (proto + bus) */ uint maxctl; /* Max size rxctl request from proto to bus */ uint rxsz; /* Rx buffer size bus module should use */ @@ -261,7 +448,8 @@ typedef struct dhd_pub { ulong rx_dropped; /* Packets dropped locally (no memory) */ ulong rx_flushed; /* Packets flushed due to unscheduled sendup thread */ ulong wd_dpc_sched; /* Number of times dhd dpc scheduled by watchdog timer */ - + ulong rx_pktgetfail; /* Number of PKTGET failures in DHD on RX */ + ulong tx_pktgetfail; /* Number of PKTGET failures in DHD on TX */ ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. 
*/ ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */ ulong fc_packets; /* Number of flow control pkts recvd */ @@ -296,6 +484,10 @@ typedef struct dhd_pub { int pktfilter_count; wl_country_t dhd_cspec; /* Current Locale info */ +#ifdef CUSTOM_COUNTRY_CODE + u32 dhd_cflags; +#endif /* CUSTOM_COUNTRY_CODE */ + bool force_country_change; char eventmask[WL_EVENTING_MASK_LEN]; int op_mode; /* STA, HostAPD, WFD, SoftAP */ @@ -305,14 +497,11 @@ typedef struct dhd_pub { */ /* #define WL_ENABLE_P2P_IF 1 */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */ struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */ -#endif +#endif -#ifdef WLBTAMP - uint16 maxdatablks; -#endif /* WLBTAMP */ #ifdef PROP_TXSTATUS bool wlfc_enabled; int wlfc_mode; @@ -340,16 +529,28 @@ typedef struct dhd_pub { /* platform specific function for wlfc_enable and wlfc_deinit */ void (*plat_init)(void *dhd); void (*plat_deinit)(void *dhd); +#ifdef DHD_WLFC_THREAD + bool wlfc_thread_go; + struct task_struct* wlfc_thread; + wait_queue_head_t wlfc_wqhead; +#endif /* DHD_WLFC_THREAD */ #endif /* PROP_TXSTATUS */ #ifdef PNO_SUPPORT void *pno_state; +#endif +#ifdef RTT_SUPPORT + void *rtt_state; #endif bool dongle_isolation; bool dongle_trap_occured; /* flag for sending HANG event to upper layer */ int hang_was_sent; int rxcnt_timeout; /* counter rxcnt timeout to send HANG */ int txcnt_timeout; /* counter txcnt timeout to send HANG */ +#ifdef BCMPCIE + int d3ackcnt_timeout; /* counter d3ack timeout to send HANG */ +#endif /* BCMPCIE */ bool hang_report; /* enable hang report by default */ + uint16 hang_reason; /* reason codes for HANG event */ #ifdef WLMEDIA_HTSF uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */ #endif @@ -357,7 +558,8 @@ typedef struct dhd_pub { bool tdls_enable; #endif struct reorder_info 
*reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS]; - char fw_capabilities[WLC_IOCTL_SMLEN]; + #define WLC_IOCTL_MAXBUF_FWCAP 512 + char fw_capabilities[WLC_IOCTL_MAXBUF_FWCAP]; #define MAXSKBPEND 1024 void *skbbuf[MAXSKBPEND]; uint32 store_idx; @@ -371,38 +573,35 @@ typedef struct dhd_pub { #if defined(ARP_OFFLOAD_SUPPORT) uint32 arp_version; #endif -#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X) - bool fw_4way_handshake; /* Whether firmware will to do the 4way handshake. */ -#endif -#if defined(CUSTOM_PLATFORM_NV_TEGRA) -#ifdef PKT_FILTER_SUPPORT - uint pkt_filter_mode; - uint pkt_filter_ports_count; - uint16 pkt_filter_ports[WL_PKT_FILTER_PORTS_MAX]; -#endif /* PKT_FILTER_SUPPORT */ -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ +#ifdef DEBUG_DPC_THREAD_WATCHDOG + bool dhd_bug_on; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ #ifdef CUSTOM_SET_CPUCORE struct task_struct * current_dpc; struct task_struct * current_rxf; int chan_isvht80; #endif /* CUSTOM_SET_CPUCORE */ + void *sta_pool; /* pre-allocated pool of sta objects */ void *staid_allocator; /* allocator of sta indexes */ - +#ifdef PCIE_FULL_DONGLE + bool flow_rings_inited; /* set this flag after initializing flow rings */ +#endif /* PCIE_FULL_DONGLE */ void *flowid_allocator; /* unique flowid allocator */ void *flow_ring_table; /* flow ring table, include prot and bus info */ void *if_flow_lkup; /* per interface flowid lkup hash table */ void *flowid_lock; /* per os lock for flowid info protection */ + void *flowring_list_lock; /* per os lock for flowring list protection */ uint32 num_flow_rings; - - uint32 d2h_sync_mode; /* D2H DMA completion sync mode */ - + cumm_ctr_t cumm_ctr; /* cumm queue length placeholder */ + uint32 d2h_sync_mode; /* D2H DMA completion sync mode */ uint8 flow_prio_map[NUMPRIO]; uint8 flow_prio_map_type; char enable_log[MAX_EVENT]; bool dma_d2h_ring_upd_support; bool dma_h2d_ring_upd_support; + #ifdef DHD_WMF bool wmf_ucast_igmp; #ifdef DHD_IGMP_UCQUERY @@ -412,19 
+611,83 @@ typedef struct dhd_pub { bool wmf_ucast_upnp; #endif #endif /* DHD_WMF */ -#ifdef DHD_UNICAST_DHCP - bool dhcp_unicast; -#endif /* DHD_UNICAST_DHCP */ #ifdef DHD_L2_FILTER - bool block_ping; -#endif -#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) + unsigned long l2_filter_cnt; /* for L2_FILTER ARP table timeout */ +#endif /* DHD_L2_FILTER */ + uint8 *soc_ram; + uint32 soc_ram_length; + uint32 memdump_type; +#ifdef DHD_FW_COREDUMP + uint32 memdump_enabled; +#endif /* DHD_FW_COREDUMP */ +#ifdef PCIE_FULL_DONGLE +#ifdef WLTDLS tdls_peer_tbl_t peer_tbl; -#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */ +#endif /* WLTDLS */ +#endif /* PCIE_FULL_DONGLE */ +#ifdef CACHE_FW_IMAGES + char *cached_fw; + int cached_fw_length; + char *cached_nvram; + int cached_nvram_length; +#endif +#ifdef WLTDLS + uint32 tdls_mode; +#endif +#ifdef DHD_LOSSLESS_ROAMING + uint8 dequeue_prec_map; +#endif + struct mutex wl_up_lock; + bool is_fw_download_done; +#ifdef DHD_LOG_DUMP + struct dhd_log_dump_buf dld_buf; + unsigned int dld_enable; +#endif /* DHD_LOG_DUMP */ + char *clm_path; /* module_param: path to clm vars file */ char *conf_path; /* module_param: path to config vars file */ struct dhd_conf *conf; /* Bus module handle */ } dhd_pub_t; +#if defined(PCIE_FULL_DONGLE) + +/* Packet Tag for PCIE Full Dongle DHD */ +typedef struct dhd_pkttag_fd { + uint16 flowid; /* Flowring Id */ + uint16 dataoff; /* start of packet */ + uint16 dma_len; /* pkt len for DMA_MAP/UNMAP */ + dmaaddr_t pa; /* physical address */ + void *dmah; /* dma mapper handle */ + void *secdma; /* secure dma sec_cma_info handle */ +} dhd_pkttag_fd_t; + +/* Packet Tag for DHD PCIE Full Dongle */ +#define DHD_PKTTAG_FD(pkt) ((dhd_pkttag_fd_t *)(PKTTAG(pkt))) + +#define DHD_PKT_GET_FLOWID(pkt) ((DHD_PKTTAG_FD(pkt))->flowid) +#define DHD_PKT_SET_FLOWID(pkt, pkt_flowid) \ + DHD_PKTTAG_FD(pkt)->flowid = (uint16)(pkt_flowid) + +#define DHD_PKT_GET_DATAOFF(pkt) ((DHD_PKTTAG_FD(pkt))->dataoff) +#define 
DHD_PKT_SET_DATAOFF(pkt, pkt_dataoff) \ + DHD_PKTTAG_FD(pkt)->dataoff = (uint16)(pkt_dataoff) + +#define DHD_PKT_GET_DMA_LEN(pkt) ((DHD_PKTTAG_FD(pkt))->dma_len) +#define DHD_PKT_SET_DMA_LEN(pkt, pkt_dma_len) \ + DHD_PKTTAG_FD(pkt)->dma_len = (uint16)(pkt_dma_len) + +#define DHD_PKT_GET_PA(pkt) ((DHD_PKTTAG_FD(pkt))->pa) +#define DHD_PKT_SET_PA(pkt, pkt_pa) \ + DHD_PKTTAG_FD(pkt)->pa = (dmaaddr_t)(pkt_pa) + +#define DHD_PKT_GET_DMAH(pkt) ((DHD_PKTTAG_FD(pkt))->dmah) +#define DHD_PKT_SET_DMAH(pkt, pkt_dmah) \ + DHD_PKTTAG_FD(pkt)->dmah = (void *)(pkt_dmah) + +#define DHD_PKT_GET_SECDMA(pkt) ((DHD_PKTTAG_FD(pkt))->secdma) +#define DHD_PKT_SET_SECDMA(pkt, pkt_secdma) \ + DHD_PKTTAG_FD(pkt)->secdma = (void *)(pkt_secdma) +#endif /* PCIE_FULL_DONGLE */ + #if defined(BCMWDF) typedef struct { dhd_pub_t *dhd_pub; @@ -446,18 +709,13 @@ WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_ } while (0) #define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 200) #define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0) - #ifdef CUSTOMER_HW4 - #define DHD_PM_RESUME_RETURN_ERROR(a) do { \ - if (dhd_mmc_suspend) { \ - printf("%s[%d]: mmc is still in suspend state!!!\n", \ - __FUNCTION__, __LINE__); \ - return a; \ - } \ - } while (0) - #else - #define DHD_PM_RESUME_RETURN_ERROR(a) do { \ - if (dhd_mmc_suspend) return a; } while (0) - #endif + #define DHD_PM_RESUME_RETURN_ERROR(a) do { \ + if (dhd_mmc_suspend) { \ + printf("%s[%d]: mmc is still in suspend state!!!\n", \ + __FUNCTION__, __LINE__); \ + return a; \ + } \ + } while (0) #define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0) #define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a); @@ -503,42 +761,52 @@ int dhd_pno_clean(dhd_pub_t *dhd); */ extern int dhd_os_wake_lock(dhd_pub_t *pub); extern int dhd_os_wake_unlock(dhd_pub_t *pub); +extern int dhd_event_wake_lock(dhd_pub_t *pub); +extern int dhd_event_wake_unlock(dhd_pub_t *pub); +extern int 
dhd_os_wake_lock_waive(dhd_pub_t *pub); +extern int dhd_os_wake_lock_restore(dhd_pub_t *pub); extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub); extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val); extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val); extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub); extern int dhd_os_wd_wake_lock(dhd_pub_t *pub); extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub); +extern void dhd_os_wake_lock_init(struct dhd_info *dhd); +extern void dhd_os_wake_lock_destroy(struct dhd_info *dhd); #ifdef BCMPCIE_OOB_HOST_WAKE -extern int dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val); -extern int dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub); +extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val); +extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub); #endif /* BCMPCIE_OOB_HOST_WAKE */ -extern int dhd_os_wake_lock_waive(dhd_pub_t *pub); -extern int dhd_os_wake_lock_restore(dhd_pub_t *pub); +#ifdef DHD_USE_SCAN_WAKELOCK +extern void dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val); +extern void dhd_os_scan_wake_unlock(dhd_pub_t *pub); +#endif /* BCMPCIE_SCAN_WAKELOCK */ inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_init(&dhdp->wl_softap_lock); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ } inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_lock(&dhdp->wl_softap_lock); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ } inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_unlock(&dhdp->wl_softap_lock); #endif /* (LINUX_VERSION_CODE >= 
KERNEL_VERSION(2, 6, 27)) */ } #define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub) #define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub) +#define DHD_EVENT_WAKE_LOCK(pub) dhd_event_wake_lock(pub) +#define DHD_EVENT_WAKE_UNLOCK(pub) dhd_event_wake_unlock(pub) #define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub) #define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \ dhd_os_wake_lock_rx_timeout_enable(pub, val) @@ -546,19 +814,53 @@ inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp) dhd_os_wake_lock_ctrl_timeout_enable(pub, val) #define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \ dhd_os_wake_lock_ctrl_timeout_cancel(pub) -#define DHD_OS_WAKE_LOCK_WAIVE(pub) dhd_os_wake_lock_waive(pub) -#define DHD_OS_WAKE_LOCK_RESTORE(pub) dhd_os_wake_lock_restore(pub) +#define DHD_OS_WAKE_LOCK_WAIVE(pub) dhd_os_wake_lock_waive(pub) +#define DHD_OS_WAKE_LOCK_RESTORE(pub) dhd_os_wake_lock_restore(pub) +#define DHD_OS_WAKE_LOCK_INIT(dhd) dhd_os_wake_lock_init(dhd); +#define DHD_OS_WAKE_LOCK_DESTROY(dhd) dhd_os_wake_lock_destroy(dhd); #define DHD_OS_WD_WAKE_LOCK(pub) dhd_os_wd_wake_lock(pub) #define DHD_OS_WD_WAKE_UNLOCK(pub) dhd_os_wd_wake_unlock(pub) + #ifdef BCMPCIE_OOB_HOST_WAKE #define OOB_WAKE_LOCK_TIMEOUT 500 -#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val) -#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub) +#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val) +#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub) #endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK +#ifdef DHD_DEBUG_SCAN_WAKELOCK +#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) \ + do { \ + printf("call wake_lock_scan: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_scan_wake_lock_timeout(pub, val); \ + } while (0) +#define DHD_OS_SCAN_WAKE_UNLOCK(pub) \ + do { \ + printf("call wake_unlock_scan: %s %d\n", \ + __FUNCTION__, __LINE__); \ + 
dhd_os_scan_wake_unlock(pub); \ + } while (0) +#else +#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_scan_wake_lock_timeout(pub, val) +#define DHD_OS_SCAN_WAKE_UNLOCK(pub) dhd_os_scan_wake_unlock(pub) +#endif /* DHD_DEBUG_SCAN_WAKELOCK */ +#else +#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) +#define DHD_OS_SCAN_WAKE_UNLOCK(pub) +#endif /* DHD_USE_SCAN_WAKELOCK */ #define DHD_PACKET_TIMEOUT_MS 500 #define DHD_EVENT_TIMEOUT_MS 1500 +#define SCAN_WAKE_LOCK_TIMEOUT 10000 +/* Enum for IOCTL recieved status */ +typedef enum dhd_ioctl_recieved_status +{ + IOCTL_WAIT = 0, + IOCTL_RETURN_ON_SUCCESS, + IOCTL_RETURN_ON_TRAP, + IOCTL_RETURN_ON_BUS_STOP +} dhd_ioctl_recieved_status_t; /* interface operations (register, remove) should be atomic, use this lock to prevent race * condition among wifi on/off and interface operation functions @@ -633,19 +935,58 @@ extern void dhd_sched_dpc(dhd_pub_t *dhdp); /* Notify tx completion */ extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success); - +extern void dhd_dpc_enable(dhd_pub_t *dhdp); + +#define WIFI_FEATURE_INFRA 0x0001 /* Basic infrastructure mode */ +#define WIFI_FEATURE_INFRA_5G 0x0002 /* Support for 5 GHz Band */ +#define WIFI_FEATURE_HOTSPOT 0x0004 /* Support for GAS/ANQP */ +#define WIFI_FEATURE_P2P 0x0008 /* Wifi-Direct */ +#define WIFI_FEATURE_SOFT_AP 0x0010 /* Soft AP */ +#define WIFI_FEATURE_GSCAN 0x0020 /* Google-Scan APIs */ +#define WIFI_FEATURE_NAN 0x0040 /* Neighbor Awareness Networking */ +#define WIFI_FEATURE_D2D_RTT 0x0080 /* Device-to-device RTT */ +#define WIFI_FEATURE_D2AP_RTT 0x0100 /* Device-to-AP RTT */ +#define WIFI_FEATURE_BATCH_SCAN 0x0200 /* Batched Scan (legacy) */ +#define WIFI_FEATURE_PNO 0x0400 /* Preferred network offload */ +#define WIFI_FEATURE_ADDITIONAL_STA 0x0800 /* Support for two STAs */ +#define WIFI_FEATURE_TDLS 0x1000 /* Tunnel directed link setup */ +#define WIFI_FEATURE_TDLS_OFFCHANNEL 0x2000 /* Support for TDLS off channel */ +#define WIFI_FEATURE_EPR 0x4000 /* 
Enhanced power reporting */ +#define WIFI_FEATURE_AP_STA 0x8000 /* Support for AP STA Concurrency */ +#define WIFI_FEATURE_LINKSTAT 0x10000 /* Support for Linkstats */ + +#define MAX_FEATURE_SET_CONCURRRENT_GROUPS 3 + +extern int dhd_dev_get_feature_set(struct net_device *dev); +extern int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num); +#ifdef CUSTOM_FORCE_NODFS_FLAG +extern int dhd_dev_set_nodfs(struct net_device *dev, uint nodfs); +#endif /* CUSTOM_FORCE_NODFS_FLAG */ /* OS independent layer functions */ +extern void dhd_os_dhdiovar_lock(dhd_pub_t *pub); +extern void dhd_os_dhdiovar_unlock(dhd_pub_t *pub); extern int dhd_os_proto_block(dhd_pub_t * pub); extern int dhd_os_proto_unblock(dhd_pub_t * pub); -extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool * pending); +extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition); extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub); extern unsigned int dhd_os_get_ioctl_resp_timeout(void); extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec); +extern void dhd_os_ioctl_resp_lock(dhd_pub_t * pub); +extern void dhd_os_ioctl_resp_unlock(dhd_pub_t * pub); +extern int dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason); + +#define DHD_OS_IOCTL_RESP_LOCK(x) +#define DHD_OS_IOCTL_RESP_UNLOCK(x) + extern int dhd_os_get_image_block(char * buf, int len, void * image); +extern int dhd_os_get_image_size(void * image); extern void * dhd_os_open_image(char * filename); extern void dhd_os_close_image(void * image); extern void dhd_os_wd_timer(void *bus, uint wdtick); +#ifdef DHD_PCIE_RUNTIMEPM +extern void dhd_os_runtimepm_timer(void *bus, uint tick); +#endif /* DHD_PCIE_RUNTIMEPM */ extern void dhd_os_sdlock(dhd_pub_t * pub); extern void dhd_os_sdunlock(dhd_pub_t * pub); extern void dhd_os_sdlock_txq(dhd_pub_t * pub); @@ -654,14 +995,19 @@ extern void dhd_os_sdlock_rxq(dhd_pub_t * pub); extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub); extern 
void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub); #ifdef DHDTCPACK_SUPPRESS -extern void dhd_os_tcpacklock(dhd_pub_t *pub); -extern void dhd_os_tcpackunlock(dhd_pub_t *pub); +extern unsigned long dhd_os_tcpacklock(dhd_pub_t *pub); +extern void dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags); #endif /* DHDTCPACK_SUPPRESS */ extern int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr); extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff); extern int dhd_custom_get_mac_address(void *adapter, unsigned char *buf); +#ifdef CUSTOM_COUNTRY_CODE +extern void get_customized_country_code(void *adapter, char *country_iso_code, +wl_country_t *cspec, u32 flags); +#else extern void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec); +#endif /* CUSTOM_COUNTRY_CODE */ extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub); extern void dhd_os_sdlock_eventq(dhd_pub_t * pub); extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub); @@ -684,6 +1030,14 @@ extern int dhd_keep_alive_onoff(dhd_pub_t *dhd); extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable); #endif +#if defined(DHD_FW_COREDUMP) +void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size); +#endif /* DHD_FW_COREDUMP */ + +#ifdef SUPPORT_AP_POWERSAVE +extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable); +#endif /* SUPPORT_AP_POWERSAVE */ + #ifdef PKT_FILTER_SUPPORT #define DHD_UNICAST_FILTER_NUM 0 @@ -692,32 +1046,17 @@ extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable); #define DHD_MULTICAST6_FILTER_NUM 3 #define DHD_MDNS_FILTER_NUM 4 #define DHD_ARP_FILTER_NUM 5 - -#if defined(CUSTOM_PLATFORM_NV_TEGRA) -/* Port based packet filtering command actions */ -#define PKT_FILTER_PORTS_CLEAR 0 -#define PKT_FILTER_PORTS_ADD 1 -#define PKT_FILTER_PORTS_DEL 2 -#define PKT_FILTER_PORTS_LOOPBACK 3 -#define PKT_FILTER_PORTS_MAX PKT_FILTER_PORTS_LOOPBACK -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) 
*/ - -extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val); +extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val); extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd); extern int net_os_enable_packet_filter(struct net_device *dev, int val); extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num); -#if defined(CUSTOM_PLATFORM_NV_TEGRA) -extern void dhd_set_packet_filter_mode(struct net_device *dev, char *command); -extern int dhd_set_packet_filter_ports(struct net_device *dev, char *command); -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ +extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val); #endif /* PKT_FILTER_SUPPORT */ extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); extern bool dhd_support_sta_mode(dhd_pub_t *dhd); -#ifdef DHD_DEBUG extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size); -#endif /* DHD_DEBUG */ typedef struct { uint32 limit; /* Expiration time (usec) */ @@ -731,6 +1070,14 @@ typedef struct { int num_fmts; char **fmts; char *raw_fmts; + char *raw_sstr; + uint32 ramstart; + uint32 rodata_start; + uint32 rodata_end; + char *rom_raw_sstr; + uint32 rom_ramstart; + uint32 rom_rodata_start; + uint32 rom_rodata_end; } dhd_event_log_t; #endif /* SHOW_LOGTRACE */ @@ -738,17 +1085,24 @@ extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec); extern int dhd_timeout_expired(dhd_timeout_t *tmo); extern int dhd_ifname2idx(struct dhd_info *dhd, char *name); -extern int dhd_ifidx2hostidx(struct dhd_info *dhd, int ifidx); extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net); extern struct net_device * dhd_idx2net(void *pub, int ifidx); extern int net_os_send_hang_message(struct net_device *dev); +extern int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num); +extern bool dhd_wowl_cap(void *bus); + extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, wl_event_msg_t *, void **data_ptr, void *); 
extern void wl_event_to_host_order(wl_event_msg_t * evt); +extern int wl_host_event_get_data(void *pktdata, wl_event_msg_t *event, void **data_ptr); extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len); extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifindex); +extern int dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval, + int cmd, uint8 set, int ifidx); +extern int dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val, + int cmd, uint8 set, int ifidx); extern void dhd_common_init(osl_t *osh); extern int dhd_do_driver_init(struct net_device *net); @@ -757,7 +1111,7 @@ extern int dhd_event_ifadd(struct dhd_info *dhd, struct wl_event_data_if *ifeven extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent, char *name, uint8 *mac); extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, - uint8 *mac, uint8 bssidx, bool need_rtnl_lock); + uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name); extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock); extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name); extern void dhd_vif_del(struct dhd_info *dhd, int ifidx); @@ -781,7 +1135,7 @@ extern int dhd_bus_suspend(dhd_pub_t *dhdpub); extern int dhd_bus_resume(dhd_pub_t *dhdpub, int stage); extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size); extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line); -extern bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval); +extern bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval); #if defined(BCMSDIO) || defined(BCMPCIE) extern uint dhd_bus_chip_id(dhd_pub_t *dhdp); extern uint dhd_bus_chiprev_id(dhd_pub_t *dhdp); @@ -807,7 +1161,14 @@ extern struct dhd_sta *dhd_findadd_sta(void *pub, int ifidx, void *ea); extern void dhd_del_sta(void *pub, int 
ifidx, void *ea); extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx); extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val); +#if defined(BCM_GMAC3) +extern int dhd_set_dev_def(dhd_pub_t *dhdp, uint32 idx, int val); +#endif extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx); +extern int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition); +extern int dhd_os_d3ack_wake(dhd_pub_t * pub); +extern int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition); +extern int dhd_os_busbusy_wake(dhd_pub_t * pub); extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd); extern int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set); @@ -828,25 +1189,9 @@ extern int wl_iw_send_priv_event(struct net_device *dev, char *flag); extern uint dhd_watchdog_ms; extern bool dhd_os_wd_timer_enabled(void *bus); -#ifdef PKT_STATICS -typedef struct pkt_statics { - uint16 event_count; - uint32 event_size; - uint16 ctrl_count; - uint32 ctrl_size; - uint32 data_count; - uint32 data_size; - uint16 glom_1_count; - uint16 glom_3_count; - uint16 glom_3_8_count; - uint16 glom_8_count; - uint16 glom_max; - uint16 glom_count; - uint32 glom_size; - uint16 test_count; - uint32 test_size; -} pkt_statics_t; -#endif +#ifdef DHD_PCIE_RUNTIMEPM +extern uint dhd_runtimepm_ms; +#endif /* DHD_PCIE_RUNTIMEPM */ #if defined(DHD_DEBUG) /* Console output poll interval */ @@ -902,6 +1247,9 @@ extern int dhd_idletime; /* SDIO Drive Strength */ extern uint dhd_sdiod_drive_strength; +/* triggers bcm_bprintf to print to kernel log */ +extern bool bcm_bprintf_bypass; + /* Override to force tx queueing all the time */ extern uint dhd_force_tx_queueing; /* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */ @@ -959,9 +1307,7 @@ extern uint dhd_force_tx_queueing; #endif #define DEFAULT_WIFI_TURNOFF_DELAY 0 -#ifndef WIFI_TURNOFF_DELAY #define WIFI_TURNOFF_DELAY DEFAULT_WIFI_TURNOFF_DELAY -#endif /* WIFI_TURNOFF_DELAY */ 
#define DEFAULT_WIFI_TURNON_DELAY 200 #ifndef WIFI_TURNON_DELAY @@ -977,6 +1323,12 @@ extern uint dhd_force_tx_queueing; #define CUSTOM_DHD_WATCHDOG_MS DEFAULT_DHD_WATCHDOG_INTERVAL_MS #endif /* DEFAULT_DHD_WATCHDOG_INTERVAL_MS */ +#define DEFAULT_ASSOC_RETRY_MAX 3 +#ifndef CUSTOM_ASSOC_RETRY_MAX +#define CUSTOM_ASSOC_RETRY_MAX DEFAULT_ASSOC_RETRY_MAX +#endif /* DEFAULT_ASSOC_RETRY_MAX */ + + #ifdef WLTDLS #ifndef CUSTOM_TDLS_IDLE_MODE_SETTING #define CUSTOM_TDLS_IDLE_MODE_SETTING 60000 /* 60sec to tear down TDLS of not active */ @@ -1019,6 +1371,7 @@ extern char fw_path2[MOD_PARAM_PATHLEN]; /* Flag to indicate if we should download firmware on driver load */ extern uint dhd_download_fw_on_driverload; +extern int allow_delay_fwdl; extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar); @@ -1029,7 +1382,7 @@ extern void dhd_wait_event_wakeup(dhd_pub_t*dhd); NdisStallExecution(1); #define IFUNLOCK(lock) InterlockedExchange((lock), 0) #define IFLOCK_FREE(lock) -#define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, #capa) != NULL)) +#define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, " " #capa " ") != NULL)) #ifdef ARP_OFFLOAD_SUPPORT #define MAX_IPV4_ENTRIES 8 void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode); @@ -1043,18 +1396,20 @@ void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx); #endif /* ARP_OFFLOAD_SUPPORT */ #ifdef WLTDLS int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac); +int dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode); #ifdef PCIE_FULL_DONGLE void dhd_tdls_update_peer_info(struct net_device *dev, bool connect_disconnect, uint8 *addr); #endif /* PCIE_FULL_DONGLE */ #endif /* WLTDLS */ /* Neighbor Discovery Offload Support */ -int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable); +extern int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable); int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx); int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx); /* ioctl 
processing for nl80211 */ int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf); -void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path, char *pconf_path); +void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path, + char *pclm_path, char *pconf_path); void dhd_set_bus_state(void *bus, uint32 state); /* Remove proper pkts(either one no-frag pkt or whole fragmented pkts) */ @@ -1080,6 +1435,24 @@ int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost); #define DHD_OS_PREFREE(dhdpub, addr, size) MFREE(dhdpub->osh, addr, size) #endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */ +#ifdef USE_WFA_CERT_CONF +enum { + SET_PARAM_BUS_TXGLOM_MODE, + SET_PARAM_ROAMOFF, +#ifdef USE_WL_FRAMEBURST + SET_PARAM_FRAMEBURST, +#endif /* USE_WL_FRAMEBURST */ +#ifdef USE_WL_TXBF + SET_PARAM_TXBF, +#endif /* USE_WL_TXBF */ +#ifdef PROP_TXSTATUS + SET_PARAM_PROPTX, + SET_PARAM_PROPTXMODE, +#endif /* PROP_TXSTATUS */ + PARAM_LAST_VALUE +}; +extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val); +#endif /* USE_WFA_CERT_CONF */ #define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid) do {} while (0) #define dhd_del_flowid(pub, ifidx, flowid) do {} while (0) @@ -1092,6 +1465,8 @@ extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags); /* Disable router 3GMAC bypass path perimeter lock */ #define DHD_PERIM_LOCK(dhdp) do {} while (0) #define DHD_PERIM_UNLOCK(dhdp) do {} while (0) +#define DHD_PERIM_LOCK_ALL(processor_id) do {} while (0) +#define DHD_PERIM_UNLOCK_ALL(processor_id) do {} while (0) /* Enable DHD general spin lock/unlock */ #define DHD_GENERAL_LOCK(dhdp, flags) \ @@ -1107,15 +1482,168 @@ extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags); #define DHD_FLOWID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) #define DHD_FLOWID_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) +/* Enable DHD common flowring list spin lock/unlock 
*/ +#define DHD_FLOWRING_LIST_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_FLOWRING_LIST_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) +extern void dhd_dump_to_kernelog(dhd_pub_t *dhdp); + + +#ifdef DHD_L2_FILTER +extern int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val); +#endif /* DHD_L2_FILTER */ typedef struct wl_io_pport { dhd_pub_t *dhd_pub; uint ifidx; } wl_io_pport_t; -extern void *dhd_pub_wlinfo(dhd_pub_t *dhd_pub); -#ifdef CONFIG_MACH_UNIVERSAL5433 -extern int check_rev(void); +typedef struct wl_evt_pport { + dhd_pub_t *dhd_pub; + int *ifidx; + void *pktdata; + void **data_ptr; + void *raw_event; +} wl_evt_pport_t; + +extern void *dhd_pub_shim(dhd_pub_t *dhd_pub); +#ifdef DHD_FW_COREDUMP +void dhd_save_fwdump(dhd_pub_t *dhd_pub, void * buffer, uint32 length); +#endif /* DHD_FW_COREDUMP */ + +#if defined(SET_RPS_CPUS) +int dhd_rps_cpus_enable(struct net_device *net, int enable); +int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len); +void custom_rps_map_clear(struct netdev_rx_queue *queue); +#define PRIMARY_INF 0 +#define VIRTUAL_INF 1 +#if defined(CONFIG_MACH_UNIVERSAL5433) || defined(CONFIG_MACH_UNIVERSAL7420) || \ + defined(CONFIG_SOC_EXYNOS8890) +#define RPS_CPUS_MASK "10" +#define RPS_CPUS_MASK_P2P "10" +#define RPS_CPUS_MASK_IBSS "10" +#define RPS_CPUS_WLAN_CORE_ID 4 +#else +#define RPS_CPUS_MASK "6" +#define RPS_CPUS_MASK_P2P "6" +#define RPS_CPUS_MASK_IBSS "6" +#endif /* CONFIG_MACH_UNIVERSAL5433 
|| CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */ +#endif + +int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component, + char ** buffer, int *length); + +void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length); + +int dhd_download_clm_blob(dhd_pub_t *dhd, unsigned char *image, uint32 len); + +int dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path); +#define dhd_is_device_removed(x) FALSE +#define dhd_os_ind_firmware_stall(x) + +#ifdef DHD_FW_COREDUMP +extern void dhd_get_memdump_info(dhd_pub_t *dhd); +#endif /* DHD_FW_COREDUMP */ +#ifdef BCMASSERT_LOG +extern void dhd_get_assert_info(dhd_pub_t *dhd); +#endif /* BCMASSERT_LOG */ + + +#if defined(DHD_LB_STATS) +#include +extern void dhd_lb_stats_init(dhd_pub_t *dhd); +extern void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); +extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp); +extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp); +#define DHD_LB_STATS_INIT(dhdp) dhd_lb_stats_init(dhdp) +/* Reset is called from common layer so it takes dhd_pub_t as argument */ +#define DHD_LB_STATS_RESET(dhdp) dhd_lb_stats_init(dhdp) +#define DHD_LB_STATS_CLR(x) (x) = 0U +#define DHD_LB_STATS_INCR(x) (x) = (x) + 1 +#define DHD_LB_STATS_ADD(x, c) (x) = (x) + (c) +#define DHD_LB_STATS_PERCPU_ARR_INCR(x) \ + { \ + int cpu = get_cpu(); put_cpu(); \ + DHD_LB_STATS_INCR(x[cpu]); \ + } +#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhdp, x) dhd_lb_stats_update_napi_histo(dhdp, x) +#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhdp, x) dhd_lb_stats_update_txc_histo(dhdp, x) +#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhdp, x) dhd_lb_stats_update_rxc_histo(dhdp, x) +#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_txc_percpu_cnt_incr(dhdp) 
+#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_rxc_percpu_cnt_incr(dhdp) +#else /* !DHD_LB_STATS */ +#define DHD_LB_STATS_NOOP do { /* noop */ } while (0) +#define DHD_LB_STATS_INIT(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_RESET(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_CLR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_INCR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_ADD(x, c) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_PERCPU_ARR_INCR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhd, x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP +#endif /* !DHD_LB_STATS */ + +#ifdef DHD_PCIE_RUNTIMEPM +extern bool dhd_runtimepm_state(dhd_pub_t *dhd); +extern bool dhd_runtime_bus_wake(struct dhd_bus *bus, bool wait, void *func_addr); +extern bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void *func_addr); +extern void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp); +extern bool dhdpcie_is_resume_done(dhd_pub_t *dhdp); +extern void dhd_runtime_pm_disable(dhd_pub_t *dhdp); +extern void dhd_runtime_pm_enable(dhd_pub_t *dhdp); +/* Disable the Runtime PM and wake up if the bus is already in suspend */ +#define DHD_DISABLE_RUNTIME_PM(dhdp) \ +do { \ + dhd_runtime_pm_disable(dhdp); \ +} while (0); + +/* Enable the Runtime PM */ +#define DHD_ENABLE_RUNTIME_PM(dhdp) \ +do { \ + dhd_runtime_pm_enable(dhdp); \ +} while (0); +#else +#define DHD_DISABLE_RUNTIME_PM(dhdp) +#define DHD_ENABLE_RUNTIME_PM(dhdp) +#endif /* DHD_PCIE_RUNTIMEPM */ + +extern void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs); + +/* + * Enable this macro if you want to track the calls to wake lock + * This records can be printed using the following command + * cat /sys/bcm-dhd/wklock_trace + * DHD_TRACE_WAKE_LOCK supports over linux 
2.6.0 version + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#undef DHD_TRACE_WAKE_LOCK +#endif /* KERNEL_VER < KERNEL_VERSION(2, 6, 0) */ + +#if defined(DHD_TRACE_WAKE_LOCK) +void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp); #endif + +extern int dhd_prot_debug_info_print(dhd_pub_t *dhd); + +#ifdef ENABLE_TEMP_THROTTLING +#define TEMP_THROTTLE_CONTROL_BIT 0xf //Enable all feature. +#endif /* ENABLE_TEMP_THROTTLING */ + +#ifdef DHD_PKTID_AUDIT_ENABLED +void dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp); +#endif /* DHD_PKTID_AUDIT_ENABLED */ + #endif /* _dhd_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.c b/drivers/amlogic/wifi/bcmdhd/dhd_bta.c similarity index 87% rename from drivers/net/wireless/bcmdhd/dhd_bta.c rename to drivers/amlogic/wifi/bcmdhd/dhd_bta.c index 6a4ef7938a91d..dc24edbb5c300 100644 --- a/drivers/net/wireless/bcmdhd/dhd_bta.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_bta.c @@ -1,13 +1,32 @@ /* * BT-AMP support routines * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_bta.c 434434 2013-11-06 07:16:02Z $ + * + * <> + * + * $Id: dhd_bta.c 514727 2014-11-12 03:02:48Z $ */ -#ifndef WLBTAMP #error "WLBTAMP is not defined" -#endif /* WLBTAMP */ #include #include diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_bta.h b/drivers/amlogic/wifi/bcmdhd/dhd_bta.h new file mode 100644 index 0000000000000..df9d1f91b9cea --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_bta.h @@ -0,0 +1,42 @@ +/* + * BT-AMP support routines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_bta.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef __dhd_bta_h__ +#define __dhd_bta_h__ + +struct dhd_pub; + +extern int dhd_bta_docmd(struct dhd_pub *pub, void *cmd_buf, uint cmd_len); + +extern void dhd_bta_doevt(struct dhd_pub *pub, void *data_buf, uint data_len); + +extern int dhd_bta_tx_hcidata(struct dhd_pub *pub, void *data_buf, uint data_len); +extern void dhd_bta_tx_hcidata_complete(struct dhd_pub *dhdp, void *txp, bool success); + + +#endif /* __dhd_bta_h__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_bus.h b/drivers/amlogic/wifi/bcmdhd/dhd_bus.h similarity index 66% rename from drivers/net/wireless/bcmdhd/dhd_bus.h rename to drivers/amlogic/wifi/bcmdhd/dhd_bus.h index 1874290c3f3ed..3517d800cb0e8 100644 --- a/drivers/net/wireless/bcmdhd/dhd_bus.h +++ b/drivers/amlogic/wifi/bcmdhd/dhd_bus.h @@ -4,9 +4,30 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_bus.h 497466 2014-08-19 15:41:01Z $ + * + * <> + * + * $Id: dhd_bus.h 602721 2015-11-27 10:32:48Z $ */ #ifndef _dhd_bus_h_ @@ -22,7 +43,7 @@ extern void dhd_bus_unregister(void); /* Download firmware image and nvram image */ extern int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, - char *fw_path, char *nv_path, char *conf_path); + char *fw_path, char *nv_path, char *clm_path, char *conf_path); /* Stop bus module: clear pending frames, disable data flow */ extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex); @@ -95,6 +116,8 @@ extern void *dhd_bus_sih(struct dhd_bus *bus); extern uint dhd_bus_hdrlen(struct dhd_bus *bus); #ifdef BCMSDIO extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val); +/* return sdio io status */ +extern uint8 dhd_bus_is_ioready(struct dhd_bus *bus); #else #define dhd_bus_set_dotxinrx(a, b) do {} while (0) #endif @@ -112,27 +135,39 @@ extern int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_nu #ifdef BCMPCIE enum { - DNGL_TO_HOST_BUF_IOCT, - DNGL_TO_HOST_DMA_SCRATCH_BUFFER, - DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN, - HOST_TO_DNGL_DMA_WRITEINDX_BUFFER, - HOST_TO_DNGL_DMA_READINDX_BUFFER, - DNGL_TO_HOST_DMA_WRITEINDX_BUFFER, - DNGL_TO_HOST_DMA_READINDX_BUFFER, + /* Scratch buffer confiuguration update */ + D2H_DMA_SCRATCH_BUF, + D2H_DMA_SCRATCH_BUF_LEN, + + /* DMA Indices array buffers for: H2D WR and RD, and D2H WR and RD */ + H2D_DMA_INDX_WR_BUF, /* update H2D WR dma indices buf base addr to dongle */ + H2D_DMA_INDX_RD_BUF, /* update H2D RD dma indices buf base addr to dongle */ + D2H_DMA_INDX_WR_BUF, /* update D2H WR dma indices buf base addr to dongle */ + D2H_DMA_INDX_RD_BUF, /* update D2H RD dma indices buf base addr to dongle */ + + /* DHD 
sets/gets WR or RD index, in host's H2D and D2H DMA indices buffer */ + H2D_DMA_INDX_WR_UPD, /* update H2D WR index in H2D WR dma indices buf */ + H2D_DMA_INDX_RD_UPD, /* update H2D RD index in H2D RD dma indices buf */ + D2H_DMA_INDX_WR_UPD, /* update D2H WR index in D2H WR dma indices buf */ + D2H_DMA_INDX_RD_UPD, /* update D2H RD index in D2H RD dma indices buf */ + + /* H2D and D2H Mailbox data update */ + H2D_MB_DATA, + D2H_MB_DATA, + + /* (Common) MsgBuf Ring configuration update */ + RING_BUF_ADDR, /* update ring base address to dongle */ + RING_ITEM_LEN, /* update ring item size to dongle */ + RING_MAX_ITEMS, /* update ring max items to dongle */ + + /* Update of WR or RD index, for a MsgBuf Ring */ + RING_RD_UPD, /* update ring read index from/to dongle */ + RING_WR_UPD, /* update ring write index from/to dongle */ + TOTAL_LFRAG_PACKET_CNT, - HTOD_MB_DATA, - DTOH_MB_DATA, - RING_BUF_ADDR, - H2D_DMA_WRITEINDX, - H2D_DMA_READINDX, - D2H_DMA_WRITEINDX, - D2H_DMA_READINDX, - RING_READ_PTR, - RING_WRITE_PTR, - RING_LEN_ITEMS, - RING_MAX_ITEM, MAX_HOST_RXBUFS }; + typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32); extern void dhd_bus_cmn_writeshared(struct dhd_bus *bus, void * data, uint32 len, uint8 type, uint16 ringid); @@ -142,8 +177,7 @@ extern uint32 dhd_bus_get_sharedflags(struct dhd_bus *bus); extern void dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count); extern void dhd_bus_start_queue(struct dhd_bus *bus); extern void dhd_bus_stop_queue(struct dhd_bus *bus); -extern void dhd_bus_update_retlen(struct dhd_bus *bus, uint32 retlen, uint32 cmd_id, uint16 status, - uint32 resp_len); + extern dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus); extern void dhd_bus_write_flow_ring_states(struct dhd_bus *bus, void * data, uint16 flowid); @@ -156,9 +190,11 @@ extern int dhd_bus_flow_ring_delete_request(struct dhd_bus *bus, void *flow_ring extern void dhd_bus_flow_ring_delete_response(struct dhd_bus *bus, uint16 flowid, uint32 
status); extern int dhd_bus_flow_ring_flush_request(struct dhd_bus *bus, void *flow_ring_node); extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status); -extern uint8 dhd_bus_is_txmode_push(struct dhd_bus *bus); -extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus, uint8 *txpush); +extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus); extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs); +extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val); + + extern int dhdpcie_bus_clock_start(struct dhd_bus *bus); extern int dhdpcie_bus_clock_stop(struct dhd_bus *bus); extern int dhdpcie_bus_enable_device(struct dhd_bus *bus); @@ -170,5 +206,9 @@ extern int dhd_bus_release_dongle(struct dhd_bus *bus); extern int dhd_bus_request_irq(struct dhd_bus *bus); +#ifdef DHD_FW_COREDUMP +extern int dhd_bus_mem_dump(dhd_pub_t *dhd); +#endif /* DHD_FW_COREDUMP */ + #endif /* BCMPCIE */ #endif /* _dhd_bus_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_buzzz.h b/drivers/amlogic/wifi/bcmdhd/dhd_buzzz.h new file mode 100644 index 0000000000000..a5422d58d7ef6 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_buzzz.h @@ -0,0 +1,37 @@ +#ifndef _DHD_BUZZZ_H_INCLUDED_ +#define _DHD_BUZZZ_H_INCLUDED_ + +/* + * Broadcom logging system - Empty implementaiton + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked 
independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_buzzz.h 591283 2015-10-07 11:52:00Z $ + */ + +#define dhd_buzzz_attach() do { /* noop */ } while (0) +#define dhd_buzzz_detach() do { /* noop */ } while (0) +#define dhd_buzzz_panic(x) do { /* noop */ } while (0) +#define BUZZZ_LOG(ID, N, ARG...) do { /* noop */ } while (0) + +#endif /* _DHD_BUZZZ_H_INCLUDED_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_cdc.c b/drivers/amlogic/wifi/bcmdhd/dhd_cdc.c similarity index 92% rename from drivers/net/wireless/bcmdhd/dhd_cdc.c rename to drivers/amlogic/wifi/bcmdhd/dhd_cdc.c index b93af470479d4..b1fb4094e3dbb 100644 --- a/drivers/net/wireless/bcmdhd/dhd_cdc.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_cdc.c @@ -1,9 +1,30 @@ /* * DHD Protocol Module for CDC and BDC. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_cdc.c 492377 2014-07-21 19:54:06Z $ + * + * <> + * + * $Id: dhd_cdc.c 596022 2015-10-29 11:02:47Z $ * * BDC is like CDC, except it includes a header for data packets to convey * packet priority over the bus, and flags (e.g. 
to indicate checksum status @@ -83,14 +104,12 @@ dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len) DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - do { ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len); if (ret < 0) break; } while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id); - return ret; } @@ -202,6 +221,10 @@ dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 return -EIO; } + if (cmd == WLC_SET_PM) { + DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf)); + } + memset(msg, 0, sizeof(cdc_ioctl_t)); msg->cmd = htol32(cmd); @@ -330,6 +353,8 @@ dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) { + if (!dhdp || !dhdp->prot) + return; bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid); #ifdef PROP_TXSTATUS dhd_wlfc_dump(dhdp, strbuf); @@ -411,11 +436,7 @@ dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_in goto exit; } - if ((*ifidx = BDC_GET_IF_IDX(h)) >= DHD_MAX_IFS) { - DHD_ERROR(("%s: rx data ifnum out of range (%d)\n", - __FUNCTION__, *ifidx)); - return BCME_ERROR; - } + *ifidx = BDC_GET_IF_IDX(h); if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) { DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n", @@ -437,15 +458,6 @@ dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_in PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN); #endif /* BDC */ -#if defined(NDISVER) -#if (NDISVER < 0x0630) - if (PKTLEN(dhd->osh, pktbuf) < (uint32) (data_offset << 2)) { - DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, - PKTLEN(dhd->osh, pktbuf), (data_offset * 4))); - return BCME_ERROR; - } -#endif /* #if defined(NDISVER) */ -#endif /* (NDISVER < 0x0630) */ #ifdef PROP_TXSTATUS if (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf))) { @@ -526,6 +538,9 @@ dhd_sync_with_dongle(dhd_pub_t *dhd) wlc_rev_info_t revinfo; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); +#ifdef BCMASSERT_LOG + 
dhd_get_assert_info(dhd); +#endif /* BCMASSERT_LOG */ /* Get the device rev info */ memset(&revinfo, 0, sizeof(revinfo)); @@ -550,7 +565,7 @@ dhd_sync_with_dongle(dhd_pub_t *dhd) int dhd_prot_init(dhd_pub_t *dhd) { - return TRUE; + return BCME_OK; } void diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.c b/drivers/amlogic/wifi/bcmdhd/dhd_cfg80211.c similarity index 62% rename from drivers/net/wireless/bcmdhd/dhd_cfg80211.c rename to drivers/amlogic/wifi/bcmdhd/dhd_cfg80211.c index e5c28823305e2..390747fe101d0 100644 --- a/drivers/net/wireless/bcmdhd/dhd_cfg80211.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_cfg80211.c @@ -1,9 +1,30 @@ /* * Linux cfg80211 driver - Dongle Host Driver (DHD) related * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $ + * + * <> + * + * $Id: dhd_cfg80211.c 591285 2015-10-07 11:56:29Z $ */ #include @@ -106,9 +127,9 @@ s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg) } struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name, - uint8 *mac, uint8 bssidx) + uint8 *mac, uint8 bssidx, char *dngl_name) { - return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE); + return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE, dngl_name); } int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev) @@ -199,3 +220,49 @@ s32 dhd_config_dongle(struct bcm_cfg80211 *cfg) return err; } + +int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev, + const struct bcm_nlmsg_hdr *nlioc, void *buf) +{ + struct net_device *ndev = NULL; + dhd_pub_t *dhd; + dhd_ioctl_t ioc = { 0 }; + int ret = 0; + int8 index; + + WL_TRACE(("entry: cmd = %d\n", nlioc->cmd)); + + dhd = cfg->pub; + DHD_OS_WAKE_LOCK(dhd); + + /* send to dongle only if we are not waiting for reload already */ + if (dhd->hang_was_sent) { + WL_ERR(("HANG was sent up earlier\n")); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS); + DHD_OS_WAKE_UNLOCK(dhd); + return OSL_ERROR(BCME_DONGLE_DOWN); + } + + ndev = wdev_to_wlc_ndev(wdev, cfg); + index = dhd_net2idx(dhd->info, ndev); + if (index == DHD_BAD_IF) { + WL_ERR(("Bad ifidx from wdev:%p\n", wdev)); + ret = BCME_ERROR; + goto done; + } + + ioc.cmd = nlioc->cmd; + ioc.len = nlioc->len; + ioc.set = nlioc->set; + ioc.driver = nlioc->magic; + ret = dhd_ioctl_process(dhd, index, &ioc, buf); + if (ret) { + WL_TRACE(("dhd_ioctl_process return err %d\n", ret)); + ret = OSL_ERROR(ret); + goto done; + } + +done: + DHD_OS_WAKE_UNLOCK(dhd); + return ret; +} diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_cfg80211.h b/drivers/amlogic/wifi/bcmdhd/dhd_cfg80211.h new file mode 100644 index 
0000000000000..cae7cc9b247bb --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_cfg80211.h @@ -0,0 +1,54 @@ +/* + * Linux cfg80211 driver - Dongle Host Driver (DHD) related + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_cfg80211.h 591285 2015-10-07 11:56:29Z $ + */ + + +#ifndef __DHD_CFG80211__ +#define __DHD_CFG80211__ + +#include +#include +#include + +#ifndef WL_ERR +#define WL_ERR CFG80211_ERR +#endif +#ifndef WL_TRACE +#define WL_TRACE CFG80211_TRACE +#endif + +s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg); +s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg); +s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg); +s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val); +s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg); +s32 dhd_config_dongle(struct bcm_cfg80211 *cfg); +int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, + struct wireless_dev *wdev, const struct bcm_nlmsg_hdr *nlioc, void *data); + +#endif /* __DHD_CFG80211__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg_vendor.c b/drivers/amlogic/wifi/bcmdhd/dhd_cfg_vendor.c similarity index 73% rename from drivers/net/wireless/bcmdhd/dhd_cfg_vendor.c rename to drivers/amlogic/wifi/bcmdhd/dhd_cfg_vendor.c index 299a27d210f73..c72f8299aadbf 100644 --- a/drivers/net/wireless/bcmdhd/dhd_cfg_vendor.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_cfg_vendor.c @@ -1,9 +1,30 @@ /* * Linux cfg80211 vendor command/event handlers of DHD * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of 
+ * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_cfg_vendor.c 495605 2014-08-07 18:41:34Z $ + * + * <> + * + * $Id: dhd_cfg_vendor.c 525516 2015-01-09 23:12:53Z $ */ #include diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/amlogic/wifi/bcmdhd/dhd_common.c similarity index 60% rename from drivers/net/wireless/bcmdhd/dhd_common.c rename to drivers/amlogic/wifi/bcmdhd/dhd_common.c index 3ad21394db093..631cb4fb2372d 100644 --- a/drivers/net/wireless/bcmdhd/dhd_common.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_common.c @@ -1,9 +1,30 @@ /* * Broadcom Dongle Host Driver (DHD), common DHD core. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_common.c 492215 2014-07-20 16:44:15Z $ + * + * <> + * + * $Id: dhd_common.c 609263 2015-12-31 16:21:33Z $ */ #include #include @@ -29,16 +50,13 @@ #include #include #include +#include #include #include #ifdef WL_CFG80211 #include #endif -#ifdef WLBTAMP -#include -#include -#endif #ifdef PNO_SUPPORT #include #endif @@ -60,12 +78,30 @@ #include #endif /* DHD_WMF */ +#ifdef DHD_L2_FILTER +#include +#endif /* DHD_L2_FILTER */ + +#ifdef DHD_PSTA +#include +#endif /* DHD_PSTA */ + #ifdef WLMEDIA_HTSF extern void htsf_update(struct dhd_info *dhd, void *data); #endif -int dhd_msg_level = DHD_ERROR_VAL; +#ifdef DHD_LOG_DUMP +int dhd_msg_level = DHD_ERROR_VAL | DHD_MSGTRACE_VAL | DHD_FWLOG_VAL | DHD_EVENT_VAL; +#else +int dhd_msg_level = DHD_ERROR_VAL | DHD_MSGTRACE_VAL | DHD_FWLOG_VAL; +#endif /* DHD_LOG_DUMP */ + + +#if defined(WL_WLC_SHIM) +#include +#else +#endif /* WL_WLC_SHIM */ #include @@ -91,6 +127,11 @@ extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx); #if !defined(AP) && defined(WLP2P) extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd); #endif + +extern int dhd_socram_dump(struct dhd_bus *bus); + +#define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */ + bool ap_cfg_running = FALSE; bool ap_fw_loaded = FALSE; @@ -102,12 +143,15 @@ bool ap_fw_loaded = FALSE; #define DHD_COMPILED "\nCompiled in " SRCBASE #endif /* DHD_DEBUG */ +#define CHIPID_MISMATCH 8 + #if defined(DHD_DEBUG) -const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR - DHD_COMPILED ;//" on " __DATE__ " at " __TIME__; +const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR; #else const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR "\nCompiled from "; -#endif +#endif +char 
fw_version[FW_VER_STR_LEN] = "\0"; +char clm_version[CLM_VER_STR_LEN] = "\0"; void dhd_set_timer(void *bus, uint wdtick); @@ -128,22 +172,16 @@ enum { IOV_LOGSTAMP, IOV_GPIOOB, IOV_IOCTLTIMEOUT, -#ifdef WLBTAMP - IOV_HCI_CMD, /* HCI command */ - IOV_HCI_ACL_DATA, /* HCI data packet */ -#endif #if defined(DHD_DEBUG) IOV_CONS, IOV_DCONSOLE_POLL, + IOV_DHD_JOIN_TIMEOUT_DBG, + IOV_SCAN_TIMEOUT, #endif /* defined(DHD_DEBUG) */ #ifdef PROP_TXSTATUS IOV_PROPTXSTATUS_ENABLE, IOV_PROPTXSTATUS_MODE, IOV_PROPTXSTATUS_OPT, -#ifdef QMONITOR - IOV_QMON_TIME_THRES, - IOV_QMON_TIME_PERCENT, -#endif /* QMONITOR */ IOV_PROPTXSTATUS_MODULE_IGNORE, IOV_PROPTXSTATUS_CREDIT_IGNORE, IOV_PROPTXSTATUS_TXSTATUS_IGNORE, @@ -170,12 +208,18 @@ enum { #endif /* DHD_UCAST_UPNP */ #endif /* DHD_WMF */ IOV_AP_ISOLATE, -#ifdef DHD_UNICAST_DHCP - IOV_DHCP_UNICAST, -#endif /* DHD_UNICAST_DHCP */ #ifdef DHD_L2_FILTER + IOV_DHCP_UNICAST, IOV_BLOCK_PING, -#endif + IOV_PROXY_ARP, + IOV_GRAT_ARP, +#endif /* DHD_L2_FILTER */ +#ifdef DHD_PSTA + IOV_PSTA, +#endif /* DHD_PSTA */ + IOV_CFG80211_OPMODE, + IOV_ASSERT_TYPE, + IOV_LMTEST, IOV_LAST }; @@ -196,10 +240,6 @@ const bcm_iovar_t dhd_iovars[] = { {"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID, 0 }, {"gpioob", IOV_GPIOOB, 0, IOVT_UINT32, 0 }, {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, IOVT_UINT32, 0 }, -#ifdef WLBTAMP - {"HCI_cmd", IOV_HCI_CMD, 0, IOVT_BUFFER, 0}, - {"HCI_ACL_data", IOV_HCI_ACL_DATA, 0, IOVT_BUFFER, 0}, -#endif #ifdef PROP_TXSTATUS {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, IOVT_BOOL, 0 }, /* @@ -210,10 +250,6 @@ const bcm_iovar_t dhd_iovars[] = { */ {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, IOVT_UINT32, 0 }, {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, IOVT_UINT32, 0 }, -#ifdef QMONITOR - {"qtime_thres", IOV_QMON_TIME_THRES, 0, IOVT_UINT32, 0 }, - {"qtime_percent", IOV_QMON_TIME_PERCENT, 0, IOVT_UINT32, 0 }, -#endif /* QMONITOR */ {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, IOVT_BOOL, 0 }, {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 
IOVT_BOOL, 0 }, {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, IOVT_BOOL, 0 }, @@ -240,18 +276,58 @@ const bcm_iovar_t dhd_iovars[] = { {"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), IOVT_BOOL, 0 }, #endif /* DHD_UCAST_UPNP */ #endif /* DHD_WMF */ -#ifdef DHD_UNICAST_DHCP +#ifdef DHD_L2_FILTER {"dhcp_unicast", IOV_DHCP_UNICAST, (0), IOVT_BOOL, 0 }, -#endif /* DHD_UNICAST_DHCP */ +#endif /* DHD_L2_FILTER */ {"ap_isolate", IOV_AP_ISOLATE, (0), IOVT_BOOL, 0}, #ifdef DHD_L2_FILTER {"block_ping", IOV_BLOCK_PING, (0), IOVT_BOOL, 0}, -#endif + {"proxy_arp", IOV_PROXY_ARP, (0), IOVT_BOOL, 0}, + {"grat_arp", IOV_GRAT_ARP, (0), IOVT_BOOL, 0}, +#endif /* DHD_L2_FILTER */ +#ifdef DHD_PSTA + /* PSTA/PSR Mode configuration. 0: DIABLED 1: PSTA 2: PSR */ + {"psta", IOV_PSTA, 0, IOVT_UINT32, 0}, +#endif /* DHD PSTA */ + {"op_mode", IOV_CFG80211_OPMODE, 0, IOVT_UINT32, 0 }, + {"assert_type", IOV_ASSERT_TYPE, (0), IOVT_UINT32, 0}, + {"lmtest", IOV_LMTEST, 0, IOVT_UINT32, 0 }, {NULL, 0, 0, 0, 0 } }; #define DHD_IOVAR_BUF_SIZE 128 +#ifdef DHD_FW_COREDUMP +void dhd_save_fwdump(dhd_pub_t *dhd_pub, void * buffer, uint32 length) +{ + if (dhd_pub->soc_ram) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhd_pub, dhd_pub->soc_ram, dhd_pub->soc_ram_length); +#else + MFREE(dhd_pub->osh, dhd_pub->soc_ram, dhd_pub->soc_ram_length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + dhd_pub->soc_ram = NULL; + dhd_pub->soc_ram_length = 0; + } + +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub, + DHD_PREALLOC_MEMDUMP_RAM, length); + memset(dhd_pub->soc_ram, 0, length); +#else + dhd_pub->soc_ram = (uint8*) MALLOCZ(dhd_pub->osh, length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + if (dhd_pub->soc_ram == NULL) { + DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n", + __FUNCTION__)); + return; + } + + 
dhd_pub->soc_ram_length = length; + memcpy(dhd_pub->soc_ram, buffer, length); +} +#endif /* DHD_FW_COREDUMP */ + /* to NDIS developer, the structure dhd_common is redundant, * please do NOT merge it back from other branches !!! */ @@ -263,6 +339,8 @@ dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) struct bcmstrbuf b; struct bcmstrbuf *strbuf = &b; + if (!dhdp || !dhdp->prot || !buf) + return BCME_ERROR; bcm_binit(strbuf, buf, buflen); @@ -297,6 +375,8 @@ dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped); bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n", dhdp->rx_readahead_cnt, dhdp->tx_realloc); + bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n", + dhdp->tx_pktgetfail, dhdp->rx_pktgetfail); bcm_bprintf(strbuf, "\n"); /* Add any prot info */ @@ -307,9 +387,24 @@ dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) dhd_bus_dump(dhdp, strbuf); +#if defined(DHD_LB_STATS) + dhd_lb_stats_dump(dhdp, strbuf); +#endif /* DHD_LB_STATS */ + return (!strbuf->size ? 
BCME_BUFTOOSHORT : 0); } +void +dhd_dump_to_kernelog(dhd_pub_t *dhdp) +{ + char buf[512]; + + DHD_ERROR(("F/W version: %s\n", fw_version)); + bcm_bprintf_bypass = TRUE; + dhd_dump(dhdp, buf, sizeof(buf)); + bcm_bprintf_bypass = FALSE; +} + int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx) { @@ -323,23 +418,102 @@ dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len); } +int +dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval, + int cmd, uint8 set, int ifidx) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + + /* memset(iovbuf, 0, sizeof(iovbuf)); */ + if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) { + ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx); + if (!ret) { + *pval = ltoh32(*((uint*)iovbuf)); + } else { + DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n", + __FUNCTION__, name, ret)); + } + } else { + DHD_ERROR(("%s: mkiovar %s failed\n", + __FUNCTION__, name)); + } + + return ret; +} + +int +dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val, + int cmd, uint8 set, int ifidx) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + int lval = htol32(val); + + /* memset(iovbuf, 0, sizeof(iovbuf)); */ + if (bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf))) { + ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx); + if (ret) { + DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n", + __FUNCTION__, name, ret)); + } + } else { + DHD_ERROR(("%s: mkiovar %s failed\n", + __FUNCTION__, name)); + } + + return ret; +} + int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len) { int ret = BCME_ERROR; + unsigned long flags; if (dhd_os_proto_block(dhd_pub)) { +#ifdef DHD_LOG_DUMP + int slen, i, val, rem; + long int lval; + char *pval, *pos, *msg; + char tmp[64]; +#endif /* DHD_LOG_DUMP */ + DHD_GENERAL_LOCK(dhd_pub, 
flags); + if (dhd_pub->busstate == DHD_BUS_DOWN || + dhd_pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_ERROR(("%s: returning as busstate=%d\n", + __FUNCTION__, dhd_pub->busstate)); + DHD_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_proto_unblock(dhd_pub); + return -ENODEV; + } + dhd_pub->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_IOVAR; + DHD_GENERAL_UNLOCK(dhd_pub, flags); + +#ifdef DHD_LOG_DUMP + /* WLC_GET_VAR */ + if (ioc->cmd == WLC_GET_VAR) { + memset(tmp, 0, sizeof(tmp)); + bcopy(ioc->buf, tmp, strlen(ioc->buf) + 1); + } +#endif /* DHD_LOG_DUMP */ +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl); +#endif /* DHD_PCIE_RUNTIMEPM */ #if defined(WL_WLC_SHIM) - wl_info_t *wl = dhd_pub_wlinfo(dhd_pub); + { + struct wl_shim_node *shim = dhd_pub_shim(dhd_pub); - wl_io_pport_t io_pport; - io_pport.dhd_pub = dhd_pub; - io_pport.ifidx = ifidx; + wl_io_pport_t io_pport; + io_pport.dhd_pub = dhd_pub; + io_pport.ifidx = ifidx; - ret = wl_shim_ioctl(wl->shim, ioc, &io_pport); - if (ret != BCME_OK) { - DHD_ERROR(("%s: wl_shim_ioctl(%d) ERR %d\n", __FUNCTION__, ioc->cmd, ret)); + ret = wl_shim_ioctl(shim, ioc, len, &io_pport); + if (ret != BCME_OK) { + DHD_TRACE(("%s: wl_shim_ioctl(%d) ERR %d\n", + __FUNCTION__, ioc->cmd, ret)); + } } #else ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len); @@ -357,8 +531,49 @@ dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len) dhd_pub->busstate = DHD_BUS_DOWN; } + DHD_GENERAL_LOCK(dhd_pub, flags); + dhd_pub->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_IOVAR; + dhd_os_busbusy_wake(dhd_pub); + DHD_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_proto_unblock(dhd_pub); +#ifdef DHD_LOG_DUMP + if (ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) { + lval = 0; + slen = strlen(ioc->buf) + 1; + msg = (char*)ioc->buf; + if (ioc->cmd == WLC_GET_VAR) { + bcopy(msg, &lval, sizeof(long int)); + msg = tmp; + } else { + bcopy((msg + slen), &lval, sizeof(long int)); + } + DHD_ERROR_EX(("%s: cmd: %d, msg: 
%s, val: 0x%lx, len: %d, set: %d\n", + ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR", + ioc->cmd, msg, lval, ioc->len, ioc->set)); + } else { + slen = ioc->len; + if (ioc->buf != NULL) { + val = *(int*)ioc->buf; + pval = (char*)ioc->buf; + pos = tmp; + rem = sizeof(tmp); + memset(tmp, 0, sizeof(tmp)); + for (i = 0; i < slen; i++) { + pos += snprintf(pos, rem, "%02x ", pval[i]); + rem = sizeof(tmp) - (int)(pos - tmp); + if (rem <= 0) { + break; + } + } + DHD_ERROR_EX(("WLC_IOCTL: cmd: %d, val: %d(%s), len: %d, set: %d\n", + ioc->cmd, val, tmp, ioc->len, ioc->set)); + } else { + DHD_ERROR_EX(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd)); + } + } +#endif /* DHD_LOG_DUMP */ } return ret; @@ -392,7 +607,7 @@ dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, char *params, int *idx, char **val) p = p + 1; bcopy(p, &bssidx, sizeof(uint32)); /* Get corresponding dhd index */ - bssidx = dhd_bssidx2idx(dhd_pub, bssidx); + bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx)); if (bssidx >= DHD_MAX_IFS) { DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__)); @@ -411,6 +626,17 @@ dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, char *params, int *idx, char **val) return BCME_OK; } +#if defined(DHD_DEBUG) && defined(BCMDHDUSB) +/* USB Device console input function */ +int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) +{ + DHD_TRACE(("%s \n", __FUNCTION__)); + + return dhd_iovar(dhd, 0, "cons", msg, msglen, 1); + +} +#endif /* DHD_DEBUG && BCMDHDUSB */ + #ifdef PKT_STATICS extern pkt_statics_t tx_statics; extern void dhdsdio_txpktstatics(void); @@ -452,9 +678,6 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch int_val = (int32)wl_dbg_level; bcopy(&int_val, arg, val_size); printf("cfg_msg_level=0x%x\n", wl_dbg_level); -#endif -#ifdef PKT_STATICS - dhdsdio_txpktstatics(); #endif break; @@ -483,6 +706,9 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch case IOV_GVAL(IOV_MSGLEVEL): int_val = 
(int32)dhd_msg_level; bcopy(&int_val, arg, val_size); +#ifdef PKT_STATICS + dhdsdio_txpktstatics(); +#endif break; case IOV_SVAL(IOV_MSGLEVEL): @@ -509,6 +735,11 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch bcmerror = BCME_NOTUP; break; } + + if (CUSTOM_DHD_WATCHDOG_MS == 0 && int_val == 0) { + dhd_watchdog_ms = (uint)int_val; + } + dhd_os_wd_timer(dhd_pub, (uint)int_val); break; @@ -539,6 +770,8 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0; dhd_pub->tx_dropped = 0; dhd_pub->rx_dropped = 0; + dhd_pub->tx_pktgetfail = 0; + dhd_pub->rx_pktgetfail = 0; dhd_pub->rx_readahead_cnt = 0; dhd_pub->tx_realloc = 0; dhd_pub->wd_dpc_sched = 0; @@ -548,6 +781,7 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch /* clear proptxstatus related counters */ dhd_wlfc_clear_counts(dhd_pub); #endif /* PROP_TXSTATUS */ + DHD_LB_STATS_RESET(dhd_pub); break; @@ -565,37 +799,6 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch break; } -#ifdef WLBTAMP - case IOV_SVAL(IOV_HCI_CMD): { - amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)arg; - - /* sanity check: command preamble present */ - if (len < HCI_CMD_PREAMBLE_SIZE) - return BCME_BUFTOOSHORT; - - /* sanity check: command parameters are present */ - if (len < (int)(HCI_CMD_PREAMBLE_SIZE + cmd->plen)) - return BCME_BUFTOOSHORT; - - dhd_bta_docmd(dhd_pub, cmd, len); - break; - } - - case IOV_SVAL(IOV_HCI_ACL_DATA): { - amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)arg; - - /* sanity check: HCI header present */ - if (len < HCI_ACL_DATA_PREAMBLE_SIZE) - return BCME_BUFTOOSHORT; - - /* sanity check: ACL data is present */ - if (len < (int)(HCI_ACL_DATA_PREAMBLE_SIZE + ACL_data->dlen)) - return BCME_BUFTOOSHORT; - - dhd_bta_tx_hcidata(dhd_pub, ACL_data, len); - break; - } -#endif /* WLBTAMP */ #ifdef PROP_TXSTATUS case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): { @@ 
-634,24 +837,6 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch case IOV_SVAL(IOV_PROPTXSTATUS_MODE): dhd_wlfc_set_mode(dhd_pub, int_val); break; -#ifdef QMONITOR - case IOV_GVAL(IOV_QMON_TIME_THRES): { - int_val = dhd_qmon_thres(dhd_pub, FALSE, 0); - bcopy(&int_val, arg, val_size); - break; - } - - case IOV_SVAL(IOV_QMON_TIME_THRES): { - dhd_qmon_thres(dhd_pub, TRUE, int_val); - break; - } - - case IOV_GVAL(IOV_QMON_TIME_PERCENT): { - int_val = dhd_qmon_getpercent(dhd_pub); - bcopy(&int_val, arg, val_size); - break; - } -#endif /* QMONITOR */ case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE): bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val); @@ -867,38 +1052,124 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch #endif /* DHD_WMF */ -#ifdef DHD_UNICAST_DHCP - case IOV_GVAL(IOV_DHCP_UNICAST): - int_val = dhd_pub->dhcp_unicast; - bcopy(&int_val, arg, val_size); +#ifdef DHD_L2_FILTER + case IOV_GVAL(IOV_DHCP_UNICAST): { + uint32 bssidx; + char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n", + __FUNCTION__, name)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); break; - case IOV_SVAL(IOV_DHCP_UNICAST): - if (dhd_pub->dhcp_unicast == int_val) + } + case IOV_SVAL(IOV_DHCP_UNICAST): { + uint32 bssidx; + char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n", + __FUNCTION__, name)); + bcmerror = BCME_BADARG; break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 
1 : 0); + break; + } + case IOV_GVAL(IOV_BLOCK_PING): { + uint32 bssidx; + char *val; - if (int_val >= OFF || int_val <= ON) { - dhd_pub->dhcp_unicast = int_val; - } else { - bcmerror = BCME_RANGE; + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; } + int_val = dhd_get_block_ping_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); break; -#endif /* DHD_UNICAST_DHCP */ -#ifdef DHD_L2_FILTER - case IOV_GVAL(IOV_BLOCK_PING): - int_val = dhd_pub->block_ping; + } + case IOV_SVAL(IOV_BLOCK_PING): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 1 : 0); + break; + } + case IOV_GVAL(IOV_PROXY_ARP): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_parp_status(dhd_pub, bssidx); bcopy(&int_val, arg, val_size); break; - case IOV_SVAL(IOV_BLOCK_PING): - if (dhd_pub->block_ping == int_val) + } + case IOV_SVAL(IOV_PROXY_ARP): { + uint32 bssidx; + char *val; + char iobuf[32]; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; break; - if (int_val >= OFF || int_val <= ON) { - dhd_pub->block_ping = int_val; - } else { - bcmerror = BCME_RANGE; + } + bcopy(val, &int_val, sizeof(int_val)); + + /* Issue a iovar request to WL to update the proxy arp capability bit + * in the Extended Capability IE of beacons/probe responses. 
+ */ + bcm_mkiovar("proxy_arp_advertise", val, sizeof(int_val), iobuf, + sizeof(iobuf)); + bcmerror = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, iobuf, + sizeof(iobuf), TRUE, bssidx); + + if (bcmerror == BCME_OK) { + dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0); } break; -#endif + } + case IOV_GVAL(IOV_GRAT_ARP): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_grat_arp_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_GRAT_ARP): { + uint32 bssidx; + char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 1 : 0); + break; + } +#endif /* DHD_L2_FILTER */ case IOV_GVAL(IOV_AP_ISOLATE): { uint32 bssidx; char *val; @@ -928,6 +1199,60 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch dhd_set_ap_isolate(dhd_pub, bssidx, int_val); break; } +#ifdef DHD_PSTA + case IOV_GVAL(IOV_PSTA): { + int_val = dhd_get_psta_mode(dhd_pub); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_PSTA): { + if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) { + dhd_set_psta_mode(dhd_pub, int_val); + } else { + bcmerror = BCME_RANGE; + } + break; + } +#endif /* DHD_PSTA */ + case IOV_GVAL(IOV_CFG80211_OPMODE): { + int_val = (int32)dhd_pub->op_mode; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_CFG80211_OPMODE): { + if (int_val <= 0) + bcmerror = BCME_BADARG; + else + dhd_pub->op_mode = int_val; + break; + } + + case IOV_GVAL(IOV_ASSERT_TYPE): + int_val = g_assert_type; + bcopy(&int_val, arg, val_size); + break; + + case 
IOV_SVAL(IOV_ASSERT_TYPE): + g_assert_type = (uint32)int_val; + break; + + + case IOV_GVAL(IOV_LMTEST): { + *(uint32 *)arg = (uint32)lmtest; + break; + } + + case IOV_SVAL(IOV_LMTEST): { + uint32 val = *(uint32 *)arg; + if (val > 50) + bcmerror = BCME_BADARG; + else { + lmtest = (uint)val; + DHD_ERROR(("%s: lmtest %s\n", + __FUNCTION__, (lmtest == FALSE)? "OFF" : "ON")); + } + break; + } default: bcmerror = BCME_UNSUPPORTED; @@ -1150,6 +1475,7 @@ int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen) { int bcmerror = 0; + unsigned long flags; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); @@ -1157,81 +1483,123 @@ dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen) return BCME_BADARG; } + dhd_os_dhdiovar_lock(dhd_pub); switch (ioc->cmd) { - case DHD_GET_MAGIC: - if (buflen < sizeof(int)) - bcmerror = BCME_BUFTOOSHORT; - else - *(int*)buf = DHD_IOCTL_MAGIC; - break; - - case DHD_GET_VERSION: - if (buflen < sizeof(int)) - bcmerror = BCME_BUFTOOSHORT; - else - *(int*)buf = DHD_IOCTL_VERSION; - break; - - case DHD_GET_VAR: - case DHD_SET_VAR: { - char *arg; - uint arglen; - - /* scan past the name to any arguments */ - for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--) - ; + case DHD_GET_MAGIC: + if (buflen < sizeof(int)) + bcmerror = BCME_BUFTOOSHORT; + else + *(int*)buf = DHD_IOCTL_MAGIC; + break; - if (*arg) { - bcmerror = BCME_BUFTOOSHORT; + case DHD_GET_VERSION: + if (buflen < sizeof(int)) + bcmerror = BCME_BUFTOOSHORT; + else + *(int*)buf = DHD_IOCTL_VERSION; break; - } - /* account for the NUL terminator */ - arg++, arglen--; + case DHD_GET_VAR: + case DHD_SET_VAR: + { + char *arg; + uint arglen; + + DHD_GENERAL_LOCK(dhd_pub, flags); + if (dhd_pub->busstate == DHD_BUS_DOWN || + dhd_pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + /* In platforms like FC19, the FW download is done via IOCTL + * and should not return error for IOCTLs fired before FW + * Download is done + */ + if (dhd_pub->is_fw_download_done) { 
+ DHD_ERROR(("%s: returning as busstate=%d\n", + __FUNCTION__, dhd_pub->busstate)); + DHD_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_dhdiovar_unlock(dhd_pub); + return -ENODEV; + } + } + dhd_pub->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DHD_IOVAR; + DHD_GENERAL_UNLOCK(dhd_pub, flags); +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl); +#endif /* DHD_PCIE_RUNTIMEPM */ + + /* scan past the name to any arguments */ + for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--) + ; + + if (*arg) { + bcmerror = BCME_BUFTOOSHORT; + goto unlock_exit; + } - /* call with the appropriate arguments */ - if (ioc->cmd == DHD_GET_VAR) - bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen, - buf, buflen, IOV_GET); - else - bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET); - if (bcmerror != BCME_UNSUPPORTED) - break; + /* account for the NUL terminator */ + arg++, arglen--; - /* not in generic table, try protocol module */ - if (ioc->cmd == DHD_GET_VAR) - bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg, - arglen, buf, buflen, IOV_GET); - else - bcmerror = dhd_prot_iovar_op(dhd_pub, buf, - NULL, 0, arg, arglen, IOV_SET); - if (bcmerror != BCME_UNSUPPORTED) - break; + /* call with the appropriate arguments */ + if (ioc->cmd == DHD_GET_VAR) { + bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen, + buf, buflen, IOV_GET); + } else { + bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, + arg, arglen, IOV_SET); + } + if (bcmerror != BCME_UNSUPPORTED) { + goto unlock_exit; + } - /* if still not found, try bus module */ - if (ioc->cmd == DHD_GET_VAR) { - bcmerror = dhd_bus_iovar_op(dhd_pub, buf, - arg, arglen, buf, buflen, IOV_GET); - } else { - bcmerror = dhd_bus_iovar_op(dhd_pub, buf, - NULL, 0, arg, arglen, IOV_SET); - } + /* not in generic table, try protocol module */ + if (ioc->cmd == DHD_GET_VAR) { + bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg, + arglen, buf, buflen, IOV_GET); + } else { + bcmerror = dhd_prot_iovar_op(dhd_pub, buf, + 
NULL, 0, arg, arglen, IOV_SET); + } + if (bcmerror != BCME_UNSUPPORTED) { + goto unlock_exit; + } - break; - } + /* if still not found, try bus module */ + if (ioc->cmd == DHD_GET_VAR) { + bcmerror = dhd_bus_iovar_op(dhd_pub, buf, + arg, arglen, buf, buflen, IOV_GET); + } else { + bcmerror = dhd_bus_iovar_op(dhd_pub, buf, + NULL, 0, arg, arglen, IOV_SET); + } + } + goto unlock_exit; - default: - bcmerror = BCME_UNSUPPORTED; + default: + bcmerror = BCME_UNSUPPORTED; } + dhd_os_dhdiovar_unlock(dhd_pub); + return bcmerror; +unlock_exit: + DHD_GENERAL_LOCK(dhd_pub, flags); + dhd_pub->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DHD_IOVAR; + dhd_os_busbusy_wake(dhd_pub); + DHD_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_dhdiovar_unlock(dhd_pub); return bcmerror; } #ifdef SHOW_EVENTS #ifdef SHOW_LOGTRACE -#define AVOID_BYTE 64 -#define MAX_NO_OF_ARG 16 +#define MAX_NO_OF_ARG 16 + +#define FMTSTR_SIZE 132 +#define SIZE_LOC_STR 50 +#define MIN_DLEN 4 +#define TAG_BYTES 12 +#define TAG_WORDS 3 +#define ROMSTR_SIZE 200 + static int check_event_log_sequence_number(uint32 seq_no) @@ -1273,6 +1641,372 @@ check_event_log_sequence_number(uint32 seq_no) return ret; } + +static void +dhd_eventmsg_print(dhd_pub_t *dhd_pub, void *event_data, void *raw_event_ptr, + uint datalen, const char *event_name) +{ + msgtrace_hdr_t hdr; + uint32 nblost; + uint8 count; + char *s, *p; + static uint32 seqnum_prev = 0; + uint32 *log_ptr = NULL; + uchar *buf; + event_log_hdr_t event_hdr; + uint32 i; + int32 j; + + dhd_event_log_t *raw_event = (dhd_event_log_t *) raw_event_ptr; + + char fmtstr_loc_buf[FMTSTR_SIZE] = {0}; + char (*str_buf)[SIZE_LOC_STR] = NULL; + char * str_tmpptr = NULL; + uint32 addr = 0; + uint32 **hdr_ptr = NULL; + uint32 h_i = 0; + uint32 hdr_ptr_len = 0; + + typedef union { + uint32 val; + char * addr; + } u_arg; + u_arg arg[MAX_NO_OF_ARG] = {{0}}; + char *c_ptr = NULL; + char rom_log_str[ROMSTR_SIZE] = {0}; + uint32 rom_str_len = 0; + + BCM_REFERENCE(arg); + + if (!DHD_FWLOG_ON()) + 
return; + + buf = (uchar *) event_data; + memcpy(&hdr, buf, MSGTRACE_HDRLEN); + + if (hdr.version != MSGTRACE_VERSION) { + DHD_EVENT(("\nMACEVENT: %s [unsupported version --> " + "dhd version:%d dongle version:%d]\n", + event_name, MSGTRACE_VERSION, hdr.version)); + /* Reset datalen to avoid display below */ + datalen = 0; + return; + } + + if (hdr.trace_type == MSGTRACE_HDR_TYPE_MSG) { + /* There are 2 bytes available at the end of data */ + buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0'; + + if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) { + DHD_FWLOG(("WLC_E_TRACE: [Discarded traces in dongle -->" + "discarded_bytes %d discarded_printf %d]\n", + ntoh32(hdr.discarded_bytes), + ntoh32(hdr.discarded_printf))); + } + + nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1; + if (nblost > 0) { + DHD_FWLOG(("WLC_E_TRACE:" + "[Event lost (msg) --> seqnum %d nblost %d\n", + ntoh32(hdr.seqnum), nblost)); + } + seqnum_prev = ntoh32(hdr.seqnum); + + /* Display the trace buffer. Advance from + * \n to \n to avoid display big + * printf (issue with Linux printk ) + */ + p = (char *)&buf[MSGTRACE_HDRLEN]; + while (*p != '\0' && (s = strstr(p, "\n")) != NULL) { + *s = '\0'; + DHD_FWLOG(("[FWLOG] %s\n", p)); + p = s+1; + } + if (*p) + DHD_FWLOG(("[FWLOG] %s", p)); + + /* Reset datalen to avoid display below */ + datalen = 0; + + } else if (hdr.trace_type == MSGTRACE_HDR_TYPE_LOG) { + /* Let the standard event printing work for now */ + uint32 timestamp, seq, pktlen; + + if (check_event_log_sequence_number(hdr.seqnum)) { + + DHD_EVENT(("%s: WLC_E_TRACE:" + "[Event duplicate (log) %d] dropping!!\n", + __FUNCTION__, hdr.seqnum)); + return; /* drop duplicate events */ + } + + p = (char *)&buf[MSGTRACE_HDRLEN]; + datalen -= MSGTRACE_HDRLEN; + pktlen = ltoh16(*((uint16 *)p)); + seq = ltoh16(*((uint16 *)(p + 2))); + p += MIN_DLEN; + datalen -= MIN_DLEN; + timestamp = ltoh32(*((uint32 *)p)); + BCM_REFERENCE(pktlen); + BCM_REFERENCE(seq); + BCM_REFERENCE(timestamp); + + /* + * 
Allocating max possible number of event TAGs in the received buffer + * considering that each event requires minimum of TAG_BYTES. + */ + hdr_ptr_len = ((datalen/TAG_BYTES)+1) * sizeof(uint32*); + + if ((raw_event->fmts)) { + if (!(str_buf = MALLOCZ(dhd_pub->osh, (MAX_NO_OF_ARG * SIZE_LOC_STR)))) { + DHD_ERROR(("%s: malloc failed str_buf \n", __FUNCTION__)); + } + } + + if (!(hdr_ptr = MALLOCZ(dhd_pub->osh, hdr_ptr_len))) { + DHD_ERROR(("%s: malloc failed hdr_ptr \n", __FUNCTION__)); + } + + + DHD_MSGTRACE_LOG(("EVENT_LOG_HDR[No.%d]: timestamp 0x%08x length = %d\n", + seq, timestamp, pktlen)); + + /* (raw_event->fmts) has value */ + + log_ptr = (uint32 *) (p + datalen); + + /* Store all hdr pointer while parsing from last of the log buffer + * sample format of + * 001d3c54 00000064 00000064 001d3c54 001dba08 035d6ce1 0c540639 + * 001d3c54 00000064 00000064 035d6d89 0c580439 + * in above example 0c580439 -- 39 is tag , 04 is count, 580c is format number + * all these uint32 values comes in reverse order as group as EL data + * while decoding we can parse only from last to first + */ + + while (datalen > MIN_DLEN) { + log_ptr--; + datalen -= MIN_DLEN; + event_hdr.t = *log_ptr; + /* + * Check for partially overriten entries + */ + if (log_ptr - (uint32 *) p < event_hdr.count) { + break; + } + /* + * Check argument count (only when format is valid) + */ + if ((event_hdr.count > MAX_NO_OF_ARG) && + (event_hdr.fmt_num != 0xffff)) { + break; + } + /* + * Check for end of the Frame. + */ + if (event_hdr.tag == EVENT_LOG_TAG_NULL) { + continue; + } + log_ptr[0] = event_hdr.t; + if (h_i < (hdr_ptr_len / sizeof(uint32*))) { + hdr_ptr[h_i++] = log_ptr; + } + + /* Now place the header at the front + * and copy back. 
+ */ + log_ptr -= event_hdr.count; + + c_ptr = NULL; + datalen = datalen - (event_hdr.count * MIN_DLEN); + } + datalen = 0; + + /* print all log using stored hdr pointer in reverse order of EL data + * which is actually print older log first and then other in order + */ + + for (j = (h_i-1); j >= 0; j--) { + if (!(hdr_ptr[j])) { + break; + } + + event_hdr.t = *hdr_ptr[j]; + + log_ptr = hdr_ptr[j]; + + /* Now place the header at the front + * and copy back. + */ + log_ptr -= event_hdr.count; + + if (event_hdr.tag == EVENT_LOG_TAG_ROM_PRINTF) { + + rom_str_len = ((event_hdr.count)-1) * sizeof(uint32); + + if (rom_str_len >= (ROMSTR_SIZE -1)) { + rom_str_len = ROMSTR_SIZE - 1; + } + + /* copy all ascii data for ROM printf to local string */ + memcpy(rom_log_str, log_ptr, rom_str_len); + /* add end of line at last */ + rom_log_str[rom_str_len] = '\0'; + + DHD_MSGTRACE_LOG(("EVENT_LOG_ROM[0x%08x]: %s", + log_ptr[event_hdr.count - 1], rom_log_str)); + + /* Add newline if missing */ + if (rom_log_str[strlen(rom_log_str) - 1] != '\n') { + DHD_EVENT(("\n")); + } + + memset(rom_log_str, 0, ROMSTR_SIZE); + + continue; + } + + /* + * Check For Special Time Stamp Packet + */ + if (event_hdr.tag == EVENT_LOG_TAG_TS) { + DHD_MSGTRACE_LOG(("EVENT_LOG_TS[0x%08x]: SYS:%08x CPU:%08x\n", + log_ptr[event_hdr.count-1], log_ptr[0], log_ptr[1])); + continue; + } + + /* Simply print out event dump buffer (fmt_num = 0xffff) */ + if (!str_buf || event_hdr.fmt_num == 0xffff) { + /* + * Print out raw value if unable to interpret + */ +#ifdef DHD_LOG_DUMP + char buf[256]; + char *pos = buf; + memset(buf, 0, sizeof(buf)); + pos += snprintf(pos, 256, +#else + DHD_MSGTRACE_LOG(( +#endif /* DHD_LOG_DUMP */ + "EVENT_LOG_BUF[0x%08x]: tag=%d len=%d fmt=%04x", + log_ptr[event_hdr.count-1], event_hdr.tag, + event_hdr.count, event_hdr.fmt_num +#ifdef DHD_LOG_DUMP +); +#else +)); +#endif /* DHD_LOG_DUMP */ + + for (count = 0; count < (event_hdr.count-1); count++) { +#ifdef DHD_LOG_DUMP + if (strlen(buf) 
>= (256 - 1)) { + DHD_MSGTRACE_LOG(("%s\n", buf)); + memset(buf, 0, sizeof(buf)); + pos = buf; + } + pos += snprintf(pos, (256 - (int)(pos-buf)), + " %08x", log_ptr[count]); +#else + if (count % 8 == 0) + DHD_MSGTRACE_LOG(("\n\t%08x", log_ptr[count])); + else + DHD_MSGTRACE_LOG((" %08x", log_ptr[count])); +#endif /* DHD_LOG_DUMP */ + } +#ifdef DHD_LOG_DUMP + DHD_MSGTRACE_LOG(("%s\n", buf)); +#else + DHD_MSGTRACE_LOG(("\n")); +#endif /* DHD_LOG_DUMP */ + continue; + } + + /* Copy the format string to parse %s and add "EVENT_LOG: */ + if ((event_hdr.fmt_num >> 2) < raw_event->num_fmts) { + snprintf(fmtstr_loc_buf, FMTSTR_SIZE, + "EVENT_LOG[0x%08x]: %s", log_ptr[event_hdr.count-1], + raw_event->fmts[event_hdr.fmt_num >> 2]); + c_ptr = fmtstr_loc_buf; + } else { + DHD_ERROR(("%s: fmt number out of range \n", __FUNCTION__)); + continue; + } + + for (count = 0; count < (event_hdr.count-1); count++) { + if (c_ptr != NULL) { + if ((c_ptr = strstr(c_ptr, "%")) != NULL) { + c_ptr++; + } + } + + if ((c_ptr != NULL) && (*c_ptr == 's')) { + if ((raw_event->raw_sstr) && + ((log_ptr[count] > raw_event->rodata_start) && + (log_ptr[count] < raw_event->rodata_end))) { + /* ram static string */ + addr = log_ptr[count] - raw_event->rodata_start; + str_tmpptr = raw_event->raw_sstr + addr; + memcpy(str_buf[count], str_tmpptr, SIZE_LOC_STR); + str_buf[count][SIZE_LOC_STR-1] = '\0'; + arg[count].addr = str_buf[count]; + } else if ((raw_event->rom_raw_sstr) && + ((log_ptr[count] > + raw_event->rom_rodata_start) && + (log_ptr[count] < + raw_event->rom_rodata_end))) { + /* rom static string */ + addr = log_ptr[count] - raw_event->rom_rodata_start; + str_tmpptr = raw_event->rom_raw_sstr + addr; + memcpy(str_buf[count], str_tmpptr, SIZE_LOC_STR); + str_buf[count][SIZE_LOC_STR-1] = '\0'; + arg[count].addr = str_buf[count]; + } else { + /* + * Dynamic string OR + * No data for static string. + * So store all string's address as string. 
+ */ + snprintf(str_buf[count], SIZE_LOC_STR, "(s)0x%x", + log_ptr[count]); + arg[count].addr = str_buf[count]; + } + } else { + /* Other than string */ + arg[count].val = log_ptr[count]; + } + } + + DHD_MSGTRACE_LOG((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3], + arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10], + arg[11], arg[12], arg[13], arg[14], arg[15])); + + if (fmtstr_loc_buf[strlen(fmtstr_loc_buf) - 1] != '\n') { + /* Add newline if missing */ + DHD_MSGTRACE_LOG(("\n")); + } + + memset(fmtstr_loc_buf, 0, FMTSTR_SIZE); + + for (i = 0; i < MAX_NO_OF_ARG; i++) { + arg[i].addr = 0; + } + for (i = 0; i < MAX_NO_OF_ARG; i++) { + memset(str_buf[i], 0, SIZE_LOC_STR); + } + + } + DHD_MSGTRACE_LOG(("\n")); + + if (str_buf) { + MFREE(dhd_pub->osh, str_buf, (MAX_NO_OF_ARG * SIZE_LOC_STR)); + } + + if (hdr_ptr) { + MFREE(dhd_pub->osh, hdr_ptr, hdr_ptr_len); + } + } +} + #endif /* SHOW_LOGTRACE */ static void @@ -1281,6 +2015,7 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, { uint i, status, reason; bool group = FALSE, flush_txq = FALSE, link = FALSE; + bool host_data = FALSE; /* prints event data after the case when set */ const char *auth_str; const char *event_name; uchar *buf; @@ -1418,229 +2153,41 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, break; case WLC_E_TXFAIL: - DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf)); + DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status)); break; - case WLC_E_SCAN_COMPLETE: case WLC_E_ASSOC_REQ_IE: case WLC_E_ASSOC_RESP_IE: case WLC_E_PMKID_CACHE: + case WLC_E_SCAN_COMPLETE: DHD_EVENT(("MACEVENT: %s\n", event_name)); - break; - - case WLC_E_PFN_NET_FOUND: - case WLC_E_PFN_NET_LOST: - case WLC_E_PFN_SCAN_COMPLETE: - case WLC_E_PFN_SCAN_NONE: - case WLC_E_PFN_SCAN_ALLGONE: - DHD_EVENT(("PNOEVENT: %s\n", event_name)); - break; - - case WLC_E_PSK_SUP: - case WLC_E_PRUNE: - DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n", - 
event_name, (int)status, (int)reason)); - break; - -#ifdef WIFI_ACT_FRAME - case WLC_E_ACTION_FRAME: - DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf)); - break; -#endif /* WIFI_ACT_FRAME */ - -#ifdef SHOW_LOGTRACE - case WLC_E_TRACE: - { - msgtrace_hdr_t hdr; - uint32 nblost; - uint8 count; - char *s, *p; - static uint32 seqnum_prev = 0; - uint32 *record = NULL; - uint32 *log_ptr = NULL; - uint32 writeindex = 0; - event_log_hdr_t event_hdr; - int no_of_fmts = 0; - char *fmt = NULL; - dhd_event_log_t *raw_event = (dhd_event_log_t *) raw_event_ptr; - - buf = (uchar *) event_data; - memcpy(&hdr, buf, MSGTRACE_HDRLEN); - - if (hdr.version != MSGTRACE_VERSION) { - DHD_EVENT(("\nMACEVENT: %s [unsupported version --> " - "dhd version:%d dongle version:%d]\n", - event_name, MSGTRACE_VERSION, hdr.version)); - /* Reset datalen to avoid display below */ - datalen = 0; - break; - } - - if (hdr.trace_type == MSGTRACE_HDR_TYPE_MSG) { - /* There are 2 bytes available at the end of data */ - buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0'; - - if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) { - DHD_EVENT(("WLC_E_TRACE: [Discarded traces in dongle -->" - "discarded_bytes %d discarded_printf %d]\n", - ntoh32(hdr.discarded_bytes), - ntoh32(hdr.discarded_printf))); - } - - nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1; - if (nblost > 0) { - DHD_EVENT(("WLC_E_TRACE:" - "[Event lost (msg) --> seqnum %d nblost %d\n", - ntoh32(hdr.seqnum), nblost)); - } - seqnum_prev = ntoh32(hdr.seqnum); - - /* Display the trace buffer. 
Advance from - * \n to \n to avoid display big - * printf (issue with Linux printk ) - */ - p = (char *)&buf[MSGTRACE_HDRLEN]; - while (*p != '\0' && (s = strstr(p, "\n")) != NULL) { - *s = '\0'; - DHD_EVENT(("%s\n", p)); - p = s+1; - } - if (*p) - DHD_EVENT(("%s", p)); - - /* Reset datalen to avoid display below */ - datalen = 0; - - } else if (hdr.trace_type == MSGTRACE_HDR_TYPE_LOG) { - /* Let the standard event printing work for now */ - uint32 timestamp, w, malloc_len; - - if (check_event_log_sequence_number(hdr.seqnum)) { - - DHD_EVENT(("%s: WLC_E_TRACE:" - "[Event duplicate (log) %d] dropping!!\n", - __FUNCTION__, hdr.seqnum)); - return; /* drop duplicate events */ - } + break; - p = (char *)&buf[MSGTRACE_HDRLEN]; - datalen -= MSGTRACE_HDRLEN; - w = ntoh32((uint32)*p); - p += 4; - datalen -= 4; - timestamp = ntoh32((uint32)*p); - BCM_REFERENCE(timestamp); - BCM_REFERENCE(w); - - DHD_EVENT(("timestamp %x%x\n", timestamp, w)); - - if (raw_event->fmts) { - malloc_len = datalen+ AVOID_BYTE; - record = (uint32 *)MALLOC(dhd_pub->osh, malloc_len); - if (record == NULL) { - DHD_EVENT(("MSGTRACE_HDR_TYPE_LOG:" - "malloc failed\n")); - return; - } - log_ptr = (uint32 *) (p + datalen); - writeindex = datalen/4; - - if (record) { - while (datalen > 4) { - log_ptr--; - datalen -= 4; - event_hdr.t = *log_ptr; - /* - * Check for partially overriten entries - */ - if (log_ptr - (uint32 *) p < event_hdr.count) { - break; - } - /* - * Check for end of the Frame. - */ - if (event_hdr.tag == EVENT_LOG_TAG_NULL) { - continue; - } - /* - * Check For Special Time Stamp Packet - */ - if (event_hdr.tag == EVENT_LOG_TAG_TS) { - datalen -= 12; - log_ptr = log_ptr - 3; - continue; - } - - log_ptr[0] = event_hdr.t; - if (event_hdr.count > MAX_NO_OF_ARG) { - break; - } - /* Now place the header at the front - * and copy back. 
- */ - log_ptr -= event_hdr.count; - - writeindex = writeindex - event_hdr.count; - record[writeindex++] = event_hdr.t; - for (count = 0; count < (event_hdr.count-1); - count++) { - record[writeindex++] = log_ptr[count]; - } - writeindex = writeindex - event_hdr.count; - datalen = datalen - (event_hdr.count * 4); - no_of_fmts++; - } - } + case WLC_E_PFN_NET_FOUND: + case WLC_E_PFN_NET_LOST: + case WLC_E_PFN_SCAN_NONE: + case WLC_E_PFN_SCAN_ALLGONE: + case WLC_E_PFN_GSCAN_FULL_RESULT: + case WLC_E_PFN_SWC: + DHD_EVENT(("PNOEVENT: %s\n", event_name)); + break; - while (no_of_fmts--) - { - event_log_hdr_t event_hdr; - event_hdr.t = record[writeindex]; - - if ((event_hdr.fmt_num>>2) < raw_event->num_fmts) { - fmt = raw_event->fmts[event_hdr.fmt_num>>2]; - DHD_EVENT((fmt, - record[writeindex + 1], - record[writeindex + 2], - record[writeindex + 3], - record[writeindex + 4], - record[writeindex + 5], - record[writeindex + 6], - record[writeindex + 7], - record[writeindex + 8], - record[writeindex + 9], - record[writeindex + 10], - record[writeindex + 11], - record[writeindex + 12], - record[writeindex + 13], - record[writeindex + 14], - record[writeindex + 15], - record[writeindex + 16])); - - if (fmt[strlen(fmt) - 1] != '\n') { - /* Add newline if missing */ - DHD_EVENT(("\n")); - } - } + case WLC_E_PSK_SUP: + case WLC_E_PRUNE: + DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n", + event_name, (int)status, (int)reason)); + break; - writeindex = writeindex + event_hdr.count; - } +#ifdef WIFI_ACT_FRAME + case WLC_E_ACTION_FRAME: + DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf)); + break; +#endif /* WIFI_ACT_FRAME */ - if (record) { - MFREE(dhd_pub->osh, record, malloc_len); - record = NULL; - } - } else { - while (datalen > 4) { - p += 4; - datalen -= 4; - /* Print each word. DO NOT ntoh it. 
*/ - DHD_EVENT((" %8.8x", *((uint32 *) p))); - } - DHD_EVENT(("\n")); - } - datalen = 0; - } +#ifdef SHOW_LOGTRACE + case WLC_E_TRACE: + { + dhd_eventmsg_print(dhd_pub, event_data, raw_event_ptr, datalen, event_name); break; } #endif /* SHOW_LOGTRACE */ @@ -1661,6 +2208,22 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, break; #endif + case WLC_E_CCA_CHAN_QUAL: + if (datalen) { + buf = (uchar *) event_data; + DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d, " + "channel 0x%02x \n", event_name, event_type, eabuf, (int)status, + (int)reason, (int)auth_type, *(buf + 4))); + } + break; + case WLC_E_ESCAN_RESULT: + { +#ifndef DHD_IFDEBUG + DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d \n", + event_name, event_type, eabuf, (int)status)); +#endif + } + break; default: DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n", event_name, event_type, eabuf, (int)status, (int)reason, @@ -1668,8 +2231,8 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, break; } - /* show any appended data */ - if (DHD_BYTES_ON() && DHD_EVENT_ON() && datalen) { + /* show any appended data if message level is set to bytes or host_data is set */ + if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) { buf = (uchar *) event_data; BCM_REFERENCE(buf); DHD_EVENT((" data (%d) : ", datalen)); @@ -1680,45 +2243,100 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, } #endif /* SHOW_EVENTS */ +/* Stub for now. Will become real function as soon as shim + * is being integrated to Android, Linux etc. 
+ */ int -wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, - wl_event_msg_t *event, void **data_ptr, void *raw_event) +wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport) +{ + return BCME_OK; +} + +int +wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, void **data_ptr, void *raw_event) +{ + wl_evt_pport_t evt_pport; + wl_event_msg_t event; + + /* make sure it is a BRCM event pkt and record event data */ + int ret = wl_host_event_get_data(pktdata, &event, data_ptr); + if (ret != BCME_OK) { + return ret; + } + + /* convert event from network order to host order */ + wl_event_to_host_order(&event); + + /* record event params to evt_pport */ + evt_pport.dhd_pub = dhd_pub; + evt_pport.ifidx = ifidx; + evt_pport.pktdata = pktdata; + evt_pport.data_ptr = data_ptr; + evt_pport.raw_event = raw_event; + +#if defined(WL_WLC_SHIM) && defined(WL_WLC_SHIM_EVENTS) + { + struct wl_shim_node *shim = dhd_pub_shim(dhd_pub); + ASSERT(shim); + ret = wl_shim_event_process(shim, &event, &evt_pport); + } +#else + ret = wl_event_process_default(&event, &evt_pport); +#endif + + return ret; +} + +/* Check whether packet is a BRCM event pkt. If it is, record event data. */ +int +wl_host_event_get_data(void *pktdata, wl_event_msg_t *event, void **data_ptr) { - /* check whether packet is a BRCM event pkt */ bcm_event_t *pvt_data = (bcm_event_t *)pktdata; - uint8 *event_data; - uint32 type, status, datalen; - uint16 flags; - int evlen; - int hostidx; if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) { DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__)); - return (BCME_ERROR); + return BCME_ERROR; } /* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. 
*/ if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) { DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__)); - return (BCME_ERROR); + return BCME_ERROR; } *data_ptr = &pvt_data[1]; - event_data = *data_ptr; - /* memcpy since BRCM event pkt may be unaligned. */ memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t)); + return BCME_OK; +} + +int +wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, + wl_event_msg_t *event, void **data_ptr, void *raw_event) +{ + bcm_event_t *pvt_data; + uint8 *event_data; + uint32 type, status, datalen; + uint16 flags; + int evlen; + + /* make sure it is a BRCM event pkt and record event data */ + int ret = wl_host_event_get_data(pktdata, event, data_ptr); + if (ret != BCME_OK) { + return ret; + } + + pvt_data = (bcm_event_t *)pktdata; + event_data = *data_ptr; + type = ntoh32_ua((void *)&event->event_type); flags = ntoh16_ua((void *)&event->flags); status = ntoh32_ua((void *)&event->status); datalen = ntoh32_ua((void *)&event->datalen); evlen = datalen + sizeof(bcm_event_t); - /* find equivalent host index for event ifidx */ - hostidx = dhd_ifidx2hostidx(dhd_pub->info, event->ifidx); - switch (type) { #ifdef PROP_TXSTATUS case WLC_E_FIFO_CREDIT_MAP: @@ -1794,13 +2412,13 @@ wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, #endif /* WL_CFG80211 */ } } else { -#if !defined(PROP_TXSTATUS) || !defined(PCIE_FULL_DONGLE) +#if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211) DHD_ERROR(("%s: Invalid ifidx %d for %s\n", __FUNCTION__, ifevent->ifidx, event->ifname)); -#endif /* !PROP_TXSTATUS */ +#endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */ } /* send up the if event: btamp user needs it */ - *ifidx = hostidx; + *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); /* push up to external supp/auth */ dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx); break; @@ -1811,41 +2429,42 @@ wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, 
void *pktdata, htsf_update(dhd_pub->info, event_data); break; #endif /* WLMEDIA_HTSF */ -#if defined(NDISVER) && (NDISVER >= 0x0630) case WLC_E_NDIS_LINK: break; -#else - case WLC_E_NDIS_LINK: { - uint32 temp = hton32(WLC_E_LINK); - - memcpy((void *)(&pvt_data->event.event_type), &temp, - sizeof(pvt_data->event.event_type)); - break; - } -#endif /* NDISVER >= 0x0630 */ case WLC_E_PFN_NET_FOUND: + case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */ case WLC_E_PFN_NET_LOST: break; #if defined(PNO_SUPPORT) case WLC_E_PFN_BSSID_NET_FOUND: - case WLC_E_PFN_BSSID_NET_LOST: case WLC_E_PFN_BEST_BATCHING: dhd_pno_event_handler(dhd_pub, event, (void *)event_data); break; -#endif +#endif /* These are what external supplicant/authenticator wants */ case WLC_E_ASSOC_IND: case WLC_E_AUTH_IND: case WLC_E_REASSOC_IND: - dhd_findadd_sta(dhd_pub, hostidx, &event->addr.octet); - break; + dhd_findadd_sta(dhd_pub, + dhd_ifname2idx(dhd_pub->info, event->ifname), + &event->addr.octet); + break; +#if defined(DHD_FW_COREDUMP) + case WLC_E_PSM_WATCHDOG: + DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__)); + if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) { + DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__)); + } + break; +#endif case WLC_E_LINK: #ifdef PCIE_FULL_DONGLE - if (dhd_update_interface_link_status(dhd_pub, (uint8)hostidx, - (uint8)flags) != BCME_OK) + if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, + event->ifname), (uint8)flags) != BCME_OK) break; if (!flags) { - dhd_flow_rings_delete(dhd_pub, hostidx); + dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, + event->ifname)); } /* fall through */ #endif @@ -1853,26 +2472,35 @@ wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, case WLC_E_DEAUTH_IND: case WLC_E_DISASSOC: case WLC_E_DISASSOC_IND: - if (type != WLC_E_LINK) { - dhd_del_sta(dhd_pub, hostidx, &event->addr.octet); - } DHD_EVENT(("%s: Link event %d, flags %x, status 
%x\n", __FUNCTION__, type, flags, status)); #ifdef PCIE_FULL_DONGLE if (type != WLC_E_LINK) { - uint8 ifindex = (uint8)hostidx; + uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname); uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex); - if (DHD_IF_ROLE_STA(role)) { - dhd_flow_rings_delete(dhd_pub, ifindex); - } else { - dhd_flow_rings_delete_for_peer(dhd_pub, ifindex, - &event->addr.octet[0]); + uint8 del_sta = TRUE; +#ifdef WL_CFG80211 + if (role == WLC_E_IF_ROLE_STA && !wl_cfg80211_is_roam_offload() && + !wl_cfg80211_is_event_from_connected_bssid(event, *ifidx)) { + del_sta = FALSE; + } +#endif /* WL_CFG80211 */ + + if (del_sta) { + dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, + event->ifname), &event->addr.octet); + if (role == WLC_E_IF_ROLE_STA) { + dhd_flow_rings_delete(dhd_pub, ifindex); + } else { + dhd_flow_rings_delete_for_peer(dhd_pub, ifindex, + &event->addr.octet[0]); + } } } -#endif +#endif /* PCIE_FULL_DONGLE */ /* fall through */ default: - *ifidx = hostidx; + *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); /* push up to external supp/auth */ dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx); DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n", @@ -1884,28 +2512,15 @@ wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, } #ifdef SHOW_EVENTS - wl_show_host_event(dhd_pub, event, - (void *)event_data, raw_event, dhd_pub->enable_log); + if (DHD_FWLOG_ON() || DHD_EVENT_ON()) { + wl_show_host_event(dhd_pub, event, + (void *)event_data, raw_event, dhd_pub->enable_log); + } #endif /* SHOW_EVENTS */ return (BCME_OK); } -void -wl_event_to_host_order(wl_event_msg_t * evt) -{ - /* Event struct members passed from dongle to host are stored in network - * byte order. Convert all members to host-order. 
- */ - evt->event_type = ntoh32(evt->event_type); - evt->flags = ntoh16(evt->flags); - evt->status = ntoh32(evt->status); - evt->reason = ntoh32(evt->reason); - evt->auth_type = ntoh32(evt->auth_type); - evt->datalen = ntoh32(evt->datalen); - evt->version = ntoh16(evt->version); -} - void dhd_print_buf(void *pbuf, int len, int bytes_per_line) { @@ -2024,12 +2639,12 @@ dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_ __FUNCTION__, enable?"enable":"disable", arg)); /* Contorl the master mode */ - bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf)); - rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode", + master_mode, WLC_SET_VAR, TRUE, 0); rc = rc >= 0 ? 0 : rc; if (rc) DHD_TRACE(("%s: failed to set pkt_filter_mode %d, retcode = %d\n", - __FUNCTION__, master_mode, rc)); + __FUNCTION__, master_mode, rc)); fail: if (arg_org) @@ -2177,11 +2792,10 @@ dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id) { - char iovbuf[32]; int ret; - bcm_mkiovar("pkt_filter_delete", (char *)&id, 4, iovbuf, sizeof(iovbuf)); - ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete", + id, WLC_SET_VAR, TRUE, 0); if (ret < 0) { DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n", __FUNCTION__, id, ret)); @@ -2199,18 +2813,11 @@ void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id) void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode) { - char iovbuf[DHD_IOVAR_BUF_SIZE]; - int iovar_len; int retcode; - iovar_len = bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf)); - if (!iovar_len) { - DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", - __FUNCTION__, sizeof(iovbuf))); - return; - } + retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol", + arp_mode, WLC_SET_VAR, TRUE, 0); - retcode = 
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0); retcode = retcode >= 0 ? 0 : retcode; if (retcode) DHD_ERROR(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n", @@ -2223,18 +2830,11 @@ dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode) void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable) { - char iovbuf[DHD_IOVAR_BUF_SIZE]; - int iovar_len; int retcode; - iovar_len = bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf)); - if (!iovar_len) { - DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", - __FUNCTION__, sizeof(iovbuf))); - return; - } + retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe", + arp_enable, WLC_SET_VAR, TRUE, 0); - retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0); retcode = retcode >= 0 ? 0 : retcode; if (retcode) DHD_ERROR(("%s: failed to enabe ARP offload to %d, retcode = %d\n", @@ -2244,15 +2844,14 @@ dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable) __FUNCTION__, arp_enable)); if (arp_enable) { uint32 version; - bcm_mkiovar("arp_version", 0, 0, iovbuf, sizeof(iovbuf)); - retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0); + retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version", + &version, WLC_GET_VAR, FALSE, 0); if (retcode) { DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n", __FUNCTION__, retcode)); dhd->arp_version = 1; } else { - memcpy(&version, iovbuf, sizeof(version)); DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version)); dhd->arp_version = version; } @@ -2376,20 +2975,13 @@ dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx) int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable) { - char iovbuf[DHD_IOVAR_BUF_SIZE]; - int iov_len; int retcode; if (dhd == NULL) return -1; - iov_len = bcm_mkiovar("ndoe", (char *)&ndo_enable, 4, iovbuf, sizeof(iovbuf)); - if (!iov_len) { - DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", - __FUNCTION__, sizeof(iovbuf))); - return -1; - } - 
retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0); + retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe", + ndo_enable, WLC_SET_VAR, TRUE, 0); if (retcode) DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n", __FUNCTION__, ndo_enable, retcode)); @@ -2465,28 +3057,12 @@ dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx) return retcode; } -/* send up locally generated event */ -void -dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data) -{ - switch (ntoh32(event->event_type)) { -#ifdef WLBTAMP - case WLC_E_BTA_HCI_EVENT: - break; -#endif /* WLBTAMP */ - default: - break; - } - - /* Call per-port handler. */ - dhd_sendup_event(dhdp, event, data); -} /* * returns = TRUE if associated, FALSE if not associated */ -bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval) +bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval) { char bssid[6], zbuf[6]; int ret = -1; @@ -2494,7 +3070,8 @@ bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval) bzero(bssid, 6); bzero(zbuf, 6); - ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, ETHER_ADDR_LEN, FALSE, 0); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, + ETHER_ADDR_LEN, FALSE, ifidx); DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret)); if (ret == BCME_NOTASSOCIATED) { @@ -2507,18 +3084,11 @@ bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval) if (ret < 0) return FALSE; - if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) != 0)) { - /* STA is assocoated BSSID is non zero */ - - if (bss_buf) { - /* return bss if caller provided buf */ - memcpy(bss_buf, bssid, ETHER_ADDR_LEN); - } - return TRUE; - } else { + if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) { DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__)); return FALSE; } + return TRUE; } /* Function to estimate possible DTIM_SKIP value */ @@ -2529,9 +3099,11 @@ dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd) int ret = -1; int dtim_period = 0; 
int ap_beacon = 0; +#ifndef ENABLE_MAX_DTIM_IN_SUSPEND int allowed_skip_dtim_cnt = 0; +#endif /* !ENABLE_MAX_DTIM_IN_SUSPEND */ /* Check if associated */ - if (dhd_is_associated(dhd, NULL, NULL) == FALSE) { + if (dhd_is_associated(dhd, 0, NULL) == FALSE) { DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret)); goto exit; } @@ -2555,6 +3127,13 @@ dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd) goto exit; } +#ifdef ENABLE_MAX_DTIM_IN_SUSPEND + bcn_li_dtim = (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period)); + if (bcn_li_dtim == 0) { + bcn_li_dtim = 1; + } + bcn_li_dtim = MAX(dhd->suspend_bcn_li_dtim, bcn_li_dtim); +#else /* ENABLE_MAX_DTIM_IN_SUSPEND */ /* attemp to use platform defined dtim skip interval */ bcn_li_dtim = dhd->suspend_bcn_li_dtim; @@ -2577,6 +3156,7 @@ dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd) bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period); DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim)); } +#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */ DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n", __FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL)); @@ -2742,7 +3322,7 @@ wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list, * SSIDs list parsing from cscan tlv list */ int -wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes_left) +wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left) { char* str; int idx = 0; @@ -2790,6 +3370,7 @@ wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes *bytes_left -= ssid[idx].SSID_len; str += ssid[idx].SSID_len; + ssid[idx].hidden = TRUE; DHD_TRACE(("%s :size=%d left=%d\n", (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left)); @@ -2863,7 +3444,7 @@ wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num) char* str; char* endptr = NULL; - if ((list_str == NULL) || (*list_str == NULL)) + if ((list_str == NULL)||(*list_str == 
NULL)) return -1; str = *list_str; @@ -2889,3 +3470,329 @@ wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num) *list_str = str; return num; } + + +/* Given filename and download type, returns a buffer pointer and length + * for download to f/w. Type can be FW or NVRAM. + * + */ +int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component, + char ** buffer, int *length) + +{ + int ret = BCME_ERROR; + int len = 0; + int file_len; + void *image = NULL; + uint8 *buf = NULL; + + /* Point to cache if available. */ +#ifdef CACHE_FW_IMAGES + if (component == FW) { + if (dhd->cached_fw_length) { + len = dhd->cached_fw_length; + buf = dhd->cached_fw; + } + } else if (component == NVRAM) { + if (dhd->cached_nvram_length) { + len = dhd->cached_nvram_length; + buf = dhd->cached_nvram; + } + } + else if (component == CLM_BLOB) { + if (dhd->cached_clm_length) { + len = dhd->cached_clm_length; + buf = dhd->cached_clm; + } + } else { + return ret; + } +#endif + /* No Valid cache found on this call */ + if (!len) { + file_len = *length; + *length = 0; + + if (file_path) { + image = dhd_os_open_image(file_path); + if (image == NULL) { + printf("%s: Open image file failed %s\n", __FUNCTION__, file_path); + goto err; + } + } + + buf = MALLOCZ(dhd->osh, file_len); + if (buf == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, file_len)); + goto err; + } + + /* Download image */ + len = dhd_os_get_image_block(buf, file_len, image); + if ((len <= 0 || len > file_len)) { + MFREE(dhd->osh, buf, file_len); + goto err; + } + } + + ret = BCME_OK; + *length = len; + *buffer = buf; + + /* Cache if first call. 
*/ +#ifdef CACHE_FW_IMAGES + if (component == FW) { + if (!dhd->cached_fw_length) { + dhd->cached_fw = buf; + dhd->cached_fw_length = len; + } + } else if (component == NVRAM) { + if (!dhd->cached_nvram_length) { + dhd->cached_nvram = buf; + dhd->cached_nvram_length = len; + } + } + else if (component == CLM_BLOB) { + if (!dhd->cached_clm_length) { + dhd->cached_clm = buf; + dhd->cached_clm_length = len; + } + } +#endif + +err: + if (image) + dhd_os_close_image(image); + + return ret; +} + +int +dhd_download_2_dongle(dhd_pub_t *dhd, char *iovar, uint16 flag, uint16 dload_type, + unsigned char *dload_buf, int len) +{ + struct wl_dload_data *dload_ptr = (struct wl_dload_data *)dload_buf; + int err = 0; + int dload_data_offset; + static char iovar_buf[WLC_IOCTL_MEDLEN]; + int iovar_len; + + memset(iovar_buf, 0, sizeof(iovar_buf)); + + dload_data_offset = OFFSETOF(wl_dload_data_t, data); + dload_ptr->flag = (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT) | flag; + dload_ptr->dload_type = dload_type; + dload_ptr->len = htod32(len - dload_data_offset); + dload_ptr->crc = 0; + len = len + 8 - (len%8); + + iovar_len = bcm_mkiovar(iovar, dload_buf, + (uint)len, iovar_buf, sizeof(iovar_buf)); + if (iovar_len == 0) { + DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n", + __FUNCTION__, iovar)); + return BCME_BUFTOOSHORT; + } + + err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovar_buf, + iovar_len, IOV_SET, 0); + + return err; +} + +int +dhd_download_clm_blob(dhd_pub_t *dhd, unsigned char *image, uint32 len) +{ + int chunk_len; + int size2alloc; + unsigned char *new_buf; + int err = 0, data_offset; + uint16 dl_flag = DL_BEGIN; + + data_offset = OFFSETOF(wl_dload_data_t, data); + size2alloc = data_offset + MAX_CHUNK_LEN; + + if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) { + do { + chunk_len = dhd_os_get_image_block((char *)(new_buf + data_offset), + MAX_CHUNK_LEN, image); + if (chunk_len < 0) { + DHD_ERROR(("%s: dhd_os_get_image_block 
failed (%d)\n", + __FUNCTION__, chunk_len)); + err = BCME_ERROR; + goto exit; + } + + if (len - chunk_len == 0) + dl_flag |= DL_END; + + err = dhd_download_2_dongle(dhd, "clmload", dl_flag, DL_TYPE_CLM, + new_buf, data_offset + chunk_len); + + dl_flag &= ~DL_BEGIN; + + len = len - chunk_len; + } while ((len > 0) && (err == 0)); + } else { + err = BCME_NOMEM; + } +exit: + if (new_buf) { + MFREE(dhd->osh, new_buf, size2alloc); + } + + return err; +} + +int +dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path) +{ + char *clm_blob_path; + int len; + unsigned char *imgbuf = NULL; + int err = BCME_OK; + char iovbuf[WLC_IOCTL_SMLEN]; + wl_country_t *cspec; + + if (clm_path[0] != '\0') { + if (strlen(clm_path) > MOD_PARAM_PATHLEN) { + DHD_ERROR(("clm path exceeds max len\n")); + return BCME_ERROR; + } + clm_blob_path = clm_path; + DHD_TRACE(("clm path from module param:%s\n", clm_path)); + } else { + clm_blob_path = CONFIG_BCMDHD_CLM_PATH; + } + + /* If CLM blob file is found on the filesystem, download the file. + * After CLM file download or If the blob file is not present, + * validate the country code before proceeding with the initialization. + * If country code is not valid, fail the initialization. + */ + + imgbuf = dhd_os_open_image((char *)clm_blob_path); + if (imgbuf == NULL) { + printf("%s: Ignore clm file %s\n", __FUNCTION__, clm_path); + goto exit; + } + + len = dhd_os_get_image_size(imgbuf); + + if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && imgbuf) { + bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf)); + err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0); + if (err) { + DHD_ERROR(("%s: country code get failed\n", __FUNCTION__)); + goto exit; + } + + cspec = (wl_country_t *)iovbuf; + if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) != 0) { + DHD_ERROR(("%s: CLM already exist in F/W, " + "new CLM data will be added to the end of existing CLM data!\n", + __FUNCTION__)); + } + + /* Found blob file. 
Download the file */ + DHD_ERROR(("clm file download from %s \n", clm_blob_path)); + err = dhd_download_clm_blob(dhd, imgbuf, len); + if (err) { + DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err)); + /* Retrieve clmload_status and print */ + bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf)); + err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0); + if (err) { + DHD_ERROR(("%s: clmload_status get failed err=%d \n", + __FUNCTION__, err)); + } else { + DHD_ERROR(("%s: clmload_status: %d \n", + __FUNCTION__, *((int *)iovbuf))); + if (*((int *)iovbuf) == CHIPID_MISMATCH) { + DHD_ERROR(("Chip ID mismatch error \n")); + } + } + err = BCME_ERROR; + goto exit; + } else { + DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__)); + } + } else { + DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, imgbuf)); +#ifdef DHD_USE_CLMINFO_PARSER + err = BCME_ERROR; + goto exit; +#endif /* DHD_USE_CLMINFO_PARSER */ + } + + /* Verify country code */ + bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf)); + err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0); + if (err) { + DHD_ERROR(("%s: country code get failed\n", __FUNCTION__)); + goto exit; + } + + cspec = (wl_country_t *)iovbuf; + if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) { + /* Country code not initialized or CLM download not proper */ + DHD_ERROR(("country code not initialized\n")); + err = BCME_ERROR; + } +exit: + + if (imgbuf) { + dhd_os_close_image(imgbuf); + } + + return err; +} + +void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length) +{ +#ifdef CACHE_FW_IMAGES + return; +#endif + MFREE(dhd->osh, buffer, length); +} +/* Parse EAPOL 4 way handshake messages */ +void +dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction) +{ + unsigned char type; + int pair, ack, mic, kerr, req, sec, install; + unsigned short us_tmp; + type = dump_data[18]; + if (type == 2 || type 
== 254) { + us_tmp = (dump_data[19] << 8) | dump_data[20]; + pair = 0 != (us_tmp & 0x08); + ack = 0 != (us_tmp & 0x80); + mic = 0 != (us_tmp & 0x100); + kerr = 0 != (us_tmp & 0x400); + req = 0 != (us_tmp & 0x800); + sec = 0 != (us_tmp & 0x200); + install = 0 != (us_tmp & 0x40); + if (!sec && !mic && ack && !install && pair && !kerr && !req) { + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M1 of 4way\n", + ifname, direction ? "TX" : "RX")); + } else if (pair && !install && !ack && mic && !sec && !kerr && !req) { + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M2 of 4way\n", + ifname, direction ? "TX" : "RX")); + } else if (pair && ack && mic && sec && !kerr && !req) { + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M3 of 4way\n", + ifname, direction ? "TX" : "RX")); + } else if (pair && !install && !ack && mic && sec && !req && !kerr) { + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M4 of 4way\n", + ifname, direction ? "TX" : "RX")); + } else { + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n", + ifname, direction ? "TX" : "RX", + dump_data[14], dump_data[15], dump_data[30])); + } + } else { + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n", + ifname, direction ? 
"TX" : "RX", + dump_data[14], dump_data[15], dump_data[30])); + } +} diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_config.c b/drivers/amlogic/wifi/bcmdhd/dhd_config.c new file mode 100644 index 0000000000000..a6508328f57e6 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_config.c @@ -0,0 +1,2581 @@ + +#include +#include + +#include +#include +#include +#if defined(HW_OOB) || defined(FORCE_WOWLAN) +#include +#include +#include +#include +#endif + +#include +#include + +/* message levels */ +#define CONFIG_ERROR_LEVEL 0x0001 +#define CONFIG_TRACE_LEVEL 0x0002 + +uint config_msg_level = CONFIG_ERROR_LEVEL; + +#define CONFIG_ERROR(x) \ + do { \ + if (config_msg_level & CONFIG_ERROR_LEVEL) { \ + printk(KERN_ERR "CONFIG-ERROR) "); \ + printk x; \ + } \ + } while (0) +#define CONFIG_TRACE(x) \ + do { \ + if (config_msg_level & CONFIG_TRACE_LEVEL) { \ + printk(KERN_ERR "CONFIG-TRACE) "); \ + printk x; \ + } \ + } while (0) + +#define MAXSZ_BUF 1000 +#define MAXSZ_CONFIG 4096 + +#define FW_TYPE_STA 0 +#define FW_TYPE_APSTA 1 +#define FW_TYPE_P2P 2 +#define FW_TYPE_ES 3 +#define FW_TYPE_MFG 4 +#define FW_TYPE_G 0 +#define FW_TYPE_AG 1 + +#ifdef CONFIG_PATH_AUTO_SELECT +#ifdef BCMSDIO +#define CONFIG_BCM4330B2 "config_40183b2.txt" +#define CONFIG_BCM43362A0 "config_40181a0.txt" +#define CONFIG_BCM43362A2 "config_40181a2.txt" +#define CONFIG_BCM43438A0 "config_43438a0.txt" +#define CONFIG_BCM43438A1 "config_43438a1.txt" +#define CONFIG_BCM43436B0 "config_43436b0.txt" +#define CONFIG_BCM4334B1 "config_4334b1.txt" +#define CONFIG_BCM43341B0 "config_43341b0.txt" +#define CONFIG_BCM43241B4 "config_43241b4.txt" +#define CONFIG_BCM4339A0 "config_4339a0.txt" +#define CONFIG_BCM43455C0 "config_43455c0.txt" +#define CONFIG_BCM43456C5 "config_43456c5.txt" +#define CONFIG_BCM4354A1 "config_4354a1.txt" +#endif +#define CONFIG_BCM4356A2 "config_4356a2.txt" +#define CONFIG_BCM4358A3 "config_4358.txt" +#define CONFIG_BCM4359B1 "config_4359b1.txt" +#define CONFIG_BCM4359C0 
"config_4359c0.txt" +#endif + +#ifdef BCMSDIO +#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */ + +#define FW_BCM4330B2 "fw_bcm40183b2" +#define FW_BCM4330B2_AG "fw_bcm40183b2_ag" +#define FW_BCM43362A0 "fw_bcm40181a0" +#define FW_BCM43362A2 "fw_bcm40181a2" +#define FW_BCM4334B1 "fw_bcm4334b1_ag" +#define FW_BCM43438A0 "fw_bcm43438a0" +#define FW_BCM43438A1 "fw_bcm43438a1" +#define FW_BCM43436B0 "fw_bcm43436b0" +#define FW_BCM43012B0 "fw_bcm43012b0" +#define FW_BCM43341B1 "fw_bcm43341b0_ag" +#define FW_BCM43241B4 "fw_bcm43241b4_ag" +#define FW_BCM4339A0 "fw_bcm4339a0_ag" +#define FW_BCM43455C0 "fw_bcm43455c0_ag" +#define FW_BCM43456C5 "fw_bcm43456c5_ag" +#define FW_BCM4354A1 "fw_bcm4354a1_ag" +#define FW_BCM4356A2 "fw_bcm4356a2_ag" +#define FW_BCM4358A3 "fw_bcm4358_ag" +#define FW_BCM4359B1 "fw_bcm4359b1_ag" +#define FW_BCM4359C0 "fw_bcm4359c0_ag" + +#define CLM_BCM43012B0 "clm_bcm43012b0" +#endif +#ifdef BCMPCIE +#define FW_BCM4356A2 "fw_bcm4356a2_pcie_ag" +#endif + +#define htod32(i) i +#define htod16(i) i +#define dtoh32(i) i +#define dtoh16(i) i +#define htodchanspec(i) i +#define dtohchanspec(i) i + +#ifdef BCMSDIO +void +dhd_conf_free_mac_list(wl_mac_list_ctrl_t *mac_list) +{ + int i; + + CONFIG_TRACE(("%s called\n", __FUNCTION__)); + if (mac_list->m_mac_list_head) { + for (i=0; icount; i++) { + if (mac_list->m_mac_list_head[i].mac) { + CONFIG_TRACE(("%s Free mac %p\n", __FUNCTION__, mac_list->m_mac_list_head[i].mac)); + kfree(mac_list->m_mac_list_head[i].mac); + } + } + CONFIG_TRACE(("%s Free m_mac_list_head %p\n", __FUNCTION__, mac_list->m_mac_list_head)); + kfree(mac_list->m_mac_list_head); + } + mac_list->count = 0; +} + +void +dhd_conf_free_chip_nv_path_list(wl_chip_nv_path_list_ctrl_t *chip_nv_list) +{ + CONFIG_TRACE(("%s called\n", __FUNCTION__)); + + if (chip_nv_list->m_chip_nv_path_head) { + CONFIG_TRACE(("%s Free %p\n", __FUNCTION__, chip_nv_list->m_chip_nv_path_head)); + kfree(chip_nv_list->m_chip_nv_path_head); + } + 
chip_nv_list->count = 0; +} + +#if defined(HW_OOB) || defined(FORCE_WOWLAN) +void +dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip) +{ + uint32 gpiocontrol, addr; + + if (CHIPID(chip) == BCM43362_CHIP_ID) { + printf("%s: Enable HW OOB for 43362\n", __FUNCTION__); + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, gpiocontrol); + gpiocontrol = bcmsdh_reg_read(sdh, addr, 4); + gpiocontrol |= 0x2; + bcmsdh_reg_write(sdh, addr, 4, gpiocontrol); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10005, 0xf, NULL); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10006, 0x0, NULL); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10007, 0x2, NULL); + } +} +#endif + +int +dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, uint8 *mac) +{ + int i, err = -1; + uint8 *ptr = 0; + unsigned char tpl_code, tpl_link='\0'; + uint8 header[3] = {0x80, 0x07, 0x19}; + uint8 *cis; + + if (!(cis = MALLOC(dhd->osh, SBSDIO_CIS_SIZE_LIMIT))) { + CONFIG_ERROR(("%s: cis malloc failed\n", __FUNCTION__)); + return err; + } + bzero(cis, SBSDIO_CIS_SIZE_LIMIT); + + if ((err = bcmsdh_cis_read(sdh, 0, cis, SBSDIO_CIS_SIZE_LIMIT))) { + CONFIG_ERROR(("%s: cis read err %d\n", __FUNCTION__, err)); + MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT); + return err; + } + err = -1; // reset err; + ptr = cis; + do { + /* 0xff means we're done */ + tpl_code = *ptr; + ptr++; + if (tpl_code == 0xff) + break; + + /* null entries have no link field or data */ + if (tpl_code == 0x00) + continue; + + tpl_link = *ptr; + ptr++; + /* a size of 0xff also means we're done */ + if (tpl_link == 0xff) + break; + if (config_msg_level & CONFIG_TRACE_LEVEL) { + printf("%s: tpl_code=0x%02x, tpl_link=0x%02x, tag=0x%02x\n", + __FUNCTION__, tpl_code, tpl_link, *ptr); + printk("%s: value:", __FUNCTION__); + for (i=0; iosh, cis, SBSDIO_CIS_SIZE_LIMIT); + + return err; +} + +void +dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *fw_path) +{ + int i, j; + uint8 mac[6]={0}; + int fw_num=0, mac_num=0; + uint32 oui, nic; + wl_mac_list_t *mac_list; 
+ wl_mac_range_t *mac_range; + char *pfw_name; + int fw_type, fw_type_new; + + mac_list = dhd->conf->fw_by_mac.m_mac_list_head; + fw_num = dhd->conf->fw_by_mac.count; + if (!mac_list || !fw_num) + return; + + if (dhd_conf_get_mac(dhd, sdh, mac)) { + CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__)); + return; + } + oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]); + nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]); + + /* find out the last '/' */ + i = strlen(fw_path); + while (i > 0) { + if (fw_path[i] == '/') break; + i--; + } + pfw_name = &fw_path[i+1]; + fw_type = (strstr(pfw_name, "_mfg") ? + FW_TYPE_MFG : (strstr(pfw_name, "_apsta") ? + FW_TYPE_APSTA : (strstr(pfw_name, "_p2p") ? + FW_TYPE_P2P : FW_TYPE_STA))); + + for (i=0; i= mac_range[j].nic_start && nic <= mac_range[j].nic_end) { + strcpy(pfw_name, mac_list[i].name); + printf("%s: matched oui=0x%06X, nic=0x%06X\n", + __FUNCTION__, oui, nic); + printf("%s: fw_path=%s\n", __FUNCTION__, fw_path); + return; + } + } + } + } +} + +void +dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_path) +{ + int i, j; + uint8 mac[6]={0}; + int nv_num=0, mac_num=0; + uint32 oui, nic; + wl_mac_list_t *mac_list; + wl_mac_range_t *mac_range; + char *pnv_name; + + mac_list = dhd->conf->nv_by_mac.m_mac_list_head; + nv_num = dhd->conf->nv_by_mac.count; + if (!mac_list || !nv_num) + return; + + if (dhd_conf_get_mac(dhd, sdh, mac)) { + CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__)); + return; + } + oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]); + nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]); + + /* find out the last '/' */ + i = strlen(nv_path); + while (i > 0) { + if (nv_path[i] == '/') break; + i--; + } + pnv_name = &nv_path[i+1]; + + for (i=0; i= mac_range[j].nic_start && nic <= mac_range[j].nic_end) { + strcpy(pnv_name, mac_list[i].name); + printf("%s: matched oui=0x%06X, nic=0x%06X\n", + __FUNCTION__, oui, nic); + printf("%s: nv_path=%s\n", __FUNCTION__, nv_path); + 
return; + } + } + } + } +} +#endif + +void +dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path) +{ + int fw_type, ag_type; + uint chip, chiprev; + int i; + char fw_tail[20]; + + chip = dhd->conf->chip; + chiprev = dhd->conf->chiprev; + + if (fw_path[0] == '\0') { +#ifdef CONFIG_BCMDHD_FW_PATH + bcm_strncpy_s(fw_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1); + if (fw_path[0] == '\0') +#endif + { + printf("firmware path is null\n"); + return; + } + } +#ifndef FW_PATH_AUTO_SELECT + return; +#endif + + /* find out the last '/' */ + i = strlen(fw_path); + while (i > 0) { + if (fw_path[i] == '/') break; + i--; + } +#ifdef BAND_AG + ag_type = FW_TYPE_AG; +#else + ag_type = strstr(&fw_path[i], "_ag") ? FW_TYPE_AG : FW_TYPE_G; +#endif + fw_type = (strstr(&fw_path[i], "_mfg") ? FW_TYPE_MFG : + (strstr(&fw_path[i], "_apsta") ? FW_TYPE_APSTA : + (strstr(&fw_path[i], "_p2p") ? FW_TYPE_P2P : + (strstr(&fw_path[i], "_es") ? FW_TYPE_ES : + FW_TYPE_STA)))); + + if (fw_type == FW_TYPE_STA) + strcpy(fw_tail, ".bin"); + else if (fw_type == FW_TYPE_APSTA) + strcpy(fw_tail, "_apsta.bin"); + else if (fw_type == FW_TYPE_P2P) + strcpy(fw_tail, "_p2p.bin"); + else if (fw_type == FW_TYPE_ES) + strcpy(fw_tail, "_es.bin"); + else if (fw_type == FW_TYPE_MFG) + strcpy(fw_tail, "_mfg.bin"); + + switch (chip) { +#ifdef BCMSDIO + case BCM4330_CHIP_ID: + if (ag_type == FW_TYPE_G) { + if (chiprev == BCM4330B2_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM4330B2); + } else { + if (chiprev == BCM4330B2_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM4330B2_AG); + } + break; + case BCM43362_CHIP_ID: + if (chiprev == BCM43362A0_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM43362A0); + else + strcpy(&fw_path[i+1], FW_BCM43362A2); + break; + case BCM43430_CHIP_ID: + if (chiprev == BCM43430A0_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM43438A0); + else if (chiprev == BCM43430A1_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM43438A1); + else if (chiprev == BCM43430A2_CHIP_REV) + strcpy(&fw_path[i+1], 
FW_BCM43436B0); + break; + case BCM43012_CHIP_ID: + if (chiprev == BCM43012B0_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM43012B0); + case BCM4334_CHIP_ID: + if (chiprev == BCM4334B1_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM4334B1); + break; + case BCM43340_CHIP_ID: + case BCM43341_CHIP_ID: + if (chiprev == BCM43341B0_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM43341B1); + break; + case BCM4324_CHIP_ID: + if (chiprev == BCM43241B4_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM43241B4); + break; + case BCM4335_CHIP_ID: + if (chiprev == BCM4335A0_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM4339A0); + break; + case BCM4339_CHIP_ID: + if (chiprev == BCM4339A0_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM4339A0); + break; + case BCM4345_CHIP_ID: + case BCM43454_CHIP_ID: + if (chiprev == BCM43455C0_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM43455C0); + else if (chiprev == BCM43455C5_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM43456C5); + break; + case BCM4354_CHIP_ID: + if (chiprev == BCM4354A1_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM4354A1); + else if (chiprev == BCM4356A2_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM4356A2); + break; + case BCM4356_CHIP_ID: + case BCM4371_CHIP_ID: + if (chiprev == BCM4356A2_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM4356A2); + break; + case BCM43569_CHIP_ID: + if (chiprev == BCM4358A3_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM4358A3); + break; + case BCM4359_CHIP_ID: + if (chiprev == BCM4359B1_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM4359B1); + else if (chiprev == BCM4359C0_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM4359C0); + break; +#endif +#ifdef BCMPCIE + case BCM4354_CHIP_ID: + case BCM4356_CHIP_ID: + if (chiprev == BCM4356A2_CHIP_REV) + strcpy(&fw_path[i+1], FW_BCM4356A2); + break; +#endif + default: + strcpy(&fw_path[i+1], "fw_bcmdhd"); + } + strcat(fw_path, fw_tail); + + CONFIG_TRACE(("%s: firmware_path=%s\n", __FUNCTION__, fw_path)); +} + +void +dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path) +{ + uint chip, chiprev; + int i; + char fw_tail[20]; + + chip = 
dhd->conf->chip; + chiprev = dhd->conf->chiprev; + + if (clm_path[0] == '\0') { + printf("clm path is null\n"); + return; + } + + /* find out the last '/' */ + i = strlen(clm_path); + while (i > 0) { + if (clm_path[i] == '/') break; + i--; + } + + strcpy(fw_tail, ".blob"); + + switch (chip) { +#ifdef BCMSDIO + case BCM43012_CHIP_ID: + if (chiprev == BCM43012B0_CHIP_REV) + strcpy(&clm_path[i+1], CLM_BCM43012B0); + break; +#endif + default: + strcpy(&clm_path[i+1], "clm_bcmdhd"); + } + strcat(clm_path, fw_tail); + + CONFIG_TRACE(("%s: clm_path=%s\n", __FUNCTION__, clm_path)); +} + +void +dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path) +{ + int matched=-1; + uint chip, chiprev; + int i; + + chip = dhd->conf->chip; + chiprev = dhd->conf->chiprev; + + for (i=0; iconf->nv_by_chip.count; i++) { + if (chip==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chip && + chiprev==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chiprev) { + matched = i; + break; + } + } + if (matched < 0) + return; + + if (nv_path[0] == '\0') { +#ifdef CONFIG_BCMDHD_NVRAM_PATH + bcm_strncpy_s(nv_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1); + if (nv_path[0] == '\0') +#endif + { + printf("nvram path is null\n"); + return; + } + } + + /* find out the last '/' */ + i = strlen(nv_path); + while (i > 0) { + if (nv_path[i] == '/') break; + i--; + } + + strcpy(&nv_path[i+1], dhd->conf->nv_by_chip.m_chip_nv_path_head[matched].name); + + CONFIG_TRACE(("%s: nvram_path=%s\n", __FUNCTION__, nv_path)); +} + +void +dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path) +{ + int i; + + if (src_path[0] == '\0') { + printf("src_path is null\n"); + return; + } else + strcpy(dst_path, src_path); + + /* find out the last '/' */ + i = strlen(dst_path); + while (i > 0) { + if (dst_path[i] == '/') break; + i--; + } + strcpy(&dst_path[i+1], dst_name); + + CONFIG_TRACE(("%s: dst_path=%s\n", __FUNCTION__, dst_path)); +} + +#ifdef CONFIG_PATH_AUTO_SELECT +void 
+dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path) +{ + uint chip, chiprev; + int i; + + chip = dhd->conf->chip; + chiprev = dhd->conf->chiprev; + + if (conf_path[0] == '\0') { + printf("config path is null\n"); + return; + } + + /* find out the last '/' */ + i = strlen(conf_path); + while (i > 0) { + if (conf_path[i] == '/') break; + i--; + } + + switch (chip) { +#ifdef BCMSDIO + case BCM4330_CHIP_ID: + if (chiprev == BCM4330B2_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM4330B2); + break; + case BCM43362_CHIP_ID: + if (chiprev == BCM43362A0_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM43362A0); + else + strcpy(&conf_path[i+1], CONFIG_BCM43362A2); + break; + case BCM43430_CHIP_ID: + if (chiprev == BCM43430A0_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM43438A0); + else if (chiprev == BCM43430A1_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM43438A1); + else if (chiprev == BCM43430A2_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM43436B0); + break; + case BCM4334_CHIP_ID: + if (chiprev == BCM4334B1_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM4334B1); + break; + case BCM43340_CHIP_ID: + case BCM43341_CHIP_ID: + if (chiprev == BCM43341B0_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM43341B0); + break; + case BCM4324_CHIP_ID: + if (chiprev == BCM43241B4_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM43241B4); + break; + case BCM4335_CHIP_ID: + if (chiprev == BCM4335A0_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM4339A0); + break; + case BCM4345_CHIP_ID: + case BCM43454_CHIP_ID: + if (chiprev == BCM43455C0_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM43455C0); + else if (chiprev == BCM43455C5_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM43456C5); + break; + case BCM4339_CHIP_ID: + if (chiprev == BCM4339A0_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM4339A0); + break; + case BCM4354_CHIP_ID: + if (chiprev == BCM4354A1_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM4354A1); + else if (chiprev == BCM4356A2_CHIP_REV) + strcpy(&conf_path[i+1], 
CONFIG_BCM4356A2); + break; + case BCM4356_CHIP_ID: + case BCM4371_CHIP_ID: + if (chiprev == BCM4356A2_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM4356A2); + break; + case BCM43569_CHIP_ID: + if (chiprev == BCM4358A3_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM4358A3); + break; + case BCM4359_CHIP_ID: + if (chiprev == BCM4359B1_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM4359B1); + else if (chiprev == BCM4359C0_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM4359C0); + break; +#endif +#ifdef BCMPCIE + case BCM4354_CHIP_ID: + case BCM4356_CHIP_ID: + if (chiprev == BCM4356A2_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM4356A2); + break; +#endif + } + + CONFIG_TRACE(("%s: config_path=%s\n", __FUNCTION__, conf_path)); +} +#endif + +int +dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, + int def, bool down) +{ + int ret = -1; + char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ + + if (val >= def) { + if (down) { + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0) + CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, ret)); + } + if (cmd == WLC_SET_VAR) { + printf("%s: set %s %d\n", __FUNCTION__, name, val); + bcm_mkiovar(name, (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret)); + } else { + printf("%s: set %s %d %d\n", __FUNCTION__, name, cmd, val); + if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, &val, sizeof(val), TRUE, 0)) < 0) + CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret)); + } + } + + return ret; +} + +int +dhd_conf_set_bufiovar(dhd_pub_t *dhd, uint cmd, char *name, char *buf, + int len, bool down) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + + if (down) { + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0) + CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, ret)); + 
} + + if (cmd == WLC_SET_VAR) { + bcm_mkiovar(name, buf, len, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret)); + } else { + if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, TRUE, 0)) < 0) + CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret)); + } + + return ret; +} + +int +dhd_conf_get_iovar(dhd_pub_t *dhd, int cmd, char *name, char *buf, int len, int ifidx) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + + if (cmd == WLC_GET_VAR) { + if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) { + ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, sizeof(iovbuf), FALSE, ifidx); + if (!ret) { + memcpy(buf, iovbuf, len); + } else { + CONFIG_ERROR(("%s: get iovar %s failed %d\n", __FUNCTION__, name, ret)); + } + } else { + CONFIG_ERROR(("%s: mkiovar %s failed\n", __FUNCTION__, name)); + } + } else { + ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, FALSE, 0); + if (ret < 0) + CONFIG_ERROR(("%s: get iovar %s failed %d\n", __FUNCTION__, name, ret)); + } + + return ret; +} + +uint +dhd_conf_get_band(dhd_pub_t *dhd) +{ + uint band = WLC_BAND_AUTO; + + if (dhd && dhd->conf) + band = dhd->conf->band; + else + CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__)); + + return band; +} + +int +dhd_conf_set_country(dhd_pub_t *dhd) +{ + int bcmerror = -1; + + memset(&dhd->dhd_cspec, 0, sizeof(wl_country_t)); + printf("%s: set country %s, revision %d\n", __FUNCTION__, + dhd->conf->cspec.ccode, dhd->conf->cspec.rev); + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "country", (char *)&dhd->conf->cspec, sizeof(wl_country_t), FALSE); + + return bcmerror; +} + +int +dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec) +{ + int bcmerror = -1; + + memset(cspec, 0, sizeof(wl_country_t)); + bcm_mkiovar("country", NULL, 0, (char*)cspec, sizeof(wl_country_t)); + if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t), FALSE, 0)) < 0) + 
CONFIG_ERROR(("%s: country code getting failed %d\n", __FUNCTION__, bcmerror)); + else + printf("Country code: %s (%s/%d)\n", cspec->country_abbrev, cspec->ccode, cspec->rev); + + return bcmerror; +} + +int +dhd_conf_get_country_from_config(dhd_pub_t *dhd, wl_country_t *cspec) +{ + int bcmerror = -1, i; + struct dhd_conf *conf = dhd->conf; + + for (i = 0; i < conf->country_list.count; i++) { + if (!strncmp(cspec->country_abbrev, conf->country_list.cspec[i].country_abbrev, 2)) { + memcpy(cspec->ccode, + conf->country_list.cspec[i].ccode, WLC_CNTRY_BUF_SZ); + cspec->rev = conf->country_list.cspec[i].rev; + printf("%s: %s/%d\n", __FUNCTION__, cspec->ccode, cspec->rev); + return 0; + } + } + + return bcmerror; +} + +int +dhd_conf_fix_country(dhd_pub_t *dhd) +{ + int bcmerror = -1; + uint band; + wl_uint32_list_t *list; + u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)]; + + if (!(dhd && dhd->conf)) { + return bcmerror; + } + + memset(valid_chan_list, 0, sizeof(valid_chan_list)); + list = (wl_uint32_list_t *)(void *) valid_chan_list; + list->count = htod32(WL_NUMCHANNELS); + if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), FALSE, 0)) < 0) { + CONFIG_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, bcmerror)); + } + + band = dhd_conf_get_band(dhd); + + if (bcmerror || ((band==WLC_BAND_AUTO || band==WLC_BAND_2G) && + dtoh32(list->count)<11)) { + CONFIG_ERROR(("%s: bcmerror=%d, # of channels %d\n", + __FUNCTION__, bcmerror, dtoh32(list->count))); + if ((bcmerror = dhd_conf_set_country(dhd)) < 0) { + strcpy(dhd->conf->cspec.country_abbrev, "US"); + dhd->conf->cspec.rev = 0; + strcpy(dhd->conf->cspec.ccode, "US"); + dhd_conf_set_country(dhd); + } + } + + return bcmerror; +} + +bool +dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel) +{ + int i; + bool match = false; + + if (dhd && dhd->conf) { + if (dhd->conf->channels.count == 0) + return true; + for (i=0; iconf->channels.count; i++) { + if (channel == 
dhd->conf->channels.channel[i]) + match = true; + } + } else { + match = true; + CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__)); + } + + return match; +} + +int +dhd_conf_set_roam(dhd_pub_t *dhd) +{ + int bcmerror = -1; + struct dhd_conf *conf = dhd->conf; + + dhd_roam_disable = conf->roam_off; + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "roam_off", dhd->conf->roam_off, 0, FALSE); + + if (!conf->roam_off || !conf->roam_off_suspend) { + printf("%s: set roam_trigger %d\n", __FUNCTION__, conf->roam_trigger[0]); + dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_TRIGGER, "WLC_SET_ROAM_TRIGGER", + (char *)conf->roam_trigger, sizeof(conf->roam_trigger), FALSE); + + printf("%s: set roam_scan_period %d\n", __FUNCTION__, conf->roam_scan_period[0]); + dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_SCAN_PERIOD, "WLC_SET_ROAM_SCAN_PERIOD", + (char *)conf->roam_scan_period, sizeof(conf->roam_scan_period), FALSE); + + printf("%s: set roam_delta %d\n", __FUNCTION__, conf->roam_delta[0]); + dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_DELTA, "WLC_SET_ROAM_DELTA", + (char *)conf->roam_delta, sizeof(conf->roam_delta), FALSE); + + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "fullroamperiod", dhd->conf->fullroamperiod, 1, FALSE); + } + + return bcmerror; +} + +void +dhd_conf_set_bw_cap(dhd_pub_t *dhd) +{ + struct { + u32 band; + u32 bw_cap; + } param = {0, 0}; + + if (dhd->conf->bw_cap_2g >= 0) { + param.band = WLC_BAND_2G; + param.bw_cap = (uint)dhd->conf->bw_cap_2g; + printf("%s: set bw_cap 2g %d\n", __FUNCTION__, param.bw_cap); + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "bw_cap", (char *)¶m, sizeof(param), TRUE); + } + + if (dhd->conf->bw_cap_5g >= 0) { + param.band = WLC_BAND_5G; + param.bw_cap = (uint)dhd->conf->bw_cap_5g; + printf("%s: set bw_cap 5g %d\n", __FUNCTION__, param.bw_cap); + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "bw_cap", (char *)¶m, sizeof(param), TRUE); + } +} + +void +dhd_conf_get_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acp) +{ + int bcmerror = -1; + char 
iovbuf[WLC_IOCTL_SMLEN]; + edcf_acparam_t *acparam; + + bzero(iovbuf, sizeof(iovbuf)); + + /* + * Get current acparams, using buf as an input buffer. + * Return data is array of 4 ACs of wme params. + */ + if (mode == 0) + bcm_mkiovar("wme_ac_sta", NULL, 0, iovbuf, sizeof(iovbuf)); + else + bcm_mkiovar("wme_ac_ap", NULL, 0, iovbuf, sizeof(iovbuf)); + if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { + CONFIG_ERROR(("%s: wme_ac_sta getting failed %d\n", __FUNCTION__, bcmerror)); + return; + } + memcpy((char*)acp, iovbuf, sizeof(edcf_acparam_t)*AC_COUNT); + + acparam = &acp[AC_BK]; + CONFIG_TRACE(("%s: BK: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + __FUNCTION__, + acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, + acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acparam->TXOP)); + acparam = &acp[AC_BE]; + CONFIG_TRACE(("%s: BE: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + __FUNCTION__, + acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, + acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acparam->TXOP)); + acparam = &acp[AC_VI]; + CONFIG_TRACE(("%s: VI: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + __FUNCTION__, + acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, + acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acparam->TXOP)); + acparam = &acp[AC_VO]; + CONFIG_TRACE(("%s: VO: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + __FUNCTION__, + acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, + acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acparam->TXOP)); + + return; +} + +void +dhd_conf_update_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acparam_cur, int aci) +{ + int aifsn, ecwmin, ecwmax, txop; + edcf_acparam_t *acp; + struct dhd_conf *conf = dhd->conf; + wme_param_t *wme; + + if (mode == 0) + wme = &conf->wme_sta; + else + wme = &conf->wme_ap; + + /* Default value */ + aifsn 
= acparam_cur->ACI&EDCF_AIFSN_MASK; + ecwmin = acparam_cur->ECW&EDCF_ECWMIN_MASK; + ecwmax = (acparam_cur->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT; + txop = acparam_cur->TXOP; + + /* Modified value */ + if (wme->aifsn[aci] > 0) + aifsn = wme->aifsn[aci]; + if (wme->ecwmin[aci] > 0) + ecwmin = wme->ecwmin[aci]; + if (wme->ecwmax[aci] > 0) + ecwmax = wme->ecwmax[aci]; + if (wme->txop[aci] > 0) + txop = wme->txop[aci]; + + if (!(wme->aifsn[aci] || wme->ecwmin[aci] || + wme->ecwmax[aci] || wme->txop[aci])) + return; + + /* Update */ + acp = acparam_cur; + acp->ACI = (acp->ACI & ~EDCF_AIFSN_MASK) | (aifsn & EDCF_AIFSN_MASK); + acp->ECW = ((ecwmax << EDCF_ECWMAX_SHIFT) & EDCF_ECWMAX_MASK) | (acp->ECW & EDCF_ECWMIN_MASK); + acp->ECW = ((acp->ECW & EDCF_ECWMAX_MASK) | (ecwmin & EDCF_ECWMIN_MASK)); + acp->TXOP = txop; + + printf("%s: wme_ac %s aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + __FUNCTION__, mode?"ap":"sta", + acp->ACI, acp->ACI&EDCF_AIFSN_MASK, + acp->ECW&EDCF_ECWMIN_MASK, (acp->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acp->TXOP); + + /* + * Now use buf as an output buffer. + * Put WME acparams after "wme_ac\0" in buf. + * NOTE: only one of the four ACs can be set at a time. 
+ */ + if (mode == 0) + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "wme_ac_sta", (char *)acp, sizeof(edcf_acparam_t), FALSE); + else + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "wme_ac_ap", (char *)acp, sizeof(edcf_acparam_t), FALSE); + +} + +void +dhd_conf_set_wme(dhd_pub_t *dhd, int mode) +{ + edcf_acparam_t acparam_cur[AC_COUNT]; + + if (dhd && dhd->conf) { + if (!dhd->conf->force_wme_ac) { + CONFIG_TRACE(("%s: force_wme_ac is not enabled %d\n", + __FUNCTION__, dhd->conf->force_wme_ac)); + return; + } + + CONFIG_TRACE(("%s: Before change:\n", __FUNCTION__)); + dhd_conf_get_wme(dhd, mode, acparam_cur); + + dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_BK], AC_BK); + dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_BE], AC_BE); + dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_VI], AC_VI); + dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_VO], AC_VO); + + CONFIG_TRACE(("%s: After change:\n", __FUNCTION__)); + dhd_conf_get_wme(dhd, mode, acparam_cur); + } else { + CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__)); + } + + return; +} + +#ifdef PKT_FILTER_SUPPORT +void +dhd_conf_add_pkt_filter(dhd_pub_t *dhd) +{ + int i, j; + char str[12]; +#define MACS "%02x%02x%02x%02x%02x%02x" + + /* + * 1. Filter out all pkt: actually not to enable this since 4-way handshake will be filter out as well. + * 1) dhd_master_mode=0 + * 2) pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000 + * 2. Filter in less pkt: ARP(0x0806, ID is 105), BRCM(0x886C), 802.1X(0x888E) + * 1) dhd_master_mode=1 + * 2) pkt_filter_del=100, 102, 103, 104, 105 + * 3) pkt_filter_add=131 0 0 12 0xFFFF 0x886C, 132 0 0 12 0xFFFF 0x888E + * 3. magic pkt: magic_pkt_filter_add=141 0 1 12 + * 4. 
Filter out netbios pkt: + * Netbios: 121 0 0 12 0xFFFF000000000000000000FF000000000000000000000000FFFF 0x0800000000000000000000110000000000000000000000000089 + */ + for(i=0; iconf->pkt_filter_add.count; i++) { + dhd->pktfilter[i+dhd->pktfilter_count] = dhd->conf->pkt_filter_add.filter[i]; + printf("%s: %s\n", __FUNCTION__, dhd->pktfilter[i+dhd->pktfilter_count]); + } + dhd->pktfilter_count += i; + + for(i=0; iconf->magic_pkt_filter_add.count; i++) { + strcat(&dhd->conf->magic_pkt_filter_add.filter[i][0], " 0x"); + strcat(&dhd->conf->magic_pkt_filter_add.filter[i][0], "FFFFFFFFFFFF"); + for (j=0; j<16; j++) + strcat(&dhd->conf->magic_pkt_filter_add.filter[i][0], "FFFFFFFFFFFF"); + strcat(&dhd->conf->magic_pkt_filter_add.filter[i][0], " 0x"); + strcat(&dhd->conf->magic_pkt_filter_add.filter[i][0], "FFFFFFFFFFFF"); + sprintf(str, MACS, MAC2STRDBG(dhd->mac.octet)); + for (j=0; j<16; j++) + strcat(&dhd->conf->magic_pkt_filter_add.filter[i][0], str); + dhd->pktfilter[i+dhd->pktfilter_count] = dhd->conf->magic_pkt_filter_add.filter[i]; + dhd->pktfilter_count += 1; + } +} + +bool +dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id) +{ + int i; + + if (dhd && dhd->conf) { + for (i=0; iconf->pkt_filter_del.count; i++) { + if (id == dhd->conf->pkt_filter_del.id[i]) { + printf("%s: %d\n", __FUNCTION__, dhd->conf->pkt_filter_del.id[i]); + return true; + } + } + return false; + } + return false; +} + +void +dhd_conf_discard_pkt_filter(dhd_pub_t *dhd) +{ + dhd->pktfilter_count = 6; + dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = NULL; + dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; + dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "102 0 0 0 0xFFFFFF 0x01005E"; + dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = "103 0 0 0 0xFFFF 0x3333"; + dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL; + /* Do not enable ARP to pkt filter if dhd_master_mode is false.*/ + dhd->pktfilter[DHD_ARP_FILTER_NUM] = NULL; + + /* IPv4 broadcast address XXX.XXX.XXX.255 */ + 
dhd->pktfilter[dhd->pktfilter_count] = "110 0 0 12 0xFFFF00000000000000000000000000000000000000FF 0x080000000000000000000000000000000000000000FF"; + dhd->pktfilter_count++; + /* discard IPv4 multicast address 224.0.0.0/4 */ + dhd->pktfilter[dhd->pktfilter_count] = "111 0 0 12 0xFFFF00000000000000000000000000000000F0 0x080000000000000000000000000000000000E0"; + dhd->pktfilter_count++; + /* discard IPv6 multicast address FF00::/8 */ + dhd->pktfilter[dhd->pktfilter_count] = "112 0 0 12 0xFFFF000000000000000000000000000000000000000000000000FF 0x86DD000000000000000000000000000000000000000000000000FF"; + dhd->pktfilter_count++; + /* discard Netbios pkt */ + dhd->pktfilter[dhd->pktfilter_count] = "121 0 0 12 0xFFFF000000000000000000FF000000000000000000000000FFFF 0x0800000000000000000000110000000000000000000000000089"; + dhd->pktfilter_count++; + +} +#endif /* PKT_FILTER_SUPPORT */ + +int +dhd_conf_get_pm(dhd_pub_t *dhd) +{ + if (dhd && dhd->conf) + return dhd->conf->pm; + return -1; +} + +#ifdef PROP_TXSTATUS +int +dhd_conf_get_disable_proptx(dhd_pub_t *dhd) +{ + struct dhd_conf *conf = dhd->conf; + int disable_proptx = -1; + int fw_proptx = 0; + + /* check fw proptx priority: + * 1st: check fw support by wl cap + * 2nd: 4334/43340/43341/43241 support proptx but not show in wl cap, so enable it by default + * if you would like to disable it, please set disable_proptx=1 in config.txt + * 3th: disable when proptxstatus not support in wl cap + */ + if (FW_SUPPORTED(dhd, proptxstatus)) { + fw_proptx = 1; + } else if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID || + dhd->conf->chip == BCM43340_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + fw_proptx = 1; + } else { + fw_proptx = 0; + } + + /* returned disable_proptx value: + * -1: disable in STA and enable in P2P(follow original dhd settings when PROP_TXSTATUS_VSDB enabled) + * 0: depend on fw support + * 1: always disable proptx + */ + if (conf->disable_proptx == 0) { + // check fw support as well + if 
(fw_proptx) + disable_proptx = 0; + else + disable_proptx = 1; + } else if (conf->disable_proptx >= 1) { + disable_proptx = 1; + } else { + // check fw support as well + if (fw_proptx) + disable_proptx = -1; + else + disable_proptx = 1; + } + + printf("%s: fw_proptx=%d, disable_proptx=%d\n", __FUNCTION__, fw_proptx, disable_proptx); + + return disable_proptx; +} +#endif + +uint +pick_config_vars(char *varbuf, uint len, uint start_pos, char *pickbuf) +{ + bool findNewline, changenewline=FALSE, pick=FALSE; + int column; + uint n, pick_column=0; + + findNewline = FALSE; + column = 0; + + if (start_pos >= len) { + CONFIG_ERROR(("%s: wrong start pos\n", __FUNCTION__)); + return 0; + } + + for (n = start_pos; n < len; n++) { + if (varbuf[n] == '\r') + continue; + if ((findNewline || changenewline) && varbuf[n] != '\n') + continue; + findNewline = FALSE; + if (varbuf[n] == '#') { + findNewline = TRUE; + continue; + } + if (varbuf[n] == '\\') { + changenewline = TRUE; + continue; + } + if (!changenewline && varbuf[n] == '\n') { + if (column == 0) + continue; + column = 0; + continue; + } + if (changenewline && varbuf[n] == '\n') { + changenewline = FALSE; + continue; + } + + if (column==0 && !pick) { // start to pick + pick = TRUE; + column++; + pick_column = 0; + } else { + if (pick && column==0) { // stop to pick + pick = FALSE; + break; + } else + column++; + } + if (pick) { + if (varbuf[n] == 0x9) + continue; + if (pick_column>0 && pickbuf[pick_column-1]==' ' && varbuf[n]==' ') + continue; + pickbuf[pick_column] = varbuf[n]; + pick_column++; + } + } + + return n; // return current position +} + +bool +dhd_conf_read_log_level(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + char *data = full_param+len_param; + + if (!strncmp("dhd_msg_level=", full_param, len_param)) { + dhd_msg_level = (int)simple_strtol(data, NULL, 0); + printf("%s: dhd_msg_level = 0x%X\n", __FUNCTION__, dhd_msg_level); + } +#ifdef BCMSDIO + else if (!strncmp("sd_msglevel=", full_param, 
len_param)) { + sd_msglevel = (int)simple_strtol(data, NULL, 0); + printf("%s: sd_msglevel = 0x%X\n", __FUNCTION__, sd_msglevel); + } +#endif + else if (!strncmp("android_msg_level=", full_param, len_param)) { + android_msg_level = (int)simple_strtol(data, NULL, 0); + printf("%s: android_msg_level = 0x%X\n", __FUNCTION__, android_msg_level); + } + else if (!strncmp("config_msg_level=", full_param, len_param)) { + config_msg_level = (int)simple_strtol(data, NULL, 0); + printf("%s: config_msg_level = 0x%X\n", __FUNCTION__, config_msg_level); + } +#ifdef WL_CFG80211 + else if (!strncmp("wl_dbg_level=", full_param, len_param)) { + wl_dbg_level = (int)simple_strtol(data, NULL, 0); + printf("%s: wl_dbg_level = 0x%X\n", __FUNCTION__, wl_dbg_level); + } +#endif +#if defined(WL_WIRELESS_EXT) + else if (!strncmp("iw_msg_level=", full_param, len_param)) { + iw_msg_level = (int)simple_strtol(data, NULL, 0); + printf("%s: iw_msg_level = 0x%X\n", __FUNCTION__, iw_msg_level); + } +#endif +#if defined(DHD_DEBUG) + else if (!strncmp("dhd_console_ms=", full_param, len_param)) { + dhd_console_ms = (int)simple_strtol(data, NULL, 0); + printf("%s: dhd_console_ms = 0x%X\n", __FUNCTION__, dhd_console_ms); + } +#endif + else + return false; + + return true; +} + +void +dhd_conf_read_wme_ac_value(wme_param_t *wme, char *pick, int ac_val) +{ + char *pick_tmp, *pch; + + /* Process WMM parameters */ + pick_tmp = pick; + pch = bcmstrstr(pick_tmp, "aifsn "); + if (pch) { + wme->aifsn[ac_val] = (int)simple_strtol(pch+strlen("aifsn "), NULL, 0); + printf("%s: ac_val=%d, aifsn=%d\n", __FUNCTION__, ac_val, wme->aifsn[ac_val]); + } + pick_tmp = pick; + pch = bcmstrstr(pick_tmp, "ecwmin "); + if (pch) { + wme->ecwmin[ac_val] = (int)simple_strtol(pch+strlen("ecwmin "), NULL, 0); + printf("%s: ac_val=%d, ecwmin=%d\n", __FUNCTION__, ac_val, wme->ecwmin[ac_val]); + } + pick_tmp = pick; + pch = bcmstrstr(pick_tmp, "ecwmax "); + if (pch) { + wme->ecwmax[ac_val] = (int)simple_strtol(pch+strlen("ecwmax "), 
NULL, 0); + printf("%s: ac_val=%d, ecwmax=%d\n", __FUNCTION__, ac_val, wme->ecwmax[ac_val]); + } + pick_tmp = pick; + pch = bcmstrstr(pick_tmp, "txop "); + if (pch) { + wme->txop[ac_val] = (int)simple_strtol(pch+strlen("txop "), NULL, 0); + printf("%s: ac_val=%d, txop=0x%x\n", __FUNCTION__, ac_val, wme->txop[ac_val]); + } + +} + +bool +dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + // wme_ac_sta_be=aifsn 1 ecwmin 2 ecwmax 3 txop 0x5e + // wme_ac_sta_vo=aifsn 1 ecwmin 1 ecwmax 1 txop 0x5e + + /* Process WMM parameters */ + if (!strncmp("force_wme_ac=", full_param, len_param)) { + conf->force_wme_ac = (int)simple_strtol(data, NULL, 10); + printf("%s: force_wme_ac = %d\n", __FUNCTION__, conf->force_wme_ac); + } + else if (!strncmp("wme_ac_sta_be=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BE); + } + else if (!strncmp("wme_ac_sta_bk=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BK); + } + else if (!strncmp("wme_ac_sta_vi=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VI); + } + else if (!strncmp("wme_ac_sta_vo=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VO); + } + else if (!strncmp("wme_ac_ap_be=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BE); + } + else if (!strncmp("wme_ac_ap_bk=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BK); + } + else if (!strncmp("wme_ac_ap_vi=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VI); + } + else if (!strncmp("wme_ac_ap_vo=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VO); + } + else + return false; + + return true; +} + +bool +dhd_conf_read_fw_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + int i, j; + char *pch, 
*pick_tmp; + wl_mac_list_t *mac_list; + wl_mac_range_t *mac_range; + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + /* Process fw_by_mac: + * fw_by_mac=[fw_mac_num] \ + * [fw_name1] [mac_num1] [oui1-1] [nic_start1-1] [nic_end1-1] \ + * [oui1-1] [nic_start1-1] [nic_end1-1]... \ + * [oui1-n] [nic_start1-n] [nic_end1-n] \ + * [fw_name2] [mac_num2] [oui2-1] [nic_start2-1] [nic_end2-1] \ + * [oui2-1] [nic_start2-1] [nic_end2-1]... \ + * [oui2-n] [nic_start2-n] [nic_end2-n] \ + * Ex: fw_by_mac=2 \ + * fw_bcmdhd1.bin 2 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \ + * fw_bcmdhd2.bin 3 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \ + * 0x983B16 0x916157 0x916487 + */ + + if (!strncmp("fw_by_mac=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ", 0); + conf->fw_by_mac.count = (uint32)simple_strtol(pch, NULL, 0); + if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->fw_by_mac.count, GFP_KERNEL))) { + conf->fw_by_mac.count = 0; + CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + } + printf("%s: fw_count=%d\n", __FUNCTION__, conf->fw_by_mac.count); + conf->fw_by_mac.m_mac_list_head = mac_list; + for (i=0; ifw_by_mac.count; i++) { + pch = bcmstrtok(&pick_tmp, " ", 0); + strcpy(mac_list[i].name, pch); + pch = bcmstrtok(&pick_tmp, " ", 0); + mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0); + printf("%s: name=%s, mac_count=%d\n", __FUNCTION__, + mac_list[i].name, mac_list[i].count); + if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) { + mac_list[i].count = 0; + CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + break; + } + mac_list[i].mac = mac_range; + for (j=0; jconf; + char *data = full_param+len_param; + + /* Process nv_by_mac: + * [nv_by_mac]: The same format as fw_by_mac + */ + if (!strncmp("nv_by_mac=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ", 0); + conf->nv_by_mac.count = (uint32)simple_strtol(pch, NULL, 
0); + if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_mac.count, GFP_KERNEL))) { + conf->nv_by_mac.count = 0; + CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + } + printf("%s: nv_count=%d\n", __FUNCTION__, conf->nv_by_mac.count); + conf->nv_by_mac.m_mac_list_head = mac_list; + for (i=0; inv_by_mac.count; i++) { + pch = bcmstrtok(&pick_tmp, " ", 0); + strcpy(mac_list[i].name, pch); + pch = bcmstrtok(&pick_tmp, " ", 0); + mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0); + printf("%s: name=%s, mac_count=%d\n", __FUNCTION__, + mac_list[i].name, mac_list[i].count); + if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) { + mac_list[i].count = 0; + CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + break; + } + mac_list[i].mac = mac_range; + for (j=0; jconf; + char *data = full_param+len_param; + + /* Process nv_by_chip: + * nv_by_chip=[nv_chip_num] \ + * [chip1] [chiprev1] [nv_name1] [chip2] [chiprev2] [nv_name2] \ + * Ex: nv_by_chip=2 \ + * 43430 0 nvram_ap6212.txt 43430 1 nvram_ap6212a.txt \ + */ + if (!strncmp("nv_by_chip=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ", 0); + conf->nv_by_chip.count = (uint32)simple_strtol(pch, NULL, 0); + if (!(chip_nv_path = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_chip.count, GFP_KERNEL))) { + conf->nv_by_chip.count = 0; + CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + } + printf("%s: nv_by_chip_count=%d\n", __FUNCTION__, conf->nv_by_chip.count); + conf->nv_by_chip.m_chip_nv_path_head = chip_nv_path; + for (i=0; inv_by_chip.count; i++) { + pch = bcmstrtok(&pick_tmp, " ", 0); + chip_nv_path[i].chip = (uint32)simple_strtol(pch, NULL, 0); + pch = bcmstrtok(&pick_tmp, " ", 0); + chip_nv_path[i].chiprev = (uint32)simple_strtol(pch, NULL, 0); + pch = bcmstrtok(&pick_tmp, " ", 0); + strcpy(chip_nv_path[i].name, pch); + printf("%s: chip=0x%x, chiprev=%d, name=%s\n", __FUNCTION__, + chip_nv_path[i].chip, chip_nv_path[i].chiprev, 
chip_nv_path[i].name); + } + } + else + return false; + + return true; +} + +bool +dhd_conf_read_roam_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("roam_off=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->roam_off = 0; + else + conf->roam_off = 1; + printf("%s: roam_off = %d\n", __FUNCTION__, conf->roam_off); + } + else if (!strncmp("roam_off_suspend=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->roam_off_suspend = 0; + else + conf->roam_off_suspend = 1; + printf("%s: roam_off_suspend = %d\n", __FUNCTION__, conf->roam_off_suspend); + } + else if (!strncmp("roam_trigger=", full_param, len_param)) { + conf->roam_trigger[0] = (int)simple_strtol(data, NULL, 10); + printf("%s: roam_trigger = %d\n", __FUNCTION__, + conf->roam_trigger[0]); + } + else if (!strncmp("roam_scan_period=", full_param, len_param)) { + conf->roam_scan_period[0] = (int)simple_strtol(data, NULL, 10); + printf("%s: roam_scan_period = %d\n", __FUNCTION__, + conf->roam_scan_period[0]); + } + else if (!strncmp("roam_delta=", full_param, len_param)) { + conf->roam_delta[0] = (int)simple_strtol(data, NULL, 10); + printf("%s: roam_delta = %d\n", __FUNCTION__, conf->roam_delta[0]); + } + else if (!strncmp("fullroamperiod=", full_param, len_param)) { + conf->fullroamperiod = (int)simple_strtol(data, NULL, 10); + printf("%s: fullroamperiod = %d\n", __FUNCTION__, + conf->fullroamperiod); + } else + return false; + + return true; +} + +bool +dhd_conf_read_country_list(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + int i; + char *pch, *pick_tmp; + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + /* Process country_list: + * country_list=[country1]:[ccode1]/[regrev1], + * [country2]:[ccode2]/[regrev2] \ + * Ex: country_list=US:US/0, TW:TW/1 + */ + if (!strncmp("country_list=", full_param, len_param)) { + pick_tmp = data; + for (i=0; 
icountry_list.cspec[i].country_abbrev, pch); + pch = bcmstrtok(&pick_tmp, "/", 0); + if (!pch) + break; + memcpy(conf->country_list.cspec[i].ccode, pch, 2); + pch = bcmstrtok(&pick_tmp, ", ", 0); + if (!pch) + break; + conf->country_list.cspec[i].rev = (int32)simple_strtol(pch, NULL, 10); + conf->country_list.count ++; + CONFIG_TRACE(("%s: country_list abbrev=%s, ccode=%s, regrev=%d\n", __FUNCTION__, + conf->country_list.cspec[i].country_abbrev, + conf->country_list.cspec[i].ccode, + conf->country_list.cspec[i].rev)); + } + printf("%s: %d country in list\n", __FUNCTION__, conf->country_list.count); + } + else + return false; + + return true; +} + +#ifdef PKT_FILTER_SUPPORT +bool +dhd_conf_read_pkt_filter(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + char *pch, *pick_tmp; + int i; + + /* Process pkt filter: + * 1) pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000 + * 2) pkt_filter_del=100, 102, 103, 104, 105 + * 3) magic_pkt_filter_add=141 0 1 12 + */ + if (!strncmp("dhd_master_mode=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + dhd_master_mode = FALSE; + else + dhd_master_mode = TRUE; + printf("%s: dhd_master_mode = %d\n", __FUNCTION__, dhd_master_mode); + } + else if (!strncmp("pkt_filter_add=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, ",.-", 0); + i=0; + while (pch != NULL && ipkt_filter_add.filter[i][0], pch); + printf("%s: pkt_filter_add[%d][] = %s\n", __FUNCTION__, i, &conf->pkt_filter_add.filter[i][0]); + pch = bcmstrtok(&pick_tmp, ",.-", 0); + i++; + } + conf->pkt_filter_add.count = i; + } + else if (!strncmp("pkt_filter_del=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + i=0; + while (pch != NULL && ipkt_filter_del.id[i] = (uint32)simple_strtol(pch, NULL, 10); + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + i++; + } + conf->pkt_filter_del.count = i; + printf("%s: pkt_filter_del id = ", 
__FUNCTION__); + for (i=0; ipkt_filter_del.count; i++) + printf("%d ", conf->pkt_filter_del.id[i]); + printf("\n"); + } + else if (!strncmp("magic_pkt_filter_add=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, ",.-", 0); + i=0; + while (pch != NULL && imagic_pkt_filter_add.filter[i][0], pch); + printf("%s: magic_pkt_filter_add[%d][] = %s\n", __FUNCTION__, i, &conf->magic_pkt_filter_add.filter[i][0]); + pch = bcmstrtok(&pick_tmp, ",.-", 0); + i++; + } + conf->magic_pkt_filter_add.count = i; + } + else + return false; + + return true; +} +#endif + +#ifdef IAPSTA_PREINIT +/* + * iapsta_init=mode [sta|ap|apsta|dualap] vifname [wlan1] + * iapsta_config=ifname [wlan0|wlan1] ssid [xxx] chan [x] + hidden [y|n] maxassoc [x] + amode [open|shared|wpapsk|wpa2psk|wpawpa2psk] + emode [none|wep|tkip|aes|tkipaes] + key [xxxxx] + * iapsta_enable=ifname [wlan0|wlan1] +*/ +bool +dhd_conf_read_iapsta(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("iapsta_init=", full_param, len_param)) { + sprintf(conf->iapsta_init, "iapsta_init %s", data); + printf("%s: iapsta_init=%s\n", __FUNCTION__, conf->iapsta_init); + } + else if (!strncmp("iapsta_config=", full_param, len_param)) { + sprintf(conf->iapsta_config, "iapsta_config %s", data); + printf("%s: iapsta_config=%s\n", __FUNCTION__, conf->iapsta_config); + } + else if (!strncmp("iapsta_enable=", full_param, len_param)) { + sprintf(conf->iapsta_enable, "iapsta_enable %s", data); + printf("%s: iapsta_enable=%s\n", __FUNCTION__, conf->iapsta_enable); + } + else + return false; + + return true; +} +#endif + +#ifdef IDHCPC +bool +dhd_conf_read_dhcp_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("dhcpc_enable=", full_param, len_param)) { + conf->dhcpc_enable = (int)simple_strtol(data, NULL, 10); + printf("%s: 
dhcpc_enable = %d\n", __FUNCTION__, conf->dhcpc_enable); + } + else + return false; + + return true; +} +#endif + +#ifdef BCMSDIO +bool +dhd_conf_read_sdio_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("dhd_doflow=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + dhd_doflow = FALSE; + else + dhd_doflow = TRUE; + printf("%s: dhd_doflow = %d\n", __FUNCTION__, dhd_doflow); + } + else if (!strncmp("dhd_slpauto=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + dhd_slpauto = FALSE; + else + dhd_slpauto = TRUE; + printf("%s: dhd_slpauto = %d\n", __FUNCTION__, dhd_slpauto); + } + else if (!strncmp("kso_enable=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + dhd_slpauto = FALSE; + else + dhd_slpauto = TRUE; + printf("%s: dhd_slpauto = %d\n", __FUNCTION__, dhd_slpauto); + } + else if (!strncmp("bus:txglom=", full_param, len_param)) { + conf->bus_txglom = (int)simple_strtol(data, NULL, 10); + printf("%s: bus:txglom = %d\n", __FUNCTION__, conf->bus_txglom); + } + else if (!strncmp("use_rxchain=", full_param, len_param)) { + conf->use_rxchain = (int)simple_strtol(data, NULL, 10); + printf("%s: use_rxchain = %d\n", __FUNCTION__, conf->use_rxchain); + } + else if (!strncmp("dhd_txminmax=", full_param, len_param)) { + conf->dhd_txminmax = (uint)simple_strtol(data, NULL, 10); + printf("%s: dhd_txminmax = %d\n", __FUNCTION__, conf->dhd_txminmax); + } + else if (!strncmp("txinrx_thres=", full_param, len_param)) { + conf->txinrx_thres = (int)simple_strtol(data, NULL, 10); + printf("%s: txinrx_thres = %d\n", __FUNCTION__, conf->txinrx_thres); + } + else if (!strncmp("sd_f2_blocksize=", full_param, len_param)) { + conf->sd_f2_blocksize = (int)simple_strtol(data, NULL, 10); + printf("%s: sd_f2_blocksize = %d\n", __FUNCTION__, conf->sd_f2_blocksize); + } + else if (!strncmp("oob_enabled_later=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + 
conf->oob_enabled_later = FALSE; + else + conf->oob_enabled_later = TRUE; + printf("%s: oob_enabled_later = %d\n", __FUNCTION__, conf->oob_enabled_later); + } +#if defined(BCMSDIOH_TXGLOM) + else if (!strncmp("txglomsize=", full_param, len_param)) { + conf->txglomsize = (uint)simple_strtol(data, NULL, 10); + if (conf->txglomsize > SDPCM_MAXGLOM_SIZE) + conf->txglomsize = SDPCM_MAXGLOM_SIZE; + printf("%s: txglomsize = %d\n", __FUNCTION__, conf->txglomsize); + } + else if (!strncmp("swtxglom=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->swtxglom = FALSE; + else + conf->swtxglom = TRUE; + printf("%s: swtxglom = %d\n", __FUNCTION__, conf->swtxglom); + } + else if (!strncmp("txglom_ext=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->txglom_ext = FALSE; + else + conf->txglom_ext = TRUE; + printf("%s: txglom_ext = %d\n", __FUNCTION__, conf->txglom_ext); + if (conf->txglom_ext) { + if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID)) + conf->txglom_bucket_size = 1680; + else if (conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || + conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) + conf->txglom_bucket_size = 1684; + } + printf("%s: txglom_bucket_size = %d\n", __FUNCTION__, conf->txglom_bucket_size); + } + else if (!strncmp("bus:rxglom=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->bus_rxglom = FALSE; + else + conf->bus_rxglom = TRUE; + printf("%s: bus:rxglom = %d\n", __FUNCTION__, conf->bus_rxglom); + } + else if (!strncmp("dhd_poll=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->dhd_poll = 0; + else + conf->dhd_poll = 1; + printf("%s: dhd_poll = %d\n", __FUNCTION__, conf->dhd_poll); + } + else if (!strncmp("deferred_tx_len=", full_param, len_param)) { + conf->deferred_tx_len = (int)simple_strtol(data, NULL, 10); + printf("%s: deferred_tx_len = %d\n", __FUNCTION__, conf->deferred_tx_len); + } + else if (!strncmp("txctl_tmo_fix=", full_param, 
len_param)) { + if (!strncmp(data, "0", 1)) + conf->txctl_tmo_fix = FALSE; + else + conf->txctl_tmo_fix = TRUE; + printf("%s: txctl_tmo_fix = %d\n", __FUNCTION__, conf->txctl_tmo_fix); + } + else if (!strncmp("tx_in_rx=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->tx_in_rx = FALSE; + else + conf->tx_in_rx = TRUE; + printf("%s: tx_in_rx = %d\n", __FUNCTION__, conf->tx_in_rx); + } + else if (!strncmp("tx_max_offset=", full_param, len_param)) { + conf->tx_max_offset = (int)simple_strtol(data, NULL, 10); + printf("%s: tx_max_offset = %d\n", __FUNCTION__, conf->tx_max_offset); + } + else if (!strncmp("txglom_mode=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->txglom_mode = FALSE; + else + conf->txglom_mode = TRUE; + printf("%s: txglom_mode = %d\n", __FUNCTION__, conf->txglom_mode); + } +#endif + else + return false; + + return true; +} +#endif + +bool +dhd_conf_read_pm_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("lpc=", full_param, len_param)) { + conf->lpc = (int)simple_strtol(data, NULL, 10); + printf("%s: lpc = %d\n", __FUNCTION__, conf->lpc); + } + else if (!strncmp("deepsleep=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + conf->deepsleep = TRUE; + else + conf->deepsleep = FALSE; + printf("%s: deepsleep = %d\n", __FUNCTION__, conf->deepsleep); + } + else if (!strncmp("PM=", full_param, len_param)) { + conf->pm = (int)simple_strtol(data, NULL, 10); + printf("%s: PM = %d\n", __FUNCTION__, conf->pm); + } + else if (!strncmp("pm_in_suspend=", full_param, len_param)) { + conf->pm_in_suspend = (int)simple_strtol(data, NULL, 10); + printf("%s: pm_in_suspend = %d\n", __FUNCTION__, conf->pm_in_suspend); + } + else if (!strncmp("pm2_sleep_ret=", full_param, len_param)) { + conf->pm2_sleep_ret = (int)simple_strtol(data, NULL, 10); + printf("%s: pm2_sleep_ret = %d\n", __FUNCTION__, conf->pm2_sleep_ret); + } + else if 
(!strncmp("xmit_in_suspend=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + conf->xmit_in_suspend = TRUE; + else + conf->xmit_in_suspend = FALSE; + printf("%s: xmit_in_suspend = %d\n", __FUNCTION__, conf->xmit_in_suspend); + } + else + return false; + + return true; +} + +bool +dhd_conf_read_others(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + uint len_data = strlen(data); + char *pch, *pick_tmp; + int i; + + if (!strncmp("band=", full_param, len_param)) { + /* Process band: + * band=a for 5GHz only and band=b for 2.4GHz only + */ + if (!strcmp(data, "b")) + conf->band = WLC_BAND_2G; + else if (!strcmp(data, "a")) + conf->band = WLC_BAND_5G; + else + conf->band = WLC_BAND_AUTO; + printf("%s: band = %d\n", __FUNCTION__, conf->band); + } + else if (!strncmp("mimo_bw_cap=", full_param, len_param)) { + conf->mimo_bw_cap = (uint)simple_strtol(data, NULL, 10); + printf("%s: mimo_bw_cap = %d\n", __FUNCTION__, conf->mimo_bw_cap); + } + else if (!strncmp("bw_cap_2g=", full_param, len_param)) { + conf->bw_cap_2g = (uint)simple_strtol(data, NULL, 0); + printf("%s: bw_cap_2g = %d\n", __FUNCTION__, conf->bw_cap_2g); + } + else if (!strncmp("bw_cap_5g=", full_param, len_param)) { + conf->bw_cap_5g = (uint)simple_strtol(data, NULL, 0); + printf("%s: bw_cap_2g = %d\n", __FUNCTION__, conf->bw_cap_5g); + } + else if (!strncmp("ccode=", full_param, len_param)) { + memset(&conf->cspec, 0, sizeof(wl_country_t)); + memcpy(conf->cspec.country_abbrev, data, len_data); + memcpy(conf->cspec.ccode, data, len_data); + printf("%s: ccode = %s\n", __FUNCTION__, conf->cspec.ccode); + } + else if (!strncmp("regrev=", full_param, len_param)) { + conf->cspec.rev = (int32)simple_strtol(data, NULL, 10); + printf("%s: regrev = %d\n", __FUNCTION__, conf->cspec.rev); + } + else if (!strncmp("channels=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + i=0; + while (pch != 
NULL && ichannels.channel[i] = (uint32)simple_strtol(pch, NULL, 10); + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + i++; + } + conf->channels.count = i; + printf("%s: channels = ", __FUNCTION__); + for (i=0; ichannels.count; i++) + printf("%d ", conf->channels.channel[i]); + printf("\n"); + } + else if (!strncmp("keep_alive_period=", full_param, len_param)) { + conf->keep_alive_period = (uint)simple_strtol(data, NULL, 10); + printf("%s: keep_alive_period = %d\n", __FUNCTION__, + conf->keep_alive_period); + } + else if (!strncmp("stbc=", full_param, len_param)) { + conf->stbc = (int)simple_strtol(data, NULL, 10); + printf("%s: stbc = %d\n", __FUNCTION__, conf->stbc); + } + else if (!strncmp("phy_oclscdenable=", full_param, len_param)) { + conf->phy_oclscdenable = (int)simple_strtol(data, NULL, 10); + printf("%s: phy_oclscdenable = %d\n", __FUNCTION__, conf->phy_oclscdenable); + } + else if (!strncmp("srl=", full_param, len_param)) { + conf->srl = (int)simple_strtol(data, NULL, 10); + printf("%s: srl = %d\n", __FUNCTION__, conf->srl); + } + else if (!strncmp("lrl=", full_param, len_param)) { + conf->lrl = (int)simple_strtol(data, NULL, 10); + printf("%s: lrl = %d\n", __FUNCTION__, conf->lrl); + } + else if (!strncmp("bcn_timeout=", full_param, len_param)) { + conf->bcn_timeout= (uint)simple_strtol(data, NULL, 10); + printf("%s: bcn_timeout = %d\n", __FUNCTION__, conf->bcn_timeout); + } + else if (!strncmp("ampdu_ba_wsize=", full_param, len_param)) { + conf->ampdu_ba_wsize = (int)simple_strtol(data, NULL, 10); + printf("%s: ampdu_ba_wsize = %d\n", __FUNCTION__, conf->ampdu_ba_wsize); + } + else if (!strncmp("ampdu_hostreorder=", full_param, len_param)) { + conf->ampdu_hostreorder = (int)simple_strtol(data, NULL, 10); + printf("%s: ampdu_hostreorder = %d\n", __FUNCTION__, conf->ampdu_hostreorder); + } + else if (!strncmp("spect=", full_param, len_param)) { + conf->spect = (int)simple_strtol(data, NULL, 10); + printf("%s: spect = %d\n", __FUNCTION__, conf->spect); + } + 
else if (!strncmp("txbf=", full_param, len_param)) { + conf->txbf = (int)simple_strtol(data, NULL, 10); + printf("%s: txbf = %d\n", __FUNCTION__, conf->txbf); + } + else if (!strncmp("frameburst=", full_param, len_param)) { + conf->frameburst = (int)simple_strtol(data, NULL, 10); + printf("%s: frameburst = %d\n", __FUNCTION__, conf->frameburst); + } + else if (!strncmp("disable_proptx=", full_param, len_param)) { + conf->disable_proptx = (int)simple_strtol(data, NULL, 10); + printf("%s: disable_proptx = %d\n", __FUNCTION__, conf->disable_proptx); + } +#ifdef DHDTCPACK_SUPPRESS + else if (!strncmp("tcpack_sup_mode=", full_param, len_param)) { + conf->tcpack_sup_mode = (uint)simple_strtol(data, NULL, 10); + printf("%s: tcpack_sup_mode = %d\n", __FUNCTION__, conf->tcpack_sup_mode); + } +#endif + else if (!strncmp("pktprio8021x=", full_param, len_param)) { + conf->pktprio8021x = (int)simple_strtol(data, NULL, 10); + printf("%s: pktprio8021x = %d\n", __FUNCTION__, conf->pktprio8021x); + } + else if (!strncmp("dhd_txbound=", full_param, len_param)) { + dhd_txbound = (uint)simple_strtol(data, NULL, 10); + printf("%s: dhd_txbound = %d\n", __FUNCTION__, dhd_txbound); + } + else if (!strncmp("dhd_rxbound=", full_param, len_param)) { + dhd_rxbound = (uint)simple_strtol(data, NULL, 10); + printf("%s: dhd_rxbound = %d\n", __FUNCTION__, dhd_rxbound); + } + else if (!strncmp("rsdb_mode=", full_param, len_param)) { + conf->rsdb_mode = (int)simple_strtol(data, NULL, 10); + printf("%s: rsdb_mode = %d\n", __FUNCTION__, conf->rsdb_mode); + } + else if (!strncmp("vhtmode=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->vhtmode = 0; + else + conf->vhtmode = 1; + printf("%s: vhtmode = %d\n", __FUNCTION__, conf->vhtmode); + } + else if (!strncmp("num_different_channels=", full_param, len_param)) { + conf->num_different_channels = (int)simple_strtol(data, NULL, 10); + printf("%s: num_different_channels = %d\n", __FUNCTION__, conf->num_different_channels); + } + else if 
(!strncmp("autocountry=", full_param, len_param)) { + conf->autocountry = (int)simple_strtol(data, NULL, 10); + printf("%s: autocountry = %d\n", __FUNCTION__, conf->autocountry); + } + else if (!strncmp("tsq=", full_param, len_param)) { + conf->tsq = (int)simple_strtol(data, NULL, 10); + printf("%s: tsq = %d\n", __FUNCTION__, conf->tsq); + } + else + return false; + + return true; +} + +int +dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path) +{ + int bcmerror = -1; + uint len, start_pos=0; + void * image = NULL; + char * memblock = NULL; + char *bufp, *pick = NULL, *pch; + bool conf_file_exists; + uint len_param; + + conf_file_exists = ((conf_path != NULL) && (conf_path[0] != '\0')); + if (!conf_file_exists) { + printf("%s: config path %s\n", __FUNCTION__, conf_path); + return (0); + } + + if (conf_file_exists) { + image = dhd_os_open_image(conf_path); + if (image == NULL) { + printf("%s: Ignore config file %s\n", __FUNCTION__, conf_path); + goto err; + } + } + + memblock = MALLOC(dhd->osh, MAXSZ_CONFIG); + if (memblock == NULL) { + CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, MAXSZ_CONFIG)); + goto err; + } + + pick = MALLOC(dhd->osh, MAXSZ_BUF); + if (!pick) { + CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, MAXSZ_BUF)); + goto err; + } + + /* Read variables */ + if (conf_file_exists) { + len = dhd_os_get_image_block(memblock, MAXSZ_CONFIG, image); + } + if (len > 0 && len < MAXSZ_CONFIG) { + bufp = (char *)memblock; + bufp[len] = 0; + + while (start_pos < len) { + memset(pick, 0, MAXSZ_BUF); + start_pos = pick_config_vars(bufp, len, start_pos, pick); + pch = strchr(pick, '='); + if (pch != NULL) { + len_param = pch-pick+1; + if (len_param == strlen(pick)) { + CONFIG_ERROR(("%s: not a right parameter %s\n", __FUNCTION__, pick)); + continue; + } + } else { + CONFIG_ERROR(("%s: not a right parameter %s\n", __FUNCTION__, pick)); + continue; + } + + if (dhd_conf_read_log_level(dhd, pick, len_param)) + 
continue; + else if (dhd_conf_read_roam_params(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_wme_ac_params(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_fw_by_mac(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_nv_by_mac(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_nv_by_chip(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_country_list(dhd, pick, len_param)) + continue; +#ifdef PKT_FILTER_SUPPORT + else if (dhd_conf_read_pkt_filter(dhd, pick, len_param)) + continue; +#endif /* PKT_FILTER_SUPPORT */ +#ifdef IAPSTA_PREINIT + else if (dhd_conf_read_iapsta(dhd, pick, len_param)) + continue; +#endif /* IAPSTA_PREINIT */ +#ifdef IDHCPC + else if (dhd_conf_read_dhcp_params(dhd, pick, len_param)) + continue; +#endif /* IDHCPC */ +#ifdef BCMSDIO + else if (dhd_conf_read_sdio_params(dhd, pick, len_param)) + continue; +#endif /* BCMSDIO */ + else if (dhd_conf_read_pm_params(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_others(dhd, pick, len_param)) + continue; + else + continue; + } + + bcmerror = 0; + } else { + CONFIG_ERROR(("%s: error reading config file: %d\n", __FUNCTION__, len)); + bcmerror = BCME_SDIO_ERROR; + } + +err: + if (pick) + MFREE(dhd->osh, pick, MAXSZ_BUF); + + if (memblock) + MFREE(dhd->osh, memblock, MAXSZ_CONFIG); + + if (image) + dhd_os_close_image(image); + + return bcmerror; +} + +int +dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev) +{ + printf("%s: chip=0x%x, chiprev=%d\n", __FUNCTION__, chip, chiprev); + dhd->conf->chip = chip; + dhd->conf->chiprev = chiprev; + return 0; +} + +uint +dhd_conf_get_chip(void *context) +{ + dhd_pub_t *dhd = context; + + if (dhd && dhd->conf) + return dhd->conf->chip; + return 0; +} + +uint +dhd_conf_get_chiprev(void *context) +{ + dhd_pub_t *dhd = context; + + if (dhd && dhd->conf) + return dhd->conf->chiprev; + return 0; +} + +#ifdef BCMSDIO +void +dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable) +{ + struct dhd_conf 
*conf = dhd->conf; + + if (enable) { +#if defined(SWTXGLOM) + if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || + conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || + conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + // 43362/4330/4334/43340/43341/43241 must use 1.88.45.x swtxglom if txglom_ext is true, since 1.201.59 not support swtxglom + conf->swtxglom = TRUE; + conf->txglom_ext = TRUE; + } + if (conf->chip == BCM43362_CHIP_ID && conf->bus_txglom == 0) { + conf->bus_txglom = 1; // improve tcp tx tput. and cpu idle for 43362 only + } +#elif defined(BCMSDIOH_TXGLOM_EXT) + if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || + conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || + conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + conf->txglom_mode = SDPCM_TXGLOM_CPY; + } +#endif + // other parameters set in preinit or config.txt + } else { + // clear txglom parameters, but don't change swtxglom since it's possible enabled in config.txt + conf->txglom_ext = FALSE; + conf->txglom_bucket_size = 0; + conf->txglomsize = 0; + conf->deferred_tx_len = 0; + } + printf("%s: swtxglom=%d, txglom_ext=%d, txglom_bucket_size=%d\n", __FUNCTION__, + conf->swtxglom, conf->txglom_ext, conf->txglom_bucket_size); + printf("%s: txglomsize=%d, deferred_tx_len=%d, bus_txglom=%d\n", __FUNCTION__, + conf->txglomsize, conf->deferred_tx_len, conf->bus_txglom); + printf("%s: tx_in_rx=%d, txinrx_thres=%d, dhd_txminmax=%d\n", __FUNCTION__, + conf->tx_in_rx, conf->txinrx_thres, conf->dhd_txminmax); + printf("%s: tx_max_offset=%d, txctl_tmo_fix=%d\n", __FUNCTION__, + conf->tx_max_offset, conf->txctl_tmo_fix); + +} +#endif + +int +dhd_conf_preinit(dhd_pub_t *dhd) +{ + struct dhd_conf *conf = dhd->conf; + + CONFIG_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BCMSDIO + dhd_conf_free_mac_list(&conf->fw_by_mac); + dhd_conf_free_mac_list(&conf->nv_by_mac); + 
dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip); +#endif + memset(&conf->country_list, 0, sizeof(conf_country_list_t)); + conf->band = WLC_BAND_AUTO; + conf->mimo_bw_cap = -1; + conf->bw_cap_2g = -1; + conf->bw_cap_5g = -1; + if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) { + strcpy(conf->cspec.country_abbrev, "ALL"); + strcpy(conf->cspec.ccode, "ALL"); + conf->cspec.rev = 0; + } else if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID || + conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID || + conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID || + conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID) { + strcpy(conf->cspec.country_abbrev, "CN"); + strcpy(conf->cspec.ccode, "CN"); + conf->cspec.rev = 38; + } else { + strcpy(conf->cspec.country_abbrev, "CN"); + strcpy(conf->cspec.ccode, "CN"); + conf->cspec.rev = 0; + } + memset(&conf->channels, 0, sizeof(wl_channel_list_t)); + conf->roam_off = 1; + conf->roam_off_suspend = 1; +#ifdef CUSTOM_ROAM_TRIGGER_SETTING + conf->roam_trigger[0] = CUSTOM_ROAM_TRIGGER_SETTING; +#else + conf->roam_trigger[0] = -65; +#endif + conf->roam_trigger[1] = WLC_BAND_ALL; + conf->roam_scan_period[0] = 10; + conf->roam_scan_period[1] = WLC_BAND_ALL; +#ifdef CUSTOM_ROAM_DELTA_SETTING + conf->roam_delta[0] = CUSTOM_ROAM_DELTA_SETTING; +#else + conf->roam_delta[0] = 15; +#endif + conf->roam_delta[1] = WLC_BAND_ALL; +#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC + conf->fullroamperiod = 60; +#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ + conf->fullroamperiod = 120; +#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ +#ifdef CUSTOM_KEEP_ALIVE_SETTING + conf->keep_alive_period = CUSTOM_KEEP_ALIVE_SETTING; +#else + conf->keep_alive_period = 28000; +#endif + conf->force_wme_ac = 0; + memset(&conf->wme_sta, 0, sizeof(wme_param_t)); + memset(&conf->wme_ap, 0, sizeof(wme_param_t)); + conf->stbc = -1; + conf->phy_oclscdenable = -1; +#ifdef PKT_FILTER_SUPPORT + 
memset(&conf->pkt_filter_add, 0, sizeof(conf_pkt_filter_add_t)); + memset(&conf->pkt_filter_del, 0, sizeof(conf_pkt_filter_del_t)); + memset(&conf->magic_pkt_filter_add, 0, sizeof(conf_pkt_filter_del_t)); +#endif + conf->srl = -1; + conf->lrl = -1; + conf->bcn_timeout = 15; + conf->spect = -1; + conf->txbf = -1; + conf->lpc = -1; + conf->disable_proptx = -1; +#ifdef BCMSDIO + conf->bus_txglom = -1; + conf->use_rxchain = 0; + conf->bus_rxglom = TRUE; + conf->txglom_ext = FALSE; + conf->tx_max_offset = 0; + conf->txglomsize = SDPCM_DEFGLOM_SIZE; + conf->dhd_poll = -1; + conf->txctl_tmo_fix = FALSE; + conf->tx_in_rx = TRUE; + conf->txglom_mode = SDPCM_TXGLOM_MDESC; + conf->deferred_tx_len = 0; + conf->dhd_txminmax = 1; + conf->txinrx_thres = -1; + conf->sd_f2_blocksize = 0; + conf->oob_enabled_later = FALSE; +#endif + conf->ampdu_ba_wsize = 0; + conf->ampdu_hostreorder = -1; + conf->dpc_cpucore = -1; + conf->rxf_cpucore = -1; + conf->frameburst = -1; + conf->deepsleep = FALSE; + conf->pm = -1; + conf->pm_in_suspend = -1; + conf->pm2_sleep_ret = -1; + conf->num_different_channels = -1; + conf->xmit_in_suspend = TRUE; +#ifdef IDHCPC + conf->dhcpc_enable = -1; +#endif + conf->tsq = 0; +#ifdef DHDTCPACK_SUPPRESS + conf->tcpack_sup_mode = TCPACK_SUP_OFF; +#endif + conf->pktprio8021x = -1; + conf->rsdb_mode = -2; + conf->vhtmode = -1; + conf->autocountry = -1; +#ifdef IAPSTA_PREINIT + memset(conf->iapsta_init, 0, sizeof(conf->iapsta_init)); + memset(conf->iapsta_config, 0, sizeof(conf->iapsta_config)); + memset(conf->iapsta_enable, 0, sizeof(conf->iapsta_enable)); +#endif +#ifdef BCMSDIO + if (conf->chip == BCM43430_CHIP_ID || conf->chip == BCM4345_CHIP_ID) { + conf->txctl_tmo_fix = 1; + } +#endif + if (conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID || + conf->chip == BCM4371_CHIP_ID || conf->chip == BCM43569_CHIP_ID || + conf->chip == BCM4359_CHIP_ID) { +#ifdef DHDTCPACK_SUPPRESS + conf->tcpack_sup_mode = TCPACK_SUP_REPLACE; +#endif + dhd_rxbound = 64; + 
dhd_txbound = 64; + conf->txbf = 1; + conf->frameburst = 1; +#ifdef BCMSDIO + conf->dhd_txminmax = -1; + conf->txinrx_thres = 128; + conf->sd_f2_blocksize = 256; + conf->oob_enabled_later = TRUE; +#endif + } + +#ifdef BCMSDIO +#if defined(SWTXGLOM) + if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || + conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || + conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + conf->swtxglom = FALSE; // disabled by default + conf->txglom_ext = TRUE; // enabled by default + conf->use_rxchain = 0; // use_rxchain have been disabled if swtxglom enabled + conf->txglomsize = 16; + } else { + conf->swtxglom = FALSE; // use 1.201.59.x txglom by default + conf->txglom_ext = FALSE; + } + + if (conf->chip == BCM43362_CHIP_ID) { + conf->txglom_bucket_size = 1680; // fixed value, don't change + conf->tx_in_rx = FALSE; + conf->tx_max_offset = 1; + } + if (conf->chip == BCM4330_CHIP_ID) { + conf->txglom_bucket_size = 1680; // fixed value, don't change + conf->tx_in_rx = FALSE; + conf->tx_max_offset = 0; + } + if (conf->chip == BCM4334_CHIP_ID) { + conf->txglom_bucket_size = 1684; // fixed value, don't change + conf->tx_in_rx = TRUE; // improve tcp tx tput. and cpu idle + conf->tx_max_offset = 0; // reduce udp tx: dhdsdio_readframes: got unlikely tx max 109 with tx_seq 110 + } + if (conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID) { + conf->txglom_bucket_size = 1684; // fixed value, don't change + conf->tx_in_rx = TRUE; // improve tcp tx tput. and cpu idle + conf->tx_max_offset = 1; + } + if (conf->chip == BCM4324_CHIP_ID) { + conf->txglom_bucket_size = 1684; // fixed value, don't change + conf->tx_in_rx = TRUE; // improve tcp tx tput. 
and cpu idle + conf->tx_max_offset = 0; + } +#endif +#if defined(BCMSDIOH_TXGLOM_EXT) + if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || + conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || + conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + conf->txglom_ext = TRUE; + conf->use_rxchain = 0; + conf->tx_in_rx = TRUE; + conf->tx_max_offset = 1; + } else { + conf->txglom_ext = FALSE; + } + if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) { + conf->txglom_bucket_size = 1680; // fixed value, don't change + conf->txglomsize = 6; + } + if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID || + conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + conf->txglom_bucket_size = 1684; // fixed value, don't change + conf->txglomsize = 16; + } +#endif + if (conf->txglomsize > SDPCM_MAXGLOM_SIZE) + conf->txglomsize = SDPCM_MAXGLOM_SIZE; + conf->deferred_tx_len = 0; +#endif + + return 0; +} + +int +dhd_conf_reset(dhd_pub_t *dhd) +{ +#ifdef BCMSDIO + dhd_conf_free_mac_list(&dhd->conf->fw_by_mac); + dhd_conf_free_mac_list(&dhd->conf->nv_by_mac); + dhd_conf_free_chip_nv_path_list(&dhd->conf->nv_by_chip); +#endif + memset(dhd->conf, 0, sizeof(dhd_conf_t)); + return 0; +} + +int +dhd_conf_attach(dhd_pub_t *dhd) +{ + dhd_conf_t *conf; + + CONFIG_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhd->conf != NULL) { + printf("%s: config is attached before!\n", __FUNCTION__); + return 0; + } + /* Allocate private bus interface state */ + if (!(conf = MALLOC(dhd->osh, sizeof(dhd_conf_t)))) { + CONFIG_ERROR(("%s: MALLOC failed\n", __FUNCTION__)); + goto fail; + } + memset(conf, 0, sizeof(dhd_conf_t)); + + dhd->conf = conf; + + return 0; + +fail: + if (conf != NULL) + MFREE(dhd->osh, conf, sizeof(dhd_conf_t)); + return BCME_NOMEM; +} + +void +dhd_conf_detach(dhd_pub_t *dhd) +{ + CONFIG_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhd->conf) { +#ifdef BCMSDIO + 
dhd_conf_free_mac_list(&dhd->conf->fw_by_mac); + dhd_conf_free_mac_list(&dhd->conf->nv_by_mac); + dhd_conf_free_chip_nv_path_list(&dhd->conf->nv_by_chip); +#endif + MFREE(dhd->osh, dhd->conf, sizeof(dhd_conf_t)); + } + dhd->conf = NULL; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_config.h b/drivers/amlogic/wifi/bcmdhd/dhd_config.h similarity index 73% rename from drivers/net/wireless/bcmdhd/dhd_config.h rename to drivers/amlogic/wifi/bcmdhd/dhd_config.h index 5437016d06ccd..750a2830fb555 100644 --- a/drivers/net/wireless/bcmdhd/dhd_config.h +++ b/drivers/amlogic/wifi/bcmdhd/dhd_config.h @@ -11,11 +11,10 @@ #define FW_PATH_AUTO_SELECT 1 //#define CONFIG_PATH_AUTO_SELECT extern char firmware_path[MOD_PARAM_PATHLEN]; -extern int disable_proptx; extern uint dhd_rxbound; -extern uint dhd_txbound; -#define TXGLOM_RECV_OFFSET 8 +extern uint dhd_txbound; #ifdef BCMSDIO +#define TXGLOM_RECV_OFFSET 8 extern uint dhd_doflow; extern uint dhd_slpauto; @@ -23,6 +22,8 @@ extern uint dhd_slpauto; #define BCM43362A2_CHIP_REV 1 #define BCM43430A0_CHIP_REV 0 #define BCM43430A1_CHIP_REV 1 +#define BCM43430A2_CHIP_REV 2 +#define BCM43012B0_CHIP_REV 1 #define BCM4330B2_CHIP_REV 4 #define BCM4334B1_CHIP_REV 3 #define BCM43341B0_CHIP_REV 2 @@ -30,10 +31,13 @@ extern uint dhd_slpauto; #define BCM4335A0_CHIP_REV 2 #define BCM4339A0_CHIP_REV 1 #define BCM43455C0_CHIP_REV 6 +#define BCM43455C5_CHIP_REV 9 #define BCM4354A1_CHIP_REV 1 #define BCM4359B1_CHIP_REV 5 +#define BCM4359C0_CHIP_REV 9 #endif #define BCM4356A2_CHIP_REV 2 +#define BCM4358A3_CHIP_REV 3 /* mac range */ typedef struct wl_mac_range { @@ -78,8 +82,9 @@ typedef struct wl_channel_list { typedef struct wmes_param { int aifsn[AC_COUNT]; - int cwmin[AC_COUNT]; - int cwmax[AC_COUNT]; + int ecwmin[AC_COUNT]; + int ecwmax[AC_COUNT]; + int txop[AC_COUNT]; } wme_param_t; #ifdef PKT_FILTER_SUPPORT @@ -118,6 +123,8 @@ typedef struct dhd_conf { conf_country_list_t country_list; /* Country list */ int band; /* Band, b:2.4G only, 
otherwise for auto */ int mimo_bw_cap; /* Bandwidth, 0:HT20ALL, 1: HT40ALL, 2:HT20IN2G_HT40PIN5G */ + int bw_cap_2g; /* Bandwidth, 1:20MHz, 3: 20/40MHz, 7:20/40/80MHz */ + int bw_cap_5g; /* Bandwidth, 1:20MHz, 3: 20/40MHz, 7:20/40/80MHz */ wl_country_t cspec; /* Country */ wl_channel_list_t channels; /* Support channels */ uint roam_off; /* Roaming, 0:enable, 1:disable */ @@ -128,48 +135,80 @@ typedef struct dhd_conf { int fullroamperiod; /* Full Roaming period */ uint keep_alive_period; /* The perioid in ms to send keep alive packet */ int force_wme_ac; - wme_param_t wme; /* WME parameters */ + wme_param_t wme_sta; /* WME parameters */ + wme_param_t wme_ap; /* WME parameters */ int stbc; /* STBC for Tx/Rx */ int phy_oclscdenable; /* phy_oclscdenable */ #ifdef PKT_FILTER_SUPPORT conf_pkt_filter_add_t pkt_filter_add; /* Packet filter add */ conf_pkt_filter_del_t pkt_filter_del; /* Packet filter add */ - bool pkt_filter_magic; + conf_pkt_filter_add_t magic_pkt_filter_add; /* Magic Packet filter add */ #endif int srl; /* short retry limit */ int lrl; /* long retry limit */ uint bcn_timeout; /* beacon timeout */ - bool kso_enable; int spect; int txbf; int lpc; int disable_proptx; +#ifdef BCMSDIO int bus_txglom; /* bus:txglom */ int use_rxchain; - bool bus_rxglom; /* bus:rxglom */ + bool bus_rxglom; /* bus:rxglom */ + bool txglom_ext; /* Only for 43362/4330/43340/43341/43241 */ + /* terence 20161011: + 1) conf->tx_max_offset = 1 to fix credict issue in adaptivity testing + 2) conf->tx_max_offset = 1 will cause to UDP Tx not work in rxglom supported, + but not happened in sw txglom + */ + int tx_max_offset; uint txglomsize; - int ampdu_ba_wsize; - int dpc_cpucore; - int frameburst; - bool deepsleep; - int pm; - uint8 tcpack_sup_mode; int dhd_poll; - uint deferred_tx_len; - int pktprio8021x; + /* terence 20161011: conf->txctl_tmo_fix = 1 to fix for "sched: RT throttling activated, " + this issue happened in tx tput. 
and tx cmd at the same time in inband interrupt mode + */ bool txctl_tmo_fix; + bool tx_in_rx; // Skip tx before rx, in order to get more glomed in tx + bool txglom_mode; + uint deferred_tx_len; bool swtxglom; /* SW TXGLOM */ - bool txglom_ext; /* Only for 43362/4330/43340/43341/43241 */ /*txglom_bucket_size: * 43362/4330: 1680 * 43340/43341/43241: 1684 - */ + */ int txglom_bucket_size; - int tx_max_offset; - bool tx_in_rx; // Skip tx before rx, in order to get more glomed in tx + int txinrx_thres; + int dhd_txminmax; // -1=DATABUFCNT(bus) + uint sd_f2_blocksize; + bool oob_enabled_later; +#endif + int ampdu_ba_wsize; + int ampdu_hostreorder; + int dpc_cpucore; + int rxf_cpucore; + int frameburst; + bool deepsleep; + int pm; + int pm_in_suspend; + int pm2_sleep_ret; +#ifdef DHDTCPACK_SUPPRESS + uint8 tcpack_sup_mode; +#endif + int pktprio8021x; int rsdb_mode; - bool txglom_mode; int vhtmode; + int num_different_channels; + int xmit_in_suspend; +#ifdef IDHCPC + int dhcpc_enable; +#endif +#ifdef IAPSTA_PREINIT + char iapsta_init[50]; + char iapsta_config[300]; + char iapsta_enable[50]; +#endif + int autocountry; + int tsq; } dhd_conf_t; #ifdef BCMSDIO @@ -179,15 +218,17 @@ void dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_pa #if defined(HW_OOB) || defined(FORCE_WOWLAN) void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip); #endif +void dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable); #endif void dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path); +void dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path); void dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path); -void dhd_conf_set_conf_path_by_nv_path(dhd_pub_t *dhd, char *conf_path, char *nv_path); +void dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path); #ifdef CONFIG_PATH_AUTO_SELECT void dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path); #endif -int dhd_conf_set_fw_int_cmd(dhd_pub_t *dhd, char *name, 
uint cmd, int val, int def, bool down); -int dhd_conf_set_fw_string_cmd(dhd_pub_t *dhd, char *cmd, int val, int def, bool down); +int dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, int def, bool down); +int dhd_conf_get_iovar(dhd_pub_t *dhd, int cmd, char *name, char *buf, int len, int ifidx); uint dhd_conf_get_band(dhd_pub_t *dhd); int dhd_conf_set_country(dhd_pub_t *dhd); int dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec); @@ -195,23 +236,23 @@ int dhd_conf_get_country_from_config(dhd_pub_t *dhd, wl_country_t *cspec); int dhd_conf_fix_country(dhd_pub_t *dhd); bool dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel); int dhd_conf_set_roam(dhd_pub_t *dhd); -void dhd_conf_get_wme(dhd_pub_t *dhd, edcf_acparam_t *acp); -void dhd_conf_set_wme(dhd_pub_t *dhd); +void dhd_conf_set_bw_cap(dhd_pub_t *dhd); +void dhd_conf_set_wme(dhd_pub_t *dhd, int mode); void dhd_conf_add_pkt_filter(dhd_pub_t *dhd); bool dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id); void dhd_conf_discard_pkt_filter(dhd_pub_t *dhd); -void dhd_conf_set_disable_proptx(dhd_pub_t *dhd); int dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path); int dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev); uint dhd_conf_get_chip(void *context); uint dhd_conf_get_chiprev(void *context); -void dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable); int dhd_conf_get_pm(dhd_pub_t *dhd); -int dhd_conf_get_tcpack_sup_mode(dhd_pub_t *dhd); +#ifdef PROP_TXSTATUS +int dhd_conf_get_disable_proptx(dhd_pub_t *dhd); +#endif int dhd_conf_preinit(dhd_pub_t *dhd); int dhd_conf_reset(dhd_pub_t *dhd); int dhd_conf_attach(dhd_pub_t *dhd); void dhd_conf_detach(dhd_pub_t *dhd); void *dhd_get_pub(struct net_device *dev); - +void *dhd_get_conf(struct net_device *dev); #endif /* _dhd_config_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c b/drivers/amlogic/wifi/bcmdhd/dhd_custom_gpio.c similarity index 64% rename from drivers/net/wireless/bcmdhd/dhd_custom_gpio.c rename to 
drivers/amlogic/wifi/bcmdhd/dhd_custom_gpio.c index cdc269e53c336..cbe4af515c4db 100644 --- a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_custom_gpio.c @@ -1,9 +1,31 @@ /* -* Customer code to add GPIO control during WLAN start/stop -* $Copyright Open Broadcom Corporation$ -* -* $Id: dhd_custom_gpio.c 493822 2014-07-29 13:20:26Z $ -*/ + * Customer code to add GPIO control during WLAN start/stop + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_custom_gpio.c 591129 2015-10-07 05:22:14Z $ + */ #include #include @@ -14,29 +36,19 @@ #include #include +#if defined(WL_WIRELESS_EXT) #include +#endif #define WL_ERROR(x) printf x #define WL_TRACE(x) -#if defined(CUSTOMER_HW2) - -#if defined(PLATFORM_MPS) -int __attribute__ ((weak)) wifi_get_fw_nv_path(char *fw, char *nv) { return 0;}; -#endif - -#endif - #if defined(OOB_INTR_ONLY) #if defined(BCMLXSDMMC) extern int sdioh_mmc_irq(int irq); #endif /* (BCMLXSDMMC) */ -#if defined(CUSTOMER_HW3) || defined(PLATFORM_MPS) -#include -#endif - /* Customer specific Host GPIO defintion */ static int dhd_oob_gpio_num = -1; @@ -58,7 +70,7 @@ int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr) { int host_oob_irq = 0; -#if defined(CUSTOMER_HW2) && !defined(PLATFORM_MPS) +#if defined(CUSTOMER_HW2) host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr); #else @@ -77,16 +89,11 @@ int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr) WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n", __FUNCTION__, dhd_oob_gpio_num)); -#if defined CUSTOMER_HW3 || defined(PLATFORM_MPS) - gpio_request(dhd_oob_gpio_num, "oob irq"); - host_oob_irq = gpio_to_irq(dhd_oob_gpio_num); - gpio_direction_input(dhd_oob_gpio_num); -#endif /* defined CUSTOMER_HW3 || defined(PLATFORM_MPS) */ -#endif +#endif return (host_oob_irq); } -#endif +#endif /* Customer function to control hw specific wlan gpios */ int @@ -109,8 +116,7 @@ dhd_custom_get_mac_address(void *adapter, unsigned char *buf) return -EINVAL; /* Customer access to MAC address stored outside of DHD driver */ -#if (defined(CUSTOMER_HW2) || defined(CUSTOMER_HW10)) && (LINUX_VERSION_CODE >= \ - KERNEL_VERSION(2, 6, 35)) +#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) ret = wifi_platform_get_mac_addr(adapter, buf); #endif @@ -126,6 +132,14 @@ dhd_custom_get_mac_address(void *adapter, unsigned char *buf) } #endif /* 
GET_CUSTOM_MAC_ENABLE */ +#if !defined(WL_WIRELESS_EXT) +struct cntry_locales_custom { + char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */ + char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */ + int32 custom_locale_rev; /* Custom local revisin default -1 */ +}; +#endif /* WL_WIRELESS_EXT */ + /* Customized Locale table : OPTIONAL feature */ const struct cntry_locales_custom translate_custom_table[] = { /* Table should be filled out based on custom platform regulatory requirement */ @@ -173,7 +187,7 @@ const struct cntry_locales_custom translate_custom_table[] = { {"TR", "TR", 0}, {"NO", "NO", 0}, #endif /* EXMAPLE_TABLE */ -#if defined(CUSTOMER_HW2) && !defined(CUSTOMER_HW5) +#if defined(CUSTOMER_HW2) #if defined(BCM4335_CHIP) {"", "XZ", 11}, /* Universal if Country code is unknown or empty */ #endif @@ -234,143 +248,7 @@ const struct cntry_locales_custom translate_custom_table[] = { {"RU", "RU", 1}, {"US", "US", 5} #endif - -#elif defined(CUSTOMER_HW5) - {"", "XZ", 11}, - {"AE", "AE", 212}, - {"AG", "AG", 2}, - {"AI", "AI", 2}, - {"AL", "AL", 2}, - {"AN", "AN", 3}, - {"AR", "AR", 212}, - {"AS", "AS", 15}, - {"AT", "AT", 4}, - {"AU", "AU", 212}, - {"AW", "AW", 2}, - {"AZ", "AZ", 2}, - {"BA", "BA", 2}, - {"BD", "BD", 2}, - {"BE", "BE", 4}, - {"BG", "BG", 4}, - {"BH", "BH", 4}, - {"BM", "BM", 15}, - {"BN", "BN", 4}, - {"BR", "BR", 212}, - {"BS", "BS", 2}, - {"BY", "BY", 3}, - {"BW", "BW", 1}, - {"CA", "CA", 212}, - {"CH", "CH", 212}, - {"CL", "CL", 212}, - {"CN", "CN", 212}, - {"CO", "CO", 212}, - {"CR", "CR", 21}, - {"CY", "CY", 212}, - {"CZ", "CZ", 212}, - {"DE", "DE", 212}, - {"DK", "DK", 4}, - {"DZ", "DZ", 1}, - {"EC", "EC", 23}, - {"EE", "EE", 4}, - {"EG", "EG", 212}, - {"ES", "ES", 212}, - {"ET", "ET", 2}, - {"FI", "FI", 4}, - {"FR", "FR", 212}, - {"GB", "GB", 212}, - {"GD", "GD", 2}, - {"GF", "GF", 2}, - {"GP", "GP", 2}, - {"GR", "GR", 212}, - {"GT", "GT", 0}, - {"GU", "GU", 17}, - {"HK", "HK", 212}, - {"HR", "HR", 4}, - 
{"HU", "HU", 4}, - {"IN", "IN", 212}, - {"ID", "ID", 212}, - {"IE", "IE", 5}, - {"IL", "IL", 7}, - {"IN", "IN", 212}, - {"IS", "IS", 4}, - {"IT", "IT", 212}, - {"JO", "JO", 3}, - {"JP", "JP", 212}, - {"KH", "KH", 4}, - {"KI", "KI", 1}, - {"KR", "KR", 212}, - {"KW", "KW", 5}, - {"KY", "KY", 4}, - {"KZ", "KZ", 212}, - {"LA", "LA", 4}, - {"LB", "LB", 6}, - {"LI", "LI", 4}, - {"LK", "LK", 3}, - {"LS", "LS", 2}, - {"LT", "LT", 4}, - {"LR", "LR", 2}, - {"LU", "LU", 3}, - {"LV", "LV", 4}, - {"MA", "MA", 2}, - {"MC", "MC", 1}, - {"MD", "MD", 2}, - {"ME", "ME", 2}, - {"MK", "MK", 2}, - {"MN", "MN", 0}, - {"MO", "MO", 2}, - {"MR", "MR", 2}, - {"MT", "MT", 4}, - {"MQ", "MQ", 2}, - {"MU", "MU", 2}, - {"MV", "MV", 3}, - {"MX", "MX", 212}, - {"MY", "MY", 212}, - {"NI", "NI", 0}, - {"NL", "NL", 212}, - {"NO", "NO", 4}, - {"NP", "NP", 3}, - {"NZ", "NZ", 9}, - {"OM", "OM", 4}, - {"PA", "PA", 17}, - {"PE", "PE", 212}, - {"PG", "PG", 2}, - {"PH", "PH", 212}, - {"PL", "PL", 212}, - {"PR", "PR", 25}, - {"PT", "PT", 212}, - {"PY", "PY", 4}, - {"RE", "RE", 2}, - {"RO", "RO", 212}, - {"RS", "RS", 2}, - {"RU", "RU", 212}, - {"SA", "SA", 212}, - {"SE", "SE", 212}, - {"SG", "SG", 212}, - {"SI", "SI", 4}, - {"SK", "SK", 212}, - {"SN", "SN", 2}, - {"SV", "SV", 25}, - {"TH", "TH", 212}, - {"TR", "TR", 212}, - {"TT", "TT", 5}, - {"TW", "TW", 212}, - {"UA", "UA", 212}, - {"UG", "UG", 2}, - {"US", "US", 212}, - {"UY", "UY", 5}, - {"VA", "VA", 2}, - {"VE", "VE", 3}, - {"VG", "VG", 2}, - {"VI", "VI", 18}, - {"VN", "VN", 4}, - {"YT", "YT", 2}, - {"ZA", "ZA", 212}, - {"ZM", "ZM", 2}, - {"XT", "XT", 212}, - {"XZ", "XZ", 11}, - {"XV", "XV", 17}, - {"Q1", "Q1", 77}, -#endif /* CUSTOMER_HW2 and CUSTOMER_HW5 */ +#endif }; @@ -378,7 +256,13 @@ const struct cntry_locales_custom translate_custom_table[] = { * input : ISO 3166-1 country abbreviation * output: customized cspec */ -void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec) +void +#ifdef CUSTOM_COUNTRY_CODE 
+get_customized_country_code(void *adapter, char *country_iso_code, + wl_country_t *cspec, u32 flags) +#else +get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec) +#endif /* CUSTOM_COUNTRY_CODE */ { #if (defined(CUSTOMER_HW) || defined(CUSTOMER_HW2)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) @@ -386,8 +270,12 @@ void get_customized_country_code(void *adapter, char *country_iso_code, wl_count if (!cspec) return; - +#ifdef CUSTOM_COUNTRY_CODE + cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code, + flags); +#else cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code); +#endif /* CUSTOM_COUNTRY_CODE */ if (cloc_ptr) { strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ); cspec->rev = cloc_ptr->custom_locale_rev; diff --git a/drivers/net/wireless/bcmdhd/dhd_dbg.h b/drivers/amlogic/wifi/bcmdhd/dhd_dbg.h similarity index 50% rename from drivers/net/wireless/bcmdhd/dhd_dbg.h rename to drivers/amlogic/wifi/bcmdhd/dhd_dbg.h index 8dc9670db6181..2c32acf0aa7d4 100644 --- a/drivers/net/wireless/bcmdhd/dhd_dbg.h +++ b/drivers/amlogic/wifi/bcmdhd/dhd_dbg.h @@ -1,9 +1,30 @@ /* * Debug/trace/assert driver definitions for Dongle Host Driver. 
* - * $ Copyright Open Broadcom Corporation $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd_dbg.h 491225 2014-07-15 11:58:29Z $ + * + * <> + * + * $Id: dhd_dbg.h 598059 2015-11-07 07:31:52Z $ */ #ifndef _dhd_dbg_ @@ -12,9 +33,20 @@ #define USE_NET_RATELIMIT 1 #if defined(DHD_DEBUG) - -#define DHD_ERROR(args) do {if ((dhd_msg_level & DHD_ERROR_VAL) && USE_NET_RATELIMIT) \ - printf args;} while (0) +#ifdef DHD_LOG_DUMP +extern void dhd_log_dump_print(const char *fmt, ...); +extern char *dhd_log_dump_get_timestamp(void); +#define DHD_ERROR(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + printf args; \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else +#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) printf args;} while (0) +#endif /* DHD_LOG_DUMP */ #define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0) #define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0) #define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0) @@ -24,15 +56,58 @@ #define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0) #define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0) #define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0) +#ifdef DHD_LOG_DUMP +#define DHD_EVENT(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + printf args; \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else #define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0) +#endif /* DHD_LOG_DUMP */ #define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0) #define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0) #define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0) #define DHD_REORDER(args) do {if (dhd_msg_level & 
DHD_REORDER_VAL) printf args;} while (0) #define DHD_PNO(args) do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0) - +#ifdef DHD_LOG_DUMP +#define DHD_MSGTRACE_LOG(args) \ +do { \ + if (dhd_msg_level & DHD_MSGTRACE_VAL) { \ + printf args; \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else +#define DHD_MSGTRACE_LOG(args) do {if (dhd_msg_level & DHD_MSGTRACE_VAL) printf args;} while (0) +#endif /* DHD_LOG_DUMP */ +#define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0) +#define DHD_RTT(args) do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0) +#define DHD_IOV_INFO(args) do {if (dhd_msg_level & DHD_IOV_INFO_VAL) printf args;} while (0) + +#ifdef DHD_LOG_DUMP +#define DHD_ERROR_EX(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else +#define DHD_ERROR_EX(args) DHD_ERROR(args) +#endif /* DHD_LOG_DUMP */ + +#ifdef CUSTOMER_HW4_DEBUG +#define DHD_TRACE_HW4 DHD_ERROR +#define DHD_INFO_HW4 DHD_ERROR +#else #define DHD_TRACE_HW4 DHD_TRACE #define DHD_INFO_HW4 DHD_INFO +#endif /* CUSTOMER_HW4_DEBUG */ #define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL) #define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL) @@ -51,10 +126,12 @@ #define DHD_REORDER_ON() (dhd_msg_level & DHD_REORDER_VAL) #define DHD_NOCHECKDIED_ON() (dhd_msg_level & DHD_NOCHECKDIED_VAL) #define DHD_PNO_ON() (dhd_msg_level & DHD_PNO_VAL) +#define DHD_FWLOG_ON() (dhd_msg_level & DHD_FWLOG_VAL) +#define DHD_IOV_INFO_ON() (dhd_msg_level & DHD_IOV_INFO_VAL) #else /* defined(BCMDBG) || defined(DHD_DEBUG) */ -#define DHD_ERROR(args) do {if (USE_NET_RATELIMIT) printf args;} while (0) +#define DHD_ERROR(args) do {printf args;} while (0) #define DHD_TRACE(args) #define DHD_INFO(args) #define DHD_DATA(args) @@ -70,9 +147,18 @@ #define DHD_ARPOE(args) 
#define DHD_REORDER(args) #define DHD_PNO(args) - +#define DHD_MSGTRACE_LOG(args) +#define DHD_FWLOG(args) +#define DHD_IOV_INFO(args) +#define DHD_ERROR_EX(args) DHD_ERROR(args) + +#ifdef CUSTOMER_HW4_DEBUG +#define DHD_TRACE_HW4 DHD_ERROR +#define DHD_INFO_HW4 DHD_ERROR +#else #define DHD_TRACE_HW4 DHD_TRACE #define DHD_INFO_HW4 DHD_INFO +#endif /* CUSTOMER_HW4_DEBUG */ #define DHD_ERROR_ON() 0 #define DHD_TRACE_ON() 0 @@ -91,8 +177,10 @@ #define DHD_REORDER_ON() 0 #define DHD_NOCHECKDIED_ON() 0 #define DHD_PNO_ON() 0 +#define DHD_FWLOG_ON() 0 +#define DHD_IOV_INFO_ON() 0 -#endif +#endif #define DHD_LOG(args) diff --git a/drivers/net/wireless/bcmdhd/dhd_flowring.c b/drivers/amlogic/wifi/bcmdhd/dhd_flowring.c similarity index 65% rename from drivers/net/wireless/bcmdhd/dhd_flowring.c rename to drivers/amlogic/wifi/bcmdhd/dhd_flowring.c index bb15e0a6a01a8..759dd0ed8bcfd 100644 --- a/drivers/net/wireless/bcmdhd/dhd_flowring.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_flowring.c @@ -1,10 +1,36 @@ /* - * Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level - * $Copyright Open Broadcom Corporation$ + * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level * - * $Id: dhd_flowrings.c jaganlv $ + * Flow rings are transmit traffic (=propagating towards antenna) related entities + * + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each 
linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_flowring.c 591285 2015-10-07 11:56:29Z $ */ + #include #include #include @@ -25,6 +51,12 @@ #include #include + +static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue); + +static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, + uint8 prio, char *sa, char *da); + static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da); @@ -35,18 +67,55 @@ int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt); #define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p) #define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x)) +#ifdef DHD_LOSSLESS_ROAMING +const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 }; +#else const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 }; +#endif const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; +/** Queue overflow throttle. 
Return value: TRUE if throttle needs to be applied */ +static INLINE int +dhd_flow_queue_throttle(flow_queue_t *queue) +{ + return DHD_FLOW_QUEUE_FULL(queue); +} + int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt) { return BCME_NORESOURCE; } +/** Returns flow ring given a flowid */ +flow_ring_node_t * +dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid) +{ + flow_ring_node_t * flow_ring_node; + + ASSERT(dhdp != (dhd_pub_t*)NULL); + ASSERT(flowid < dhdp->num_flow_rings); + + flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]); + + ASSERT(flow_ring_node->flowid == flowid); + return flow_ring_node; +} + +/** Returns 'backup' queue given a flowid */ +flow_queue_t * +dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid) +{ + flow_ring_node_t * flow_ring_node; + + flow_ring_node = dhd_flow_ring_node(dhdp, flowid); + return &flow_ring_node->queue; +} + /* Flow ring's queue management functions */ -void /* Initialize a flow ring's queue */ +/** Initialize a flow ring's queue, called on driver initialization. 
*/ +void dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max) { ASSERT((queue != NULL) && (max > 0)); @@ -54,27 +123,37 @@ dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max) dll_init(&queue->list); queue->head = queue->tail = NULL; queue->len = 0; - queue->max = max - 1; + + /* Set queue's threshold and queue's parent cummulative length counter */ + ASSERT(max > 1); + DHD_FLOW_QUEUE_SET_MAX(queue, max); + DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max); + DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr); + queue->failures = 0U; queue->cb = &dhd_flow_queue_overflow; } -void /* Register an enqueue overflow callback handler */ +/** Register an enqueue overflow callback handler */ +void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb) { ASSERT(queue != NULL); queue->cb = cb; } - -int BCMFASTPATH /* Enqueue a packet in a flow ring's queue */ +/** + * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it will travel later on + * to the flow ring itself. 
+ */ +int BCMFASTPATH dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt) { int ret = BCME_OK; ASSERT(queue != NULL); - if (queue->len >= queue->max) { + if (dhd_flow_queue_throttle(queue)) { queue->failures++; ret = (*queue->cb)(queue, pkt); goto done; @@ -91,12 +170,15 @@ dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt) queue->tail = pkt; /* at tail */ queue->len++; + /* increment parent's cummulative length */ + DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); done: return ret; } -void * BCMFASTPATH /* Dequeue a packet from a flow ring's queue, from head */ +/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */ +void * BCMFASTPATH dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue) { void * pkt; @@ -115,6 +197,8 @@ dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue) queue->tail = NULL; queue->len--; + /* decrement parent's cummulative length */ + DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* dettach packet from queue */ @@ -122,7 +206,8 @@ dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue) return pkt; } -void BCMFASTPATH /* Reinsert a dequeued packet back at the head */ +/** Reinsert a dequeued 802.3 packet back at the head */ +void BCMFASTPATH dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt) { if (queue->head == NULL) { @@ -132,28 +217,48 @@ dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt) FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head); queue->head = pkt; queue->len++; + /* increment parent's cummulative length */ + DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); } +/** Fetch the backup queue for a flowring, and assign flow control thresholds */ +void +dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid, + int queue_budget, int cumm_threshold, void *cumm_ctr) +{ + flow_queue_t * queue; -/* Init Flow Ring specific data structures */ + ASSERT(dhdp != (dhd_pub_t*)NULL); + 
ASSERT(queue_budget > 1); + ASSERT(cumm_threshold > 1); + ASSERT(cumm_ctr != (void*)NULL); + + queue = dhd_flow_queue(dhdp, flowid); + + DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */ + + /* Set the queue's parent threshold and cummulative counter */ + DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold); + DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr); +} + +/** Initializes data structures of multiple flow rings */ int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) { uint32 idx; uint32 flow_ring_table_sz; - uint32 if_flow_lkup_sz; + uint32 if_flow_lkup_sz = 0; void * flowid_allocator; - flow_ring_table_t *flow_ring_table; + flow_ring_table_t *flow_ring_table = NULL; if_flow_lkup_t *if_flow_lkup = NULL; -#ifdef PCIE_TX_DEFERRAL - uint32 count; -#endif void *lock = NULL; + void *list_lock = NULL; unsigned long flags; DHD_INFO(("%s\n", __FUNCTION__)); - /* Construct a 16bit flow1d allocator */ + /* Construct a 16bit flowid allocator */ flowid_allocator = id16_map_init(dhdp->osh, num_flow_rings - FLOW_RING_COMMON, FLOWID_RESERVED); if (flowid_allocator == NULL) { @@ -163,13 +268,14 @@ dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) /* Allocate a flow ring table, comprising of requested number of rings */ flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t)); - flow_ring_table = (flow_ring_table_t *)MALLOC(dhdp->osh, flow_ring_table_sz); + flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz); if (flow_ring_table == NULL) { DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__)); goto fail; } /* Initialize flow ring table state */ + DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr); bzero((uchar *)flow_ring_table, flow_ring_table_sz); for (idx = 0; idx < num_flow_rings; idx++) { flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED; @@ -187,7 +293,7 @@ dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) FLOW_RING_QUEUE_THRESHOLD); } - /* Allocate per interface hash table */ + /* Allocate per 
interface hash table (for fast lookup from interface to flow ring) */ if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS; if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz); @@ -197,7 +303,6 @@ dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) } /* Initialize per interface hash table */ - bzero((uchar *)if_flow_lkup, if_flow_lkup_sz); for (idx = 0; idx < DHD_MAX_IFS; idx++) { int hash_ix; if_flow_lkup[idx].status = 0; @@ -206,22 +311,19 @@ dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) if_flow_lkup[idx].fl_hash[hash_ix] = NULL; } -#ifdef PCIE_TX_DEFERRAL - count = BITS_TO_LONGS(num_flow_rings); - dhdp->bus->delete_flow_map = kzalloc(count, GFP_ATOMIC); - if (!dhdp->bus->delete_flow_map) { - DHD_ERROR(("%s: delete_flow_map alloc failure\n", __FUNCTION__)); - goto fail; - } -#endif - lock = dhd_os_spin_lock_init(dhdp->osh); if (lock == NULL) goto fail; + list_lock = dhd_os_spin_lock_init(dhdp->osh); + if (list_lock == NULL) + goto lock_fail; + dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP; bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); - +#ifdef DHD_LOSSLESS_ROAMING + dhdp->dequeue_prec_map = ALLPRIO; +#endif /* Now populate into dhd pub */ DHD_FLOWID_LOCK(lock, flags); dhdp->num_flow_rings = num_flow_rings; @@ -229,19 +331,20 @@ dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) dhdp->flow_ring_table = (void *)flow_ring_table; dhdp->if_flow_lkup = (void *)if_flow_lkup; dhdp->flowid_lock = lock; + dhdp->flow_rings_inited = TRUE; + dhdp->flowring_list_lock = list_lock; DHD_FLOWID_UNLOCK(lock, flags); DHD_INFO(("%s done\n", __FUNCTION__)); return BCME_OK; -fail: +lock_fail: + /* deinit the spinlock */ + dhd_os_spin_lock_deinit(dhdp->osh, lock); -#ifdef PCIE_TX_DEFERRAL - if (dhdp->bus->delete_flow_map) - kfree(dhdp->bus->delete_flow_map); -#endif +fail: /* Destruct the per interface flow lkup table */ - if (dhdp->if_flow_lkup != NULL) { + if (if_flow_lkup != NULL) { 
DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz); } if (flow_ring_table != NULL) { @@ -256,7 +359,7 @@ dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) return BCME_NOMEM; } -/* Deinit Flow Ring specific data structures */ +/** Deinit Flow Ring specific data structures */ void dhd_flow_rings_deinit(dhd_pub_t *dhdp) { uint16 idx; @@ -268,6 +371,11 @@ void dhd_flow_rings_deinit(dhd_pub_t *dhdp) DHD_INFO(("dhd_flow_rings_deinit\n")); + if (!(dhdp->flow_rings_inited)) { + DHD_ERROR(("dhd_flow_rings not initialized!\n")); + return; + } + if (dhdp->flow_ring_table != NULL) { ASSERT(dhdp->num_flow_rings > 0); @@ -280,11 +388,12 @@ void dhd_flow_rings_deinit(dhd_pub_t *dhdp) if (flow_ring_table[idx].active) { dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]); } - ASSERT(flow_queue_empty(&flow_ring_table[idx].queue)); + ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue)); /* Deinit flow ring queue locks before destroying flow ring table */ dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock); flow_ring_table[idx].lock = NULL; + } /* Destruct the flow ring table */ @@ -297,28 +406,34 @@ void dhd_flow_rings_deinit(dhd_pub_t *dhdp) /* Destruct the per interface flow lkup table */ if (dhdp->if_flow_lkup != NULL) { if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS; - bzero(dhdp->if_flow_lkup, sizeof(if_flow_lkup_sz)); + bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz); DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz); dhdp->if_flow_lkup = NULL; } -#ifdef PCIE_TX_DEFERRAL - if (dhdp->bus->delete_flow_map) - kfree(dhdp->bus->delete_flow_map); -#endif - /* Destruct the flowid allocator */ if (dhdp->flowid_allocator != NULL) dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator); dhdp->num_flow_rings = 0U; + bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); + lock = dhdp->flowid_lock; dhdp->flowid_lock = NULL; DHD_FLOWID_UNLOCK(lock, flags); dhd_os_spin_lock_deinit(dhdp->osh, lock); + + 
dhd_os_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock); + dhdp->flowring_list_lock = NULL; + + ASSERT(dhdp->if_flow_lkup == NULL); + ASSERT(dhdp->flowid_allocator == NULL); + ASSERT(dhdp->flow_ring_table == NULL); + dhdp->flow_rings_inited = FALSE; } +/** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */ uint8 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex) { @@ -341,8 +456,8 @@ bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da) } #endif /* WLTDLS */ -/* For a given interface, search the hash table for a matching flow */ -uint16 +/** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */ +static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) { int hash; @@ -354,7 +469,9 @@ dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; - if (DHD_IF_ROLE_STA(if_flow_lkup[ifindex].role)) { + ASSERT(if_flow_lkup); + + if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) { #ifdef WLTDLS if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) && is_tdls_destination(dhdp, da)) { @@ -376,7 +493,6 @@ dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); return cur->flowid; } - } else { if (ETHER_ISMULTI(da)) { @@ -400,10 +516,11 @@ dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) } DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__)); return FLOWID_INVALID; -} +} /* dhd_flowid_find */ -/* Allocate Flow ID */ +/** Create unique Flow ID, called when a flow ring is created. 
*/ static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) { @@ -434,7 +551,8 @@ dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; - if (DHD_IF_ROLE_STA(if_flow_lkup[ifindex].role)) { + + if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) { /* For STA non TDLS dest we allocate entry based on prio only */ #ifdef WLTDLS if (dhdp->peer_tbl.tdls_peer_count && @@ -470,9 +588,9 @@ dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid)); return fl_hash_node->flowid; -} +} /* dhd_flowid_alloc */ -/* Get flow ring ID, if not present try to create one */ +/** Get flow ring ID, if not present try to create one */ static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da, uint16 *flowid) @@ -481,11 +599,13 @@ dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, flow_ring_node_t *flow_ring_node; flow_ring_table_t *flow_ring_table; unsigned long flags; + int ret; DHD_INFO(("%s\n", __FUNCTION__)); - if (!dhdp->flow_ring_table) + if (!dhdp->flow_ring_table) { return BCME_ERROR; + } flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; @@ -499,6 +619,7 @@ dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, if (!if_flow_lkup[ifindex].status) return BCME_ERROR; + id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da); if (id == FLOWID_INVALID) { DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n", @@ -508,41 +629,74 @@ dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, /* register this flowid in dhd_pub */ dhd_add_flowid(dhdp, ifindex, prio, da, id); - } - ASSERT(id < dhdp->num_flow_rings); + ASSERT(id < dhdp->num_flow_rings); + + flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id]; - flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id]; - 
DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); - if (flow_ring_node->active) { + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + /* Init Flow info */ + memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa)); + memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da)); + flow_ring_node->flow_info.tid = prio; + flow_ring_node->flow_info.ifindex = ifindex; + flow_ring_node->active = TRUE; + flow_ring_node->status = FLOW_RING_STATUS_PENDING; DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Create and inform device about the new flow */ + if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node) + != BCME_OK) { + DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id)); + return BCME_ERROR; + } + *flowid = id; return BCME_OK; - } - /* Init Flow info */ - memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa)); - memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da)); - flow_ring_node->flow_info.tid = prio; - flow_ring_node->flow_info.ifindex = ifindex; - flow_ring_node->active = TRUE; - flow_ring_node->status = FLOW_RING_STATUS_PENDING; - DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); - dll_prepend(&dhdp->bus->const_flowring, &flow_ring_node->list); - DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + } else { + /* if the Flow id was found in the hash */ + ASSERT(id < dhdp->num_flow_rings); + + flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id]; + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + /* + * If the flow_ring_node is in Open State or Status pending state then + * we can return the Flow id to the caller.If the flow_ring_node is in + * FLOW_RING_STATUS_PENDING this means the creation is in progress and + * hence the packets should be queued. + * + * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING Or + * FLOW_RING_STATUS_CLOSED, then we should return Error. 
+ * Note that if the flowing is being deleted we would mark it as + * FLOW_RING_STATUS_DELETE_PENDING. Now before Dongle could respond and + * before we mark it as FLOW_RING_STATUS_CLOSED we could get tx packets. + * We should drop the packets in that case. + * The decission to return OK should NOT be based on 'active' variable, beause + * active is made TRUE when a flow_ring_node gets allocated and is made + * FALSE when the flow ring gets removed and does not reflect the True state + * of the Flow ring. + */ + if (flow_ring_node->status == FLOW_RING_STATUS_OPEN || + flow_ring_node->status == FLOW_RING_STATUS_PENDING) { + *flowid = id; + ret = BCME_OK; + } else { + *flowid = FLOWID_INVALID; + ret = BCME_ERROR; + } - /* Create and inform device about the new flow */ - if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node) - != BCME_OK) { - DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id)); - return BCME_ERROR; - } + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + return ret; - *flowid = id; - return BCME_OK; -} + } /* Flow Id found in the hash */ +} /* dhd_flowid_lookup */ -/* Update flowid information on the packet */ +/** + * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to + * select the flowring to send the packet to the dongle. 
+ */ int BCMFASTPATH dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf) { @@ -550,10 +704,8 @@ dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf) struct ether_header *eh = (struct ether_header *)pktdata; uint16 flowid; - if (dhd_bus_is_txmode_push(dhdp->bus)) - return BCME_OK; - ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex >= DHD_MAX_IFS) { return BCME_BADARG; } @@ -562,6 +714,7 @@ dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf) DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__)); return BCME_ERROR; } + if (dhd_flowid_lookup(dhdp, ifindex, prio, eh->ether_shost, eh->ether_dhost, &flowid) != BCME_OK) { return BCME_ERROR; @@ -570,7 +723,7 @@ dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf) DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid)); /* Tag the packet with flowid */ - DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), flowid); + DHD_PKT_SET_FLOWID(pktbuf, flowid); return BCME_OK; } @@ -626,10 +779,12 @@ dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid) DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n", __FUNCTION__, flowid)); -} +} /* dhd_flowid_free */ - -/* Delete all Flow rings assocaited with the given Interface */ +/** + * Delete all Flow rings associated with the given interface. Is called when e.g. the dongle + * indicates that a wireless link has gone down. 
+ */ void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex) { @@ -648,17 +803,14 @@ dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex) flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; for (id = 0; id < dhdp->num_flow_rings; id++) { if (flow_ring_table[id].active && - (flow_ring_table[id].flow_info.ifindex == ifindex) && - (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) { - DHD_INFO(("%s: deleting flowid %d\n", - __FUNCTION__, flow_ring_table[id].flowid)); + (flow_ring_table[id].flow_info.ifindex == ifindex)) { dhd_bus_flow_ring_delete_request(dhdp->bus, (void *) &flow_ring_table[id]); } } } -/* Delete flow/s for given peer address */ +/** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality. */ void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr) { @@ -677,18 +829,18 @@ dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr) flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; for (id = 0; id < dhdp->num_flow_rings; id++) { if (flow_ring_table[id].active && - (flow_ring_table[id].flow_info.ifindex == ifindex) && - (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) && - (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) { + (flow_ring_table[id].flow_info.ifindex == ifindex) && + (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) && + (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) { DHD_INFO(("%s: deleting flowid %d\n", - __FUNCTION__, flow_ring_table[id].flowid)); + __FUNCTION__, flow_ring_table[id].flowid)); dhd_bus_flow_ring_delete_request(dhdp->bus, - (void *) &flow_ring_table[id]); + (void *) &flow_ring_table[id]); } } } -/* Handle Interface ADD, DEL operations */ +/** Handles interface ADD, CHANGE, DEL indications from the dongle */ void dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex, uint8 op, uint8 role) @@ -714,7 +866,7 @@ dhd_update_interface_flow_info(dhd_pub_t 
*dhdp, uint8 ifindex, if_flow_lkup[ifindex].role = role; - if (!(DHD_IF_ROLE_STA(role))) { + if (role != WLC_E_IF_ROLE_STA) { if_flow_lkup[ifindex].status = TRUE; DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n", __FUNCTION__, ifindex, role)); @@ -728,7 +880,7 @@ dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex, DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); } -/* Handle a STA interface link status update */ +/** Handles a STA 'link' indication from the dongle */ int dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status) { @@ -744,7 +896,7 @@ dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status) DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; - if (DHD_IF_ROLE_STA(if_flow_lkup[ifindex].role)) { + if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) { if (status) if_flow_lkup[ifindex].status = TRUE; else @@ -754,13 +906,14 @@ dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status) return BCME_OK; } -/* Update flow priority mapping */ + +/** Update flow priority mapping, called on IOVAR */ int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map) { uint16 flowid; flow_ring_node_t *flow_ring_node; - if (map > DHD_FLOW_PRIO_TID_MAP) + if (map > DHD_FLOW_PRIO_LLR_MAP) return BCME_BADOPTION; /* Check if we need to change prio map */ @@ -773,7 +926,8 @@ int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map) if (flow_ring_node->active) return BCME_EPERM; } - /* Infor firmware about new mapping type */ + + /* Inform firmware about new mapping type */ if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE)) return BCME_ERROR; @@ -787,7 +941,7 @@ int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map) return BCME_OK; } -/* Set/Get flwo ring priority map */ +/** Inform firmware on updated flow priority mapping, called on IOVAR */ int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set) { uint8 iovbuf[24]; diff --git 
a/drivers/amlogic/wifi/bcmdhd/dhd_flowring.h b/drivers/amlogic/wifi/bcmdhd/dhd_flowring.h new file mode 100644 index 0000000000000..7c36de5459bf4 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_flowring.h @@ -0,0 +1,235 @@ +/* + * @file Header file describing the flow rings DHD interfaces. + * + * Flow rings are transmit traffic (=propagating towards antenna) related entities. + * + * Provides type definitions and function prototypes used to create, delete and manage flow rings at + * high level. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_flowring.h 591285 2015-10-07 11:56:29Z $ + */ + + +/**************** + * Common types * + */ + +#ifndef _dhd_flowrings_h_ +#define _dhd_flowrings_h_ + +/* Max pkts held in a flow ring's backup queue */ +#define FLOW_RING_QUEUE_THRESHOLD (2048) + +/* Number of H2D common rings */ +#define FLOW_RING_COMMON BCMPCIE_H2D_COMMON_MSGRINGS + +#define FLOWID_INVALID (ID16_INVALID) +#define FLOWID_RESERVED (FLOW_RING_COMMON) + +#define FLOW_RING_STATUS_OPEN 0 +#define FLOW_RING_STATUS_PENDING 1 +#define FLOW_RING_STATUS_CLOSED 2 +#define FLOW_RING_STATUS_DELETE_PENDING 3 +#define FLOW_RING_STATUS_FLUSH_PENDING 4 +#define FLOW_RING_STATUS_STA_FREEING 5 + +#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 2048 + +#define DHD_FLOW_PRIO_AC_MAP 0 +#define DHD_FLOW_PRIO_TID_MAP 1 +#define DHD_FLOW_PRIO_LLR_MAP 2 + +/* Pkttag not compatible with PROP_TXSTATUS or WLFC */ +typedef struct dhd_pkttag_fr { + uint16 flowid; + uint16 ifid; + int dataoff; + dmaaddr_t physaddr; + uint32 pa_len; + +} dhd_pkttag_fr_t; + +#define DHD_PKTTAG_SET_FLOWID(tag, flow) ((tag)->flowid = (uint16)(flow)) +#define DHD_PKTTAG_SET_IFID(tag, idx) ((tag)->ifid = (uint16)(idx)) +#define DHD_PKTTAG_SET_DATAOFF(tag, offset) ((tag)->dataoff = (int)(offset)) +#define DHD_PKTTAG_SET_PA(tag, pa) ((tag)->physaddr = (pa)) +#define DHD_PKTTAG_SET_PA_LEN(tag, palen) ((tag)->pa_len = (palen)) + +#define DHD_PKTTAG_FLOWID(tag) ((tag)->flowid) +#define DHD_PKTTAG_IFID(tag) ((tag)->ifid) +#define DHD_PKTTAG_DATAOFF(tag) ((tag)->dataoff) +#define DHD_PKTTAG_PA(tag) ((tag)->physaddr) +#define DHD_PKTTAG_PA_LEN(tag) ((tag)->pa_len) + +/* Hashing a MacAddress for lkup into a per interface flow hash table */ +#define DHD_FLOWRING_HASH_SIZE 256 +#define DHD_FLOWRING_HASHINDEX(ea, prio) \ + ((((uint8 *)(ea))[3] ^ ((uint8 *)(ea))[4] ^ ((uint8 *)(ea))[5] ^ ((uint8)(prio))) \ + % DHD_FLOWRING_HASH_SIZE) + +#define DHD_IF_ROLE(pub, idx) (((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role) +#define 
DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP) +#define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA) +#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO) +#define DHD_FLOW_RING(dhdp, flowid) \ + (flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid]) + +struct flow_queue; + +/* Flow Ring Queue Enqueue overflow callback */ +typedef int (*flow_queue_cb_t)(struct flow_queue * queue, void * pkt); + +/** + * Each flow ring has an associated (tx flow controlled) queue. 802.3 packets are transferred + * between queue and ring. A packet from the host stack is first added to the queue, and in a later + * stage transferred to the flow ring. Packets in the queue are dhd owned, whereas packets in the + * flow ring are device owned. + */ +typedef struct flow_queue { + dll_t list; /* manage a flowring queue in a double linked list */ + void * head; /* first packet in the queue */ + void * tail; /* last packet in the queue */ + uint16 len; /* number of packets in the queue */ + uint16 max; /* maximum or min budget (used in cumm) */ + uint32 threshold; /* parent's cummulative length threshold */ + void * clen_ptr; /* parent's cummulative length counter */ + uint32 failures; /* enqueue failures due to queue overflow */ + flow_queue_cb_t cb; /* callback invoked on threshold crossing */ +} flow_queue_t; + +#define DHD_FLOW_QUEUE_LEN(queue) ((int)(queue)->len) +#define DHD_FLOW_QUEUE_MAX(queue) ((int)(queue)->max) +#define DHD_FLOW_QUEUE_THRESHOLD(queue) ((int)(queue)->threshold) +#define DHD_FLOW_QUEUE_EMPTY(queue) ((queue)->len == 0) +#define DHD_FLOW_QUEUE_FAILURES(queue) ((queue)->failures) + +#define DHD_FLOW_QUEUE_AVAIL(queue) ((int)((queue)->max - (queue)->len)) +#define DHD_FLOW_QUEUE_FULL(queue) ((queue)->len >= (queue)->max) + +#define DHD_FLOW_QUEUE_OVFL(queue, budget) \ + (((queue)->len) > budget) + +#define DHD_FLOW_QUEUE_SET_MAX(queue, budget) \ + ((queue)->max) = 
((budget) - 1) + +/* Queue's cummulative threshold. */ +#define DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold) \ + ((queue)->threshold) = ((cumm_threshold) - 1) + +/* Queue's cummulative length object accessor. */ +#define DHD_FLOW_QUEUE_CLEN_PTR(queue) ((queue)->clen_ptr) + +/* Set a queue's cumm_len point to a parent's cumm_ctr_t cummulative length */ +#define DHD_FLOW_QUEUE_SET_CLEN(queue, parent_clen_ptr) \ + ((queue)->clen_ptr) = (void *)(parent_clen_ptr) + +/* see wlfc_proto.h for tx status details */ +#define DHD_FLOWRING_MAXSTATUS_MSGS 5 +#define DHD_FLOWRING_TXSTATUS_CNT_UPDATE(bus, flowid, txstatus) +/** each flow ring is dedicated to a tid/sa/da combination */ +typedef struct flow_info { + uint8 tid; + uint8 ifindex; + char sa[ETHER_ADDR_LEN]; + char da[ETHER_ADDR_LEN]; +} flow_info_t; + +/** a flow ring is used for outbound (towards antenna) 802.3 packets */ +typedef struct flow_ring_node { + dll_t list; /* manage a constructed flowring in a dll, must be at first place */ + flow_queue_t queue; /* queues packets before they enter the flow ring, flow control */ + bool active; + uint8 status; + /* + * flowid: unique ID of a flow ring, which can either be unicast or broadcast/multicast. For + * unicast flow rings, the flow id accelerates ARM 802.3->802.11 header translation. 
+ */ + uint16 flowid; + flow_info_t flow_info; + void *prot_info; + void *lock; /* lock for flowring access protection */ +} flow_ring_node_t; + +typedef flow_ring_node_t flow_ring_table_t; + +typedef struct flow_hash_info { + uint16 flowid; + flow_info_t flow_info; + struct flow_hash_info *next; +} flow_hash_info_t; + +typedef struct if_flow_lkup { + bool status; + uint8 role; /* Interface role: STA/AP */ + flow_hash_info_t *fl_hash[DHD_FLOWRING_HASH_SIZE]; /* Lkup Hash table */ +} if_flow_lkup_t; + +static INLINE flow_ring_node_t * +dhd_constlist_to_flowring(dll_t *item) +{ + return ((flow_ring_node_t *)item); +} + +/* Exported API */ + +/* Flow ring's queue management functions */ +extern flow_ring_node_t * dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid); +extern flow_queue_t * dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid); + +extern void dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max); +extern void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb); +extern int dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt); +extern void * dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue); +extern void dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt); + +extern void dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid, + int queue_budget, int cumm_threshold, void *cumm_ctr); +extern int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings); + +extern void dhd_flow_rings_deinit(dhd_pub_t *dhdp); + +extern int dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, + void *pktbuf); + +extern void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid); + +extern void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex); + +extern void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, + char *addr); + +/* Handle Interface ADD, DEL operations */ +extern void dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex, + uint8 op, uint8 role); 
+ +/* Handle a STA interface link status update */ +extern int dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, + uint8 status); +extern int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set); +extern int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map); + +extern uint8 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex); +#endif /* _dhd_flowrings_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_gpio.c b/drivers/amlogic/wifi/bcmdhd/dhd_gpio.c new file mode 100644 index 0000000000000..db3b45df23bfc --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_gpio.c @@ -0,0 +1,391 @@ + +#include +#include +#include + +#ifdef CUSTOMER_HW_PLATFORM +#include +#define sdmmc_channel sdmmc_device_mmc0 +#endif /* CUSTOMER_HW_PLATFORM */ + +#if defined(BUS_POWER_RESTORE) && defined(BCMSDIO) +#include +#include +#include +#include +#endif /* defined(BUS_POWER_RESTORE) && defined(BCMSDIO) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) +#include +extern int wifi_irq_trigger_level(void); +#endif +#ifdef CUSTOMER_HW_AMLOGIC +extern void sdio_reinit(void); +extern void extern_wifi_set_enable(int is_on); +extern void pci_remove_reinit(unsigned int vid, unsigned int pid, int delBus); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) +extern int wifi_irq_num(void); +#endif +#endif + +extern u8 *wifi_get_mac(void); +#ifdef CONFIG_DHD_USE_STATIC_BUF +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + +static int gpio_wl_reg_on = -1; // WL_REG_ON is input pin of WLAN module +#ifdef CUSTOMER_OOB +static int gpio_wl_host_wake = -1; // WL_HOST_WAKE is output pin of WLAN module +#endif + +static int +dhd_wlan_set_power(bool on +#ifdef BUS_POWER_RESTORE +, wifi_adapter_info_t *adapter +#endif /* BUS_POWER_RESTORE */ +) +{ + int err = 0; + + if (on) { + printf("======== PULL WL_REG_ON(%d) HIGH! 
========\n", gpio_wl_reg_on); + if (gpio_wl_reg_on >= 0) { + err = gpio_direction_output(gpio_wl_reg_on, 1); + if (err) { + printf("%s: WL_REG_ON didn't output high\n", __FUNCTION__); + return -EIO; + } + } +#ifdef CUSTOMER_HW_AMLOGIC +#ifdef BCMSDIO + extern_wifi_set_enable(0); + mdelay(200); + extern_wifi_set_enable(1); + mdelay(200); +// sdio_reinit(); +#endif +#endif +#if defined(BUS_POWER_RESTORE) +#if defined(BCMSDIO) + if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) { + printf("======== mmc_power_restore_host! ========\n"); + mmc_power_restore_host(adapter->sdio_func->card->host); + } +#elif defined(BCMPCIE) + OSL_SLEEP(50); /* delay needed to be able to restore PCIe configuration registers */ + if (adapter->pci_dev) { + printf("======== pci_set_power_state PCI_D0! ========\n"); + pci_set_power_state(adapter->pci_dev, PCI_D0); + if (adapter->pci_saved_state) + pci_load_and_free_saved_state(adapter->pci_dev, &adapter->pci_saved_state); + pci_restore_state(adapter->pci_dev); + err = pci_enable_device(adapter->pci_dev); + if (err < 0) + printf("%s: PCI enable device failed", __FUNCTION__); + pci_set_master(adapter->pci_dev); + } +#endif /* BCMPCIE */ +#endif /* BUS_POWER_RESTORE */ + /* Lets customer power to get stable */ + mdelay(100); + } else { +#if defined(BUS_POWER_RESTORE) +#if defined(BCMSDIO) + if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) { + printf("======== mmc_power_save_host! ========\n"); + mmc_power_save_host(adapter->sdio_func->card->host); + } +#elif defined(BCMPCIE) + if (adapter->pci_dev) { + printf("======== pci_set_power_state PCI_D3hot! 
========\n"); + pci_save_state(adapter->pci_dev); + adapter->pci_saved_state = pci_store_saved_state(adapter->pci_dev); + if (pci_is_enabled(adapter->pci_dev)) + pci_disable_device(adapter->pci_dev); + pci_set_power_state(adapter->pci_dev, PCI_D3hot); + } +#endif /* BCMPCIE */ +#endif /* BUS_POWER_RESTORE */ + printf("======== PULL WL_REG_ON(%d) LOW! ========\n", gpio_wl_reg_on); + if (gpio_wl_reg_on >= 0) { + err = gpio_direction_output(gpio_wl_reg_on, 0); + if (err) { + printf("%s: WL_REG_ON didn't output low\n", __FUNCTION__); + return -EIO; + } + } +#ifdef CUSTOMER_HW_AMLOGIC +// extern_wifi_set_enable(0); +// mdelay(200); +#endif + } + + return err; +} + +static int dhd_wlan_set_reset(int onoff) +{ + return 0; +} + +static int dhd_wlan_set_carddetect(bool present) +{ + int err = 0; + +#if !defined(BUS_POWER_RESTORE) + if (present) { +#if defined(BCMSDIO) + printf("======== Card detection to detect SDIO card! ========\n"); +#ifdef CUSTOMER_HW_PLATFORM + err = sdhci_force_presence_change(&sdmmc_channel, 1); +#endif /* CUSTOMER_HW_PLATFORM */ +#ifdef CUSTOMER_HW_AMLOGIC + sdio_reinit(); +#endif +#endif + } else { +#if defined(BCMSDIO) + printf("======== Card detection to remove SDIO card! ========\n"); +#ifdef CUSTOMER_HW_PLATFORM + err = sdhci_force_presence_change(&sdmmc_channel, 0); +#endif /* CUSTOMER_HW_PLATFORM */ +#ifdef CUSTOMER_HW_AMLOGIC + extern_wifi_set_enable(0); + mdelay(200); +#endif +#elif defined(BCMPCIE) + printf("======== Card detection to remove PCIE card! 
========\n"); + extern_wifi_set_enable(0); + mdelay(200); +#endif + } +#endif /* BUS_POWER_RESTORE */ + + return err; +} + +static int dhd_wlan_get_mac_addr(unsigned char *buf) +{ + int err = 0; + + printf("======== %s ========\n", __FUNCTION__); +#ifdef EXAMPLE_GET_MAC + /* EXAMPLE code */ + { + struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}}; + bcopy((char *)&ea_example, buf, sizeof(struct ether_addr)); + } +#endif /* EXAMPLE_GET_MAC */ + bcopy((char *)wifi_get_mac(), buf, sizeof(struct ether_addr)); + if (buf[0] == 0xff) { + printf("custom wifi mac is not set\n"); + err = -1; + } + + return err; +} +#ifdef CONFIG_DHD_USE_STATIC_BUF +extern void *bcmdhd_mem_prealloc(int section, unsigned long size); +void* bcm_wlan_prealloc(int section, unsigned long size) +{ + void *alloc_ptr = NULL; + alloc_ptr = bcmdhd_mem_prealloc(section, size); + if (alloc_ptr) { + printf("success alloc section %d, size %ld\n", section, size); + if (size != 0L) + bzero(alloc_ptr, size); + return alloc_ptr; + } + printf("can't alloc section %d\n", section); + return NULL; +} +#endif + +#if !defined(WL_WIRELESS_EXT) +struct cntry_locales_custom { + char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */ + char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */ + int32 custom_locale_rev; /* Custom local revisin default -1 */ +}; +#endif + +static struct cntry_locales_custom brcm_wlan_translate_custom_table[] = { + /* Table should be filled out based on custom platform regulatory requirement */ + {"", "XT", 49}, /* Universal if Country code is unknown or empty */ + {"US", "US", 0}, +}; + +#ifdef CUSTOM_FORCE_NODFS_FLAG +struct cntry_locales_custom brcm_wlan_translate_nodfs_table[] = { + {"", "XT", 50}, /* Universal if Country code is unknown or empty */ + {"US", "US", 0}, +}; +#endif + +static void *dhd_wlan_get_country_code(char *ccode +#ifdef CUSTOM_FORCE_NODFS_FLAG + , u32 flags +#endif +) +{ + struct cntry_locales_custom *locales; + int 
size; + int i; + + if (!ccode) + return NULL; + +#ifdef CUSTOM_FORCE_NODFS_FLAG + if (flags & WLAN_PLAT_NODFS_FLAG) { + locales = brcm_wlan_translate_nodfs_table; + size = ARRAY_SIZE(brcm_wlan_translate_nodfs_table); + } else { +#endif + locales = brcm_wlan_translate_custom_table; + size = ARRAY_SIZE(brcm_wlan_translate_custom_table); +#ifdef CUSTOM_FORCE_NODFS_FLAG + } +#endif + + for (i = 0; i < size; i++) + if (strcmp(ccode, locales[i].iso_abbrev) == 0) + return &locales[i]; + return NULL; +} + +struct resource dhd_wlan_resources[] = { + [0] = { + .name = "bcmdhd_wlan_irq", + .start = 0, /* Dummy */ + .end = 0, /* Dummy */ + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE + | IORESOURCE_IRQ_HIGHLEVEL, /* Dummy */ + }, +}; + +struct wifi_platform_data dhd_wlan_control = { + .set_power = dhd_wlan_set_power, + .set_reset = dhd_wlan_set_reset, + .set_carddetect = dhd_wlan_set_carddetect, + .get_mac_addr = dhd_wlan_get_mac_addr, +#ifdef CONFIG_DHD_USE_STATIC_BUF + .mem_prealloc = bcm_wlan_prealloc, +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + .get_country_code = dhd_wlan_get_country_code, +}; + +int dhd_wlan_init_gpio(void) +{ + int err = 0; +#ifdef CUSTOMER_OOB + int host_oob_irq = -1; + uint host_oob_irq_flags = 0; +#endif + + /* Please check your schematic and fill right GPIO number which connected to + * WL_REG_ON and WL_HOST_WAKE. + */ + gpio_wl_reg_on = -1; +#ifdef CUSTOMER_OOB + gpio_wl_host_wake = -1; +#endif + +#if defined(BCMPCIE) + printf("======== Card detection to detect PCIE card! 
========\n"); + pci_remove_reinit(0x14e4, 0x43ec, 1); +#endif + printf("%s: GPIO(WL_REG_ON) = %d\n", __FUNCTION__, gpio_wl_reg_on); + if (gpio_wl_reg_on >= 0) { + err = gpio_request(gpio_wl_reg_on, "WL_REG_ON"); + if (err < 0) { + printf("%s: Faiiled to request gpio %d for WL_REG_ON\n", + __FUNCTION__, gpio_wl_reg_on); + gpio_wl_reg_on = -1; + } + } + +#ifdef CUSTOMER_OOB + printf("%s: GPIO(WL_HOST_WAKE) = %d\n", __FUNCTION__, gpio_wl_host_wake); + if (gpio_wl_host_wake >= 0) { + err = gpio_request(gpio_wl_host_wake, "bcmdhd"); + if (err < 0) { + printf("%s: gpio_request failed\n", __FUNCTION__); + return -1; + } + err = gpio_direction_input(gpio_wl_host_wake); + if (err < 0) { + printf("%s: gpio_direction_input failed\n", __FUNCTION__); + gpio_free(gpio_wl_host_wake); + return -1; + } + host_oob_irq = gpio_to_irq(gpio_wl_host_wake); + if (host_oob_irq < 0) { + printf("%s: gpio_to_irq failed\n", __FUNCTION__); + gpio_free(gpio_wl_host_wake); + return -1; + } + } +#ifdef CUSTOMER_HW_AMLOGIC +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + host_oob_irq = INT_GPIO_4; +#else + host_oob_irq = wifi_irq_num(); +#endif +#endif + printf("%s: host_oob_irq: %d\n", __FUNCTION__, host_oob_irq); + +#ifdef HW_OOB +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + if (wifi_irq_trigger_level() == GPIO_IRQ_LOW) + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_SHAREABLE; + else + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; +#else +#ifdef HW_OOB_LOW_LEVEL + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_SHAREABLE; +#else + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; +#endif +#endif +#else + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_SHAREABLE; +#endif + + dhd_wlan_resources[0].start = dhd_wlan_resources[0].end = host_oob_irq; + dhd_wlan_resources[0].flags = host_oob_irq_flags; + 
printf("%s: host_oob_irq_flags=0x%x\n", __FUNCTION__, host_oob_irq_flags); +#endif /* CUSTOMER_OOB */ + + return 0; +} + +static void dhd_wlan_deinit_gpio(void) +{ + if (gpio_wl_reg_on >= 0) { + printf("%s: gpio_free(WL_REG_ON %d)\n", __FUNCTION__, gpio_wl_reg_on); + gpio_free(gpio_wl_reg_on); + gpio_wl_reg_on = -1; + } +#ifdef CUSTOMER_OOB + if (gpio_wl_host_wake >= 0) { + printf("%s: gpio_free(WL_HOST_WAKE %d)\n", __FUNCTION__, gpio_wl_host_wake); + gpio_free(gpio_wl_host_wake); + gpio_wl_host_wake = -1; + } +#endif /* CUSTOMER_OOB */ +} + +int dhd_wlan_init_plat_data(void) +{ + int err = 0; + + printf("======== %s ========\n", __FUNCTION__); + err = dhd_wlan_init_gpio(); + return err; +} + +void dhd_wlan_deinit_plat_data(wifi_adapter_info_t *adapter) +{ + printf("======== %s ========\n", __FUNCTION__); + dhd_wlan_deinit_gpio(); +} + diff --git a/drivers/net/wireless/bcmdhd/dhd_ip.c b/drivers/amlogic/wifi/bcmdhd/dhd_ip.c similarity index 88% rename from drivers/net/wireless/bcmdhd/dhd_ip.c rename to drivers/amlogic/wifi/bcmdhd/dhd_ip.c index cb54452db836b..971e4ca8fe7df 100644 --- a/drivers/net/wireless/bcmdhd/dhd_ip.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_ip.c @@ -1,9 +1,30 @@ /* * IP Packet Parser Module. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd_ip.c 502735 2014-09-16 00:53:02Z $ + * + * <> + * + * $Id: dhd_ip.c 569132 2015-07-07 09:09:33Z $ */ #include #include @@ -98,70 +119,6 @@ pkt_frag_t pkt_frag_info(osl_t *osh, void *p) } } -bool pkt_is_dhcp(osl_t *osh, void *p) -{ - uint8 *frame; - int length; - uint8 *pt; /* Pointer to type field */ - uint16 ethertype; - struct ipv4_hdr *iph; /* IP frame pointer */ - int ipl; /* IP frame length */ - uint16 src_port; - - ASSERT(osh && p); - - frame = PKTDATA(osh, p); - length = PKTLEN(osh, p); - - /* Process Ethernet II or SNAP-encapsulated 802.3 frames */ - if (length < ETHER_HDR_LEN) { - DHD_INFO(("%s: short eth frame (%d)\n", __FUNCTION__, length)); - return FALSE; - } else if (ntoh16(*(uint16 *)(frame + ETHER_TYPE_OFFSET)) >= ETHER_TYPE_MIN) { - /* Frame is Ethernet II */ - pt = frame + ETHER_TYPE_OFFSET; - } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN && - !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) { - pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN; - } else { - DHD_INFO(("%s: non-SNAP 802.3 frame\n", __FUNCTION__)); - return FALSE; - } - - ethertype = ntoh16(*(uint16 *)pt); - - /* Skip VLAN tag, if any */ - if (ethertype == ETHER_TYPE_8021Q) { - pt += VLAN_TAG_LEN; - - if (pt + ETHER_TYPE_LEN > frame + length) { - DHD_INFO(("%s: short VLAN frame (%d)\n", __FUNCTION__, length)); - return FALSE; - } - - ethertype = ntoh16(*(uint16 *)pt); - } - - if (ethertype != ETHER_TYPE_IP) { - DHD_INFO(("%s: non-IP frame (ethertype 0x%x, length %d)\n", - __FUNCTION__, ethertype, length)); - return FALSE; - } - - iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN); - ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame)); - - /* We support IPv4 only */ - if ((ipl < (IPV4_OPTIONS_OFFSET + 2)) || (IP_VER(iph) != IP_VER_4)) { - DHD_INFO(("%s: short frame (%d) or non-IPv4\n", __FUNCTION__, ipl)); - return FALSE; - } - - src_port = ntoh16(*(uint16 *)(pt + ETHER_TYPE_LEN + IPV4_OPTIONS_OFFSET)); - - return (src_port == 0x43 || src_port == 
0x44); -} - #ifdef DHDTCPACK_SUPPRESS typedef struct { @@ -179,10 +136,14 @@ typedef struct _tdata_psh_info_t { } tdata_psh_info_t; typedef struct { - uint8 src_ip_addr[IPV4_ADDR_LEN]; /* SRC ip addrs of this TCP stream */ - uint8 dst_ip_addr[IPV4_ADDR_LEN]; /* DST ip addrs of this TCP stream */ - uint8 src_tcp_port[TCP_PORT_LEN]; /* SRC tcp ports of this TCP stream */ - uint8 dst_tcp_port[TCP_PORT_LEN]; /* DST tcp ports of this TCP stream */ + struct { + uint8 src[IPV4_ADDR_LEN]; /* SRC ip addrs of this TCP stream */ + uint8 dst[IPV4_ADDR_LEN]; /* DST ip addrs of this TCP stream */ + } ip_addr; + struct { + uint8 src[TCP_PORT_LEN]; /* SRC tcp ports of this TCP stream */ + uint8 dst[TCP_PORT_LEN]; /* DST tcp ports of this TCP stream */ + } tcp_port; tdata_psh_info_t *tdata_psh_info_head; /* Head of received TCP PSH DATA chain */ tdata_psh_info_t *tdata_psh_info_tail; /* Tail of received TCP PSH DATA chain */ uint32 last_used_time; /* The last time this tcpdata_info was used(in ms) */ @@ -248,6 +209,7 @@ _tdata_psh_info_pool_deq(tcpack_sup_module_t *tcpack_sup_mod) return tdata_psh_info; } +#ifdef BCMSDIO static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp, tcpack_sup_module_t *tcpack_sup_mod) { @@ -325,6 +287,7 @@ static void _tdata_psh_info_pool_deinit(dhd_pub_t *dhdp, return; } +#endif /* BCMSDIO */ static void dhd_tcpack_send(ulong data) { @@ -333,6 +296,7 @@ static void dhd_tcpack_send(ulong data) dhd_pub_t *dhdp; int ifidx; void* pkt; + unsigned long flags; if (!cur_tbl) { return; @@ -343,13 +307,19 @@ static void dhd_tcpack_send(ulong data) return; } - dhd_os_tcpacklock(dhdp); + flags = dhd_os_tcpacklock(dhdp); tcpack_sup_mod = dhdp->tcpack_sup_module; + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", + __FUNCTION__, __LINE__)); + dhd_os_tcpackunlock(dhdp, flags); + return; + } pkt = cur_tbl->pkt_in_q; ifidx = cur_tbl->ifidx; if (!pkt) { - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); return; } cur_tbl->pkt_in_q 
= NULL; @@ -361,7 +331,7 @@ static void dhd_tcpack_send(ulong data) __FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt)); } - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); dhd_sendpkt(dhdp, ifidx, pkt); } @@ -369,8 +339,9 @@ static void dhd_tcpack_send(ulong data) int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode) { int ret = BCME_OK; + unsigned long flags; - dhd_os_tcpacklock(dhdp); + flags = dhd_os_tcpacklock(dhdp); if (dhdp->tcpack_sup_mode == mode) { DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode)); @@ -380,7 +351,7 @@ int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode) if (mode >= TCPACK_SUP_LAST_MODE || #ifndef BCMSDIO mode == TCPACK_SUP_DELAYTX || -#endif +#endif /* !BCMSDIO */ FALSE) { DHD_ERROR(("%s %d: Invalid mode %d\n", __FUNCTION__, __LINE__, mode)); ret = BCME_BADARG; @@ -390,6 +361,7 @@ int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode) DHD_TRACE(("%s: %d -> %d\n", __FUNCTION__, dhdp->tcpack_sup_mode, mode)); +#ifdef BCMSDIO /* Old tcpack_sup_mode is TCPACK_SUP_DELAYTX */ if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX) { tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module; @@ -402,11 +374,13 @@ int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode) if (dhdp->bus) dhd_bus_set_dotxinrx(dhdp->bus, TRUE); } - +#endif /* BCMSDIO */ dhdp->tcpack_sup_mode = mode; if (mode == TCPACK_SUP_OFF) { ASSERT(dhdp->tcpack_sup_module != NULL); + /* Clean up timer/data structure for any remaining/pending packet or timer. 
*/ + dhd_tcpack_info_tbl_clean(dhdp); MFREE(dhdp->osh, dhdp->tcpack_sup_module, sizeof(tcpack_sup_module_t)); dhdp->tcpack_sup_module = NULL; goto exit; @@ -425,6 +399,7 @@ int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode) dhdp->tcpack_sup_module = tcpack_sup_mod; } +#ifdef BCMSDIO if (mode == TCPACK_SUP_DELAYTX) { ret = _tdata_psh_info_pool_init(dhdp, dhdp->tcpack_sup_module); if (ret != BCME_OK) @@ -432,13 +407,14 @@ int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode) else if (dhdp->bus) dhd_bus_set_dotxinrx(dhdp->bus, FALSE); } +#endif /* BCMSDIO */ if (mode == TCPACK_SUP_HOLD) { int i; tcpack_sup_module_t *tcpack_sup_mod = (tcpack_sup_module_t *)dhdp->tcpack_sup_module; - dhdp->tcpack_sup_ratio = TCPACK_SUPP_RATIO; - dhdp->tcpack_sup_delay = TCPACK_DELAY_TIME; + dhdp->tcpack_sup_ratio = CUSTOM_TCPACK_SUPP_RATIO; + dhdp->tcpack_sup_delay = CUSTOM_TCPACK_DELAY_TIME; for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { tcpack_sup_mod->tcpack_info_tbl[i].dhdp = dhdp; @@ -450,7 +426,7 @@ int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode) } exit: - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); return ret; } @@ -459,16 +435,17 @@ dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp) { tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module; int i; + unsigned long flags; if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF) goto exit; - dhd_os_tcpacklock(dhdp); + flags = dhd_os_tcpacklock(dhdp); if (!tcpack_sup_mod) { DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); goto exit; } @@ -488,7 +465,7 @@ dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp) bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM); } - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) { for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { @@ -509,6 +486,7 @@ inline int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt) int ret 
= BCME_OK; void *pdata; uint32 pktlen; + unsigned long flags; if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF) goto exit; @@ -522,13 +500,13 @@ inline int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt) goto exit; } - dhd_os_tcpacklock(dhdp); + flags = dhd_os_tcpacklock(dhdp); tcpack_sup_mod = dhdp->tcpack_sup_module; if (!tcpack_sup_mod) { DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); ret = BCME_ERROR; - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); goto exit; } tbl_cnt = tcpack_sup_mod->tcpack_info_cnt; @@ -554,7 +532,7 @@ inline int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt) break; } } - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); exit: return ret; @@ -591,20 +569,20 @@ static INLINE bool dhd_tcpdata_psh_acked(dhd_pub_t *dhdp, uint8 *ip_hdr, tcpdata_info_t *tcpdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i]; DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR " TCP port %d %d\n", __FUNCTION__, __LINE__, i, - IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->src_ip_addr)), - IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->dst_ip_addr)), - ntoh16_ua(tcpdata_info_tmp->src_tcp_port), - ntoh16_ua(tcpdata_info_tmp->dst_tcp_port))); + IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.src)), + IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.dst)), + ntoh16_ua(tcpdata_info_tmp->tcp_port.src), + ntoh16_ua(tcpdata_info_tmp->tcp_port.dst))); /* If either IP address or TCP port number does not match, skip. 
*/ if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET], - tcpdata_info_tmp->dst_ip_addr, IPV4_ADDR_LEN) == 0 && + tcpdata_info_tmp->ip_addr.dst, IPV4_ADDR_LEN) == 0 && memcmp(&ip_hdr[IPV4_DEST_IP_OFFSET], - tcpdata_info_tmp->src_ip_addr, IPV4_ADDR_LEN) == 0 && + tcpdata_info_tmp->ip_addr.src, IPV4_ADDR_LEN) == 0 && memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET], - tcpdata_info_tmp->dst_tcp_port, TCP_PORT_LEN) == 0 && + tcpdata_info_tmp->tcp_port.dst, TCP_PORT_LEN) == 0 && memcmp(&tcp_hdr[TCP_DEST_PORT_OFFSET], - tcpdata_info_tmp->src_tcp_port, TCP_PORT_LEN) == 0) { + tcpdata_info_tmp->tcp_port.src, TCP_PORT_LEN) == 0) { tcpdata_info = tcpdata_info_tmp; break; } @@ -659,6 +637,8 @@ dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt) int i; bool ret = FALSE; bool set_dotxinrx = TRUE; + unsigned long flags; + if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF) goto exit; @@ -730,7 +710,7 @@ dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt) ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET]))); /* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */ - dhd_os_tcpacklock(dhdp); + flags = dhd_os_tcpacklock(dhdp); #if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) counter_printlog(&tack_tbl); tack_tbl.cnt[0]++; @@ -742,7 +722,7 @@ dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt) if (!tcpack_sup_mod) { DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); ret = BCME_ERROR; - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); goto exit; } @@ -785,7 +765,10 @@ dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt) ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]), ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET]))); - /* If either of IP address or TCP port number does not match, skip. */ + /* If either of IP address or TCP port number does not match, skip. + * Note that src/dst addr fields in ip header are contiguous being 8 bytes in total. + * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total. 
+ */ if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET], &old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) || memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET], @@ -828,7 +811,7 @@ dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt) __FUNCTION__, __LINE__, old_tcpack_num, oldpkt, new_tcp_ack_num, pkt)); } - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); goto exit; } @@ -851,7 +834,7 @@ dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt) DHD_TRACE(("%s %d: No empty tcp ack info tbl\n", __FUNCTION__, __LINE__)); } - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); exit: /* Unless TCPACK_SUP_DELAYTX, dotxinrx is alwasy TRUE, so no need to set here */ @@ -881,6 +864,7 @@ dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt) int i; bool ret = FALSE; + unsigned long flags; if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX) goto exit; @@ -942,13 +926,13 @@ dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt) ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]), tcp_hdr[TCP_FLAGS_OFFSET])); - dhd_os_tcpacklock(dhdp); + flags = dhd_os_tcpacklock(dhdp); tcpack_sup_mod = dhdp->tcpack_sup_module; if (!tcpack_sup_mod) { DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); ret = BCME_ERROR; - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); goto exit; } @@ -959,16 +943,19 @@ dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt) uint32 now_in_ms = OSL_SYSUPTIME(); DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR " TCP port %d %d\n", __FUNCTION__, __LINE__, i, - IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->src_ip_addr)), - IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->dst_ip_addr)), - ntoh16_ua(tdata_info_tmp->src_tcp_port), - ntoh16_ua(tdata_info_tmp->dst_tcp_port))); - - /* If both IP address and TCP port number match, we found it so break. 
*/ + IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.src)), + IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.dst)), + ntoh16_ua(tdata_info_tmp->tcp_port.src), + ntoh16_ua(tdata_info_tmp->tcp_port.dst))); + + /* If both IP address and TCP port number match, we found it so break. + * Note that src/dst addr fields in ip header are contiguous being 8 bytes in total. + * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total. + */ if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET], - tdata_info_tmp->src_ip_addr, IPV4_ADDR_LEN * 2) == 0 && + (void *)&tdata_info_tmp->ip_addr, IPV4_ADDR_LEN * 2) == 0 && memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET], - tdata_info_tmp->src_tcp_port, TCP_PORT_LEN * 2) == 0) { + (void *)&tdata_info_tmp->tcp_port, TCP_PORT_LEN * 2) == 0) { tcpdata_info = tdata_info_tmp; tcpdata_info->last_used_time = now_in_ms; break; @@ -999,7 +986,7 @@ dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt) bcopy(last_tdata_info, tdata_info_tmp, sizeof(tcpdata_info_t)); } bzero(last_tdata_info, sizeof(tcpdata_info_t)); - DHD_ERROR(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n", + DHD_INFO(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n", __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt)); /* Don't increase "i" here, so that the prev last tcpdata_info is checked */ } else @@ -1020,7 +1007,7 @@ dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt) IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]))); - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); goto exit; } tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i]; @@ -1028,17 +1015,19 @@ dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt) /* No TCP flow with the same IP addr and TCP port is found * in tcp_data_info_tbl. So add this flow to the table. 
*/ - DHD_ERROR(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + DHD_INFO(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR " TCP port %d %d\n", __FUNCTION__, __LINE__, tcpack_sup_mod->tcpdata_info_cnt, IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]))); - - bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], tcpdata_info->src_ip_addr, + /* Note that src/dst addr fields in ip header are contiguous being 8 bytes in total. + * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total. + */ + bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], (void *)&tcpdata_info->ip_addr, IPV4_ADDR_LEN * 2); - bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], tcpdata_info->src_tcp_port, + bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], (void *)&tcpdata_info->tcp_port, TCP_PORT_LEN * 2); tcpdata_info->last_used_time = OSL_SYSUPTIME(); @@ -1056,7 +1045,7 @@ dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt) if (tdata_psh_info == NULL) { DHD_ERROR(("%s %d: No more free tdata_psh_info!!\n", __FUNCTION__, __LINE__)); ret = BCME_ERROR; - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); goto exit; } tdata_psh_info->end_seq = end_tcp_seq_num; @@ -1078,7 +1067,7 @@ dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt) } tcpdata_info->tdata_psh_info_tail = tdata_psh_info; - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); exit: return ret; @@ -1100,6 +1089,7 @@ dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx) tcpack_info_t *tcpack_info_tbl; int i, free_slot = TCPACK_INFO_MAXNUM; bool hold = FALSE; + unsigned long flags; if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) { goto exit; @@ -1176,14 +1166,14 @@ dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx) ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET]))); /* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */ - 
dhd_os_tcpacklock(dhdp); + flags = dhd_os_tcpacklock(dhdp); tcpack_sup_mod = dhdp->tcpack_sup_module; tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl; if (!tcpack_sup_mod) { DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); goto exit; } @@ -1206,7 +1196,7 @@ dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx) DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d\n", __FUNCTION__, __LINE__, i)); hold = FALSE; - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); goto exit; } @@ -1250,7 +1240,7 @@ dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx) } else { PKTFREE(dhdp->osh, pkt, TRUE); } - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); if (!hold) { del_timer_sync(&tcpack_info_tbl[i].timer); @@ -1277,7 +1267,7 @@ dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx) DHD_TRACE(("%s %d: No empty tcp ack info tbl\n", __FUNCTION__, __LINE__)); } - dhd_os_tcpackunlock(dhdp); + dhd_os_tcpackunlock(dhdp, flags); exit: return hold; diff --git a/drivers/net/wireless/bcmdhd/dhd_ip.h b/drivers/amlogic/wifi/bcmdhd/dhd_ip.h similarity index 50% rename from drivers/net/wireless/bcmdhd/dhd_ip.h rename to drivers/amlogic/wifi/bcmdhd/dhd_ip.h index c932b5a4e562c..a72976b07ccfc 100644 --- a/drivers/net/wireless/bcmdhd/dhd_ip.h +++ b/drivers/amlogic/wifi/bcmdhd/dhd_ip.h @@ -3,9 +3,30 @@ * * Provides type definitions and function prototypes used to parse ip packet. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd_ip.h 502735 2014-09-16 00:53:02Z $ + * + * <> + * + * $Id: dhd_ip.h 537119 2015-02-25 04:24:14Z $ */ #ifndef _dhd_ip_h_ @@ -26,7 +47,6 @@ typedef enum pkt_frag } pkt_frag_t; extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p); -extern bool pkt_is_dhcp(osl_t *osh, void *p); #ifdef DHDTCPACK_SUPPRESS #define TCPACKSZMIN (ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN) @@ -40,8 +60,15 @@ extern bool pkt_is_dhcp(osl_t *osh, void *p); #define TCPDATA_INFO_TIMEOUT 5000 /* Remove tcpdata_info if inactive for this time (in ms) */ -#define TCPACK_SUPP_RATIO 3 -#define TCPACK_DELAY_TIME 10 /* ms */ +#define DEFAULT_TCPACK_SUPP_RATIO 3 +#ifndef CUSTOM_TCPACK_SUPP_RATIO +#define CUSTOM_TCPACK_SUPP_RATIO DEFAULT_TCPACK_SUPP_RATIO +#endif /* CUSTOM_TCPACK_SUPP_RATIO */ + +#define DEFAULT_TCPACK_DELAY_TIME 10 /* ms */ +#ifndef CUSTOM_TCPACK_DELAY_TIME +#define CUSTOM_TCPACK_DELAY_TIME DEFAULT_TCPACK_DELAY_TIME +#endif /* CUSTOM_TCPACK_DELAY_TIME */ extern int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 on); extern void dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp); diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/amlogic/wifi/bcmdhd/dhd_linux.c similarity index 58% rename from drivers/net/wireless/bcmdhd/dhd_linux.c rename to drivers/amlogic/wifi/bcmdhd/dhd_linux.c index deca81b1df25d..1dff80656143c 100644 --- a/drivers/net/wireless/bcmdhd/dhd_linux.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_linux.c @@ -2,9 +2,30 @@ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface * Basically selected code segments from usb-cdc.c and usb-rndis.c * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * 
following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_linux.c 505753 2014-10-01 01:40:15Z $ + * + * <> + * + * $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $ */ #include @@ -48,9 +69,6 @@ #include #include #include -#ifdef DHD_L2_FILTER -#include -#endif #include #include @@ -63,6 +81,9 @@ #include #include #include +#ifdef WL_ESCAN +#include +#endif #include #ifdef CONFIG_HAS_WAKELOCK #include @@ -70,16 +91,11 @@ #ifdef WL_CFG80211 #include #endif -#ifdef P2PONEINT -#include -#endif #ifdef PNO_SUPPORT #include #endif -#ifdef WLBTAMP -#include -#include -#include +#ifdef RTT_SUPPORT +#include #endif #ifdef CONFIG_COMPAT @@ -90,17 +106,51 @@ #include #endif /* DHD_WMF */ -#ifdef AMPDU_VO_ENABLE -#include -#endif /* AMPDU_VO_ENABLE */ +#ifdef DHD_L2_FILTER +#include +#include +#include +#endif /* DHD_L2_FILTER */ + +#ifdef DHD_PSTA +#include +#endif /* DHD_PSTA */ + + #ifdef DHDTCPACK_SUPPRESS #include #endif /* DHDTCPACK_SUPPRESS */ -#if defined(DHD_TCP_WINSIZE_ADJUST) -#include -#include -#endif /* DHD_TCP_WINSIZE_ADJUST */ +#ifdef DHD_DEBUG_PAGEALLOC +typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len); +void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len); +extern void 
register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle); +#endif /* DHD_DEBUG_PAGEALLOC */ + + +#if defined(DHD_LB) +/* Dynamic CPU selection for load balancing */ +#include +#include +#include +#include +#include + +#if !defined(DHD_LB_PRIMARY_CPUS) +#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */ +#endif + +#if !defined(DHD_LB_SECONDARY_CPUS) +#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */ +#endif + +#define HIST_BIN_SIZE 8 + +#if defined(DHD_LB_RXP) +static void dhd_rx_napi_dispatcher_fn(struct work_struct * work); +#endif /* DHD_LB_RXP */ + +#endif /* DHD_LB */ #ifdef WLMEDIA_HTSF #include @@ -128,23 +178,35 @@ typedef struct histo_ { static histo_t vi_d1, vi_d2, vi_d3, vi_d4; #endif /* WLMEDIA_HTSF */ -#if defined(DHD_TCP_WINSIZE_ADJUST) -#define MIN_TCP_WIN_SIZE 18000 -#define WIN_SIZE_SCALE_FACTOR 2 -#define MAX_TARGET_PORTS 5 +#ifdef STBLINUX +#ifdef quote_str +#undef quote_str +#endif /* quote_str */ +#ifdef to_str +#undef to_str +#endif /* quote_str */ +#define to_str(s) #s +#define quote_str(s) to_str(s) -static uint target_ports[MAX_TARGET_PORTS] = {20, 0, 0, 0, 0}; -static uint dhd_use_tcp_window_size_adjust = FALSE; -static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb); -#endif /* DHD_TCP_WINSIZE_ADJUST */ +static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET); +#endif /* STBLINUX */ #if defined(SOFTAP) extern bool ap_cfg_running; extern bool ap_fw_loaded; #endif +extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction); - +#ifdef FIX_CPU_MIN_CLOCK +#include +#endif /* FIX_CPU_MIN_CLOCK */ +#ifdef SET_RANDOM_MAC_SOFTAP +#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL +#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11 +#endif +static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL; +#endif /* SET_RANDOM_MAC_SOFTAP */ #ifdef ENABLE_ADAPTIVE_SCHED #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */ #ifndef CUSTOM_CPUFREQ_THRESH @@ -166,17 
+228,13 @@ extern bool ap_fw_loaded; #include -#if defined(CUSTOMER_HW20) && defined(WLANAUDIO) -#include -#endif /* CUSTOMER_HW20 && WLANAUDIO */ +/* Maximum STA per radio */ +#define DHD_MAX_STA 32 #ifdef CUSTOMER_HW_AMLOGIC #include #endif -/* Maximum STA per radio */ -#define DHD_MAX_STA 32 - const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 }; const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; @@ -195,7 +253,7 @@ static struct notifier_block dhd_inetaddr_notifier = { static bool dhd_inetaddr_notifier_registered = FALSE; #endif /* ARP_OFFLOAD_SUPPORT */ -#ifdef CONFIG_IPV6 +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) static int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr); static struct notifier_block dhd_inet6addr_notifier = { @@ -205,7 +263,7 @@ static struct notifier_block dhd_inet6addr_notifier = { * created in kernel notifier link list (with 'next' pointing to itself) */ static bool dhd_inet6addr_notifier_registered = FALSE; -#endif +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) #include @@ -215,12 +273,12 @@ DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait); #if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN) extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable); -#endif -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1) +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) static void dhd_hang_process(void *dhd_info, void *event_data, u8 event); -#endif +#endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL and additional rights"); #endif /* LinuxVer */ #include @@ -240,6 +298,8 @@ extern bool dhd_wlfc_skip_fc(void); extern void dhd_wlfc_plat_init(void *dhd); extern void dhd_wlfc_plat_deinit(void *dhd); #endif /* PROP_TXSTATUS */ +extern uint sd_f2_blocksize; +extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size); #if 
LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) const char * @@ -255,9 +315,19 @@ print_tainted() extern wl_iw_extra_params_t g_wl_iw_params; #endif /* defined(WL_WIRELESS_EXT) */ +#ifdef CONFIG_PARTIALSUSPEND_SLP +#include +#define CONFIG_HAS_EARLYSUSPEND +#define DHD_USE_EARLYSUSPEND +#define register_early_suspend register_pre_suspend +#define unregister_early_suspend unregister_pre_suspend +#define early_suspend pre_suspend +#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50 +#else #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) #include #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */ +#endif /* CONFIG_PARTIALSUSPEND_SLP */ extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); @@ -280,28 +350,29 @@ static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; } #endif -#if defined(SOFTAP_TPUT_ENHANCE) -extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time); -extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time); -#endif /* SOFTAP_TPUT_ENHANCE */ -#ifdef SET_RPS_CPUS -int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len); -void custom_rps_map_clear(struct netdev_rx_queue *queue); -#ifdef CONFIG_MACH_UNIVERSAL5433 -#define RPS_CPUS_MASK "10" -#else -#define RPS_CPUS_MASK "6" -#endif /* CONFIG_MACH_UNIVERSAL5433 */ -#endif /* SET_RPS_CPUS */ + +#ifdef DHD_FW_COREDUMP +static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event); +#endif /* DHD_FW_COREDUMP */ +#ifdef DHD_LOG_DUMP +static void dhd_log_dump_init(dhd_pub_t *dhd); +static void dhd_log_dump_deinit(dhd_pub_t *dhd); +static void dhd_log_dump(void *handle, void *event_info, u8 event); +void dhd_schedule_log_dump(dhd_pub_t *dhdp); +static int do_dhd_log_dump(dhd_pub_t *dhdp); +#endif /* DHD_LOG_DUMP */ static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused); static struct notifier_block dhd_reboot_notifier = { - .notifier_call = dhd_reboot_callback, - .priority = 
1, + .notifier_call = dhd_reboot_callback, + .priority = 1, }; +#ifdef BCMPCIE +static int is_reboot = 0; +#endif /* BCMPCIE */ typedef struct dhd_if_event { struct list_head list; @@ -324,6 +395,7 @@ typedef struct dhd_if { bool attached; /* Delayed attachment when unset */ bool txflowcontrol; /* Per interface flow control indicator */ char name[IFNAMSIZ+1]; /* linux interface name */ + char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */ struct net_device_stats stats; #ifdef DHD_WMF dhd_wmf_t wmf; /* per bsscfg wmf setting */ @@ -335,6 +407,16 @@ typedef struct dhd_if { #endif /* ! BCM_GMAC3 */ #endif /* PCIE_FULL_DONGLE */ uint32 ap_isolate; /* ap-isolation settings */ +#ifdef DHD_L2_FILTER + bool parp_enable; + bool parp_discard; + bool parp_allnode; + arp_table_t *phnd_arp_table; +/* for Per BSS modification */ + bool dhcp_unicast; + bool block_ping; + bool grat_arp; +#endif /* DHD_L2_FILTER */ } dhd_if_t; #ifdef WLMEDIA_HTSF @@ -363,7 +445,7 @@ static tstamp_t ts[TSMAX]; static tstamp_t maxdelayts; static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0; -#endif /* WLMEDIA_HTSF */ +#endif /* WLMEDIA_HTSF */ struct ipv6_work_info_t { uint8 if_idx; @@ -371,16 +453,12 @@ struct ipv6_work_info_t { unsigned long event; }; -#if defined(CUSTOMER_HW20) && defined(WLANAUDIO) -#define MAX_WLANAUDIO_BLACKLIST 4 - -struct wlanaudio_blacklist { - bool is_blacklist; - uint32 cnt; - ulong txfail_jiffies; - struct ether_addr blacklist_addr; -}; -#endif /* CUSTOMER_HW20 && WLANAUDIO */ +#ifdef DHD_DEBUG +typedef struct dhd_dump { + uint8 *buf; + int bufsize; +} dhd_dump_t; +#endif /* DHD_DEBUG */ /* When Perimeter locks are deployed, any blocking calls must be preceeded * with a PERIM UNLOCK and followed by a PERIM LOCK. @@ -399,8 +477,12 @@ typedef struct dhd_info { void *adapter; /* adapter information, interrupt, fw path etc. 
*/ char fw_path[PATH_MAX]; /* path to firmware image */ char nv_path[PATH_MAX]; /* path to nvram vars file */ + char clm_path[PATH_MAX]; /* path to clm vars file */ char conf_path[PATH_MAX]; /* path to config vars file */ + /* serialize dhd iovars */ + struct mutex dhd_iovar_mutex; + struct semaphore proto_sem; #ifdef PROP_TXSTATUS spinlock_t wlfc_spinlock; @@ -410,10 +492,17 @@ typedef struct dhd_info { htsf_t htsf; #endif wait_queue_head_t ioctl_resp_wait; + wait_queue_head_t d3ack_wait; + wait_queue_head_t dhd_bus_busy_state_wait; uint32 default_wd_interval; struct timer_list timer; bool wd_timer_valid; +#ifdef DHD_PCIE_RUNTIMEPM + struct timer_list rpm_timer; + bool rpm_timer_valid; + tsk_ctl_t thr_rpm_ctl; +#endif /* DHD_PCIE_RUNTIMEPM */ struct tasklet_struct tasklet; spinlock_t sdlock; spinlock_t txqlock; @@ -433,12 +522,16 @@ typedef struct dhd_info { struct wake_lock wl_rxwake; /* Wifi rx wakelock */ struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */ struct wake_lock wl_wdwake; /* Wifi wd wakelock */ + struct wake_lock wl_evtwake; /* Wifi event wakelock */ #ifdef BCMPCIE_OOB_HOST_WAKE struct wake_lock wl_intrwake; /* Host wakeup wakelock */ #endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + struct wake_lock wl_scanwake; /* Wifi scan wakelock */ +#endif /* DHD_USE_SCAN_WAKELOCK */ #endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) /* net_device interface lock, prevent race conditions among net_dev interface * calls and wifi_on or wifi_off */ @@ -446,6 +539,8 @@ typedef struct dhd_info { struct mutex dhd_suspend_mutex; #endif spinlock_t wakelock_spinlock; + spinlock_t wakelock_evt_spinlock; + uint32 wakelock_event_counter; uint32 wakelock_counter; int wakelock_wd_counter; int wakelock_rx_timeout_enable; @@ -473,11 +568,19 @@ typedef struct dhd_info { void *rpc_osh; struct timer_list rpcth_timer; 
bool rpcth_timer_active; - bool fdaggr; + uint8 fdaggr; #endif #ifdef DHDTCPACK_SUPPRESS spinlock_t tcpack_lock; #endif /* DHDTCPACK_SUPPRESS */ +#ifdef FIX_CPU_MIN_CLOCK + bool cpufreq_fix_status; + struct mutex cpufreq_fix; + struct pm_qos_request dhd_cpu_qos; +#ifdef FIX_BUS_MIN_CLOCK + struct pm_qos_request dhd_bus_qos; +#endif /* FIX_BUS_MIN_CLOCK */ +#endif /* FIX_CPU_MIN_CLOCK */ void *dhd_deferred_wq; #ifdef DEBUG_CPU_FREQ struct notifier_block freq_trans; @@ -485,10 +588,109 @@ typedef struct dhd_info { #endif unsigned int unit; struct notifier_block pm_notifier; -#if defined(CUSTOMER_HW20) && defined(WLANAUDIO) - struct wlanaudio_blacklist wlanaudio_blist[MAX_WLANAUDIO_BLACKLIST]; - bool is_wlanaudio_blist; -#endif /* CUSTOMER_HW20 && WLANAUDIO */ +#ifdef DHD_PSTA + uint32 psta_mode; /* PSTA or PSR */ +#endif /* DHD_PSTA */ +#ifdef DHD_DEBUG + dhd_dump_t *dump; + struct timer_list join_timer; + u32 join_timeout_val; + bool join_timer_active; + uint scan_time_count; + struct timer_list scan_timer; + bool scan_timer_active; +#endif +#if defined(DHD_LB) + /* CPU Load Balance dynamic CPU selection */ + + /* Variable that tracks the currect CPUs available for candidacy */ + cpumask_var_t cpumask_curr_avail; + + /* Primary and secondary CPU mask */ + cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */ + cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */ + + struct notifier_block cpu_notifier; + + /* Tasklet to handle Tx Completion packet freeing */ + struct tasklet_struct tx_compl_tasklet; + atomic_t tx_compl_cpu; + + + /* Tasklet to handle RxBuf Post during Rx completion */ + struct tasklet_struct rx_compl_tasklet; + atomic_t rx_compl_cpu; + + /* Napi struct for handling rx packet sendup. Packets are removed from + * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then + * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled + * to run to rx_napi_cpu. 
+ */ + struct sk_buff_head rx_pend_queue ____cacheline_aligned; + struct sk_buff_head rx_napi_queue ____cacheline_aligned; + struct napi_struct rx_napi_struct ____cacheline_aligned; + atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */ + struct net_device *rx_napi_netdev; /* netdev of primary interface */ + + struct work_struct rx_napi_dispatcher_work; + struct work_struct tx_compl_dispatcher_work; + struct work_struct rx_compl_dispatcher_work; + /* Number of times DPC Tasklet ran */ + uint32 dhd_dpc_cnt; + + /* Number of times NAPI processing got scheduled */ + uint32 napi_sched_cnt; + + /* Number of times NAPI processing ran on each available core */ + uint32 napi_percpu_run_cnt[NR_CPUS]; + + /* Number of times RX Completions got scheduled */ + uint32 rxc_sched_cnt; + /* Number of times RX Completion ran on each available core */ + uint32 rxc_percpu_run_cnt[NR_CPUS]; + + /* Number of times TX Completions got scheduled */ + uint32 txc_sched_cnt; + /* Number of times TX Completions ran on each available core */ + uint32 txc_percpu_run_cnt[NR_CPUS]; + + /* CPU status */ + /* Number of times each CPU came online */ + uint32 cpu_online_cnt[NR_CPUS]; + + /* Number of times each CPU went offline */ + uint32 cpu_offline_cnt[NR_CPUS]; + + /* + * Consumer Histogram - NAPI RX Packet processing + * ----------------------------------------------- + * On Each CPU, when the NAPI RX Packet processing call back was invoked + * how many packets were processed is captured in this data structure. + * Now its difficult to capture the "exact" number of packets processed. + * So considering the packet counter to be a 32 bit one, we have a + * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets + * processed is rounded off to the next power of 2 and put in the + * approriate "bin" the value in the bin gets incremented. 
+ * For example, assume that in CPU 1 if NAPI Rx runs 3 times + * and the packet count processed is as follows (assume the bin counters are 0) + * iteration 1 - 10 (the bin counter 2^4 increments to 1) + * iteration 2 - 30 (the bin counter 2^5 increments to 1) + * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2) + */ + uint32 napi_rx_hist[NR_CPUS][HIST_BIN_SIZE]; + uint32 txc_hist[NR_CPUS][HIST_BIN_SIZE]; + uint32 rxc_hist[NR_CPUS][HIST_BIN_SIZE]; +#endif /* DHD_LB */ + +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + + struct kobject dhd_kobj; +#ifdef SUPPORT_SENSORHUB + uint32 shub_enable; +#endif /* SUPPORT_SENSORHUB */ + + struct delayed_work dhd_memdump_work; } dhd_info_t; #define DHDIF_FWDER(dhdif) FALSE @@ -496,11 +698,15 @@ typedef struct dhd_info { /* Flag to indicate if we should download firmware on driver load */ uint dhd_download_fw_on_driverload = TRUE; +/* Flag to indicate if driver is initialized */ +uint dhd_driver_init_done = FALSE; + /* Definitions to provide path to the firmware and nvram * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt" */ char firmware_path[MOD_PARAM_PATHLEN]; char nvram_path[MOD_PARAM_PATHLEN]; +char clm_path[MOD_PARAM_PATHLEN]; char config_path[MOD_PARAM_PATHLEN]; /* backup buffer for firmware and nvram path */ @@ -513,6 +719,12 @@ module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444); int op_mode = 0; int disable_proptx = 0; module_param(op_mode, int, 0644); + +#if defined(DHD_LB_RXP) +static int dhd_napi_weight = 32; +module_param(dhd_napi_weight, int, 0644); +#endif /* DHD_LB_RXP */ + extern int wl_control_wl_start(struct net_device *dev); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC) struct semaphore dhd_registration_sem; @@ -523,10 +735,9 @@ static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event); static void 
dhd_ifdel_event_handler(void *handle, void *event_info, u8 event); static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event); static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event); -#ifdef CONFIG_IPV6 +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event); -#endif - +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ #ifdef WL_CFG80211 extern void dhd_netdev_free(struct net_device *ndev); #endif /* WL_CFG80211 */ @@ -549,7 +760,11 @@ module_param(dhd_arp_enable, uint, 0); /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */ +#ifdef ENABLE_ARP_SNOOP_MODE +uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP; +#else uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY; +#endif /* ENABLE_ARP_SNOOP_MODE */ module_param(dhd_arp_mode, uint, 0); #endif /* ARP_OFFLOAD_SUPPORT */ @@ -559,6 +774,7 @@ module_param(disable_proptx, int, 0644); /* load firmware and/or nvram values from the filesystem */ module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660); module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660); +module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660); module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0); /* Watchdog interval */ @@ -569,6 +785,9 @@ module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0); uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS; module_param(dhd_watchdog_ms, uint, 0); +#ifdef DHD_PCIE_RUNTIMEPM +uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS; +#endif /* DHD_PCIE_RUNTIMEPMT */ #if defined(DHD_DEBUG) /* Console poll interval */ uint dhd_console_ms = 0; @@ -590,7 +809,11 @@ uint dhd_pkt_filter_init = 0; module_param(dhd_pkt_filter_init, uint, 0); /* Pkt filter mode control */ +#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER +uint dhd_master_mode = FALSE; +#else uint dhd_master_mode = FALSE; +#endif /* 
GAN_LITE_NAT_KEEPALIVE_FILTER */ module_param(dhd_master_mode, uint, 0); int dhd_watchdog_prio = 0; @@ -598,11 +821,11 @@ module_param(dhd_watchdog_prio, int, 0); /* DPC thread priority */ int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING; -module_param(dhd_dpc_prio, int, 0); +module_param(dhd_dpc_prio, int, 0644); /* RX frame thread priority */ int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING; -module_param(dhd_rxf_prio, int, 0); +module_param(dhd_rxf_prio, int, 0644); int passive_channel_skip = 0; module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR)); @@ -617,187 +840,602 @@ static int dhd_found = 0; static int instance_base = 0; /* Starting instance number */ module_param(instance_base, int, 0644); -#if defined(CUSTOMER_HW20) && defined(WLANAUDIO) -dhd_info_t *dhd_global = NULL; -#endif /* CUSTOMER_HW20 && WLANAUDIO */ +/* Functions to manage sysfs interface for dhd */ +static int dhd_sysfs_init(dhd_info_t *dhd); +static void dhd_sysfs_exit(dhd_info_t *dhd); +#if defined(DHD_LB) +static void +dhd_lb_set_default_cpus(dhd_info_t *dhd) +{ + /* Default CPU allocation for the jobs */ + atomic_set(&dhd->rx_napi_cpu, 1); + atomic_set(&dhd->rx_compl_cpu, 2); + atomic_set(&dhd->tx_compl_cpu, 2); +} -/* DHD Perimiter lock only used in router with bypass forwarding. 
*/ -#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0) -#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0) -#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0) -#define DHD_PERIM_LOCK_ALL() do { /* noop */ } while (0) -#define DHD_PERIM_UNLOCK_ALL() do { /* noop */ } while (0) +static void +dhd_cpumasks_deinit(dhd_info_t *dhd) +{ + free_cpumask_var(dhd->cpumask_curr_avail); + free_cpumask_var(dhd->cpumask_primary); + free_cpumask_var(dhd->cpumask_primary_new); + free_cpumask_var(dhd->cpumask_secondary); + free_cpumask_var(dhd->cpumask_secondary_new); +} -#ifdef PCIE_FULL_DONGLE -#if defined(BCM_GMAC3) -#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0) -#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); }) -#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); }) -#else /* ! BCM_GMAC3 */ -#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock) -#define DHD_IF_STA_LIST_LOCK(ifp, flags) \ - spin_lock_irqsave(&(ifp)->sta_list_lock, (flags)) -#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \ - spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags)) -#endif /* ! 
BCM_GMAC3 */ -#endif /* PCIE_FULL_DONGLE */ +static int +dhd_cpumasks_init(dhd_info_t *dhd) +{ + int id; + uint32 cpus; + int ret = 0; -/* Control fw roaming */ -#ifdef BCMCCX -uint dhd_roam_disable = 0; -#else -uint dhd_roam_disable = 0; -#endif /* BCMCCX */ + if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) { + DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__)); + ret = -ENOMEM; + goto fail; + } -/* Control radio state */ -uint dhd_radio_up = 1; + cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask); + cpumask_clear(dhd->cpumask_primary); + cpumask_clear(dhd->cpumask_secondary); -/* Network inteface name */ -char iface_name[IFNAMSIZ] = {'\0'}; -module_param_string(iface_name, iface_name, IFNAMSIZ, 0); + cpus = DHD_LB_PRIMARY_CPUS; + for (id = 0; id < NR_CPUS; id++) { + if (isset(&cpus, id)) + cpumask_set_cpu(id, dhd->cpumask_primary); + } -/* The following are specific to the SDIO dongle */ + cpus = DHD_LB_SECONDARY_CPUS; + for (id = 0; id < NR_CPUS; id++) { + if (isset(&cpus, id)) + cpumask_set_cpu(id, dhd->cpumask_secondary); + } -/* IOCTL response timeout */ -int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT; + return ret; +fail: + dhd_cpumasks_deinit(dhd); + return ret; +} -/* Idle timeout for backplane clock */ -int dhd_idletime = DHD_IDLETIME_TICKS; -module_param(dhd_idletime, int, 0); +/* + * The CPU Candidacy Algorithm + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * The available CPUs for selection are divided into two groups + * Primary Set - A CPU mask that carries the First Choice CPUs + * Secondary Set - A CPU mask that carries the Second Choice CPUs. + * + * There are two types of Job, that needs to be assigned to + * the CPUs, from one of the above mentioned CPU group. 
The Jobs are + * 1) Rx Packet Processing - napi_cpu + * 2) Completion Processiong (Tx, RX) - compl_cpu + * + * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes + * on-line/off-line the CPU candidacy algorithm is triggerd. The candidacy + * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu. + * If there are more processors free, it assigns one to compl_cpu. + * It also tries to ensure that both napi_cpu and compl_cpu are not on the same + * CPU, as much as possible. + * + * By design, both Tx and Rx completion jobs are run on the same CPU core, as it + * would allow Tx completion skb's to be released into a local free pool from + * which the rx buffer posts could have been serviced. it is important to note + * that a Tx packet may not have a large enough buffer for rx posting. + */ +void dhd_select_cpu_candidacy(dhd_info_t *dhd) +{ + uint32 primary_available_cpus; /* count of primary available cpus */ + uint32 secondary_available_cpus; /* count of secondary available cpus */ + uint32 napi_cpu = 0; /* cpu selected for napi rx processing */ + uint32 compl_cpu = 0; /* cpu selected for completion jobs */ -/* Use polling */ -uint dhd_poll = FALSE; -module_param(dhd_poll, uint, 0); + cpumask_clear(dhd->cpumask_primary_new); + cpumask_clear(dhd->cpumask_secondary_new); -/* Use interrupts */ -uint dhd_intr = TRUE; -module_param(dhd_intr, uint, 0); + /* + * Now select from the primary mask. Even if a Job is + * already running on a CPU in secondary group, we still move + * to primary CPU. So no conditional checks. 
+ */ + cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary, + dhd->cpumask_curr_avail); -/* SDIO Drive Strength (in milliamps) */ -uint dhd_sdiod_drive_strength = 6; -module_param(dhd_sdiod_drive_strength, uint, 0); + cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary, + dhd->cpumask_curr_avail); -#ifdef BCMSDIO -/* Tx/Rx bounds */ -extern uint dhd_txbound; -extern uint dhd_rxbound; -module_param(dhd_txbound, uint, 0); -module_param(dhd_rxbound, uint, 0); + primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new); -/* Deferred transmits */ -extern uint dhd_deferred_tx; -module_param(dhd_deferred_tx, uint, 0); + if (primary_available_cpus > 0) { + napi_cpu = cpumask_first(dhd->cpumask_primary_new); -#ifdef BCMDBGFS -extern void dhd_dbg_init(dhd_pub_t *dhdp); -extern void dhd_dbg_remove(void); -#endif /* BCMDBGFS */ + /* If no further CPU is available, + * cpumask_next returns >= nr_cpu_ids + */ + compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new); + if (compl_cpu >= nr_cpu_ids) + compl_cpu = 0; + } -#endif /* BCMSDIO */ + DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n", + __FUNCTION__, napi_cpu, compl_cpu)); + /* -- Now check for the CPUs from the secondary mask -- */ + secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new); -#ifdef SDTEST -/* Echo packet generator (pkts/s) */ -uint dhd_pktgen = 0; -module_param(dhd_pktgen, uint, 0); + DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n", + __FUNCTION__, secondary_available_cpus, nr_cpu_ids)); -/* Echo packet len (0 => sawtooth, max 2040) */ -uint dhd_pktgen_len = 0; -module_param(dhd_pktgen_len, uint, 0); -#endif /* SDTEST */ + if (secondary_available_cpus > 0) { + /* At this point if napi_cpu is unassigned it means no CPU + * is online from Primary Group + */ + if (napi_cpu == 0) { + napi_cpu = cpumask_first(dhd->cpumask_secondary_new); + compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new); + } else if (compl_cpu == 0) { + 
compl_cpu = cpumask_first(dhd->cpumask_secondary_new); + } -#if defined(BCMSUP_4WAY_HANDSHAKE) -/* Use in dongle supplicant for 4-way handshake */ -uint dhd_use_idsup = 0; -module_param(dhd_use_idsup, uint, 0); -#endif /* BCMSUP_4WAY_HANDSHAKE */ + /* If no CPU was available for completion, choose CPU 0 */ + if (compl_cpu >= nr_cpu_ids) + compl_cpu = 0; + } + if ((primary_available_cpus == 0) && + (secondary_available_cpus == 0)) { + /* No CPUs available from primary or secondary mask */ + napi_cpu = 0; + compl_cpu = 0; + } -extern char dhd_version[]; + DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n", + __FUNCTION__, napi_cpu, compl_cpu)); + ASSERT(napi_cpu < nr_cpu_ids); + ASSERT(compl_cpu < nr_cpu_ids); -int dhd_net_bus_devreset(struct net_device *dev, uint8 flag); -static void dhd_net_if_lock_local(dhd_info_t *dhd); -static void dhd_net_if_unlock_local(dhd_info_t *dhd); -static void dhd_suspend_lock(dhd_pub_t *dhdp); -static void dhd_suspend_unlock(dhd_pub_t *dhdp); + atomic_set(&dhd->rx_napi_cpu, napi_cpu); + atomic_set(&dhd->tx_compl_cpu, compl_cpu); + atomic_set(&dhd->rx_compl_cpu, compl_cpu); + return; +} -#ifdef WLMEDIA_HTSF -void htsf_update(dhd_info_t *dhd, void *data); -tsf_t prev_tsf, cur_tsf; +/* + * Function to handle CPU Hotplug notifications. + * One of the task it does is to trigger the CPU Candidacy algorithm + * for load balancing. 
+ */ +int +dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned int)(long)hcpu; -uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx); -static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx); -static void dhd_dump_latency(void); -static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf); -static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf); -static void dhd_dump_htsfhisto(histo_t *his, char *s); -#endif /* WLMEDIA_HTSF */ + dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier); -/* Monitor interface */ -int dhd_monitor_init(void *dhd_pub); -int dhd_monitor_uninit(void); + switch (action) + { + case CPU_ONLINE: + DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]); + cpumask_set_cpu(cpu, dhd->cpumask_curr_avail); + dhd_select_cpu_candidacy(dhd); + break; + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]); + cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail); + dhd_select_cpu_candidacy(dhd); + break; + default: + break; + } -#if defined(WL_WIRELESS_EXT) -struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); -#endif /* defined(WL_WIRELESS_EXT) */ + return NOTIFY_OK; +} -static void dhd_dpc(ulong data); -/* forward decl */ -extern int dhd_wait_pend8021x(struct net_device *dev); -void dhd_os_wd_timer_extend(void *bus, bool extend); +#if defined(DHD_LB_STATS) +void dhd_lb_stats_init(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + int i, j; -#ifdef TOE -#ifndef BDC -#error TOE requires BDC -#endif /* !BDC */ -static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol); -static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol); -#endif /* TOE */ + if (dhdp == NULL) { + DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n", + __FUNCTION__)); + return; + } -static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, - wl_event_msg_t *event_ptr, void **data_ptr); -#ifdef DHD_UNICAST_DHCP -static const uint8 
llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; -static int dhd_get_pkt_ip_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr, - int *len_ptr, uint8 *prot_ptr); -static int dhd_get_pkt_ether_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr, - int *len_ptr, uint16 *et_ptr, bool *snap_ptr); - -static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx); -#endif /* DHD_UNICAST_DHCP */ -#ifdef DHD_L2_FILTER -static int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx); -#endif -#if defined(CONFIG_PM_SLEEP) -static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored) -{ - int ret = NOTIFY_DONE; - bool suspend = FALSE; - dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier); + dhd = dhdp->info; + if (dhd == NULL) { + DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); + return; + } - BCM_REFERENCE(dhdinfo); - switch (action) { - case PM_HIBERNATION_PREPARE: - case PM_SUSPEND_PREPARE: - suspend = TRUE; - break; - case PM_POST_HIBERNATION: - case PM_POST_SUSPEND: - suspend = FALSE; - break; + DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt); + DHD_LB_STATS_CLR(dhd->napi_sched_cnt); + DHD_LB_STATS_CLR(dhd->rxc_sched_cnt); + DHD_LB_STATS_CLR(dhd->txc_sched_cnt); + + for (i = 0; i < NR_CPUS; i++) { + DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]); + DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]); + DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]); + + DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]); + DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]); } -#if defined(SUPPORT_P2P_GO_PS) -#ifdef PROP_TXSTATUS + for (i = 0; i < NR_CPUS; i++) { + for (j = 0; j < HIST_BIN_SIZE; j++) { + DHD_LB_STATS_CLR(dhd->napi_rx_hist[i][j]); + DHD_LB_STATS_CLR(dhd->txc_hist[i][j]); + DHD_LB_STATS_CLR(dhd->rxc_hist[i][j]); + } + } + + return; +} + +static void dhd_lb_stats_dump_histo( + struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE]) +{ + int i, j; + uint32 per_cpu_total[NR_CPUS] = {0}; + 
uint32 total = 0; + + bcm_bprintf(strbuf, "CPU: \t\t"); + for (i = 0; i < num_possible_cpus(); i++) + bcm_bprintf(strbuf, "%d\t", i); + bcm_bprintf(strbuf, "\nBin\n"); + + for (i = 0; i < HIST_BIN_SIZE; i++) { + bcm_bprintf(strbuf, "%d:\t\t", 1<<(i+1)); + for (j = 0; j < num_possible_cpus(); j++) { + bcm_bprintf(strbuf, "%d\t", hist[j][i]); + } + bcm_bprintf(strbuf, "\n"); + } + bcm_bprintf(strbuf, "Per CPU Total \t"); + total = 0; + for (i = 0; i < num_possible_cpus(); i++) { + for (j = 0; j < HIST_BIN_SIZE; j++) { + per_cpu_total[i] += (hist[i][j] * (1<<(j+1))); + } + bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]); + total += per_cpu_total[i]; + } + bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total); + + return; +} + +static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p) +{ + int i; + + bcm_bprintf(strbuf, "CPU: \t"); + for (i = 0; i < num_possible_cpus(); i++) + bcm_bprintf(strbuf, "%d\t", i); + bcm_bprintf(strbuf, "\n"); + + bcm_bprintf(strbuf, "Val: \t"); + for (i = 0; i < num_possible_cpus(); i++) + bcm_bprintf(strbuf, "%u\t", *(p+i)); + bcm_bprintf(strbuf, "\n"); + return; +} + +void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + dhd_info_t *dhd; + + if (dhdp == NULL || strbuf == NULL) { + DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n", + __FUNCTION__, dhdp, strbuf)); + return; + } + + dhd = dhdp->info; + if (dhd == NULL) { + DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); + return; + } + + bcm_bprintf(strbuf, "\ncpu_online_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt); + + bcm_bprintf(strbuf, "cpu_offline_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt); + + bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n", + dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt, + dhd->txc_sched_cnt); +#ifdef DHD_LB_RXP + bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt); + 
bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n"); + dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist); +#endif /* DHD_LB_RXP */ + +#ifdef DHD_LB_RXC + bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt); + bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n"); + dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist); +#endif /* DHD_LB_RXC */ + + +#ifdef DHD_LB_TXC + bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt); + bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n"); + dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist); +#endif /* DHD_LB_TXC */ +} + +static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count) +{ + uint32 bin_power; + uint32 *p = NULL; + + bin_power = next_larger_power2(count); + + switch (bin_power) { + case 0: break; + case 1: /* Fall through intentionally */ + case 2: p = bin + 0; break; + case 4: p = bin + 1; break; + case 8: p = bin + 2; break; + case 16: p = bin + 3; break; + case 32: p = bin + 4; break; + case 64: p = bin + 5; break; + case 128: p = bin + 6; break; + default : p = bin + 7; break; + } + if (p) + *p = *p + 1; + return; +} + +extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(&dhd->napi_rx_hist[cpu][0], count); + + return; +} + +extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(&dhd->txc_hist[cpu][0], count); + + return; +} + +extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(&dhd->rxc_hist[cpu][0], count); + + return; +} + +extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp) +{ + 
dhd_info_t *dhd = dhdp->info; + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt); +} + +extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt); +} + +#endif /* DHD_LB_STATS */ +#endif /* DHD_LB */ + + +#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF) +int g_frameburst = 1; +#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */ + +static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd); + +/* DHD Perimiter lock only used in router with bypass forwarding. */ +#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0) +#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0) +#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0) + +#ifdef PCIE_FULL_DONGLE +#if defined(BCM_GMAC3) +#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0) +#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); }) +#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); }) + +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; }) +#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); }) +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ + +#else /* ! 
BCM_GMAC3 */ +#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock) +#define DHD_IF_STA_LIST_LOCK(ifp, flags) \ + spin_lock_irqsave(&(ifp)->sta_list_lock, (flags)) +#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \ + spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags)) + +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, + struct list_head *snapshot_list); +static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list); +#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); }) +#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); }) +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ + +#endif /* ! BCM_GMAC3 */ +#endif /* PCIE_FULL_DONGLE */ + +/* Control fw roaming */ +uint dhd_roam_disable = 0; + +#ifdef BCMDBGFS +extern int dhd_dbg_init(dhd_pub_t *dhdp); +extern void dhd_dbg_remove(void); +#endif + +/* Control radio state */ +uint dhd_radio_up = 1; + +/* Network inteface name */ +char iface_name[IFNAMSIZ] = {'\0'}; +module_param_string(iface_name, iface_name, IFNAMSIZ, 0); + +/* The following are specific to the SDIO dongle */ + +/* IOCTL response timeout */ +int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT; + +/* Idle timeout for backplane clock */ +int dhd_idletime = DHD_IDLETIME_TICKS; +module_param(dhd_idletime, int, 0); + +/* Use polling */ +uint dhd_poll = FALSE; +module_param(dhd_poll, uint, 0); + +/* Use interrupts */ +uint dhd_intr = TRUE; +module_param(dhd_intr, uint, 0); + +/* SDIO Drive Strength (in milliamps) */ +uint dhd_sdiod_drive_strength = 6; +module_param(dhd_sdiod_drive_strength, uint, 0); + +#ifdef BCMSDIO +/* Tx/Rx bounds */ +extern uint dhd_txbound; +extern uint dhd_rxbound; +module_param(dhd_txbound, uint, 0); +module_param(dhd_rxbound, uint, 0); + +/* Deferred transmits */ +extern uint dhd_deferred_tx; +module_param(dhd_deferred_tx, 
uint, 0); + +#endif /* BCMSDIO */ + + +#ifdef SDTEST +/* Echo packet generator (pkts/s) */ +uint dhd_pktgen = 0; +module_param(dhd_pktgen, uint, 0); + +/* Echo packet len (0 => sawtooth, max 2040) */ +uint dhd_pktgen_len = 0; +module_param(dhd_pktgen_len, uint, 0); +#endif /* SDTEST */ + + + +/* Allow delayed firmware download for debug purpose */ +int allow_delay_fwdl = FALSE; +module_param(allow_delay_fwdl, int, 0); + +extern char dhd_version[]; +extern char fw_version[]; +extern char clm_version[]; + +int dhd_net_bus_devreset(struct net_device *dev, uint8 flag); +static void dhd_net_if_lock_local(dhd_info_t *dhd); +static void dhd_net_if_unlock_local(dhd_info_t *dhd); +static void dhd_suspend_lock(dhd_pub_t *dhdp); +static void dhd_suspend_unlock(dhd_pub_t *dhdp); + +#ifdef WLMEDIA_HTSF +void htsf_update(dhd_info_t *dhd, void *data); +tsf_t prev_tsf, cur_tsf; + +uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx); +static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx); +static void dhd_dump_latency(void); +static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf); +static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf); +static void dhd_dump_htsfhisto(histo_t *his, char *s); +#endif /* WLMEDIA_HTSF */ + +/* Monitor interface */ +int dhd_monitor_init(void *dhd_pub); +int dhd_monitor_uninit(void); + + +#if defined(WL_WIRELESS_EXT) +struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); +#endif /* defined(WL_WIRELESS_EXT) */ + +static void dhd_dpc(ulong data); +/* forward decl */ +extern int dhd_wait_pend8021x(struct net_device *dev); +void dhd_os_wd_timer_extend(void *bus, bool extend); + +#ifdef TOE +#ifndef BDC +#error TOE requires BDC +#endif /* !BDC */ +static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol); +static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol); +#endif /* TOE */ + +static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, + wl_event_msg_t *event_ptr, void **data_ptr); + +#if 
defined(CONFIG_PM_SLEEP) +static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored) +{ + int ret = NOTIFY_DONE; + bool suspend = FALSE; + dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier); + + BCM_REFERENCE(dhdinfo); + + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + suspend = TRUE; + break; + + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + suspend = FALSE; + break; + } + +#if defined(SUPPORT_P2P_GO_PS) +#ifdef PROP_TXSTATUS if (suspend) { DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub); dhd_wlfc_suspend(&dhdinfo->pub); DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub); } else dhd_wlfc_resume(&dhdinfo->pub); -#endif +#endif /* PROP_TXSTATUS */ #endif /* defined(SUPPORT_P2P_GO_PS) */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ @@ -809,10 +1447,6 @@ static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, voi return ret; } -static struct notifier_block dhd_pm_notifier = { - .notifier_call = dhd_pm_callback, - .priority = 10 -}; /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be * created in kernel notifier link list (with 'next' pointing to itself) */ @@ -884,7 +1518,8 @@ dhd_info_t dhd_info_null = { #ifdef DHDTCPACK_SUPPRESS .tcpack_sup_mode = TCPACK_SUP_REPLACE, #endif /* DHDTCPACK_SUPPRESS */ - .up = FALSE, .busstate = DHD_BUS_DOWN + .up = FALSE, + .busstate = DHD_BUS_DOWN } }; #define DHD_INFO_NULL (&dhd_info_null) @@ -928,6 +1563,7 @@ static void dhd_if_flush_sta(dhd_if_t * ifp); /* Construct/Destruct a sta pool. 
*/ static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta); static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta); +/* Clear the pool of dhd_sta_t objects for built-in type driver */ static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta); @@ -951,9 +1587,46 @@ dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta) ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID)); ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); - id16_map_free(dhdp->staid_allocator, sta->idx); - for (prio = 0; prio < (int)NUMPRIO; prio++) + + /* + * Flush and free all packets in all flowring's queues belonging to sta. + * Packets in flow ring will be flushed later. + */ + for (prio = 0; prio < (int)NUMPRIO; prio++) { + uint16 flowid = sta->flowid[prio]; + + if (flowid != FLOWID_INVALID) { + unsigned long flags; + flow_queue_t * queue = dhd_flow_queue(dhdp, flowid); + flow_ring_node_t * flow_ring_node; + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. 
+ */ + dhd_tcpack_info_tbl_clean(dhdp); +#endif /* DHDTCPACK_SUPPRESS */ + + flow_ring_node = dhd_flow_ring_node(dhdp, flowid); + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING; + + if (!DHD_FLOW_QUEUE_EMPTY(queue)) { + void * pkt; + while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) { + PKTFREE(dhdp->osh, pkt, TRUE); + } + } + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + } + sta->flowid[prio] = FLOWID_INVALID; + } + + id16_map_free(dhdp->staid_allocator, sta->idx); + DHD_CUMM_CTR_INIT(&sta->cumm_ctr); sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */ sta->ifidx = DHD_BAD_IF; bzero(sta->ea.octet, ETHER_ADDR_LEN); @@ -982,6 +1655,9 @@ dhd_sta_alloc(dhd_pub_t * dhdp) ASSERT((sta->idx == ID16_INVALID) && (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF)); + + DHD_CUMM_CTR_INIT(&sta->cumm_ctr); + sta->idx = idx; /* implying allocated */ return sta; @@ -1038,7 +1714,7 @@ dhd_if_flush_sta(dhd_if_t * ifp) static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { - int idx, sta_pool_memsz; + int idx, prio, sta_pool_memsz; dhd_sta_t * sta; dhd_sta_pool_t * sta_pool; void * staid_allocator; @@ -1075,6 +1751,9 @@ dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) /* Now place them into the pre-allocated free pool. */ for (idx = 1; idx <= max_sta; idx++) { sta = &sta_pool[idx]; + for (prio = 0; prio < (int)NUMPRIO; prio++) { + sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ + } dhd_sta_free(dhdp, sta); } @@ -1108,7 +1787,7 @@ dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) { - int idx, sta_pool_memsz; + int idx, prio, sta_pool_memsz; dhd_sta_t * sta; dhd_sta_pool_t * sta_pool; void *staid_allocator; @@ -1147,6 +1826,9 @@ dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) /* Now place them into the pre-allocated free pool. 
*/ for (idx = 1; idx <= max_sta; idx++) { sta = &sta_pool[idx]; + for (prio = 0; prio < (int)NUMPRIO; prio++) { + sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ + } dhd_sta_free(dhdp, sta); } } @@ -1214,73 +1896,491 @@ dhd_add_sta(void *pub, int ifidx, void *ea) /* Add sta to WOFA forwarder. */ fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta); } -#endif /* BCM_GMAC3 */ +#endif /* BCM_GMAC3 */ + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return sta; +} + +/** Delete STA from the interface's STA list. */ +void +dhd_del_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta, *next; + dhd_if_t *ifp; + unsigned long flags; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return; + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { +#if defined(BCM_GMAC3) + if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */ + ASSERT(ISALIGNED(ea, 2)); + fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta); + } +#endif /* BCM_GMAC3 */ + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); + } + } + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); +#ifdef DHD_L2_FILTER + if (ifp->parp_enable) { + /* clear Proxy ARP cache of specific Ethernet Address */ + bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE, + ea, FALSE, ((dhd_pub_t*)pub)->tickcnt); + } +#endif /* DHD_L2_FILTER */ + return; +} + +/** Add STA if it doesn't exist. Not reentrant. 
*/ +dhd_sta_t* +dhd_findadd_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + + sta = dhd_find_sta(pub, ifidx, ea); + + if (!sta) { + /* Add entry */ + sta = dhd_add_sta(pub, ifidx, ea); + } + + return sta; +} + +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +#if !defined(BCM_GMAC3) +static struct list_head * +dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list) +{ + unsigned long flags; + dhd_sta_t *sta, *snapshot; + + INIT_LIST_HEAD(snapshot_list); + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_for_each_entry(sta, &ifp->sta_list, list) { + /* allocate one and add to snapshot */ + snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t)); + if (snapshot == NULL) { + DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__)); + continue; + } + + memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN); + + INIT_LIST_HEAD(&snapshot->list); + list_add_tail(&snapshot->list, snapshot_list); + } + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return snapshot_list; +} + +static void +dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list) +{ + dhd_sta_t *sta, *next; + + list_for_each_entry_safe(sta, next, snapshot_list, list) { + list_del(&sta->list); + MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t)); + } +} +#endif /* !BCM_GMAC3 */ +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ + +#else +static inline void dhd_if_flush_sta(dhd_if_t * ifp) { } +static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {} +static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; } +static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {} +static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {} +dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; } +void dhd_del_sta(void *pub, int ifidx, void *ea) {} +#endif /* PCIE_FULL_DONGLE */ + + +#if defined(DHD_LB) + +#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) +/** + * dhd_tasklet_schedule - Function 
that runs in IPI context of the destination + * CPU and schedules a tasklet. + * @tasklet: opaque pointer to the tasklet + */ +static INLINE void +dhd_tasklet_schedule(void *tasklet) +{ + tasklet_schedule((struct tasklet_struct *)tasklet); +} + +/** + * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU + * @tasklet: tasklet to be scheduled + * @on_cpu: cpu core id + * + * If the requested cpu is online, then an IPI is sent to this cpu via the + * smp_call_function_single with no wait and the tasklet_schedule function + * will be invoked to schedule the specified tasklet on the requested CPU. + */ +static void +dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu) +{ + const int wait = 0; + smp_call_function_single(on_cpu, + dhd_tasklet_schedule, (void *)tasklet, wait); +} +#endif /* DHD_LB_TXC || DHD_LB_RXC */ + + +#if defined(DHD_LB_TXC) +/** + * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet + * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and + * freeing the packets placed in the tx_compl workq + */ +void +dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + int curr_cpu, on_cpu; + + if (dhd->rx_napi_netdev == NULL) { + DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); + return; + } + + DHD_LB_STATS_INCR(dhd->txc_sched_cnt); + /* + * If the destination CPU is NOT online or is same as current CPU + * no need to schedule the work + */ + curr_cpu = get_cpu(); + put_cpu(); + + on_cpu = atomic_read(&dhd->tx_compl_cpu); + + if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { + dhd_tasklet_schedule(&dhd->tx_compl_tasklet); + } else { + schedule_work(&dhd->tx_compl_dispatcher_work); + } +} + +static void dhd_tx_compl_dispatcher_fn(struct work_struct * work) +{ + struct dhd_info *dhd = + container_of(work, struct dhd_info, tx_compl_dispatcher_work); + int cpu; + + get_online_cpus(); + cpu = atomic_read(&dhd->tx_compl_cpu); + if (!cpu_online(cpu)) 
+ dhd_tasklet_schedule(&dhd->tx_compl_tasklet); + else + dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu); + put_online_cpus(); +} + +#endif /* DHD_LB_TXC */ + + +#if defined(DHD_LB_RXC) +/** + * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet + * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers + * in the H2D RxBuffer Post common ring, by using the recycled pktids that were + * placed in the rx_compl workq. + * + * @dhdp: pointer to dhd_pub object + */ +void +dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + int curr_cpu, on_cpu; + + if (dhd->rx_napi_netdev == NULL) { + DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); + return; + } + + DHD_LB_STATS_INCR(dhd->rxc_sched_cnt); + /* + * If the destination CPU is NOT online or is same as current CPU + * no need to schedule the work + */ + curr_cpu = get_cpu(); + put_cpu(); + + on_cpu = atomic_read(&dhd->rx_compl_cpu); + + if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { + dhd_tasklet_schedule(&dhd->rx_compl_tasklet); + } else { + schedule_work(&dhd->rx_compl_dispatcher_work); + } +} + +static void dhd_rx_compl_dispatcher_fn(struct work_struct * work) +{ + struct dhd_info *dhd = + container_of(work, struct dhd_info, rx_compl_dispatcher_work); + int cpu; + + get_online_cpus(); + cpu = atomic_read(&dhd->tx_compl_cpu); + if (!cpu_online(cpu)) + dhd_tasklet_schedule(&dhd->rx_compl_tasklet); + else + dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu); + put_online_cpus(); +} + +#endif /* DHD_LB_RXC */ + + +#if defined(DHD_LB_RXP) +/** + * dhd_napi_poll - Load balance napi poll function to process received + * packets and send up the network stack using netif_receive_skb() + * + * @napi: napi object in which context this poll function is invoked + * @budget: number of packets to be processed. + * + * Fetch the dhd_info given the rx_napi_struct. 
Move all packets from the + * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock). + * Dequeue each packet from head of rx_process_queue, fetch the ifid from the + * packet tag and sendup. + */ +static int +dhd_napi_poll(struct napi_struct *napi, int budget) +{ + int ifid; + const int pkt_count = 1; + const int chan = 0; + struct sk_buff * skb; + unsigned long flags; + struct dhd_info *dhd; + int processed = 0; + struct sk_buff_head rx_process_queue; + + dhd = container_of(napi, struct dhd_info, rx_napi_struct); + DHD_INFO(("%s napi_queue<%d> budget<%d>\n", + __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget)); + + __skb_queue_head_init(&rx_process_queue); + + /* extract the entire rx_napi_queue into local rx_process_queue */ + spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags); + skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue); + spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags); + + while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) { + OSL_PREFETCH(skb->data); + + ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb)); + + DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n", + __FUNCTION__, skb, ifid)); + + dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan); + processed++; + } + + DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed); + + DHD_INFO(("%s processed %d\n", __FUNCTION__, processed)); + napi_complete(napi); + + return budget - 1; +} + +/** + * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi + * poll list. This function may be invoked via the smp_call_function_single + * from a remote CPU. 
+ * + * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ) + * after the napi_struct is added to the softnet data's poll_list + * + * @info: pointer to a dhd_info struct + */ +static void +dhd_napi_schedule(void *info) +{ + dhd_info_t *dhd = (dhd_info_t *)info; + + DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n", + __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu))); + + /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */ + if (napi_schedule_prep(&dhd->rx_napi_struct)) { + __napi_schedule(&dhd->rx_napi_struct); + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt); + } + + /* + * If the rx_napi_struct was already running, then we let it complete + * processing all its packets. The rx_napi_struct may only run on one + * core at a time, to avoid out-of-order handling. + */ +} + +/** + * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ + * action after placing the dhd's rx_process napi object in the the remote CPU's + * softnet data's poll_list. + * + * @dhd: dhd_info which has the rx_process napi object + * @on_cpu: desired remote CPU id + */ +static INLINE int +dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu) +{ + int wait = 0; /* asynchronous IPI */ + + DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n", + __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu)); + + if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) { + DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n", + __FUNCTION__, on_cpu)); + } - DHD_IF_STA_LIST_UNLOCK(ifp, flags); + DHD_LB_STATS_INCR(dhd->napi_sched_cnt); - return sta; + return 0; } -/** Delete STA from the interface's STA list. */ +/* + * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on + * Why should we do this? + * The candidacy algorithm is run from the call back function + * registered to CPU hotplug notifier. This call back happens from Worker + * context. The dhd_napi_schedule_on is also from worker context. 
+ * Note that both of this can run on two different CPUs at the same time. + * So we can possibly have a window where a given CPUn is being brought + * down from CPUm while we try to run a function on CPUn. + * To prevent this its better have the whole code to execute an SMP + * function under get_online_cpus. + * This function call ensures that hotplug mechanism does not kick-in + * until we are done dealing with online CPUs + * If the hotplug worker is already running, no worries because the + * candidacy algo would then reflect the same in dhd->rx_napi_cpu. + * + * The below mentioned code structure is proposed in + * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt + * for the question + * Q: I need to ensure that a particular cpu is not removed when there is some + * work specific to this cpu is in progress + * + * According to the documentation calling get_online_cpus is NOT required, if + * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can + * run from Work Queue context we have to call these functions + */ +static void dhd_rx_napi_dispatcher_fn(struct work_struct * work) +{ + struct dhd_info *dhd = + container_of(work, struct dhd_info, rx_napi_dispatcher_work); + int cpu; + + get_online_cpus(); + cpu = atomic_read(&dhd->rx_napi_cpu); + if (!cpu_online(cpu)) + dhd_napi_schedule(dhd); + else + dhd_napi_schedule_on(dhd, cpu); + put_online_cpus(); +} + +/** + * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct + * to run on another CPU. The rx_napi_struct's poll function will retrieve all + * the packets enqueued into the rx_napi_queue and sendup. + * The producer's rx packet queue is appended to the rx_napi_queue before + * dispatching the rx_napi_struct. 
+ */ void -dhd_del_sta(void *pub, int ifidx, void *ea) +dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp) { - dhd_sta_t *sta, *next; - dhd_if_t *ifp; unsigned long flags; + dhd_info_t *dhd = dhdp->info; + int curr_cpu; + int on_cpu; - ASSERT(ea != NULL); - ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); - if (ifp == NULL) + if (dhd->rx_napi_netdev == NULL) { + DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); return; + } - DHD_IF_STA_LIST_LOCK(ifp, flags); + DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__, + skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue))); - list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { - if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { -#if defined(BCM_GMAC3) - if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */ - ASSERT(ISALIGNED(ea, 2)); - fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta); - } -#endif /* BCM_GMAC3 */ - list_del(&sta->list); - dhd_sta_free(&ifp->info->pub, sta); - } - } + /* append the producer's queue of packets to the napi's rx process queue */ + spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags); + skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue); + spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags); - DHD_IF_STA_LIST_UNLOCK(ifp, flags); + /* + * If the destination CPU is NOT online or is same as current CPU + * no need to schedule the work + */ + curr_cpu = get_cpu(); + put_cpu(); - return; + on_cpu = atomic_read(&dhd->rx_napi_cpu); + + if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { + dhd_napi_schedule(dhd); + } else { + schedule_work(&dhd->rx_napi_dispatcher_work); + } } -/** Add STA if it doesn't exist. Not reentrant. 
*/ -dhd_sta_t* -dhd_findadd_sta(void *pub, int ifidx, void *ea) +/** + * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue + */ +void +dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx) { - dhd_sta_t *sta; + dhd_info_t *dhd = dhdp->info; - sta = dhd_find_sta(pub, ifidx, ea); + DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__, + pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue))); + DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx); + __skb_queue_tail(&dhd->rx_pend_queue, pkt); +} +#endif /* DHD_LB_RXP */ - if (!sta) { - /* Add entry */ - sta = dhd_add_sta(pub, ifidx, ea); - } +#endif /* DHD_LB */ - return sta; +static void dhd_memdump_work_handler(struct work_struct * work) +{ + struct dhd_info *dhd = + container_of(work, struct dhd_info, dhd_memdump_work.work); + + BCM_REFERENCE(dhd); +#ifdef BCMPCIE + dhd_prot_collect_memdump(&dhd->pub); +#endif } -#else -static inline void dhd_if_flush_sta(dhd_if_t * ifp) { } -static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {} -static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; } -static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {} -static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {} -dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; } -void dhd_del_sta(void *pub, int ifidx, void *ea) {} -#endif /* PCIE_FULL_DONGLE */ -/* Returns dhd iflist index correspondig the the bssidx provided by apps */ +/** Returns dhd iflist index corresponding the the bssidx provided by apps */ int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx) { dhd_if_t *ifp; @@ -1374,9 +2474,7 @@ static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp) int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost) { -#ifndef CUSTOMER_HW10 dhd_info_t *dhd = (dhd_info_t *)dhdp->info; -#endif /* !CUSTOMER_HW10 */ if (prepost) { /* pre process */ dhd_read_macaddr(dhd); @@ -1387,7 +2485,8 @@ int dhd_process_cid_mac(dhd_pub_t 
*dhdp, bool prepost) return 0; } -#if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER) +// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed +#if defined(PKT_FILTER_SUPPORT) &&defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER) static bool _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode) { @@ -1409,164 +2508,6 @@ _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode) } #endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */ -#if defined(CUSTOM_PLATFORM_NV_TEGRA) -#ifdef PKT_FILTER_SUPPORT -void -dhd_set_packet_filter_mode(struct net_device *dev, char *command) -{ - dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev); - - dhdi->pub.pkt_filter_mode = bcm_strtoul(command, &command, 0); -} - -int -dhd_set_packet_filter_ports(struct net_device *dev, char *command) -{ - int i = 0, error = BCME_OK, count = 0, get_count = 0, action = 0; - uint16 portnum = 0, *ports = NULL, get_ports[WL_PKT_FILTER_PORTS_MAX]; - dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev); - dhd_pub_t *dhdp = &dhdi->pub; - char iovbuf[WLC_IOCTL_SMLEN]; - - /* get action */ - action = bcm_strtoul(command, &command, 0); - if (action > PKT_FILTER_PORTS_MAX) - return BCME_BADARG; - - if (action == PKT_FILTER_PORTS_LOOPBACK) { - /* echo the loopback value if port filter is supported else error */ - bcm_mkiovar("cap", NULL, 0, iovbuf, sizeof(iovbuf)); - error = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0); - if (error < 0) { - DHD_ERROR(("%s: Get Capability failed (error=%d)\n", __FUNCTION__, error)); - return error; - } - - if (strstr(iovbuf, "pktfltr2")) - return bcm_strtoul(command, &command, 0); - else { - DHD_ERROR(("%s: pktfltr2 is not supported\n", __FUNCTION__)); - return BCME_UNSUPPORTED; - } - } - - if (action == PKT_FILTER_PORTS_CLEAR) { - /* action 0 is clear all ports */ - dhdp->pkt_filter_ports_count = 0; - bzero(dhdp->pkt_filter_ports, sizeof(dhdp->pkt_filter_ports)); - } - else { - portnum = 
bcm_strtoul(command, &command, 0); - if (portnum == 0) { - /* no ports to add or remove */ - return BCME_BADARG; - } - - /* get configured ports */ - count = dhdp->pkt_filter_ports_count; - ports = dhdp->pkt_filter_ports; - - if (action == PKT_FILTER_PORTS_ADD) { - /* action 1 is add ports */ - - /* copy new ports */ - while ((portnum != 0) && (count < WL_PKT_FILTER_PORTS_MAX)) { - for (i = 0; i < count; i++) { - /* duplicate port */ - if (portnum == ports[i]) - break; - } - if (portnum != ports[i]) - ports[count++] = portnum; - portnum = bcm_strtoul(command, &command, 0); - } - } else if ((action == PKT_FILTER_PORTS_DEL) && (count > 0)) { - /* action 2 is remove ports */ - bcopy(dhdp->pkt_filter_ports, get_ports, count * sizeof(uint16)); - get_count = count; - - while (portnum != 0) { - count = 0; - for (i = 0; i < get_count; i++) { - if (portnum != get_ports[i]) - ports[count++] = get_ports[i]; - } - get_count = count; - bcopy(ports, get_ports, count * sizeof(uint16)); - portnum = bcm_strtoul(command, &command, 0); - } - } - dhdp->pkt_filter_ports_count = count; - } - return error; -} - -static void -dhd_enable_packet_filter_ports(dhd_pub_t *dhd, bool enable) -{ - int error = 0; - wl_pkt_filter_ports_t *portlist = NULL; - const uint pkt_filter_ports_buf_len = sizeof("pkt_filter_ports") - + WL_PKT_FILTER_PORTS_FIXED_LEN + (WL_PKT_FILTER_PORTS_MAX * sizeof(uint16)); - char pkt_filter_ports_buf[pkt_filter_ports_buf_len]; - char iovbuf[pkt_filter_ports_buf_len]; - - DHD_TRACE(("%s: enable %d, in_suspend %d, mode %d, port count %d\n", __FUNCTION__, - enable, dhd->in_suspend, dhd->pkt_filter_mode, - dhd->pkt_filter_ports_count)); - - bzero(pkt_filter_ports_buf, sizeof(pkt_filter_ports_buf)); - portlist = (wl_pkt_filter_ports_t*)pkt_filter_ports_buf; - portlist->version = WL_PKT_FILTER_PORTS_VERSION; - portlist->reserved = 0; - - if (enable) { - if (!(dhd->pkt_filter_mode & PKT_FILTER_MODE_PORTS_ONLY)) - return; - - /* enable port filter */ - dhd_master_mode |= 
PKT_FILTER_MODE_PORTS_ONLY; - if (dhd->pkt_filter_mode & PKT_FILTER_MODE_FORWARD_ON_MATCH) - /* whitelist mode: FORWARD_ON_MATCH */ - dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH; - else - /* blacklist mode: DISCARD_ON_MATCH */ - dhd_master_mode &= ~PKT_FILTER_MODE_FORWARD_ON_MATCH; - - portlist->count = dhd->pkt_filter_ports_count; - bcopy(dhd->pkt_filter_ports, portlist->ports, - dhd->pkt_filter_ports_count * sizeof(uint16)); - } else { - /* disable port filter */ - portlist->count = 0; - dhd_master_mode &= ~PKT_FILTER_MODE_PORTS_ONLY; - dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH; - } - - DHD_INFO(("%s: update: mode %d, port count %d\n", __FUNCTION__, dhd_master_mode, - portlist->count)); - - /* update ports */ - bcm_mkiovar("pkt_filter_ports", - (char*)portlist, - (WL_PKT_FILTER_PORTS_FIXED_LEN + (portlist->count * sizeof(uint16))), - iovbuf, sizeof(iovbuf)); - error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); - if (error < 0) - DHD_ERROR(("%s: set pkt_filter_ports failed %d\n", __FUNCTION__, error)); - - /* update mode */ - bcm_mkiovar("pkt_filter_mode", (char*)&dhd_master_mode, - sizeof(dhd_master_mode), iovbuf, sizeof(iovbuf)); - error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); - if (error < 0) - DHD_ERROR(("%s: set pkt_filter_mode failed %d\n", __FUNCTION__, error)); - - return; -} -#endif /* PKT_FILTER_SUPPORT */ -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ - void dhd_set_packet_filter(dhd_pub_t *dhd) { #ifdef PKT_FILTER_SUPPORT @@ -1586,19 +2527,20 @@ void dhd_enable_packet_filter(int value, dhd_pub_t *dhd) #ifdef PKT_FILTER_SUPPORT int i; - DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value)); - -#if defined(CUSTOM_PLATFORM_NV_TEGRA) - dhd_enable_packet_filter_ports(dhd, value); -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ + DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value)); + if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) { + DHD_ERROR(("%s: 
DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__)); + return; + } /* 1 - Enable packet filter, only allow unicast packet to send up */ /* 0 - Disable packet filter */ if (dhd_pkt_filter_enable && (!value || (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress))) { for (i = 0; i < dhd->pktfilter_count; i++) { -#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER +// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed +#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER) if (value && (i == DHD_ARP_FILTER_NUM) && !_turn_on_arp_filter(dhd, dhd->op_mode)) { DHD_TRACE(("Do not turn on ARP white list pkt filter:" @@ -1616,19 +2558,48 @@ void dhd_enable_packet_filter(int value, dhd_pub_t *dhd) static int dhd_set_suspend(int value, dhd_pub_t *dhd) { -#ifndef SUPPORT_PM2_ONLY int power_mode = PM_MAX; -#endif /* SUPPORT_PM2_ONLY */ +#ifdef SUPPORT_SENSORHUB + uint32 shub_msreq; +#endif /* SUPPORT_SENSORHUB */ /* wl_pkt_filter_enable_t enable_parm; */ char iovbuf[32]; int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */ +#ifdef DHD_USE_EARLYSUSPEND +#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND + int bcn_timeout = 0; +#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + int roam_time_thresh = 0; /* (ms) */ +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND uint roamvar = dhd->conf->roam_off_suspend; +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + int bcn_li_bcn; +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ uint nd_ra_filter = 0; int ret = 0; +#endif /* DHD_USE_EARLYSUSPEND */ +#ifdef PASS_ALL_MCAST_PKTS + struct dhd_info *dhdinfo; + uint32 allmulti; + uint i; +#endif /* PASS_ALL_MCAST_PKTS */ +#ifdef DYNAMIC_SWOOB_DURATION +#ifndef CUSTOM_INTR_WIDTH +#define CUSTOM_INTR_WIDTH 100 + int intr_width = 0; +#endif /* CUSTOM_INTR_WIDTH */ +#endif /* DYNAMIC_SWOOB_DURATION */ if (!dhd) return -ENODEV; +#ifdef PASS_ALL_MCAST_PKTS + dhdinfo = dhd->info; +#endif /* PASS_ALL_MCAST_PKTS */ 
+ DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n", __FUNCTION__, value, dhd->in_suspend)); @@ -1639,10 +2610,11 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) /* set specific cpucore */ dhd_set_cpucore(dhd, TRUE); #endif /* CUSTOM_SET_CPUCORE */ -#ifndef SUPPORT_PM2_ONLY + if (dhd->conf->pm >= 0) power_mode = dhd->conf->pm; -#endif /* SUPPORT_PM2_ONLY */ + else + power_mode = PM_FAST; if (dhd->up) { if (value && dhd->in_suspend) { #ifdef PKT_FILTER_SUPPORT @@ -1651,18 +2623,52 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) /* Kernel suspended */ DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__)); -#ifndef SUPPORT_PM2_ONLY +#ifdef SUPPORT_SENSORHUB + shub_msreq = 1; + if (dhd->info->shub_enable == 1) { + bcm_mkiovar("shub_msreq", (char *)&shub_msreq, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Sensor Hub move/stop start: failed %d\n", + __FUNCTION__, ret)); + } + } +#endif /* SUPPORT_SENSORHUB */ + + if (dhd->conf->pm_in_suspend >= 0) + power_mode = dhd->conf->pm_in_suspend; dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); -#endif /* SUPPORT_PM2_ONLY */ - /* Enable packet filter, only allow unicast packet to send up */ +#ifdef PKT_FILTER_SUPPORT + /* Enable packet filter, + * only allow unicast packet to send up + */ dhd_enable_packet_filter(1, dhd); +#endif /* PKT_FILTER_SUPPORT */ + +#ifdef PASS_ALL_MCAST_PKTS + allmulti = 0; + bcm_mkiovar("allmulti", (char *)&allmulti, 4, + iovbuf, sizeof(iovbuf)); + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, i); + } +#endif /* PASS_ALL_MCAST_PKTS */ /* If DTIM skip is set up as default, force it to wake * each third DTIM for better power savings. Note that * one side effect is a chance to miss BC/MC packet. 
*/ +#ifdef WLTDLS + /* Do not set bcn_li_ditm on WFD mode */ + if (dhd->tdls_mode) { + bcn_li_dtim = 0; + } else +#endif /* WLTDLS */ bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd); bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, 4, iovbuf, sizeof(iovbuf)); @@ -1670,9 +2676,30 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) TRUE, 0) < 0) DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__)); +#ifdef DHD_USE_EARLYSUSPEND +#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND + bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND; + bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND; + bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND /* Disable firmware roaming during suspend */ bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + bcn_li_bcn = 0; + bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ if (FW_SUPPORTED(dhd, ndoe)) { /* enable IPv6 RA filter in firmware during suspend */ nd_ra_filter = 1; @@ -1683,6 +2710,16 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) DHD_ERROR(("failed to set nd_ra_filter (%d)\n", ret)); } +#ifdef DYNAMIC_SWOOB_DURATION + intr_width = CUSTOM_INTR_WIDTH; + bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + 
DHD_ERROR(("failed to set intr_width (%d)\n", ret)); + } +#endif /* DYNAMIC_SWOOB_DURATION */ +#endif /* DHD_USE_EARLYSUSPEND */ } else { #ifdef PKT_FILTER_SUPPORT dhd->early_suspended = 0; @@ -1690,24 +2727,75 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) /* Kernel resumed */ DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__)); -#ifndef SUPPORT_PM2_ONLY - power_mode = PM_FAST; +#ifdef SUPPORT_SENSORHUB + shub_msreq = 0; + if (dhd->info->shub_enable == 1) { + bcm_mkiovar("shub_msreq", (char *)&shub_msreq, + 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Sensor Hub move/stop stop:" + "failed %d\n", __FUNCTION__, ret)); + } + } +#endif /* SUPPORT_SENSORHUB */ + + +#ifdef DYNAMIC_SWOOB_DURATION + intr_width = 0; + bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("failed to set intr_width (%d)\n", ret)); + } +#endif /* DYNAMIC_SWOOB_DURATION */ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); -#endif /* SUPPORT_PM2_ONLY */ #ifdef PKT_FILTER_SUPPORT /* disable pkt filter */ dhd_enable_packet_filter(0, dhd); #endif /* PKT_FILTER_SUPPORT */ +#ifdef PASS_ALL_MCAST_PKTS + allmulti = 1; + bcm_mkiovar("allmulti", (char *)&allmulti, 4, + iovbuf, sizeof(iovbuf)); + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, i); + } +#endif /* PASS_ALL_MCAST_PKTS */ /* restore pre-suspend setting for dtim_skip */ bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#ifdef DHD_USE_EARLYSUSPEND +#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND + bcn_timeout = CUSTOM_BCN_TIMEOUT; + bcm_mkiovar("bcn_timeout", (char 
*)&bcn_timeout, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + roam_time_thresh = 2000; + bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND roamvar = dhd_roam_disable; bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + bcn_li_bcn = 1; + bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ if (FW_SUPPORTED(dhd, ndoe)) { /* disable IPv6 RA filter in firmware during suspend */ nd_ra_filter = 0; @@ -1718,6 +2806,12 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) DHD_ERROR(("failed to set nd_ra_filter (%d)\n", ret)); } +#endif /* DHD_USE_EARLYSUSPEND */ + + /* terence 2017029: Reject in early suspend */ + if (!dhd->conf->xmit_in_suspend) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); + } } } dhd_suspend_unlock(dhd); @@ -1830,6 +2924,7 @@ dhd_net2idx(dhd_info_t *dhd, struct net_device *net) DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__)); return DHD_BAD_IF; } + while (i < DHD_MAX_IFS) { if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net)) return i; @@ -1863,7 +2958,7 @@ dhd_ifname2idx(dhd_info_t *dhd, char *name) return 0; while (--i > 0) - if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ)) + if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ)) break; DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name)); @@ -1871,22 +2966,6 @@ dhd_ifname2idx(dhd_info_t *dhd, 
char *name) return i; /* default - the primary interface */ } -int -dhd_ifidx2hostidx(dhd_info_t *dhd, int ifidx) -{ - int i = DHD_MAX_IFS; - - ASSERT(dhd); - - while (--i > 0) - if (dhd->iflist[i] && (dhd->iflist[i]->idx == ifidx)) - break; - - DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__, i, ifidx)); - - return i; /* default - the primary interface */ -} - char * dhd_ifname(dhd_pub_t *dhdp, int ifidx) { @@ -1941,26 +3020,35 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) uint buflen; int ret; - ASSERT(dhd && dhd->iflist[ifidx]); + if (!dhd->iflist[ifidx]) { + DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx)); + return; + } dev = dhd->iflist[ifidx]->net; if (!dev) return; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_lock_bh(dev); -#endif +#endif /* LINUX >= 2.6.27 */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) cnt = netdev_mc_count(dev); #else cnt = dev->mc_count; -#endif /* LINUX_VERSION_CODE */ - +#endif /* LINUX >= 2.6.35 */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_unlock_bh(dev); -#endif +#endif /* LINUX >= 2.6.27 */ /* Determine initial value of allmulti flag */ allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE; +#ifdef PASS_ALL_MCAST_PKTS +#ifdef PKT_FILTER_SUPPORT + if (!dhd->pub.early_suspended) +#endif /* PKT_FILTER_SUPPORT */ + allmulti = TRUE; +#endif /* PASS_ALL_MCAST_PKTS */ + /* Send down the multicast list first. 
*/ @@ -1979,10 +3067,9 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) memcpy(bufp, &cnt, sizeof(cnt)); bufp += sizeof(cnt); - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_lock_bh(dev); -#endif +#endif /* LINUX >= 2.6.27 */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) netdev_for_each_mc_addr(ha, dev) { if (!cnt) @@ -1991,17 +3078,16 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) bufp += ETHER_ADDR_LEN; cnt--; } -#else +#else /* LINUX < 2.6.35 */ for (mclist = dev->mc_list; (mclist && (cnt > 0)); cnt--, mclist = mclist->next) { memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN); bufp += ETHER_ADDR_LEN; } -#endif /* LINUX_VERSION_CODE */ - +#endif /* LINUX >= 2.6.35 */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_unlock_bh(dev); -#endif +#endif /* LINUX >= 2.6.27 */ memset(&ioc, 0, sizeof(ioc)); ioc.cmd = WLC_SET_VAR; @@ -2105,6 +3191,22 @@ extern struct net_device *ap_net_dev; extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */ #endif +#ifdef DHD_PSTA +/* Get psta/psr configuration configuration */ +int dhd_get_psta_mode(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + return (int)dhd->psta_mode; +} +/* Set psta/psr configuration configuration */ +int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val) +{ + dhd_info_t *dhd = dhdp->info; + dhd->psta_mode = val; + return 0; +} +#endif /* DHD_PSTA */ + static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) { @@ -2113,7 +3215,7 @@ dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) struct net_device *ndev; int ifidx, bssidx; int ret; -#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) +#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) struct wireless_dev *vwdev, *primary_wdev; struct net_device *primary_ndev; #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */ @@ -2141,17 +3243,20 @@ dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) bssidx = 
if_event->event.bssidx; DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx)); + /* This path is for non-android case */ + /* The interface name in host and in event msg are same */ + /* if name in event msg is used to create dongle if list on host */ ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name, - if_event->mac, bssidx, TRUE); + if_event->mac, bssidx, TRUE, if_event->name); if (!ndev) { DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__)); goto done; } -#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) +#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL); if (unlikely(!vwdev)) { - WL_ERR(("Could not allocate wireless device\n")); + DHD_ERROR(("Could not allocate wireless device\n")); goto done; } primary_ndev = dhd->pub.info->iflist[0]->net; @@ -2174,7 +3279,7 @@ dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) } #ifdef PCIE_FULL_DONGLE /* Turn on AP isolation in the firmware for interfaces operating in AP mode */ - if (FW_SUPPORTED((&dhd->pub), ap) && !(DHD_IF_ROLE_STA(if_event->event.role))) { + if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) { char iovbuf[WLC_IOCTL_SMLEN]; uint32 var_int = 1; @@ -2188,6 +3293,7 @@ dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) } } #endif /* PCIE_FULL_DONGLE */ + done: MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); @@ -2226,7 +3332,9 @@ dhd_ifdel_event_handler(void *handle, void *event_info, u8 event) ifidx = if_event->event.ifidx; DHD_TRACE(("Removing interface with idx %d\n", ifidx)); + DHD_PERIM_UNLOCK(&dhd->pub); dhd_remove_if(&dhd->pub, ifidx, TRUE); + DHD_PERIM_LOCK(&dhd->pub); MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); @@ -2270,6 +3378,10 @@ dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event) } #endif /* SOFTAP */ + // terence 20160907: fix for not able to set mac when wlan0 is down + if (ifp == NULL || 
!ifp->set_macaddress) { + goto done; + } if (ifp == NULL || !dhd->pub.up) { DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__)); goto done; @@ -2381,6 +3493,10 @@ dhd_set_multicast_list(struct net_device *dev) dhd->iflist[ifidx]->set_multicast = TRUE; dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx], DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW); + + // terence 20160907: fix for not able to set mac when wlan0 is down + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx], + DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW); } #ifdef PROP_TXSTATUS @@ -2389,6 +3505,9 @@ dhd_os_wlfc_block(dhd_pub_t *pub) { dhd_info_t *di = (dhd_info_t *)(pub->info); ASSERT(di != NULL); + /* terence 20161229: don't do spin lock if proptx not enabled */ + if (disable_proptx) + return 1; spin_lock_bh(&di->wlfc_spinlock); return 1; } @@ -2399,6 +3518,9 @@ dhd_os_wlfc_unblock(dhd_pub_t *pub) dhd_info_t *di = (dhd_info_t *)(pub->info); ASSERT(di != NULL); + /* terence 20161229: don't do spin lock if proptx not enabled */ + if (disable_proptx) + return 1; spin_unlock_bh(&di->wlfc_spinlock); return 1; } @@ -2437,21 +3559,20 @@ static const char *_get_packet_type_str(uint16 type) #if defined(DHD_TX_DUMP) void -dhd_tx_dump(osl_t *osh, void *pkt) +dhd_tx_dump(struct net_device *ndev, osl_t *osh, void *pkt) { uint8 *dump_data; uint16 protocol; - struct ether_header *eh; + char *ifname; dump_data = PKTDATA(osh, pkt); - eh = (struct ether_header *) dump_data; - protocol = ntoh16(eh->ether_type); + protocol = (dump_data[12] << 8) | dump_data[13]; + ifname = ndev ? 
ndev->name : "N/A"; - DHD_ERROR(("TX DUMP - %s\n", _get_packet_type_str(protocol))); + DHD_ERROR(("TX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol))); if (protocol == ETHER_TYPE_802_1X) { - DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n", - dump_data[14], dump_data[15], dump_data[30])); + dhd_dump_eapol_4way_message(ifname, dump_data, TRUE); } #if defined(DHD_TX_FULL_DUMP) @@ -2461,27 +3582,108 @@ dhd_tx_dump(osl_t *osh, void *pkt) datalen = PKTLEN(osh, pkt); for (i = 0; i < datalen; i++) { - DHD_ERROR(("%02X ", dump_data[i])); + printk("%02X ", dump_data[i]); if ((i & 15) == 15) printk("\n"); } - DHD_ERROR(("\n")); + printk("\n"); } #endif /* DHD_TX_FULL_DUMP */ } #endif /* DHD_TX_DUMP */ +/* This routine do not support Packet chain feature, Currently tested for + * proxy arp feature + */ +int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p) +{ + struct sk_buff *skb; + void *skbhead = NULL; + void *skbprev = NULL; + dhd_if_t *ifp; + ASSERT(!PKTISCHAINED(p)); + skb = PKTTONATIVE(dhdp->osh, p); + + ifp = dhdp->info->iflist[ifidx]; + skb->dev = ifp->net; +#if defined(BCM_GMAC3) + /* Forwarder capable interfaces use WOFA based forwarding */ + if (ifp->fwdh) { + struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p); + uint16 * da = (uint16 *)(eh->ether_dhost); + wofa_t wofa; + ASSERT(ISALIGNED(da, 2)); + + wofa = fwder_lookup(ifp->fwdh->mate, da, ifp->idx); + if (wofa == FWDER_WOFA_INVALID) { /* Unknown MAC address */ + if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) { + return BCME_OK; + } + } + PKTFRMNATIVE(dhdp->osh, p); + PKTFREE(dhdp->osh, p, FALSE); + return BCME_OK; + } +#endif /* BCM_GMAC3 */ + + skb->protocol = eth_type_trans(skb, skb->dev); + + if (in_interrupt()) { + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + netif_rx(skb); + } else { + if (dhdp->info->rxthread_enabled) { + if (!skbhead) { + skbhead = skb; + } else { + PKTSETNEXT(dhdp->osh, skbprev, skb); + } + 
skbprev = skb; + } else { + /* If the receive is not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled + * by netif_rx_ni(), but in earlier kernels, we need + * to do it manually. + */ + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + netif_rx_ni(skb); +#else + ulong flags; + netif_rx(skb); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ + } + } + + if (dhdp->info->rxthread_enabled && skbhead) + dhd_sched_rxf(dhdp, skbhead); + + return BCME_OK; +} + int BCMFASTPATH -dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) { int ret = BCME_OK; dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); struct ether_header *eh = NULL; +#ifdef DHD_L2_FILTER + dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx); +#endif +#ifdef DHD_8021X_DUMP + struct net_device *ndev; +#endif /* DHD_8021X_DUMP */ /* Reject if down */ if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) { /* free the packet here since the caller won't */ - PKTFREE(dhdp->osh, pktbuf, TRUE); + PKTCFREE(dhdp->osh, pktbuf, TRUE); return -ENODEV; } @@ -2493,13 +3695,44 @@ dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) } #endif /* PCIE_FULL_DONGLE */ -#ifdef DHD_UNICAST_DHCP +#ifdef DHD_L2_FILTER /* if dhcp_unicast is enabled, we need to convert the */ /* broadcast DHCP ACK/REPLY packets to Unicast. 
*/ - if (dhdp->dhcp_unicast) { - dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp, pktbuf, ifidx); + if (ifp->dhcp_unicast) { + uint8* mac_addr; + uint8* ehptr = NULL; + int ret; + ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr); + if (ret == BCME_OK) { + /* if given mac address having valid entry in sta list + * copy the given mac address, and return with BCME_OK + */ + if (dhd_find_sta(dhdp, ifidx, mac_addr)) { + ehptr = PKTDATA(dhdp->osh, pktbuf); + bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN); + } + } + } + + if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) { + if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + } + + if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) { + ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE); + + /* Drop the packets if l2 filter has processed it already + * otherwise continue with the normal path + */ + if (ret == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } } -#endif /* DHD_UNICAST_DHCP */ +#endif /* DHD_L2_FILTER */ /* Update multicast statistic */ if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) { uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf); @@ -2509,19 +3742,66 @@ dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) dhdp->tx_multicast++; if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) atomic_inc(&dhd->pend_8021x_cnt); +#ifdef DHD_DHCP_DUMP + if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) { + uint16 dump_hex; + uint16 source_port; + uint16 dest_port; + uint16 udp_port_pos; + uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN]; + uint8 ip_header_len = (*ptr8 & 0x0f)<<2; + struct net_device *net; + char *ifname; + + net = dhd_idx2net(dhdp, ifidx); + ifname = net ? 
net->name : "N/A"; + udp_port_pos = ETHER_HDR_LEN + ip_header_len; + source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1]; + dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3]; + if (source_port == 0x0044 || dest_port == 0x0044) { + dump_hex = (pktdata[udp_port_pos+249] << 8) | + pktdata[udp_port_pos+250]; + if (dump_hex == 0x0101) { + DHD_ERROR(("DHCP[%s] - DISCOVER [TX]", ifname)); + } else if (dump_hex == 0x0102) { + DHD_ERROR(("DHCP[%s] - OFFER [TX]", ifname)); + } else if (dump_hex == 0x0103) { + DHD_ERROR(("DHCP[%s] - REQUEST [TX]", ifname)); + } else if (dump_hex == 0x0105) { + DHD_ERROR(("DHCP[%s] - ACK [TX]", ifname)); + } else { + DHD_ERROR(("DHCP[%s] - 0x%X [TX]", ifname, dump_hex)); + } +#ifdef DHD_LOSSLESS_ROAMING + if (dhdp->dequeue_prec_map != (uint8)ALLPRIO) { + DHD_ERROR(("/%d", dhdp->dequeue_prec_map)); + } +#endif /* DHD_LOSSLESS_ROAMING */ + DHD_ERROR(("\n")); + } else if (source_port == 0x0043 || dest_port == 0x0043) { + DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname)); + } + } +#endif /* DHD_DHCP_DUMP */ } else { - PKTFREE(dhd->pub.osh, pktbuf, TRUE); + PKTCFREE(dhdp->osh, pktbuf, TRUE); return BCME_ERROR; } /* Look into the packet and update the packet priority */ #ifndef PKTPRIO_OVERRIDE if (PKTPRIO(pktbuf) == 0) -#endif +#endif /* !PKTPRIO_OVERRIDE */ + { +#ifdef QOS_MAP_SET + pktsetprio_qms(pktbuf, wl_get_up_table(), FALSE); +#else pktsetprio(pktbuf, FALSE); +#endif /* QOS_MAP_SET */ + } -#if defined(PCIE_FULL_DONGLE) && !defined(PCIE_TX_DEFERRAL) +#ifdef PCIE_FULL_DONGLE /* * Lkup the per interface hash table, for a matching flowring. If one is not * available, allocate a unique flowid and add a flowring entry. 
@@ -2533,10 +3813,11 @@ dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) return ret; } #endif + #if defined(DHD_TX_DUMP) - dhd_tx_dump(dhdp->osh, pktbuf); + ndev = dhd_idx2net(dhdp, ifidx); + dhd_tx_dump(ndev, dhdp->osh, pktbuf); #endif - /* terence 20150901: Micky add to ajust the 802.1X priority */ /* Set the 802.1X packet with the highest priority 7 */ if (dhdp->conf->pktprio8021x >= 0) @@ -2558,14 +3839,15 @@ dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf))); } else #endif /* PROP_TXSTATUS */ - /* If the protocol uses a data header, apply it */ - dhd_prot_hdrpush(dhdp, ifidx, pktbuf); + { + /* If the protocol uses a data header, apply it */ + dhd_prot_hdrpush(dhdp, ifidx, pktbuf); + } /* Use bus module to send data frame */ #ifdef WLMEDIA_HTSF dhd_htsf_addtxts(dhdp, pktbuf); #endif - #ifdef PROP_TXSTATUS { if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata, @@ -2589,6 +3871,44 @@ dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) return ret; } +int BCMFASTPATH +dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + int ret = 0; + unsigned long flags; + + DHD_GENERAL_LOCK(dhdp, flags); + if (dhdp->busstate == DHD_BUS_DOWN || + dhdp->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_ERROR(("%s: returning as busstate=%d\n", + __FUNCTION__, dhdp->busstate)); + DHD_GENERAL_UNLOCK(dhdp, flags); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + dhdp->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT; + DHD_GENERAL_UNLOCK(dhdp, flags); + +#ifdef DHD_PCIE_RUNTIMEPM + if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) { + DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__)); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + ret = -EBUSY; + goto exit; + } +#endif /* DHD_PCIE_RUNTIMEPM */ + + ret = __dhd_sendpkt(dhdp, ifidx, pktbuf); + +#ifdef DHD_PCIE_RUNTIMEPM +exit: +#endif + DHD_GENERAL_LOCK(dhdp, flags); + dhdp->dhd_bus_busy_state &= 
~DHD_BUS_BUSY_IN_SEND_PKT; + DHD_GENERAL_UNLOCK(dhdp, flags); + return ret; +} + int BCMFASTPATH dhd_start_xmit(struct sk_buff *skb, struct net_device *net) { @@ -2598,11 +3918,12 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) dhd_info_t *dhd = DHD_DEV_INFO(net); dhd_if_t *ifp = NULL; int ifidx; + unsigned long flags; #ifdef WLMEDIA_HTSF uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz; #else uint8 htsfdlystat_sz = 0; -#endif +#endif #ifdef DHD_WMF struct ether_header *eh; uint8 *iph; @@ -2610,21 +3931,78 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + /* terence 2017029: Reject in early suspend */ + if (!dhd->pub.conf->xmit_in_suspend && dhd->pub.early_suspended) { + dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, ON); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif + } + + +#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->pub.dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_PCIE_RUNTIMEPM + if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) { + /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. 
*/ + /* stop the network queue temporarily until resume done */ + DHD_GENERAL_LOCK(&dhd->pub, flags); + if (!dhdpcie_is_resume_done(&dhd->pub)) { + dhd_bus_stop_queue(dhd->pub.bus); + } + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif + } +#endif /* DHD_PCIE_RUNTIMEPM */ + + DHD_GENERAL_LOCK(&dhd->pub, flags); +#ifdef PCIE_FULL_DONGLE + if (dhd->pub.busstate == DHD_BUS_SUSPEND) { + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif + } +#endif /* PCIE_FULL_DONGLE */ + DHD_OS_WAKE_LOCK(&dhd->pub); - DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE); + DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); /* Reject if down */ - if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) { + if (dhd->pub.hang_was_sent || dhd->pub.busstate == DHD_BUS_DOWN || + dhd->pub.busstate == DHD_BUS_DOWN_IN_PROGRESS) { DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n", __FUNCTION__, dhd->pub.up, dhd->pub.busstate)); netif_stop_queue(net); /* Send Event when bus down detected during data session */ - if (dhd->pub.up) { + if (dhd->pub.up && !dhd->pub.hang_was_sent) { DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__)); + dhd->pub.hang_reason = HANG_REASON_BUS_DOWN; net_os_send_hang_message(net); } - DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE); +#ifdef PCIE_FULL_DONGLE + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); +#endif /* PCIE_FULL_DONGLE */ + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); DHD_OS_WAKE_UNLOCK(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) return -ENODEV; #else @@ -2634,21 
+4012,30 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) ifp = DHD_DEV_IFP(net); ifidx = DHD_DEV_IFIDX(net); - - ASSERT(ifidx == dhd_net2idx(dhd, net)); - ASSERT((ifp != NULL) && (ifp == dhd->iflist[ifidx])); + BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb); if (ifidx == DHD_BAD_IF) { DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx)); netif_stop_queue(net); - DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE); +#ifdef PCIE_FULL_DONGLE + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); +#endif /* PCIE_FULL_DONGLE */ + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); DHD_OS_WAKE_UNLOCK(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) return -ENODEV; #else return NETDEV_TX_BUSY; #endif } + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + + ASSERT(ifidx == dhd_net2idx(dhd, net)); + ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx]))); + + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__); /* re-align socket buffer if "skb->data" is odd address */ if (((unsigned long)(skb->data)) & 0x1) { @@ -2662,7 +4049,6 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) datalen = PKTLEN(dhd->pub.osh, skb); /* Make sure there's enough room for any header */ - if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) { struct sk_buff *skb2; @@ -2670,6 +4056,7 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) dhd_ifname(&dhd->pub, ifidx))); dhd->pub.tx_realloc++; + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz); dev_kfree_skb(skb); @@ -2679,17 +4066,20 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) ret = -ENOMEM; goto done; } + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__); } /* Convert to packet */ if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) { DHD_ERROR(("%s: 
PKTFRMNATIVE failed\n", dhd_ifname(&dhd->pub, ifidx))); + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); dev_kfree_skb_any(skb); ret = -ENOMEM; goto done; } -#ifdef WLMEDIA_HTSF + +#if defined(WLMEDIA_HTSF) if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) { uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf); struct ether_header *eh = (struct ether_header *)pktdata; @@ -2699,7 +4089,8 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS); } } -#endif +#endif + #ifdef DHD_WMF eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf); iph = (uint8 *)eh + ETHER_HDR_LEN; @@ -2726,27 +4117,42 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) #endif /* DHD_IGMP_UCQUERY */ if (ucast_convert) { dhd_sta_t *sta; +#ifdef PCIE_FULL_DONGLE unsigned long flags; +#endif + struct list_head snapshot_list; + struct list_head *wmf_ucforward_list; - DHD_IF_STA_LIST_LOCK(ifp, flags); + ret = NETDEV_TX_OK; + + /* For non BCM_GMAC3 platform we need a snapshot sta_list to + * resolve double DHD_IF_STA_LIST_LOCK call deadlock issue. 
+ */ + wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list); /* Convert upnp/igmp query to unicast for each assoc STA */ - list_for_each_entry(sta, &ifp->sta_list, list) { + list_for_each_entry(sta, wmf_ucforward_list, list) { if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) { - DHD_IF_STA_LIST_UNLOCK(ifp, flags); - DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE); - DHD_OS_WAKE_UNLOCK(&dhd->pub); - return (WMF_NOP); + ret = WMF_NOP; + break; } dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1); } + DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list); - DHD_IF_STA_LIST_UNLOCK(ifp, flags); - DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE); +#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); DHD_OS_WAKE_UNLOCK(&dhd->pub); - PKTFREE(dhd->pub.osh, pktbuf, TRUE); - return NETDEV_TX_OK; + if (ret == NETDEV_TX_OK) + PKTFREE(dhd->pub.osh, pktbuf, TRUE); + + return ret; } else #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */ { @@ -2760,7 +4166,13 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) /* Either taken by WMF or we should drop it. 
* Exiting send path */ - DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE); +#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); DHD_OS_WAKE_UNLOCK(&dhd->pub); return NETDEV_TX_OK; default: @@ -2770,27 +4182,45 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) } } #endif /* DHD_WMF */ +#ifdef DHD_PSTA + /* PSR related packet proto manipulation should be done in DHD + * since dongle doesn't have complete payload + */ + if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub, + ifidx, &pktbuf, TRUE) < 0)) { + DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__, + dhd_ifname(&dhd->pub, ifidx))); + } +#endif /* DHD_PSTA */ #ifdef DHDTCPACK_SUPPRESS if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) { /* If this packet has been hold or got freed, just return */ - if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) - return 0; + if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) { + ret = 0; + goto done; + } } else { /* If this packet has replaced another packet and got freed, just return */ - if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) - return 0; + if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) { + ret = 0; + goto done; + } } #endif /* DHDTCPACK_SUPPRESS */ - ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf); + /* no segmented SKB support (Kernel-3.18.y) */ + if ((PKTLINK(skb) != NULL) && (PKTLINK(skb) == skb)) { + PKTSETLINK(skb, NULL); + } + + ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf); done: if (ret) { ifp->stats.tx_dropped++; dhd->pub.tx_dropped++; - } - else { + } else { #ifdef PROP_TXSTATUS /* tx_packets counter can counted only when wlfc is disabled */ @@ -2803,8 +4233,16 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) } } - DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE); +#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, 
flags); + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); DHD_OS_WAKE_UNLOCK(&dhd->pub); + BUZZZ_LOG(START_XMIT_END, 0); /* Return ok: we always eat the packet */ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) @@ -2826,6 +4264,13 @@ dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state) ASSERT(dhd); +#ifdef DHD_LOSSLESS_ROAMING + /* block flowcontrol during roaming */ + if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) { + return; + } +#endif + if (ifidx == ALL_INTERFACES) { /* Flow control on all active interfaces */ dhdp->txoff = state; @@ -2838,8 +4283,7 @@ dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state) netif_wake_queue(net); } } - } - else { + } else { if (dhd->iflist[ifidx]) { net = dhd->iflist[ifidx]->net; if (state == ON) @@ -2861,6 +4305,7 @@ dhd_is_rxthread_enabled(dhd_pub_t *dhdp) } #endif /* DHD_WMF */ +/** Called when a frame is received by the dongle on interface 'ifidx' */ void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) { @@ -2876,18 +4321,16 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) int tout_ctrl = 0; void *skbhead = NULL; void *skbprev = NULL; -#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) +#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) char *dump_data; uint16 protocol; -#endif /* DHD_RX_DUMP || DHD_8021X_DUMP */ + char *ifname; +#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */ DHD_TRACE(("%s: Enter\n", __FUNCTION__)); for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) { struct ether_header *eh; -#ifdef WLBTAMP - struct dot11_llc_snap_header *lsh; -#endif pnext = PKTNEXT(dhdp->osh, pktbuf); PKTSETNEXT(dhdp->osh, pktbuf, NULL); @@ -2917,18 +4360,6 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 
chan) continue; } -#ifdef WLBTAMP - lsh = (struct dot11_llc_snap_header *)&eh[1]; - - if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) && - (PKTLEN(dhdp->osh, pktbuf) >= RFC1042_HDR_LEN) && - bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 && - lsh->type == HTON16(BTA_PROT_L2CAP)) { - amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *) - ((uint8 *)eh + RFC1042_HDR_LEN); - ACL_data = NULL; - } -#endif /* WLBTAMP */ #ifdef PROP_TXSTATUS if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) { @@ -2942,13 +4373,30 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) #endif #ifdef DHD_L2_FILTER /* If block_ping is enabled drop the ping packet */ - if (dhdp->block_ping) { - if (dhd_l2_filter_block_ping(dhdp, pktbuf, ifidx) == BCME_OK) { - PKTFREE(dhdp->osh, pktbuf, FALSE); + if (ifp->block_ping) { + if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, FALSE); continue; } } -#endif + if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) { + if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } + } + if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) { + int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE); + + /* Drop the packets if l2 filter has processed it already + * otherwise continue with the normal path + */ + if (ret == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + continue; + } + } +#endif /* DHD_L2_FILTER */ #ifdef DHD_WMF /* WMF processing for multicast packets */ if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) { @@ -2973,18 +4421,22 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) } } #endif /* DHD_WMF */ + #ifdef DHDTCPACK_SUPPRESS dhd_tcpdata_info_get(dhdp, pktbuf); #endif skb = PKTTONATIVE(dhdp->osh, pktbuf); - ifp = dhd->iflist[ifidx]; - if (ifp == NULL) - ifp = dhd->iflist[0]; - ASSERT(ifp); skb->dev = ifp->net; +#ifdef DHD_PSTA + if (PSR_ENABLED(dhdp) && 
(dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) { + DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__, + dhd_ifname(dhdp, ifidx))); + } +#endif /* DHD_PSTA */ + #ifdef PCIE_FULL_DONGLE if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) && (!ifp->ap_isolate)) { @@ -2996,7 +4448,8 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) } } else { void *npktbuf = PKTDUP(dhdp->osh, pktbuf); - dhd_sendpkt(dhdp, ifidx, npktbuf); + if (npktbuf) + dhd_sendpkt(dhdp, ifidx, npktbuf); } } #endif /* PCIE_FULL_DONGLE */ @@ -3013,19 +4466,49 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) eth = skb->data; len = skb->len; -#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) +#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) dump_data = skb->data; protocol = (dump_data[12] << 8) | dump_data[13]; - + ifname = skb->dev ? skb->dev->name : "N/A"; +#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */ +#ifdef DHD_8021X_DUMP if (protocol == ETHER_TYPE_802_1X) { - DHD_ERROR(("ETHER_TYPE_802_1X [RX]: " - "ver %d, type %d, replay %d\n", - dump_data[14], dump_data[15], - dump_data[30])); + dhd_dump_eapol_4way_message(ifname, dump_data, FALSE); + } +#endif /* DHD_8021X_DUMP */ +#ifdef DHD_DHCP_DUMP + if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) { + uint16 dump_hex; + uint16 source_port; + uint16 dest_port; + uint16 udp_port_pos; + uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN]; + uint8 ip_header_len = (*ptr8 & 0x0f)<<2; + + udp_port_pos = ETHER_HDR_LEN + ip_header_len; + source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1]; + dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3]; + if (source_port == 0x0044 || dest_port == 0x0044) { + dump_hex = (dump_data[udp_port_pos+249] << 8) | + dump_data[udp_port_pos+250]; + if (dump_hex == 0x0101) { + DHD_ERROR(("DHCP[%s] - DISCOVER [RX]\n", ifname)); + } else if (dump_hex == 
0x0102) { + DHD_ERROR(("DHCP[%s] - OFFER [RX]\n", ifname)); + } else if (dump_hex == 0x0103) { + DHD_ERROR(("DHCP[%s] - REQUEST [RX]\n", ifname)); + } else if (dump_hex == 0x0105) { + DHD_ERROR(("DHCP[%s] - ACK [RX]\n", ifname)); + } else { + DHD_ERROR(("DHCP[%s] - 0x%X [RX]\n", ifname, dump_hex)); + } + } else if (source_port == 0x0043 || dest_port == 0x0043) { + DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname)); + } } -#endif /* DHD_RX_DUMP || DHD_8021X_DUMP */ +#endif /* DHD_DHCP_DUMP */ #if defined(DHD_RX_DUMP) - DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol))); + DHD_ERROR(("RX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol))); if (protocol != ETHER_TYPE_BRCM) { if (dump_data[0] == 0xFF) { DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__)); @@ -3043,11 +4526,11 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) { int k; for (k = 0; k < skb->len; k++) { - DHD_ERROR(("%02X ", dump_data[k])); + printk("%02X ", dump_data[k]); if ((k & 15) == 15) - DHD_ERROR(("\n")); + printk("\n"); } - DHD_ERROR(("\n")); + printk("\n"); } #endif /* DHD_RX_FULL_DUMP */ } @@ -3084,11 +4567,6 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) wl_event_to_host_order(&event); if (!tout_ctrl) tout_ctrl = DHD_PACKET_TIMEOUT_MS; -#ifdef WLBTAMP - if (event.event_type == WLC_E_BTA_HCI_EVENT) { - dhd_bta_doevt(dhdp, data, event.datalen); - } -#endif /* WLBTAMP */ #if defined(PNO_SUPPORT) if (event.event_type == WLC_E_PFN_NET_FOUND) { @@ -3098,7 +4576,11 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) #endif /* PNO_SUPPORT */ #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); +#else PKTFREE(dhdp->osh, pktbuf, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ continue; #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */ } else { @@ -3121,16 +4603,17 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int 
numpkt, uint8 chan) ifp->stats.rx_bytes += skb->len; ifp->stats.rx_packets++; } -#if defined(DHD_TCP_WINSIZE_ADJUST) - if (dhd_use_tcp_window_size_adjust) { - if (ifidx == 0 && ntoh16(skb->protocol) == ETHER_TYPE_IP) { - dhd_adjust_tcp_winsize(dhdp->op_mode, skb); - } - } -#endif /* DHD_TCP_WINSIZE_ADJUST */ if (in_interrupt()) { + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#if defined(DHD_LB) && defined(DHD_LB_RXP) + netif_receive_skb(skb); +#else netif_rx(skb); +#endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */ + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); } else { if (dhd->rxthread_enabled) { if (!skbhead) @@ -3146,15 +4629,28 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) * by netif_rx_ni(), but in earlier kernels, we need * to do it manually. */ + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + +#if defined(DHD_LB) && defined(DHD_LB_RXP) + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_receive_skb(skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#else #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); netif_rx_ni(skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); #else ulong flags; + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); netif_rx(skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); local_irq_save(flags); RAISE_RX_SOFTIRQ(); local_irq_restore(flags); #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ +#endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */ } } } @@ -3164,6 +4660,7 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx); DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl); + DHD_OS_WAKE_LOCK_TIMEOUT(dhdp); } void @@ -3179,46 +4676,27 @@ dhd_txcomplete(dhd_pub_t 
*dhdp, void *txp, bool success) dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); struct ether_header *eh; uint16 type; -#ifdef WLBTAMP - uint len; -#endif dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL); eh = (struct ether_header *)PKTDATA(dhdp->osh, txp); type = ntoh16(eh->ether_type); - if (type == ETHER_TYPE_802_1X) + if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0)) atomic_dec(&dhd->pend_8021x_cnt); -#ifdef WLBTAMP - /* Crack open the packet and check to see if it is BT HCI ACL data packet. - * If yes generate packet completion event. - */ - len = PKTLEN(dhdp->osh, txp); - - /* Generate ACL data tx completion event locally to avoid SDIO bus transaction */ - if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) { - struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1]; - - if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 && - ntoh16(lsh->type) == BTA_PROT_L2CAP) { - - dhd_bta_tx_hcidata_complete(dhdp, txp, success); - } - } -#endif /* WLBTAMP */ #ifdef PROP_TXSTATUS if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) { dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))]; uint datalen = PKTLEN(dhd->pub.osh, txp); - - if (success) { - dhd->pub.tx_packets++; - ifp->stats.tx_packets++; - ifp->stats.tx_bytes += datalen; - } else { - ifp->stats.tx_dropped++; + if (ifp != NULL) { + if (success) { + dhd->pub.tx_packets++; + ifp->stats.tx_packets++; + ifp->stats.tx_bytes += datalen; + } else { + ifp->stats.tx_dropped++; + } } } #endif @@ -3248,25 +4726,110 @@ dhd_get_stats(struct net_device *net) /* Use the protocol to get dongle stats */ dhd_prot_dstats(&dhd->pub); } - return &ifp->stats; + return &ifp->stats; +} + +static int +dhd_watchdog_thread(void *data) +{ + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + dhd_info_t *dhd = (dhd_info_t *)tsk->parent; + /* This thread doesn't need any user-level access, + * so get rid of all our resources + */ + if (dhd_watchdog_prio > 0) { + struct sched_param 
param; + param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)? + dhd_watchdog_prio:(MAX_RT_PRIO-1); + setScheduler(current, SCHED_FIFO, ¶m); + } + + while (1) { + if (down_interruptible (&tsk->sema) == 0) { + unsigned long flags; + unsigned long jiffies_at_start = jiffies; + unsigned long time_lapse; + + DHD_OS_WD_WAKE_LOCK(&dhd->pub); + SMP_RD_BARRIER_DEPENDS(); + if (tsk->terminated) { + break; + } + + if (dhd->pub.dongle_reset == FALSE) { + DHD_TIMER(("%s:\n", __FUNCTION__)); + dhd_bus_watchdog(&dhd->pub); + + DHD_GENERAL_LOCK(&dhd->pub, flags); + /* Count the tick for reference */ + dhd->pub.tickcnt++; +#ifdef DHD_L2_FILTER + dhd_l2_filter_watchdog(&dhd->pub); +#endif /* DHD_L2_FILTER */ + time_lapse = jiffies - jiffies_at_start; + + /* Reschedule the watchdog */ + if (dhd->wd_timer_valid) { + mod_timer(&dhd->timer, + jiffies + + msecs_to_jiffies(dhd_watchdog_ms) - + min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse)); + } + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + } + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + } else { + break; + } + } + + complete_and_exit(&tsk->completed, 0); +} + +static void dhd_watchdog(ulong data) +{ + dhd_info_t *dhd = (dhd_info_t *)data; + unsigned long flags; + + if (dhd->pub.dongle_reset) { + return; + } + + if (dhd->pub.busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s wd while suspend in progress \n", __FUNCTION__)); + return; + } + + if (dhd->thr_wdt_ctl.thr_pid >= 0) { + up(&dhd->thr_wdt_ctl.sema); + return; + } + + DHD_OS_WD_WAKE_LOCK(&dhd->pub); + /* Call the bus module watchdog */ + dhd_bus_watchdog(&dhd->pub); + DHD_GENERAL_LOCK(&dhd->pub, flags); + /* Count the tick for reference */ + dhd->pub.tickcnt++; + +#ifdef DHD_L2_FILTER + dhd_l2_filter_watchdog(&dhd->pub); +#endif /* DHD_L2_FILTER */ + /* Reschedule the watchdog */ + if (dhd->wd_timer_valid) + mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); } +#ifdef DHD_PCIE_RUNTIMEPM static 
int -dhd_watchdog_thread(void *data) +dhd_rpm_state_thread(void *data) { tsk_ctl_t *tsk = (tsk_ctl_t *)data; dhd_info_t *dhd = (dhd_info_t *)tsk->parent; - /* This thread doesn't need any user-level access, - * so get rid of all our resources - */ - if (dhd_watchdog_prio > 0) { - struct sched_param param; - param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)? - dhd_watchdog_prio:(MAX_RT_PRIO-1); - setScheduler(current, SCHED_FIFO, ¶m); - } - while (1) + while (1) { if (down_interruptible (&tsk->sema) == 0) { unsigned long flags; unsigned long jiffies_at_start = jiffies; @@ -3279,58 +4842,60 @@ dhd_watchdog_thread(void *data) if (dhd->pub.dongle_reset == FALSE) { DHD_TIMER(("%s:\n", __FUNCTION__)); - - /* Call the bus module watchdog */ - dhd_bus_watchdog(&dhd->pub); - + if (dhd->pub.up) { + dhd_runtimepm_state(&dhd->pub); + } DHD_GENERAL_LOCK(&dhd->pub, flags); - /* Count the tick for reference */ - dhd->pub.tickcnt++; time_lapse = jiffies - jiffies_at_start; /* Reschedule the watchdog */ - if (dhd->wd_timer_valid) - mod_timer(&dhd->timer, - jiffies + - msecs_to_jiffies(dhd_watchdog_ms) - - min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse)); - DHD_GENERAL_UNLOCK(&dhd->pub, flags); + if (dhd->rpm_timer_valid) { + mod_timer(&dhd->rpm_timer, + jiffies + + msecs_to_jiffies(dhd_runtimepm_ms) - + min(msecs_to_jiffies(dhd_runtimepm_ms), + time_lapse)); } + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + } } else { break; + } } complete_and_exit(&tsk->completed, 0); } -static void dhd_watchdog(ulong data) +static void dhd_runtimepm(ulong data) { dhd_info_t *dhd = (dhd_info_t *)data; - unsigned long flags; if (dhd->pub.dongle_reset) { return; } - if (dhd->thr_wdt_ctl.thr_pid >= 0) { - up(&dhd->thr_wdt_ctl.sema); + if (dhd->thr_rpm_ctl.thr_pid >= 0) { + up(&dhd->thr_rpm_ctl.sema); return; } +} - /* Call the bus module watchdog */ - dhd_bus_watchdog(&dhd->pub); +void dhd_runtime_pm_disable(dhd_pub_t *dhdp) +{ + dhd_os_runtimepm_timer(dhdp, 0); + dhdpcie_runtime_bus_wake(dhdp, 
TRUE, __builtin_return_address(0)); + DHD_ERROR(("DHD Runtime PM Disabled \n")); +} - DHD_GENERAL_LOCK(&dhd->pub, flags); - /* Count the tick for reference */ - dhd->pub.tickcnt++; +void dhd_runtime_pm_enable(dhd_pub_t *dhdp) +{ + dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms); + DHD_ERROR(("DHD Runtime PM Enabled \n")); +} - /* Reschedule the watchdog */ - if (dhd->wd_timer_valid) - mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); - DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* DHD_PCIE_RUNTIMEPM */ -} #ifdef ENABLE_ADAPTIVE_SCHED static void @@ -3406,21 +4971,34 @@ dhd_dpc_thread(void *data) /* Call bus dpc unless it indicated down (then clean stop) */ if (dhd->pub.busstate != DHD_BUS_DOWN) { +#ifdef DEBUG_DPC_THREAD_WATCHDOG + int resched_cnt = 0; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ dhd_os_wd_timer_extend(&dhd->pub, TRUE); while (dhd_bus_dpc(dhd->pub.bus)) { /* process all data */ +#ifdef DEBUG_DPC_THREAD_WATCHDOG + resched_cnt++; + if (resched_cnt > MAX_RESCHED_CNT) { + DHD_INFO(("%s Calling msleep to" + "let other processes run. 
\n", + __FUNCTION__)); + dhd->pub.dhd_bug_on = true; + resched_cnt = 0; + OSL_SLEEP(1); + } +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ } dhd_os_wd_timer_extend(&dhd->pub, FALSE); DHD_OS_WAKE_UNLOCK(&dhd->pub); - } else { if (dhd->pub.up) dhd_bus_stop(dhd->pub.bus, TRUE); DHD_OS_WAKE_UNLOCK(&dhd->pub); } - } - else + } else { break; + } } complete_and_exit(&tsk->completed, 0); } @@ -3449,6 +5027,16 @@ dhd_rxf_thread(void *data) DAEMONIZE("dhd_rxf"); /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */ +#ifdef CUSTOM_RXF_CPUCORE + /* change rxf thread to other cpu core */ + set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_RXF_CPUCORE)); +#else + if (dhd->pub.conf->rxf_cpucore >= 0) { + printf("%s: set rxf_cpucore %d from config.txt\n", __FUNCTION__, dhd->pub.conf->rxf_cpucore); + set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore)); + } +#endif + /* signal: thread has started */ complete(&tsk->completed); #ifdef CUSTOM_SET_CPUCORE @@ -3478,7 +5066,8 @@ dhd_rxf_thread(void *data) while (skb) { void *skbnext = PKTNEXT(pub->osh, skb); PKTSETNEXT(pub->osh, skb, NULL); - + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) netif_rx_ni(skb); #else @@ -3498,28 +5087,76 @@ dhd_rxf_thread(void *data) #endif DHD_OS_WAKE_UNLOCK(pub); - } - else + } else { break; + } } complete_and_exit(&tsk->completed, 0); } #ifdef BCMPCIE -void dhd_dpc_kill(dhd_pub_t *dhdp) +void dhd_dpc_enable(dhd_pub_t *dhdp) { dhd_info_t *dhd; - if (!dhdp) + if (!dhdp || !dhdp->info) + return; + dhd = dhdp->info; + +#ifdef DHD_LB +#ifdef DHD_LB_RXP + __skb_queue_head_init(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ +#ifdef DHD_LB_TXC + if (atomic_read(&dhd->tx_compl_tasklet.count) == 1) + tasklet_enable(&dhd->tx_compl_tasklet); +#endif /* DHD_LB_TXC */ +#ifdef DHD_LB_RXC + if (atomic_read(&dhd->rx_compl_tasklet.count) == 1) + tasklet_enable(&dhd->rx_compl_tasklet); +#endif /* DHD_LB_RXC */ +#endif /* 
DHD_LB */ + if (atomic_read(&dhd->tasklet.count) == 1) + tasklet_enable(&dhd->tasklet); +} +#endif /* BCMPCIE */ + + +#ifdef BCMPCIE +void +dhd_dpc_kill(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + + if (!dhdp) { return; + } dhd = dhdp->info; - if (!dhd) + if (!dhd) { return; + } - tasklet_kill(&dhd->tasklet); - DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__)); + if (dhd->thr_dpc_ctl.thr_pid < 0) { + tasklet_disable(&dhd->tasklet); + tasklet_kill(&dhd->tasklet); + DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__)); + } +#if defined(DHD_LB) +#ifdef DHD_LB_RXP + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ + /* Kill the Load Balancing Tasklets */ +#if defined(DHD_LB_TXC) + tasklet_disable(&dhd->tx_compl_tasklet); + tasklet_kill(&dhd->tx_compl_tasklet); +#endif /* DHD_LB_TXC */ +#if defined(DHD_LB_RXC) + tasklet_disable(&dhd->rx_compl_tasklet); + tasklet_kill(&dhd->rx_compl_tasklet); +#endif /* DHD_LB_RXC */ +#endif /* DHD_LB */ } #endif /* BCMPCIE */ @@ -3536,13 +5173,12 @@ dhd_dpc(ulong data) */ /* Call bus dpc unless it indicated down (then clean stop) */ if (dhd->pub.busstate != DHD_BUS_DOWN) { - if (dhd_bus_dpc(dhd->pub.bus)) + if (dhd_bus_dpc(dhd->pub.bus)) { + DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt); tasklet_schedule(&dhd->tasklet); - else - DHD_OS_WAKE_UNLOCK(&dhd->pub); + } } else { dhd_bus_stop(dhd->pub.bus, TRUE); - DHD_OS_WAKE_UNLOCK(&dhd->pub); } } @@ -3551,13 +5187,14 @@ dhd_sched_dpc(dhd_pub_t *dhdp) { dhd_info_t *dhd = (dhd_info_t *)dhdp->info; - DHD_OS_WAKE_LOCK(dhdp); if (dhd->thr_dpc_ctl.thr_pid >= 0) { + DHD_OS_WAKE_LOCK(dhdp); /* If the semaphore does not get up, * wake unlock should be done here */ - if (!binary_sema_up(&dhd->thr_dpc_ctl)) + if (!binary_sema_up(&dhd->thr_dpc_ctl)) { DHD_OS_WAKE_UNLOCK(dhdp); + } return; } else { tasklet_schedule(&dhd->tasklet); @@ -3591,12 +5228,13 @@ dhd_sched_rxf(dhd_pub_t *dhdp, void *skb) while (skbp) { void *skbnext = PKTNEXT(dhdp->osh, skbp); PKTSETNEXT(dhdp->osh, skbp, NULL); + 
bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); netif_rx_ni(skbp); skbp = skbnext; } DHD_ERROR(("send skb to kernel backlog without rxf_thread\n")); - } - else { + } else { if (dhd->thr_rxf_ctl.thr_pid >= 0) { up(&dhd->thr_rxf_ctl.sema); } @@ -3613,6 +5251,9 @@ dhd_sched_rxf(dhd_pub_t *dhdp, void *skb) #endif /* RXF_DEQUEUE_ON_BUSY */ } +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + #ifdef TOE /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */ static int @@ -3690,23 +5331,24 @@ dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol) } #endif /* TOE */ -#if defined(WL_CFG80211) +#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE) void dhd_set_scb_probe(dhd_pub_t *dhd) { -#define NUM_SCB_MAX_PROBE 3 int ret = 0; wl_scb_probe_t scb_probe; - char iovbuf[WL_EVENTING_MASK_LEN + 12]; + char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)]; memset(&scb_probe, 0, sizeof(wl_scb_probe_t)); - if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { return; + } bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__)); + } memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t)); @@ -3714,12 +5356,12 @@ void dhd_set_scb_probe(dhd_pub_t *dhd) bcm_mkiovar("scb_probe", (char *)&scb_probe, sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__)); -#undef NUM_SCB_MAX_PROBE - return; + return; + } } -#endif /* WL_CFG80211 */ +#endif /* WL_CFG80211 
&& NUM_SCB_MAX_PROBE */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) static void @@ -3867,17 +5509,29 @@ static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error) DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__)); return FALSE; } -#endif +#endif -#ifdef CONFIG_MACH_UNIVERSAL5433 - /* old revision does not send hang message */ - if ((check_rev() && (error == -ETIMEDOUT)) || (error == -EREMOTEIO) || -#else if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) || -#endif /* CONFIG_MACH_UNIVERSAL5433 */ ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) { +#ifdef BCMPCIE + DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n", + __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout, + dhdp->d3ackcnt_timeout, error, dhdp->busstate)); +#else DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate)); +#endif /* BCMPCIE */ + if (dhdp->hang_reason == 0) { + if (dhdp->dongle_trap_occured) { + dhdp->hang_reason = HANG_REASON_DONGLE_TRAP; +#ifdef BCMPCIE + } else if (dhdp->d3ackcnt_timeout) { + dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT; +#endif /* BCMPCIE */ + } else { + dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT; + } + } net_os_send_hang_message(net); return TRUE; } @@ -3908,9 +5562,18 @@ int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_bu } /* send to dongle (must be up, and wl). 
*/ - if (pub->busstate != DHD_BUS_DATA) { - bcmerror = BCME_DONGLE_DOWN; - goto done; + if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) { + if (allow_delay_fwdl) { + int ret = dhd_bus_start(pub); + if (ret != 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + } else { + bcmerror = BCME_DONGLE_DOWN; + goto done; + } } if (!pub->iswl) { @@ -3992,6 +5655,28 @@ int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_bu #endif goto done; } + +#ifdef DHD_DEBUG + if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) { + if (ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) { + /* Print IOVAR Information */ + DHD_IOV_INFO(("%s: IOVAR_INFO name = %s set = %d\n", + __FUNCTION__, (char *)data_buf, ioc->set)); + if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) { + prhex(NULL, data_buf + strlen(data_buf) + 1, + buflen - strlen(data_buf) - 1); + } + } else { + /* Print IOCTL Information */ + DHD_IOV_INFO(("%s: IOCTL_INFO cmd = %d set = %d\n", + __FUNCTION__, ioc->cmd, ioc->set)); + if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) { + prhex(NULL, data_buf, buflen); + } + } + } +#endif /* DHD_DEBUG */ + bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen); done: @@ -4005,7 +5690,6 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) { dhd_info_t *dhd = DHD_DEV_INFO(net); dhd_ioctl_t ioc; - int bcmerror = 0; int ifidx; int ret; void *local_buf = NULL; @@ -4015,19 +5699,18 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) DHD_PERIM_LOCK(&dhd->pub); /* Interface up check for built-in type */ - if (!dhd_download_fw_on_driverload && dhd->pub.up == 0) { + if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) { DHD_ERROR(("%s: Interface is down \n", __FUNCTION__)); - DHD_PERIM_UNLOCK(&dhd->pub); - DHD_OS_WAKE_UNLOCK(&dhd->pub); - return BCME_NOTUP; + ret = BCME_NOTUP; + goto 
exit; } /* send to dongle only if we are not waiting for reload already */ if (dhd->pub.hang_was_sent) { DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__)); DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS); - DHD_OS_WAKE_UNLOCK(&dhd->pub); - return OSL_ERROR(BCME_DONGLE_DOWN); + ret = BCME_DONGLE_DOWN; + goto exit; } ifidx = dhd_net2idx(dhd, net); @@ -4035,9 +5718,8 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) if (ifidx == DHD_BAD_IF) { DHD_ERROR(("%s: BAD IF\n", __FUNCTION__)); - DHD_PERIM_UNLOCK(&dhd->pub); - DHD_OS_WAKE_UNLOCK(&dhd->pub); - return -1; + ret = -1; + goto exit; } #if defined(WL_WIRELESS_EXT) @@ -4045,41 +5727,40 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) { /* may recurse, do NOT lock */ ret = wl_iw_ioctl(net, ifr, cmd); - DHD_PERIM_UNLOCK(&dhd->pub); - DHD_OS_WAKE_UNLOCK(&dhd->pub); - return ret; + goto exit; } #endif /* defined(WL_WIRELESS_EXT) */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) if (cmd == SIOCETHTOOL) { ret = dhd_ethtool(dhd, (void*)ifr->ifr_data); - DHD_PERIM_UNLOCK(&dhd->pub); - DHD_OS_WAKE_UNLOCK(&dhd->pub); - return ret; + goto exit; } #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ if (cmd == SIOCDEVPRIVATE+1) { ret = wl_android_priv_cmd(net, ifr, cmd); dhd_check_hang(net, &dhd->pub, ret); - DHD_OS_WAKE_UNLOCK(&dhd->pub); - return ret; + goto exit; } if (cmd != SIOCDEVPRIVATE) { - DHD_PERIM_UNLOCK(&dhd->pub); - DHD_OS_WAKE_UNLOCK(&dhd->pub); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto exit; } memset(&ioc, 0, sizeof(ioc)); #ifdef CONFIG_COMPAT - if (is_compat_task()) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)) + if (in_compat_syscall()) +#else + if (is_compat_task()) +#endif + { compat_wl_ioctl_t compat_ioc; if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) { - bcmerror = BCME_BADADDR; + ret = BCME_BADADDR; goto done; } ioc.cmd = 
compat_ioc.cmd; @@ -4091,7 +5772,7 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) /* To differentiate between wl and dhd read 4 more byes */ if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t), sizeof(uint)) != 0)) { - bcmerror = BCME_BADADDR; + ret = BCME_BADADDR; goto done; } } else @@ -4099,34 +5780,34 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) { /* Copy the ioc control structure part of ioctl request */ if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) { - bcmerror = BCME_BADADDR; + ret = BCME_BADADDR; goto done; } /* To differentiate between wl and dhd read 4 more byes */ if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t), sizeof(uint)) != 0)) { - bcmerror = BCME_BADADDR; + ret = BCME_BADADDR; goto done; } } if (!capable(CAP_NET_ADMIN)) { - bcmerror = BCME_EPERM; + ret = BCME_EPERM; goto done; } if (ioc.len > 0) { buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN); if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) { - bcmerror = BCME_NOMEM; + ret = BCME_NOMEM; goto done; } DHD_PERIM_UNLOCK(&dhd->pub); if (copy_from_user(local_buf, ioc.buf, buflen)) { DHD_PERIM_LOCK(&dhd->pub); - bcmerror = BCME_BADADDR; + ret = BCME_BADADDR; goto done; } DHD_PERIM_LOCK(&dhd->pub); @@ -4134,12 +5815,12 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) *(char *)(local_buf + buflen) = '\0'; } - bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf); + ret = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf); - if (!bcmerror && buflen && local_buf && ioc.buf) { + if (!ret && buflen && local_buf && ioc.buf) { DHD_PERIM_UNLOCK(&dhd->pub); if (copy_to_user(ioc.buf, local_buf, buflen)) - bcmerror = -EFAULT; + ret = -EFAULT; DHD_PERIM_LOCK(&dhd->pub); } @@ -4147,11 +5828,69 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) if (local_buf) MFREE(dhd->pub.osh, local_buf, buflen+1); +exit: DHD_PERIM_UNLOCK(&dhd->pub); 
DHD_OS_WAKE_UNLOCK(&dhd->pub); - return OSL_ERROR(bcmerror); + return OSL_ERROR(ret); +} + + +#ifdef FIX_CPU_MIN_CLOCK +static int dhd_init_cpufreq_fix(dhd_info_t *dhd) +{ + if (dhd) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_init(&dhd->cpufreq_fix); +#endif + dhd->cpufreq_fix_status = FALSE; + } + return 0; +} + +static void dhd_fix_cpu_freq(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_lock(&dhd->cpufreq_fix); +#endif + if (dhd && !dhd->cpufreq_fix_status) { + pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000); +#ifdef FIX_BUS_MIN_CLOCK + pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000); +#endif /* FIX_BUS_MIN_CLOCK */ + DHD_ERROR(("pm_qos_add_requests called\n")); + + dhd->cpufreq_fix_status = TRUE; + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_unlock(&dhd->cpufreq_fix); +#endif +} + +static void dhd_rollback_cpu_freq(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_lock(&dhd ->cpufreq_fix); +#endif + if (dhd && dhd->cpufreq_fix_status != TRUE) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_unlock(&dhd->cpufreq_fix); +#endif + return; + } + + pm_qos_remove_request(&dhd->dhd_cpu_qos); +#ifdef FIX_BUS_MIN_CLOCK + pm_qos_remove_request(&dhd->dhd_bus_qos); +#endif /* FIX_BUS_MIN_CLOCK */ + DHD_ERROR(("pm_qos_add_requests called\n")); + + dhd->cpufreq_fix_status = FALSE; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_unlock(&dhd->cpufreq_fix); +#endif } +#endif /* FIX_CPU_MIN_CLOCK */ #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */ int dhd_deepsleep(dhd_info_t *dhd, int flag) @@ -4233,12 +5972,26 @@ dhd_stop(struct net_device *net) DHD_OS_WAKE_LOCK(&dhd->pub); DHD_PERIM_LOCK(&dhd->pub); printf("%s: Enter %p\n", __FUNCTION__, net); + dhd->pub.rxcnt_timeout = 0; + dhd->pub.txcnt_timeout = 0; + +#ifdef BCMPCIE + dhd->pub.d3ackcnt_timeout = 0; +#endif /* BCMPCIE */ + if 
(dhd->pub.up == 0) { goto exit; } dhd_if_flush_sta(DHD_DEV_IFP(net)); + /* Disable Runtime PM before interface down */ + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + +#ifdef FIX_CPU_MIN_CLOCK + if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) + dhd_rollback_cpu_freq(dhd); +#endif /* FIX_CPU_MIN_CLOCK */ ifidx = dhd_net2idx(dhd, net); BCM_REFERENCE(ifidx); @@ -4249,8 +6002,11 @@ dhd_stop(struct net_device *net) #ifdef WL_CFG80211 if (ifidx == 0) { + dhd_if_t *ifp; wl_cfg80211_down(NULL); + ifp = dhd->iflist[0]; + ASSERT(ifp && ifp->net); /* * For CFG80211: Clean up all the left over virtual interfaces * when the primary Interface is brought down. [ifconfig wlan0 down] @@ -4260,12 +6016,55 @@ dhd_stop(struct net_device *net) (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) { int i; +#ifdef WL_CFG80211_P2P_DEV_IF + wl_cfg80211_del_p2p_wdev(); +#endif /* WL_CFG80211_P2P_DEV_IF */ + dhd_net_if_lock_local(dhd); for (i = 1; i < DHD_MAX_IFS; i++) dhd_remove_if(&dhd->pub, i, FALSE); + + if (ifp && ifp->net) { + dhd_if_del_sta_list(ifp); + } + +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = FALSE; + unregister_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = FALSE; + unregister_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ dhd_net_if_unlock_local(dhd); } +#if 0 + // terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process + cancel_work_sync(dhd->dhd_deferred_wq); +#endif +#if defined(DHD_LB) && defined(DHD_LB_RXP) + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB && DHD_LB_RXP */ + } + +#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ +#if defined(DHD_LB) && defined(DHD_LB_RXP) + 
if (ifp->net == dhd->rx_napi_netdev) { + DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n", + __FUNCTION__, &dhd->rx_napi_struct, net, net->name)); + skb_queue_purge(&dhd->rx_napi_queue); + napi_disable(&dhd->rx_napi_struct); + netif_napi_del(&dhd->rx_napi_struct); + dhd->rx_napi_netdev = NULL; } +#endif /* DHD_LB && DHD_LB_RXP */ + } #endif /* WL_CFG80211 */ @@ -4277,15 +6076,15 @@ dhd_stop(struct net_device *net) OLD_MOD_DEC_USE_COUNT; exit: - if (ifidx == 0 && !dhd_download_fw_on_driverload) - wl_android_wifi_off(net); - else { + if (ifidx == 0 && !dhd_download_fw_on_driverload) { + wl_android_wifi_off(net, TRUE); +#ifdef WL_EXT_IAPSTA + wl_android_ext_dettach_netdev(); +#endif + } else { if (dhd->pub.conf->deepsleep) dhd_deepsleep(dhd, 1); } - dhd->pub.rxcnt_timeout = 0; - dhd->pub.txcnt_timeout = 0; - dhd->pub.hang_was_sent = 0; /* Clear country spec for for built-in type driver */ @@ -4295,15 +6094,27 @@ dhd_stop(struct net_device *net) dhd->pub.dhd_cspec.ccode[0] = 0x00; } - printf("%s: Exit\n", __FUNCTION__); +#ifdef BCMDBGFS + dhd_dbg_remove(); +#endif + DHD_PERIM_UNLOCK(&dhd->pub); DHD_OS_WAKE_UNLOCK(&dhd->pub); + + /* Destroy wakelock */ + if (!dhd_download_fw_on_driverload && + (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_OS_WAKE_LOCK_DESTROY(dhd); + dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT; + } + printf("%s: Exit\n", __FUNCTION__); + return 0; } #if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME) extern bool g_first_broadcast_scan; -#endif +#endif #ifdef WL11U static int dhd_interworking_enable(dhd_pub_t *dhd) @@ -4313,7 +6124,8 @@ static int dhd_interworking_enable(dhd_pub_t *dhd) int ret = BCME_OK; bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + if (ret < 0) { DHD_ERROR(("%s: enableing interworking failed, 
ret=%d\n", __FUNCTION__, ret)); } @@ -4321,9 +6133,9 @@ static int dhd_interworking_enable(dhd_pub_t *dhd) /* basic capabilities for HS20 REL2 */ uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF; bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, - iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { - DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__, ret)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s: set wnm returned (%d)\n", __FUNCTION__, ret)); } } @@ -4338,8 +6150,25 @@ dhd_open(struct net_device *net) #ifdef TOE uint32 toe_ol; #endif +#ifdef BCM_FD_AGGR + char iovbuf[WLC_IOCTL_SMLEN]; + dbus_config_t config; + uint32 agglimit = 0; + uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */ +#endif /* BCM_FD_AGGR */ int ifidx; int32 ret = 0; +#if defined(OOB_INTR_ONLY) + uint32 bus_type = -1; + uint32 bus_num = -1; + uint32 slot_num = -1; + wifi_adapter_info_t *adapter = NULL; +#endif + + if (!dhd_download_fw_on_driverload && !dhd_driver_init_done) { + DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__)); + return -1; + } printf("%s: Enter %p\n", __FUNCTION__, net); #if defined(MULTIPLE_SUPPLICANT) @@ -4350,12 +6179,38 @@ dhd_open(struct net_device *net) mutex_lock(&_dhd_sdio_mutex_lock_); #endif #endif /* MULTIPLE_SUPPLICANT */ + /* Init wakelock */ + if (!dhd_download_fw_on_driverload && + !(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_OS_WAKE_LOCK_INIT(dhd); + dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; + } + +#ifdef PREVENT_REOPEN_DURING_HANG + /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */ + if (dhd->pub.hang_was_sent == 1) { + DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__)); + /* Force to bring down WLAN interface in case dhd_stop() is not called + * from the upper layer when HANG event is triggered. 
+ */ + if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) { + DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__)); + dhd_stop(net); + } else { + return -1; + } + } +#endif /* PREVENT_REOPEN_DURING_HANG */ + DHD_OS_WAKE_LOCK(&dhd->pub); DHD_PERIM_LOCK(&dhd->pub); dhd->pub.dongle_trap_occured = 0; dhd->pub.hang_was_sent = 0; - + dhd->pub.hang_reason = 0; +#ifdef DHD_LOSSLESS_ROAMING + dhd->pub.dequeue_prec_map = ALLPRIO; +#endif #if 0 /* * Force start if ifconfig_up gets called before START command @@ -4391,7 +6246,7 @@ dhd_open(struct net_device *net) DHD_ERROR(("\n%s\n", dhd_version)); #if defined(USE_INITIAL_SHORT_DWELL_TIME) g_first_broadcast_scan = TRUE; -#endif +#endif ret = wl_android_wifi_on(net); if (ret != 0) { DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n", @@ -4400,6 +6255,22 @@ dhd_open(struct net_device *net) goto exit; } } +#ifdef FIX_CPU_MIN_CLOCK + if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) { + dhd_init_cpufreq_fix(dhd); + dhd_fix_cpu_freq(dhd); + } +#endif /* FIX_CPU_MIN_CLOCK */ +#if defined(OOB_INTR_ONLY) + if (dhd->pub.conf->dpc_cpucore >= 0) { + dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num); + adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num); + if (adapter) { + printf("%s: set irq affinity hit %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore); + irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore)); + } + } +#endif if (dhd->pub.busstate != DHD_BUS_DATA) { @@ -4419,15 +6290,56 @@ dhd_open(struct net_device *net) dhd_deepsleep(dhd, 0); } +#ifdef BCM_FD_AGGR + config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT; + + + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4, + iovbuf, sizeof(iovbuf)); + + if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) { + agglimit = *(uint32 *)iovbuf; + config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT; + config.aggr_param.maxrxsize = agglimit & 
BCM_RPC_TP_AGG_BYTES_MASK; + DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n", + agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize)); + if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) { + DHD_ERROR(("set tx/rx queue size and buffersize failed\n")); + } + } else { + DHD_ERROR(("get rpc_dngl_agglimit failed\n")); + rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC; + } + + /* Set aggregation for TX */ + bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK, + rpc_agg & BCM_RPC_TP_HOST_AGG_MASK); + + /* Set aggregation for RX */ + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf)); + if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) { + dhd->pub.info->fdaggr = 0; + if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK) + dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED; + if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK) + dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED; + } else { + DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret)); + } +#endif /* BCM_FD_AGGR */ + /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */ memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); #ifdef TOE /* Get current TOE mode from dongle */ - if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) + if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) { dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM; - else + } else { dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM; + } #endif /* TOE */ #if defined(WL_CFG80211) @@ -4436,7 +6348,51 @@ dhd_open(struct net_device *net) ret = -1; goto exit; } + if (!dhd_download_fw_on_driverload) { +#ifdef ARP_OFFLOAD_SUPPORT + dhd->pend_ipaddr = 0; + if (!dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = TRUE; + register_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ +#if 
defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (!dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = TRUE; + register_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ +#ifdef DHD_LB + DHD_LB_STATS_INIT(&dhd->pub); +#ifdef DHD_LB_RXP + __skb_queue_head_init(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ +#endif /* DHD_LB */ + } + +#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) +#if defined(SET_RPS_CPUS) + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#else + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD); +#endif +#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ +#if defined(DHD_LB) && defined(DHD_LB_RXP) + if (dhd->rx_napi_netdev == NULL) { + dhd->rx_napi_netdev = dhd->iflist[ifidx]->net; + memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct)); + netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct, + dhd_napi_poll, dhd_napi_weight); + DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n", + __FUNCTION__, &dhd->rx_napi_struct, net, net->name)); + napi_enable(&dhd->rx_napi_struct); + DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__)); + skb_queue_head_init(&dhd->rx_napi_queue); + } +#endif /* DHD_LB && DHD_LB_RXP */ +#if defined(NUM_SCB_MAX_PROBE) dhd_set_scb_probe(&dhd->pub); +#endif /* NUM_SCB_MAX_PROBE */ #endif /* WL_CFG80211 */ } @@ -4444,14 +6400,16 @@ dhd_open(struct net_device *net) netif_start_queue(net); dhd->pub.up = 1; + OLD_MOD_INC_USE_COUNT; + #ifdef BCMDBGFS dhd_dbg_init(&dhd->pub); #endif - OLD_MOD_INC_USE_COUNT; exit: - if (ret) + if (ret) { dhd_stop(net); + } DHD_PERIM_UNLOCK(&dhd->pub); DHD_OS_WAKE_UNLOCK(&dhd->pub); @@ -4518,6 +6476,11 @@ dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, ui */ if (ifevent->ifidx > 0) { dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t)); + if (if_event == NULL) { + DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes", + 
MALLOCED(dhdinfo->pub.osh))); + return BCME_NOMEM; + } memcpy(&if_event->event, ifevent, sizeof(if_event->event)); memcpy(if_event->mac, mac, ETHER_ADDR_LEN); @@ -4535,7 +6498,7 @@ dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, ui { dhd_if_event_t *if_event; -#if defined(WL_CFG80211) && !defined(P2PONEINT) +#ifdef WL_CFG80211 if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK) return BCME_OK; #endif /* WL_CFG80211 */ @@ -4544,6 +6507,11 @@ dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, ui * anything else */ if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t)); + if (if_event == NULL) { + DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes", + MALLOCED(dhdinfo->pub.osh))); + return BCME_NOMEM; + } memcpy(&if_event->event, ifevent, sizeof(if_event->event)); memcpy(if_event->mac, mac, ETHER_ADDR_LEN); strncpy(if_event->name, name, IFNAMSIZ); @@ -4560,7 +6528,7 @@ dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, ui */ struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, - uint8 *mac, uint8 bssidx, bool need_rtnl_lock) + uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name) { dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info; dhd_if_t *ifp; @@ -4617,6 +6585,7 @@ dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, strncpy(ifp->net->name, name, IFNAMSIZ); ifp->net->name[IFNAMSIZ - 1] = '\0'; } + #ifdef WL_CFG80211 if (ifidx == 0) ifp->net->destructor = free_netdev; @@ -4629,15 +6598,26 @@ dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, ifp->name[IFNAMSIZ - 1] = '\0'; dhdinfo->iflist[ifidx] = ifp; +/* initialize the dongle provided if name */ + if (dngl_name) + strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ); + else + strncpy(ifp->dngl_name, name, IFNAMSIZ); + #ifdef PCIE_FULL_DONGLE /* Initialize STA info list */ INIT_LIST_HEAD(&ifp->sta_list); DHD_IF_STA_LIST_LOCK_INIT(ifp); 
#endif /* PCIE_FULL_DONGLE */ +#ifdef DHD_L2_FILTER + ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh); + ifp->parp_allnode = TRUE; +#endif return ifp->net; fail: + if (ifp != NULL) { if (ifp->net != NULL) { dhd_dev_priv_clear(ifp->net); @@ -4647,6 +6627,7 @@ dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp)); ifp = NULL; } + dhdinfo->iflist[ifidx] = NULL; return NULL; } @@ -4661,6 +6642,7 @@ dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock) dhd_if_t *ifp; ifp = dhdinfo->iflist[ifidx]; + if (ifp != NULL) { if (ifp->net != NULL) { DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx)); @@ -4671,29 +6653,40 @@ dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock) if (ifp->net->reg_state == NETREG_UNINITIALIZED) { free_netdev(ifp->net); } else { - netif_stop_queue(ifp->net); + netif_tx_disable(ifp->net); -#ifdef SET_RPS_CPUS +#if defined(SET_RPS_CPUS) custom_rps_map_clear(ifp->net->_rx); #endif /* SET_RPS_CPUS */ +#if defined(SET_RPS_CPUS) +#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)) + dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF); +#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */ +#endif if (need_rtnl_lock) unregister_netdev(ifp->net); else unregister_netdevice(ifp->net); } ifp->net = NULL; + dhdinfo->iflist[ifidx] = NULL; } #ifdef DHD_WMF dhd_wmf_cleanup(dhdpub, ifidx); #endif /* DHD_WMF */ +#ifdef DHD_L2_FILTER + bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE, + NULL, FALSE, dhdpub->tickcnt); + deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table); + ifp->phnd_arp_table = NULL; +#endif /* DHD_L2_FILTER */ dhd_if_del_sta_list(ifp); - dhdinfo->iflist[ifidx] = NULL; MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp)); - + ifp = NULL; } return BCME_OK; @@ -4725,25 +6718,6 @@ static struct net_device_ops dhd_ops_virt = { .ndo_set_multicast_list = dhd_set_multicast_list, #endif }; - -#ifdef P2PONEINT -extern int wl_cfgp2p_if_open(struct 
net_device *net); -extern int wl_cfgp2p_if_stop(struct net_device *net); - -static struct net_device_ops dhd_cfgp2p_ops_virt = { - .ndo_open = wl_cfgp2p_if_open, - .ndo_stop = wl_cfgp2p_if_stop, - .ndo_get_stats = dhd_get_stats, - .ndo_do_ioctl = dhd_ioctl_entry, - .ndo_start_xmit = dhd_start_xmit, - .ndo_set_mac_address = dhd_set_mac_address, -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) - .ndo_set_rx_mode = dhd_set_multicast_list, -#else - .ndo_set_multicast_list = dhd_set_multicast_list, -#endif -}; -#endif /* P2PONEINT */ #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */ #ifdef DEBUGGER @@ -4753,9 +6727,31 @@ extern void debugger_init(void *bus_handle); #ifdef SHOW_LOGTRACE static char *logstrs_path = "/root/logstrs.bin"; +static char *st_str_file_path = "/root/rtecdc.bin"; +static char *map_file_path = "/root/rtecdc.map"; +static char *rom_st_str_file_path = "/root/roml.bin"; +static char *rom_map_file_path = "/root/roml.map"; + +#define BYTES_AHEAD_NUM 11 /* address in map file is before these many bytes */ +#define READ_NUM_BYTES 1000 /* read map file each time this No. 
of bytes */ +#define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */ +static char *ramstart_str = "text_start"; /* string in mapfile has addr ramstart */ +static char *rodata_start_str = "rodata_start"; /* string in mapfile has addr rodata start */ +static char *rodata_end_str = "rodata_end"; /* string in mapfile has addr rodata end */ +static char *ram_file_str = "rtecdc"; +static char *rom_file_str = "roml"; +#define RAMSTART_BIT 0x01 +#define RDSTART_BIT 0x02 +#define RDEND_BIT 0x04 +#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT) + module_param(logstrs_path, charp, S_IRUGO); +module_param(st_str_file_path, charp, S_IRUGO); +module_param(map_file_path, charp, S_IRUGO); +module_param(rom_st_str_file_path, charp, S_IRUGO); +module_param(rom_map_file_path, charp, S_IRUGO); -int +static void dhd_init_logstrs_array(dhd_event_log_t *temp) { struct file *filep = NULL; @@ -4772,27 +6768,30 @@ dhd_init_logstrs_array(dhd_event_log_t *temp) int num_fmts = 0; uint32 i = 0; int error = 0; - set_fs(KERNEL_DS); + fs = get_fs(); + set_fs(KERNEL_DS); + filep = filp_open(logstrs_path, O_RDONLY, 0); + if (IS_ERR(filep)) { - DHD_ERROR(("Failed to open the file logstrs.bin in %s\n", __FUNCTION__)); + DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path)); goto fail; } error = vfs_stat(logstrs_path, &stat); if (error) { - DHD_ERROR(("Failed in %s to find file stat\n", __FUNCTION__)); + DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path)); goto fail; } logstrs_size = (int) stat.size; raw_fmts = kmalloc(logstrs_size, GFP_KERNEL); if (raw_fmts == NULL) { - DHD_ERROR(("Failed to allocate raw_fmts memory\n")); + DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__)); goto fail; } if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) { - DHD_ERROR(("Error: Log strings file read failed\n")); + DHD_ERROR(("%s: Failed to read file %s", __FUNCTION__, logstrs_path)); goto fail; } @@ -4879,7 
+6878,7 @@ dhd_init_logstrs_array(dhd_event_log_t *temp) temp->num_fmts = num_fmts; filp_close(filep, NULL); set_fs(fs); - return 0; + return; fail: if (raw_fmts) { kfree(raw_fmts); @@ -4889,8 +6888,206 @@ dhd_init_logstrs_array(dhd_event_log_t *temp) filp_close(filep, NULL); set_fs(fs); temp->fmts = NULL; - return -1; + return; +} + +static int +dhd_read_map(char *fname, uint32 *ramstart, uint32 *rodata_start, + uint32 *rodata_end) +{ + struct file *filep = NULL; + mm_segment_t fs; + char *raw_fmts = NULL; + uint32 read_size = READ_NUM_BYTES; + int error = 0; + char * cptr = NULL; + char c; + uint8 count = 0; + + *ramstart = 0; + *rodata_start = 0; + *rodata_end = 0; + + if (fname == NULL) { + DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__)); + return BCME_ERROR; + } + + fs = get_fs(); + set_fs(KERNEL_DS); + + filep = filp_open(fname, O_RDONLY, 0); + if (IS_ERR(filep)) { + DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__, fname)); + goto fail; + } + + /* Allocate 1 byte more than read_size to terminate it with NULL */ + raw_fmts = kmalloc(read_size + 1, GFP_KERNEL); + if (raw_fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__)); + goto fail; + } + + /* read ram start, rodata_start and rodata_end values from map file */ + + while (count != ALL_MAP_VAL) + { + error = vfs_read(filep, raw_fmts, read_size, (&filep->f_pos)); + if (error < 0) { + DHD_ERROR(("%s: read failed %s err:%d \n", __FUNCTION__, + map_file_path, error)); + goto fail; + } + + if (error < read_size) { + /* + * since we reset file pos back to earlier pos by + * GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF. 
+ * So if ret value is less than read_size, reached EOF don't read further + */ + break; + } + /* End raw_fmts with NULL as strstr expects NULL terminated strings */ + raw_fmts[read_size] = '\0'; + + /* Get ramstart address */ + if ((cptr = strstr(raw_fmts, ramstart_str))) { + cptr = cptr - BYTES_AHEAD_NUM; + sscanf(cptr, "%x %c text_start", ramstart, &c); + count |= RAMSTART_BIT; + } + + /* Get ram rodata start address */ + if ((cptr = strstr(raw_fmts, rodata_start_str))) { + cptr = cptr - BYTES_AHEAD_NUM; + sscanf(cptr, "%x %c rodata_start", rodata_start, &c); + count |= RDSTART_BIT; + } + + /* Get ram rodata end address */ + if ((cptr = strstr(raw_fmts, rodata_end_str))) { + cptr = cptr - BYTES_AHEAD_NUM; + sscanf(cptr, "%x %c rodata_end", rodata_end, &c); + count |= RDEND_BIT; + } + memset(raw_fmts, 0, read_size); + /* + * go back to predefined NUM of bytes so that we won't miss + * the string and addr even if it comes as splited in next read. + */ + filep->f_pos = filep->f_pos - GO_BACK_FILE_POS_NUM_BYTES; + } + + DHD_ERROR(("---ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n", + *ramstart, *rodata_start, *rodata_end)); + + DHD_ERROR(("readmap over \n")); + +fail: + if (raw_fmts) { + kfree(raw_fmts); + raw_fmts = NULL; + } + if (!IS_ERR(filep)) + filp_close(filep, NULL); + + set_fs(fs); + if (count == ALL_MAP_VAL) { + return BCME_OK; + } + DHD_ERROR(("readmap error 0X%x \n", count)); + return BCME_ERROR; +} + +static void +dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file) +{ + struct file *filep = NULL; + mm_segment_t fs; + char *raw_fmts = NULL; + uint32 logstrs_size = 0; + + int error = 0; + uint32 ramstart = 0; + uint32 rodata_start = 0; + uint32 rodata_end = 0; + uint32 logfilebase = 0; + + error = dhd_read_map(map_file, &ramstart, &rodata_start, &rodata_end); + if (error == BCME_ERROR) { + DHD_ERROR(("readmap Error!! 
\n")); + /* don't do event log parsing in actual case */ + temp->raw_sstr = NULL; + return; + } + DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n", + ramstart, rodata_start, rodata_end)); + + fs = get_fs(); + set_fs(KERNEL_DS); + + filep = filp_open(str_file, O_RDONLY, 0); + if (IS_ERR(filep)) { + DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file)); + goto fail; + } + + /* Full file size is huge. Just read required part */ + logstrs_size = rodata_end - rodata_start; + + raw_fmts = kmalloc(logstrs_size, GFP_KERNEL); + if (raw_fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__)); + goto fail; + } + + logfilebase = rodata_start - ramstart; + + error = generic_file_llseek(filep, logfilebase, SEEK_SET); + if (error < 0) { + DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error)); + goto fail; + } + + error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos)); + if (error != logstrs_size) { + DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error)); + goto fail; + } + + if (strstr(str_file, ram_file_str) != NULL) { + temp->raw_sstr = raw_fmts; + temp->ramstart = ramstart; + temp->rodata_start = rodata_start; + temp->rodata_end = rodata_end; + } else if (strstr(str_file, rom_file_str) != NULL) { + temp->rom_raw_sstr = raw_fmts; + temp->rom_ramstart = ramstart; + temp->rom_rodata_start = rodata_start; + temp->rom_rodata_end = rodata_end; + } + + filp_close(filep, NULL); + set_fs(fs); + + return; +fail: + if (raw_fmts) { + kfree(raw_fmts); + raw_fmts = NULL; + } + if (!IS_ERR(filep)) + filp_close(filep, NULL); + set_fs(fs); + if (strstr(str_file, ram_file_str) != NULL) { + temp->raw_sstr = NULL; + } else if (strstr(str_file, rom_file_str) != NULL) { + temp->rom_raw_sstr = NULL; + } + return; } + #endif /* SHOW_LOGTRACE */ @@ -4908,10 +7105,13 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT; 
DHD_TRACE(("%s: Enter\n", __FUNCTION__)); +#ifdef STBLINUX + DHD_ERROR(("%s\n", driver_target)); +#endif /* STBLINUX */ /* will implement get_ids for DBUS later */ #if defined(BCMSDIO) dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num); -#endif +#endif adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num); /* Allocate primary dhd_info */ @@ -4934,6 +7134,15 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) #ifdef GET_CUSTOM_MAC_ENABLE wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet); #endif /* GET_CUSTOM_MAC_ENABLE */ +#ifdef CUSTOM_FORCE_NODFS_FLAG + dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG; + dhd->pub.force_country_change = TRUE; +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +#ifdef CUSTOM_COUNTRY_CODE + get_customized_country_code(dhd->adapter, + dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec, + dhd->pub.dhd_cflags); +#endif /* CUSTOM_COUNTRY_CODE */ dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID; dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID; @@ -4964,9 +7173,6 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) * solution */ dhd_update_fw_nv_path(dhd); -#ifndef BUILD_IN_KERNEL - dhd_conf_read_config(&dhd->pub, dhd->conf_path); -#endif /* Set network interface name if it was provided as module parameter */ if (iface_name[0]) { @@ -4979,17 +7185,26 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2)) strcat(if_name, "%d"); } - net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE); - if (net == NULL) + + /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */ + net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL); + if (net == NULL) { goto fail; - dhd_state |= DHD_ATTACH_STATE_ADD_IF; + } + + dhd_state |= DHD_ATTACH_STATE_ADD_IF; +#ifdef DHD_L2_FILTER + /* initialize the l2_filter_cnt */ + dhd->pub.l2_filter_cnt = 0; +#endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) net->open = NULL; #else 
net->netdev_ops = NULL; #endif + mutex_init(&dhd->dhd_iovar_mutex); sema_init(&dhd->proto_sem, 1); #ifdef PROP_TXSTATUS @@ -4998,11 +7213,25 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) dhd->pub.skip_fc = dhd_wlfc_skip_fc; dhd->pub.plat_init = dhd_wlfc_plat_init; dhd->pub.plat_deinit = dhd_wlfc_plat_deinit; + +#ifdef DHD_WLFC_THREAD + init_waitqueue_head(&dhd->pub.wlfc_wqhead); + dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread"); + if (IS_ERR(dhd->pub.wlfc_thread)) { + DHD_ERROR(("create wlfc thread failed\n")); + goto fail; + } else { + wake_up_process(dhd->pub.wlfc_thread); + } +#endif /* DHD_WLFC_THREAD */ #endif /* PROP_TXSTATUS */ /* Initialize other structure content */ init_waitqueue_head(&dhd->ioctl_resp_wait); + init_waitqueue_head(&dhd->d3ack_wait); init_waitqueue_head(&dhd->ctrl_wait); + init_waitqueue_head(&dhd->dhd_bus_busy_state_wait); + dhd->pub.dhd_bus_busy_state = 0; /* Initialize the spinlocks */ spin_lock_init(&dhd->sdlock); @@ -5019,20 +7248,16 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) /* Initialize Wakelock stuff */ spin_lock_init(&dhd->wakelock_spinlock); - dhd->wakelock_counter = 0; + spin_lock_init(&dhd->wakelock_evt_spinlock); + DHD_OS_WAKE_LOCK_INIT(dhd); dhd->wakelock_wd_counter = 0; - dhd->wakelock_rx_timeout_enable = 0; - dhd->wakelock_ctrl_timeout_enable = 0; #ifdef CONFIG_HAS_WAKELOCK + // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake"); - wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake"); - wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake"); wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake"); -#ifdef BCMPCIE_OOB_HOST_WAKE - wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake"); -#endif /* BCMPCIE_OOB_HOST_WAKE */ #endif /* CONFIG_HAS_WAKELOCK */ -#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_init(&dhd->dhd_net_if_mutex); mutex_init(&dhd->dhd_suspend_mutex); #endif @@ -5055,6 +7280,9 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) dhd_monitor_init(&dhd->pub); dhd_state |= DHD_ATTACH_STATE_CFG80211; #endif +#ifdef DHD_LOG_DUMP + dhd_log_dump_init(&dhd->pub); +#endif /* DHD_LOG_DUMP */ #if defined(WL_WIRELESS_EXT) /* Attach and link in the iw */ if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) { @@ -5064,10 +7292,15 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) } dhd_state |= DHD_ATTACH_STATE_WL_ATTACH; } +#ifdef WL_ESCAN + wl_escan_attach(net, (void *)&dhd->pub); +#endif #endif /* defined(WL_WIRELESS_EXT) */ #ifdef SHOW_LOGTRACE dhd_init_logstrs_array(&dhd->event_data); + dhd_init_static_strs_array(&dhd->event_data, st_str_file_path, map_file_path); + dhd_init_static_strs_array(&dhd->event_data, rom_st_str_file_path, rom_map_file_path); #endif /* SHOW_LOGTRACE */ if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) { @@ -5076,6 +7309,7 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) } + /* Set up the watchdog timer */ init_timer(&dhd->timer); dhd->timer.data = (ulong)dhd; @@ -5085,11 +7319,28 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) if (dhd_watchdog_prio >= 0) { /* Initialize watchdog thread */ PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread"); + if (dhd->thr_wdt_ctl.thr_pid < 0) { + goto fail; + } } else { dhd->thr_wdt_ctl.thr_pid = -1; } +#ifdef DHD_PCIE_RUNTIMEPM + /* Setup up the runtime PM Idlecount timer */ + init_timer(&dhd->rpm_timer); + dhd->rpm_timer.data = (ulong)dhd; + dhd->rpm_timer.function = dhd_runtimepm; + dhd->rpm_timer_valid = FALSE; + + dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID; + PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread"); + if (dhd->thr_rpm_ctl.thr_pid < 0) { + goto 
fail; + } +#endif /* DHD_PCIE_RUNTIMEPM */ + #ifdef DEBUGGER debugger_init((void *) bus); #endif @@ -5098,6 +7349,9 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) if (dhd_dpc_prio >= 0) { /* Initialize DPC thread */ PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc"); + if (dhd->thr_dpc_ctl.thr_pid < 0) { + goto fail; + } } else { /* use tasklet for dpc */ tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd); @@ -5108,6 +7362,9 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND); /* Initialize RXF thread */ PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf"); + if (dhd->thr_rxf_ctl.thr_pid < 0) { + goto fail; + } } dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED; @@ -5115,8 +7372,11 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) #if defined(CONFIG_PM_SLEEP) if (!dhd_pm_notifier_registered) { dhd_pm_notifier_registered = TRUE; - register_pm_notifier(&dhd_pm_notifier); + dhd->pm_notifier.notifier_call = dhd_pm_callback; + dhd->pm_notifier.priority = 10; + register_pm_notifier(&dhd->pm_notifier); } + #endif /* CONFIG_PM_SLEEP */ #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) @@ -5134,12 +7394,13 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) register_inetaddr_notifier(&dhd_inetaddr_notifier); } #endif /* ARP_OFFLOAD_SUPPORT */ -#ifdef CONFIG_IPV6 + +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) if (!dhd_inet6addr_notifier_registered) { dhd_inet6addr_notifier_registered = TRUE; register_inet6addr_notifier(&dhd_inet6addr_notifier); } -#endif +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd); #ifdef DEBUG_CPU_FREQ dhd->new_freq = alloc_percpu(int); @@ -5156,13 +7417,79 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) #endif /* BCMSDIO */ #endif /* DHDTCPACK_SUPPRESS */ +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) 
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + dhd_state |= DHD_ATTACH_STATE_DONE; dhd->dhd_state = dhd_state; dhd_found++; -#if defined(CUSTOMER_HW20) && defined(WLANAUDIO) - dhd_global = dhd; -#endif /* CUSTOMER_HW20 && WLANAUDIO */ +#ifdef DHD_DEBUG_PAGEALLOC + register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub); +#endif /* DHD_DEBUG_PAGEALLOC */ + +#if defined(DHD_LB) + DHD_ERROR(("DHD LOAD BALANCING Enabled\n")); + + dhd_lb_set_default_cpus(dhd); + + /* Initialize the CPU Masks */ + if (dhd_cpumasks_init(dhd) == 0) { + + /* Now we have the current CPU maps, run through candidacy */ + dhd_select_cpu_candidacy(dhd); + + /* + * If we are able to initialize CPU masks, lets register to the + * CPU Hotplug framework to change the CPU for each job dynamically + * using candidacy algorithm. + */ + dhd->cpu_notifier.notifier_call = dhd_cpu_callback; + register_cpu_notifier(&dhd->cpu_notifier); /* Register a callback */ + } else { + /* + * We are unable to initialize CPU masks, so candidacy algorithm + * won't run, but still Load Balancing will be honoured based + * on the CPUs allocated for a given job statically during init + */ + dhd->cpu_notifier.notifier_call = NULL; + DHD_ERROR(("%s(): dhd_cpumasks_init failed CPUs for JOB would be static\n", + __FUNCTION__)); + } + + + DHD_LB_STATS_INIT(&dhd->pub); + + /* Initialize the Load Balancing Tasklets and Napi object */ +#if defined(DHD_LB_TXC) + tasklet_init(&dhd->tx_compl_tasklet, + dhd_lb_tx_compl_handler, (ulong)(&dhd->pub)); + INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn); + DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__)); +#endif /* DHD_LB_TXC */ + +#if defined(DHD_LB_RXC) + tasklet_init(&dhd->rx_compl_tasklet, + dhd_lb_rx_compl_handler, (ulong)(&dhd->pub)); + INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn); + DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__)); +#endif /* DHD_LB_RXC */ + +#if 
defined(DHD_LB_RXP) + __skb_queue_head_init(&dhd->rx_pend_queue); + skb_queue_head_init(&dhd->rx_napi_queue); + + /* Initialize the work that dispatches NAPI job to a given core */ + INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn); + DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__)); +#endif /* DHD_LB_RXP */ + +#endif /* DHD_LB */ + + INIT_DELAYED_WORK(&dhd->dhd_memdump_work, dhd_memdump_work_handler); + + (void)dhd_sysfs_init(dhd); + return &dhd->pub; fail: @@ -5177,6 +7504,15 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) return NULL; } +#include + +void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs) +{ + dhd_info_t *dhd = (dhd_info_t*)dhdp->info; + + schedule_delayed_work(&dhd->dhd_memdump_work, msecs_to_jiffies(msecs)); +} + int dhd_get_fw_mode(dhd_info_t *dhdinfo) { if (strstr(dhdinfo->fw_path, "_apsta") != NULL) @@ -5195,9 +7531,11 @@ bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) { int fw_len; int nv_len; + int clm_len; int conf_len; const char *fw = NULL; const char *nv = NULL; + const char *clm = NULL; const char *conf = NULL; wifi_adapter_info_t *adapter = dhdinfo->adapter; @@ -5232,6 +7570,10 @@ bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0') nv = adapter->nv_path; } + if (dhdinfo->clm_path[0] == '\0') { + if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0') + clm = adapter->clm_path; + } if (dhdinfo->conf_path[0] == '\0') { if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0') conf = adapter->conf_path; @@ -5245,6 +7587,8 @@ bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) fw = firmware_path; if (nvram_path[0] != '\0') nv = nvram_path; + if (clm_path[0] != '\0') + clm = clm_path; if (config_path[0] != '\0') conf = config_path; @@ -5268,6 +7612,16 @@ bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) if (dhdinfo->nv_path[nv_len-1] == '\n') dhdinfo->nv_path[nv_len-1] = '\0'; } + if (clm && clm[0] != 
'\0') { + clm_len = strlen(clm); + if (clm_len >= sizeof(dhdinfo->clm_path)) { + DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n")); + return FALSE; + } + strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path)); + if (dhdinfo->clm_path[clm_len-1] == '\n') + dhdinfo->clm_path[clm_len-1] = '\0'; + } if (conf && conf[0] != '\0') { conf_len = strlen(conf); if (conf_len >= sizeof(dhdinfo->conf_path)) { @@ -5281,9 +7635,12 @@ bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) #if 0 /* clear the path in module parameter */ - firmware_path[0] = '\0'; - nvram_path[0] = '\0'; - config_path[0] = '\0'; + if (dhd_download_fw_on_driverload) { + firmware_path[0] = '\0'; + nvram_path[0] = '\0'; + clm_path[0] = '\0'; + config_path[0] = '\0'; + } #endif #ifndef BCMEMBEDIMAGE @@ -5296,17 +7653,62 @@ bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) DHD_ERROR(("nvram path not found\n")); return FALSE; } - if (dhdinfo->conf_path[0] == '\0') { - dhd_conf_set_conf_path_by_nv_path(&dhdinfo->pub, dhdinfo->conf_path, dhdinfo->nv_path); - } -#ifdef CONFIG_PATH_AUTO_SELECT - dhd_conf_set_conf_name_by_chip(&dhdinfo->pub, dhdinfo->conf_path); -#endif #endif /* BCMEMBEDIMAGE */ return TRUE; } +#ifdef CUSTOMER_HW4_DEBUG +bool dhd_validate_chipid(dhd_pub_t *dhdp) +{ + uint chipid = dhd_bus_chip_id(dhdp); + uint config_chipid; + +#ifdef BCM4359_CHIP + config_chipid = BCM4359_CHIP_ID; +#elif defined(BCM4358_CHIP) + config_chipid = BCM4358_CHIP_ID; +#elif defined(BCM4354_CHIP) + config_chipid = BCM4354_CHIP_ID; +#elif defined(BCM4356_CHIP) + config_chipid = BCM4356_CHIP_ID; +#elif defined(BCM4339_CHIP) + config_chipid = BCM4339_CHIP_ID; +#elif defined(BCM43349_CHIP) + config_chipid = BCM43349_CHIP_ID; +#elif defined(BCM4335_CHIP) + config_chipid = BCM4335_CHIP_ID; +#elif defined(BCM43241_CHIP) + config_chipid = BCM4324_CHIP_ID; +#elif defined(BCM4330_CHIP) + config_chipid = BCM4330_CHIP_ID; +#elif defined(BCM43430_CHIP) + config_chipid = BCM43430_CHIP_ID; +#elif defined(BCM4334W_CHIP) + 
config_chipid = BCM43342_CHIP_ID; +#elif defined(BCM43455_CHIP) + config_chipid = BCM4345_CHIP_ID; +#elif defined(BCM43012_CHIP_) + config_chipid = BCM43012_CHIP_ID; +#else + DHD_ERROR(("%s: Unknown chip id, if you use new chipset," + " please add CONFIG_BCMXXXX into the Kernel and" + " BCMXXXX_CHIP definition into the DHD driver\n", + __FUNCTION__)); + config_chipid = 0; + + return FALSE; +#endif /* BCM4354_CHIP */ + +#if defined(BCM4359_CHIP) + if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) { + return TRUE; + } +#endif /* BCM4359_CHIP */ + + return config_chipid == chipid; +} +#endif /* CUSTOMER_HW4_DEBUG */ int dhd_bus_start(dhd_pub_t *dhdp) @@ -5323,16 +7725,20 @@ dhd_bus_start(dhd_pub_t *dhdp) /* try to download image and nvram to the dongle */ if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) { + /* Indicate FW Download has not yet done */ + dhd->pub.is_fw_download_done = FALSE; DHD_INFO(("%s download fw %s, nv %s, conf %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path)); ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh, - dhd->fw_path, dhd->nv_path, dhd->conf_path); + dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path); if (ret < 0) { DHD_ERROR(("%s: failed to download firmware %s\n", __FUNCTION__, dhd->fw_path)); DHD_PERIM_UNLOCK(dhdp); return ret; } + /* Indicate FW Download has succeeded */ + dhd->pub.is_fw_download_done = TRUE; } if (dhd->pub.busstate != DHD_BUS_LOAD) { DHD_PERIM_UNLOCK(dhdp); @@ -5344,6 +7750,7 @@ dhd_bus_start(dhd_pub_t *dhdp) /* Start the watchdog timer */ dhd->pub.tickcnt = 0; dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms); + DHD_ENABLE_RUNTIME_PM(&dhd->pub); /* Bring up the bus */ if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) { @@ -5367,7 +7774,8 @@ dhd_bus_start(dhd_pub_t *dhdp) del_timer_sync(&dhd->timer); dhd_os_sdunlock(dhdp); -#endif /* BCMPCIE_OOB_HOST_WAKE */ +#endif /* !BCMPCIE_OOB_HOST_WAKE */ + DHD_DISABLE_RUNTIME_PM(&dhd->pub); DHD_PERIM_UNLOCK(dhdp); 
DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__)); @@ -5384,15 +7792,15 @@ dhd_bus_start(dhd_pub_t *dhdp) #elif defined(FORCE_WOWLAN) /* Enable oob at firmware */ dhd_enable_oob_intr(dhd->pub.bus, TRUE); -#endif +#endif #ifdef PCIE_FULL_DONGLE { - uint8 txpush = 0; - uint32 num_flowrings; /* includes H2D common rings */ - num_flowrings = dhd_bus_max_h2d_queues(dhd->pub.bus, &txpush); - DHD_ERROR(("%s: Initializing %u flowrings\n", __FUNCTION__, - num_flowrings)); - if ((ret = dhd_flow_rings_init(&dhd->pub, num_flowrings)) != BCME_OK) { + /* max_h2d_rings includes H2D common rings */ + uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus); + + DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__, + max_h2d_rings)); + if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) { dhd_os_sdunlock(dhdp); DHD_PERIM_UNLOCK(dhdp); return ret; @@ -5401,7 +7809,18 @@ dhd_bus_start(dhd_pub_t *dhdp) #endif /* PCIE_FULL_DONGLE */ /* Do protocol initialization necessary for IOCTL/IOVAR */ - dhd_prot_init(&dhd->pub); +#ifdef PCIE_FULL_DONGLE + dhd_os_sdunlock(dhdp); +#endif /* PCIE_FULL_DONGLE */ + ret = dhd_prot_init(&dhd->pub); + if (unlikely(ret) != BCME_OK) { + DHD_PERIM_UNLOCK(dhdp); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + return ret; + } +#ifdef PCIE_FULL_DONGLE + dhd_os_sdlock(dhdp); +#endif /* PCIE_FULL_DONGLE */ /* If bus is not ready, can't come up */ if (dhd->pub.busstate != DHD_BUS_DATA) { @@ -5410,6 +7829,7 @@ dhd_bus_start(dhd_pub_t *dhdp) DHD_GENERAL_UNLOCK(&dhd->pub, flags); del_timer_sync(&dhd->timer); DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__)); + DHD_DISABLE_RUNTIME_PM(&dhd->pub); dhd_os_sdunlock(dhdp); DHD_PERIM_UNLOCK(dhdp); DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); @@ -5420,6 +7840,13 @@ dhd_bus_start(dhd_pub_t *dhdp) /* Bus is ready, query any dongle information */ if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) { + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->wd_timer_valid = FALSE; 
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags); + del_timer_sync(&dhd->timer); + DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__)); + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); DHD_PERIM_UNLOCK(dhdp); return ret; } @@ -5506,6 +7933,48 @@ int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct e ret = BCME_ERROR; return ret; } +int +dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = 0; + bool auto_on = false; + uint32 mode = wfd_mode; + +#ifdef ENABLE_TDLS_AUTO_MODE + if (wfd_mode) { + auto_on = false; + } else { + auto_on = true; + } +#else + auto_on = false; +#endif /* ENABLE_TDLS_AUTO_MODE */ + ret = _dhd_tdls_enable(dhd, false, auto_on, NULL); + if (ret < 0) { + DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret)); + return ret; + } + + + bcm_mkiovar("tdls_wfd_mode", (char *)&mode, sizeof(mode), + iovbuf, sizeof(iovbuf)); + if (((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) && + (ret != BCME_UNSUPPORTED)) { + DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret)); + return ret; + } + + ret = _dhd_tdls_enable(dhd, true, auto_on, NULL); + if (ret < 0) { + DHD_ERROR(("enable tdls_auto_op failed. 
%d\n", ret)); + return ret; + } + + dhd->tdls_mode = mode; + return ret; +} #ifdef PCIE_FULL_DONGLE void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da) { @@ -5562,7 +8031,7 @@ void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da) } } #endif /* PCIE_FULL_DONGLE */ -#endif +#endif bool dhd_is_concurrent_mode(dhd_pub_t *dhd) { @@ -5600,8 +8069,7 @@ dhd_get_concurrent_capabilites(dhd_pub_t *dhd) if (!FW_SUPPORTED(dhd, p2p)) { DHD_TRACE(("Chip does not support p2p\n")); return 0; - } - else { + } else { /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */ memset(buf, 0, sizeof(buf)); bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf)); @@ -5609,8 +8077,7 @@ dhd_get_concurrent_capabilites(dhd_pub_t *dhd) FALSE, 0)) < 0) { DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret)); return 0; - } - else { + } else { if (buf[0] == 1) { /* By default, chip supports single chan concurrency, * now lets check for mchan @@ -5618,20 +8085,23 @@ dhd_get_concurrent_capabilites(dhd_pub_t *dhd) ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE; if (mchan_supported) ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE; + if (FW_SUPPORTED(dhd, rsdb)) { + ret |= DHD_FLAG_RSDB_MODE; + } + if (FW_SUPPORTED(dhd, mp2p)) { + ret |= DHD_FLAG_MP2P_MODE; + } #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF) - /* For customer_hw4, although ICS, - * we still support concurrent mode - */ return ret; #else return 0; -#endif +#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */ } } } return 0; } -#endif +#endif #ifdef SUPPORT_AP_POWERSAVE #define RXCHAIN_PWRSAVE_PPS 10 @@ -5680,283 +8150,6 @@ int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable) #endif /* SUPPORT_AP_POWERSAVE */ -#if defined(READ_CONFIG_FROM_FILE) -#include -#include - -#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) -bool PM_control = TRUE; - -static int dhd_preinit_proc(dhd_pub_t *dhd, int ifidx, char *name, char 
*value) -{ - int var_int; - wl_country_t cspec = {{0}, -1, {0}}; - char *revstr; - char *endptr = NULL; - int iolen; - char smbuf[WLC_IOCTL_SMLEN*2]; - - if (!strcmp(name, "country")) { - revstr = strchr(value, '/'); - if (revstr) { - cspec.rev = strtoul(revstr + 1, &endptr, 10); - memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ); - cspec.country_abbrev[2] = '\0'; - memcpy(cspec.ccode, cspec.country_abbrev, WLC_CNTRY_BUF_SZ); - } else { - cspec.rev = -1; - memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ); - memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ); - get_customized_country_code(dhd->info->adapter, - (char *)&cspec.country_abbrev, &cspec); - } - memset(smbuf, 0, sizeof(smbuf)); - DHD_ERROR(("config country code is country : %s, rev : %d !!\n", - cspec.country_abbrev, cspec.rev)); - iolen = bcm_mkiovar("country", (char*)&cspec, sizeof(cspec), - smbuf, sizeof(smbuf)); - return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, - smbuf, iolen, TRUE, 0); - } else if (!strcmp(name, "roam_scan_period")) { - var_int = (int)simple_strtol(value, NULL, 0); - return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, - &var_int, sizeof(var_int), TRUE, 0); - } else if (!strcmp(name, "roam_delta")) { - struct { - int val; - int band; - } x; - x.val = (int)simple_strtol(value, NULL, 0); - /* x.band = WLC_BAND_AUTO; */ - x.band = WLC_BAND_ALL; - return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, &x, sizeof(x), TRUE, 0); - } else if (!strcmp(name, "roam_trigger")) { - int ret = 0; - - roam_trigger[0] = (int)simple_strtol(value, NULL, 0); - roam_trigger[1] = WLC_BAND_ALL; - ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, &roam_trigger, - sizeof(roam_trigger), TRUE, 0); - - return ret; - } else if (!strcmp(name, "PM")) { - int ret = 0; - var_int = (int)simple_strtol(value, NULL, 0); - - ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, - &var_int, sizeof(var_int), TRUE, 0); - -#if defined(CONFIG_PM_LOCK) - if (var_int == 0) { - g_pm_control = TRUE; - printk("%s var_int=%d don't control PM\n", 
__func__, var_int); - } else { - g_pm_control = FALSE; - printk("%s var_int=%d do control PM\n", __func__, var_int); - } -#endif - - return ret; - } -#ifdef WLBTAMP - else if (!strcmp(name, "btamp_chan")) { - int btamp_chan; - int iov_len = 0; - char iovbuf[128]; - int ret; - - btamp_chan = (int)simple_strtol(value, NULL, 0); - iov_len = bcm_mkiovar("btamp_chan", (char *)&btamp_chan, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0) < 0)) - DHD_ERROR(("%s btamp_chan=%d set failed code %d\n", - __FUNCTION__, btamp_chan, ret)); - else - DHD_ERROR(("%s btamp_chan %d set success\n", - __FUNCTION__, btamp_chan)); - } -#endif /* WLBTAMP */ - else if (!strcmp(name, "band")) { - int ret; - if (!strcmp(value, "auto")) - var_int = WLC_BAND_AUTO; - else if (!strcmp(value, "a")) - var_int = WLC_BAND_5G; - else if (!strcmp(value, "b")) - var_int = WLC_BAND_2G; - else if (!strcmp(value, "all")) - var_int = WLC_BAND_ALL; - else { - printk(" set band value should be one of the a or b or all\n"); - var_int = WLC_BAND_AUTO; - } - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &var_int, - sizeof(var_int), TRUE, 0)) < 0) - printk(" set band err=%d\n", ret); - return ret; - } else if (!strcmp(name, "cur_etheraddr")) { - struct ether_addr ea; - char buf[32]; - uint iovlen; - int ret; - - bcm_ether_atoe(value, &ea); - - ret = memcmp(&ea.octet, dhd->mac.octet, ETHER_ADDR_LEN); - if (ret == 0) { - DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__)); - return 0; - } - - DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__, - ea.octet[0], ea.octet[1], ea.octet[2], - ea.octet[3], ea.octet[4], ea.octet[5])); - - iovlen = bcm_mkiovar("cur_etheraddr", (char*)&ea, ETHER_ADDR_LEN, buf, 32); - - ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0); - if (ret < 0) { - DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret)); - return ret; - } - else { - memcpy(dhd->mac.octet, (void *)&ea, 
ETHER_ADDR_LEN); - return ret; - } - } else if (!strcmp(name, "lpc")) { - int ret = 0; - char buf[32]; - uint iovlen; - var_int = (int)simple_strtol(value, NULL, 0); - if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) { - DHD_ERROR(("%s: wl down failed\n", __FUNCTION__)); - } - iovlen = bcm_mkiovar("lpc", (char *)&var_int, 4, buf, sizeof(buf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) { - DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret)); - } - if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) { - DHD_ERROR(("%s: wl up failed\n", __FUNCTION__)); - } - return ret; - } else if (!strcmp(name, "vht_features")) { - int ret = 0; - char buf[32]; - uint iovlen; - var_int = (int)simple_strtol(value, NULL, 0); - - if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) { - DHD_ERROR(("%s: wl down failed\n", __FUNCTION__)); - } - iovlen = bcm_mkiovar("vht_features", (char *)&var_int, 4, buf, sizeof(buf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) { - DHD_ERROR(("%s Set vht_features failed %d\n", __FUNCTION__, ret)); - } - if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) { - DHD_ERROR(("%s: wl up failed\n", __FUNCTION__)); - } - return ret; - } else { - uint iovlen; - char iovbuf[WLC_IOCTL_SMLEN]; - - /* wlu_iovar_setint */ - var_int = (int)simple_strtol(value, NULL, 0); - - /* Setup timeout bcn_timeout from dhd driver 4.217.48 */ - if (!strcmp(name, "roam_off")) { - /* Setup timeout if Beacons are lost to report link down */ - if (var_int) { - uint bcn_timeout = 2; - bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, - iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); - } - } - /* Setup timeout bcm_timeout from dhd driver 4.217.48 */ - - DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__, name, var_int)); - - iovlen = bcm_mkiovar(name, (char *)&var_int, sizeof(var_int), - iovbuf, sizeof(iovbuf)); - return dhd_wl_ioctl_cmd(dhd, 
WLC_SET_VAR, - iovbuf, iovlen, TRUE, 0); - } - - return 0; -} - -static int dhd_preinit_config(dhd_pub_t *dhd, int ifidx) -{ - mm_segment_t old_fs; - struct kstat stat; - struct file *fp = NULL; - unsigned int len; - char *buf = NULL, *p, *name, *value; - int ret = 0; - char *config_path; - - config_path = CONFIG_BCMDHD_CONFIG_PATH; - - if (!config_path) - { - printk(KERN_ERR "config_path can't read. \n"); - return 0; - } - - old_fs = get_fs(); - set_fs(get_ds()); - if ((ret = vfs_stat(config_path, &stat))) { - set_fs(old_fs); - printk(KERN_ERR "%s: Failed to get information (%d)\n", - config_path, ret); - return ret; - } - set_fs(old_fs); - - if (!(buf = MALLOC(dhd->osh, stat.size + 1))) { - printk(KERN_ERR "Failed to allocate memory %llu bytes\n", stat.size); - return -ENOMEM; - } - - printk("dhd_preinit_config : config path : %s \n", config_path); - - if (!(fp = dhd_os_open_image(config_path)) || - (len = dhd_os_get_image_block(buf, stat.size, fp)) < 0) - goto err; - - buf[stat.size] = '\0'; - for (p = buf; *p; p++) { - if (isspace(*p)) - continue; - for (name = p++; *p && !isspace(*p); p++) { - if (*p == '=') { - *p = '\0'; - p++; - for (value = p; *p && !isspace(*p); p++); - *p = '\0'; - if ((ret = dhd_preinit_proc(dhd, ifidx, name, value)) < 0) { - printk(KERN_ERR "%s: %s=%s\n", - bcmerrorstr(ret), name, value); - } - break; - } - } - } - ret = 0; - -out: - if (fp) - dhd_os_close_image(fp); - if (buf) - MFREE(dhd->osh, buf, stat.size+1); - return ret; - -err: - ret = -1; - goto out; -} -#endif /* READ_CONFIG_FROM_FILE */ - int dhd_preinit_ioctls(dhd_pub_t *dhd) { @@ -5971,20 +8164,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) eventmsgs_ext_t *eventmask_msg = NULL; char* iov_buf = NULL; int ret2 = 0; -#ifdef WLAIBSS - aibss_bcn_force_config_t bcn_config; - uint32 aibss; -#ifdef WLAIBSS_PS - uint32 aibss_ps; -#endif /* WLAIBSS_PS */ -#endif /* WLAIBSS */ -#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X) - uint32 sup_wpa = 0; -#endif -#if 
defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \ - defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)) +#if defined(CUSTOM_AMPDU_BA_WSIZE) uint32 ampdu_ba_wsize = 0; -#endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */ +#endif #if defined(CUSTOM_AMPDU_MPDU) int32 ampdu_mpdu = 0; #endif @@ -5994,7 +8176,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #if defined(CUSTOM_AMSDU_AGGSF) int32 amsdu_aggsf = 0; #endif - +#ifdef SUPPORT_SENSORHUB + int32 shub_enable = 0; +#endif /* SUPPORT_SENSORHUB */ #if defined(BCMSDIO) #ifdef PROP_TXSTATUS int wlfc_enable = TRUE; @@ -6003,24 +8187,34 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) uint wl_down = 1; #endif /* DISABLE_11N */ #endif /* PROP_TXSTATUS */ -#endif +#endif #ifdef PCIE_FULL_DONGLE uint32 wl_ap_isolate; #endif /* PCIE_FULL_DONGLE */ +#if defined(BCMSDIO) + /* by default frame burst is enabled for PCIe and disabled for SDIO dongles */ + uint32 frameburst = 0; +#else + uint32 frameburst = 1; +#endif /* BCMSDIO */ + #ifdef DHD_ENABLE_LPC uint32 lpc = 1; #endif /* DHD_ENABLE_LPC */ uint power_mode = PM_FAST; - uint32 dongle_align = DHD_SDALIGN; #if defined(BCMSDIO) + uint32 dongle_align = DHD_SDALIGN; uint32 glom = CUSTOM_GLOM_SETTING; #endif /* defined(BCMSDIO) */ #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL) uint32 credall = 1; #endif uint bcn_timeout = dhd->conf->bcn_timeout; - uint retry_max = 3; +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + uint32 bcn_li_bcn = 1; +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + uint retry_max = CUSTOM_ASSOC_RETRY_MAX; #if defined(ARP_OFFLOAD_SUPPORT) int arpoe = 1; #endif @@ -6053,9 +8247,6 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */ struct ether_addr p2p_ea; #endif -#ifdef BCMCCX - uint32 ccx = 1; -#endif #ifdef SOFTAP_UAPSD_OFF uint32 wme_apsd = 0; #endif /* SOFTAP_UAPSD_OFF */ @@ -6073,52 +8264,66 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) uint32 nmode = 0; #endif /* DISABLE_11N */ -#if defined(DISABLE_11AC) - uint32 vhtmode = 0; -#endif /* 
DISABLE_11AC */ #ifdef USE_WL_TXBF uint32 txbf = 1; #endif /* USE_WL_TXBF */ -#ifdef AMPDU_VO_ENABLE - struct ampdu_tid_control tid; -#endif -#ifdef USE_WL_FRAMEBURST - uint32 frameburst = 1; -#endif /* USE_WL_FRAMEBURST */ -#ifdef DHD_SET_FW_HIGHSPEED - uint32 ack_ratio = 250; - uint32 ack_ratio_depth = 64; -#endif /* DHD_SET_FW_HIGHSPEED */ -#ifdef SUPPORT_2G_VHT - uint32 vht_features = 0x3; /* 2G enable | rates all */ -#endif /* SUPPORT_2G_VHT */ +#if defined(PROP_TXSTATUS) +#ifdef USE_WFA_CERT_CONF + uint32 proptx = 0; +#endif /* USE_WFA_CERT_CONF */ +#endif /* PROP_TXSTATUS */ #ifdef CUSTOM_PSPRETEND_THR uint32 pspretend_thr = CUSTOM_PSPRETEND_THR; #endif + uint32 rsdb_mode = 0; +#ifdef ENABLE_TEMP_THROTTLING + wl_temp_control_t temp_control; +#endif /* ENABLE_TEMP_THROTTLING */ +#ifdef DISABLE_PRUNED_SCAN + uint32 scan_features = 0; +#endif /* DISABLE_PRUNED_SCAN */ +#ifdef CUSTOM_EVENT_PM_WAKE + uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE; +#endif /* CUSTOM_EVENT_PM_WAKE */ #ifdef PKT_FILTER_SUPPORT dhd_pkt_filter_enable = TRUE; #endif /* PKT_FILTER_SUPPORT */ #ifdef WLTDLS dhd->tdls_enable = FALSE; + dhd_tdls_set_mode(dhd, false); #endif /* WLTDLS */ dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM; DHD_TRACE(("Enter %s\n", __FUNCTION__)); - dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_BAND", WLC_SET_BAND, dhd->conf->band, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_BAND, "WLC_SET_BAND", dhd->conf->band, 0, FALSE); #ifdef DHDTCPACK_SUPPRESS printf("%s: Set tcpack_sup_mode %d\n", __FUNCTION__, dhd->conf->tcpack_sup_mode); dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode); #endif dhd->op_mode = 0; +#ifdef CUSTOMER_HW4_DEBUG + if (!dhd_validate_chipid(dhd)) { + DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n", + __FUNCTION__, dhd_bus_chip_id(dhd))); +#ifndef SUPPORT_MULTIPLE_CHIPS + ret = BCME_BADARG; + goto done; +#endif /* !SUPPORT_MULTIPLE_CHIPS */ + } +#endif /* CUSTOMER_HW4_DEBUG */ if ((!op_mode && dhd_get_fw_mode(dhd->info) == 
DHD_FLAG_MFG_MODE) || (op_mode == DHD_FLAG_MFG_MODE)) { +#ifdef DHD_PCIE_RUNTIMEPM + /* Disable RuntimePM in mfg mode */ + DHD_DISABLE_RUNTIME_PM(dhd); + DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__)); +#endif /* DHD_PCIE_RUNTIME_PM */ /* Check and adjust IOCTL response timeout for Manufactring firmware */ dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT); DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n", __FUNCTION__)); - } - else { + } else { dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__)); } @@ -6131,45 +8336,53 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) if (ret < 0) { DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n", __FUNCTION__, MAC2STRDBG(ea_addr.octet), ret)); - ret = BCME_NOTUP; - goto done; } - memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN); - } else { + } #endif /* GET_CUSTOM_MAC_ENABLE */ - /* Get the default device MAC address directly from firmware */ - memset(buf, 0, sizeof(buf)); - bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), - FALSE, 0)) < 0) { - DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret)); - ret = BCME_NOTUP; - goto done; - } - /* Update public MAC address after reading from Firmware */ - memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN); + /* Get the default device MAC address directly from firmware */ + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), + FALSE, 0)) < 0) { + DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret)); + ret = BCME_NOTUP; + goto done; + } + /* Update public MAC address after reading from Firmware */ + memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN); -#ifdef GET_CUSTOM_MAC_ENABLE + if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) { + DHD_ERROR(("%s: CLM set failed. 
Abort initialization.\n", __FUNCTION__)); + goto done; } -#endif /* GET_CUSTOM_MAC_ENABLE */ /* get a capabilities from firmware */ - memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities)); - bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities, - sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) { - DHD_ERROR(("%s: Get Capability failed (error=%d)\n", - __FUNCTION__, ret)); - goto done; + { + uint32 cap_buf_size = sizeof(dhd->fw_capabilities); + memset(dhd->fw_capabilities, 0, cap_buf_size); + bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, cap_buf_size - 1); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities, + (cap_buf_size - 1), FALSE, 0)) < 0) + { + DHD_ERROR(("%s: Get Capability failed (error=%d)\n", + __FUNCTION__, ret)); + return 0; + } + + memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1)); + dhd->fw_capabilities[0] = ' '; + dhd->fw_capabilities[cap_buf_size - 2] = ' '; + dhd->fw_capabilities[cap_buf_size - 1] = '\0'; } + if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) || (op_mode == DHD_FLAG_HOSTAP_MODE)) { #ifdef SET_RANDOM_MAC_SOFTAP uint rand_mac; -#endif +#endif /* SET_RANDOM_MAC_SOFTAP */ dhd->op_mode = DHD_FLAG_HOSTAP_MODE; #if defined(ARP_OFFLOAD_SUPPORT) - arpoe = 0; + arpoe = 0; #endif #ifdef PKT_FILTER_SUPPORT dhd_pkt_filter_enable = FALSE; @@ -6177,9 +8390,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #ifdef SET_RANDOM_MAC_SOFTAP SRANDOM32((uint)jiffies); rand_mac = RANDOM32(); - iovbuf[0] = 0x02; /* locally administered bit */ - iovbuf[1] = 0x1A; - iovbuf[2] = 0x11; + iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */ + iovbuf[1] = (unsigned char)(vendor_oui >> 8); + iovbuf[2] = (unsigned char)vendor_oui; iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0; iovbuf[4] = (unsigned char)(rand_mac >> 8); iovbuf[5] = (unsigned char)(rand_mac >> 16); @@ -6199,13 +8412,19 @@ 
dhd_preinit_ioctls(dhd_pub_t *dhd) DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret)); } #endif +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ #ifdef SUPPORT_AP_POWERSAVE dhd_set_ap_powersave(dhd, 0, TRUE); -#endif +#endif /* SUPPORT_AP_POWERSAVE */ #ifdef SOFTAP_UAPSD_OFF bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) - DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", __FUNCTION__, ret)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", + __FUNCTION__, ret)); + } #endif /* SOFTAP_UAPSD_OFF */ } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) || (op_mode == DHD_FLAG_MFG_MODE)) { @@ -6216,6 +8435,18 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) dhd_pkt_filter_enable = FALSE; #endif /* PKT_FILTER_SUPPORT */ dhd->op_mode = DHD_FLAG_MFG_MODE; +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + if (FW_SUPPORTED(dhd, rsdb)) { + rsdb_mode = 0; + bcm_mkiovar("rsdb_mode", (char *)&rsdb_mode, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n", + __FUNCTION__, ret)); + } + } } else { uint32 concurrent_mode = 0; if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) || @@ -6268,11 +8499,49 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) } #else (void)concurrent_mode; +#endif + } +#ifdef BCMSDIO + if (dhd->conf->sd_f2_blocksize) + dhdsdio_func_blocksize(dhd, 2, dhd->conf->sd_f2_blocksize); #endif + +#ifdef RSDB_MODE_FROM_FILE + (void)dhd_rsdb_mode_from_file(dhd); +#endif /* RSDB_MODE_FROM_FILE */ + +#ifdef DISABLE_PRUNED_SCAN + if (FW_SUPPORTED(dhd, 
rsdb)) { + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("scan_features", (char *)&scan_features, + 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, + iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { + DHD_ERROR(("%s get scan_features is failed ret=%d\n", + __FUNCTION__, ret)); + } else { + memcpy(&scan_features, iovbuf, 4); + scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM; + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("scan_features", (char *)&scan_features, + 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s set scan_features is failed ret=%d\n", + __FUNCTION__, ret)); + } + } } +#endif /* DISABLE_PRUNED_SCAN */ DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n", dhd->op_mode, MAC2STRDBG(dhd->mac.octet))); + #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA) + if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) + dhd->info->rxthread_enabled = FALSE; + else + dhd->info->rxthread_enabled = TRUE; + #endif /* Set Country code */ if (dhd->dhd_cspec.ccode[0] != 0) { printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev); @@ -6284,14 +8553,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) dhd_conf_set_country(dhd); dhd_conf_fix_country(dhd); } + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "autocountry", dhd->conf->autocountry, 0, FALSE); dhd_conf_get_country(dhd, &dhd->dhd_cspec); -#if defined(DISABLE_11AC) - bcm_mkiovar("vhtmode", (char *)&vhtmode, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) - DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__, ret)); -#endif /* DISABLE_11AC */ - dhd_conf_set_fw_string_cmd(dhd, "vhtmode", dhd->conf->vhtmode, 0, TRUE); /* Set Listen Interval */ bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf)); @@ -6299,6 +8563,11 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) DHD_ERROR(("%s assoc_listen failed %d\n", 
__FUNCTION__, ret)); #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM) +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) { + DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar)); + } +#endif /* USE_WFA_CERT_CONF */ /* Disable built-in roaming to allowed ext supplicant to take care of roaming */ bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); @@ -6319,13 +8588,20 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #endif /* ROAM_ENABLE */ dhd_conf_set_roam(dhd); -#ifdef BCMCCX - bcm_mkiovar("ccx_enable", (char *)&ccx, 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); -#endif /* BCMCCX */ +#ifdef CUSTOM_EVENT_PM_WAKE + bcm_mkiovar("const_awake_thresh", (char *)&pm_awake_thresh, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret)); + } +#endif /* CUSTOM_EVENT_PM_WAKE */ #ifdef WLTDLS +#ifdef ENABLE_TDLS_AUTO_MODE + /* by default TDLS on and auto mode on */ + _dhd_tdls_enable(dhd, true, true, NULL); +#else /* by default TDLS on and auto mode off */ _dhd_tdls_enable(dhd, true, false, NULL); +#endif /* ENABLE_TDLS_AUTO_MODE */ #endif /* WLTDLS */ #ifdef DHD_ENABLE_LPC @@ -6334,15 +8610,28 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret)); + + if (ret == BCME_NOTDOWN) { + uint wl_down = 1; + ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, + (char *)&wl_down, sizeof(wl_down), TRUE, 0); + DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc)); + + bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + DHD_ERROR(("%s 
Set lpc ret --> %d\n", __FUNCTION__, ret)); + } } #endif /* DHD_ENABLE_LPC */ - dhd_conf_set_fw_string_cmd(dhd, "lpc", dhd->conf->lpc, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "lpc", dhd->conf->lpc, 0, FALSE); /* Set PowerSave mode */ if (dhd->conf->pm >= 0) power_mode = dhd->conf->pm; dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "pm2_sleep_ret", dhd->conf->pm2_sleep_ret, 0, FALSE); +#if defined(BCMSDIO) /* Match Host and Dongle rx alignment */ bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); @@ -6353,7 +8642,11 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); #endif -#if defined(BCMSDIO) +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) { + DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom)); + } +#endif /* USE_WFA_CERT_CONF */ if (glom != DEFAULT_GLOM_VALUE) { DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom)); bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf)); @@ -6375,20 +8668,29 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); #endif /* defined(AP) && !defined(WLP2P) */ /* 0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */ - dhd_conf_set_fw_string_cmd(dhd, "mimo_bw_cap", dhd->conf->mimo_bw_cap, 1, TRUE); - dhd_conf_set_fw_string_cmd(dhd, "force_wme_ac", dhd->conf->force_wme_ac, 1, FALSE); - dhd_conf_set_fw_string_cmd(dhd, "stbc_tx", dhd->conf->stbc, 0, FALSE); - dhd_conf_set_fw_string_cmd(dhd, "stbc_rx", dhd->conf->stbc, 0, FALSE); - dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SRL", WLC_SET_SRL, dhd->conf->srl, 0, TRUE); - dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_LRL", WLC_SET_LRL, dhd->conf->lrl, 0, FALSE); - dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SPECT_MANAGMENT", 
WLC_SET_SPECT_MANAGMENT, dhd->conf->spect, 0, FALSE); - dhd_conf_set_fw_string_cmd(dhd, "rsdb_mode", dhd->conf->rsdb_mode, -1, TRUE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "mimo_bw_cap", dhd->conf->mimo_bw_cap, 1, TRUE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "force_wme_ac", dhd->conf->force_wme_ac, 1, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "stbc_tx", dhd->conf->stbc, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "stbc_rx", dhd->conf->stbc, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_SRL, "WLC_SET_SRL", dhd->conf->srl, 0, TRUE); + dhd_conf_set_intiovar(dhd, WLC_SET_LRL, "WLC_SET_LRL", dhd->conf->lrl, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_SPECT_MANAGMENT, "WLC_SET_SPECT_MANAGMENT", dhd->conf->spect, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "rsdb_mode", dhd->conf->rsdb_mode, -1, TRUE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "vhtmode", dhd->conf->vhtmode, 0, TRUE); +#ifdef IDHCPC + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpc_enable", dhd->conf->dhcpc_enable, 0, FALSE); +#endif + dhd_conf_set_bw_cap(dhd); + +#ifdef MIMO_ANT_SETTING + dhd_sel_ant_from_file(dhd); +#endif /* MIMO_ANT_SETTING */ #if defined(SOFTAP) if (ap_fw_loaded == TRUE) { dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0); } -#endif +#endif #if defined(KEEP_ALIVE) { @@ -6397,7 +8699,7 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #if defined(SOFTAP) if (ap_fw_loaded == FALSE) -#endif +#endif if (!(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) { if ((res = dhd_keep_alive_onoff(dhd)) < 0) @@ -6411,43 +8713,38 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { - DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret)); + DHD_ERROR(("%s Set txbf returned (%d)\n", __FUNCTION__, ret)); } #endif /* USE_WL_TXBF */ - dhd_conf_set_fw_string_cmd(dhd, "txbf", dhd->conf->txbf, 0, FALSE); + 
dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "txbf", dhd->conf->txbf, 0, FALSE); + +#ifdef USE_WFA_CERT_CONF #ifdef USE_WL_FRAMEBURST + if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) { + DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst)); + } +#endif /* USE_WL_FRAMEBURST */ +#ifdef DISABLE_FRAMEBURST_VSDB + g_frameburst = frameburst; +#endif /* DISABLE_FRAMEBURST_VSDB */ +#endif /* USE_WFA_CERT_CONF */ +#ifdef DISABLE_WL_FRAMEBURST_SOFTAP + /* Disable Framebursting for SofAP */ + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + frameburst = 0; + } +#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */ /* Set frameburst to value */ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst, sizeof(frameburst), TRUE, 0)) < 0) { - DHD_ERROR(("%s Set frameburst failed %d\n", __FUNCTION__, ret)); - } -#endif /* USE_WL_FRAMEBURST */ - dhd_conf_set_fw_string_cmd(dhd, "frameburst", dhd->conf->frameburst, 0, FALSE); -#ifdef DHD_SET_FW_HIGHSPEED - /* Set ack_ratio */ - bcm_mkiovar("ack_ratio", (char *)&ack_ratio, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { - DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret)); - } - - /* Set ack_ratio_depth */ - bcm_mkiovar("ack_ratio_depth", (char *)&ack_ratio_depth, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { - DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret)); + DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret)); } -#endif /* DHD_SET_FW_HIGHSPEED */ -#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \ - defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)) + dhd_conf_set_intiovar(dhd, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG", dhd->conf->frameburst, 0, FALSE); +#if defined(CUSTOM_AMPDU_BA_WSIZE) /* Set ampdu ba wsize to 64 or 16 */ #ifdef CUSTOM_AMPDU_BA_WSIZE ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE; #endif -#if 
defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE) - if (dhd->op_mode == DHD_FLAG_IBSS_MODE) - ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE; -#endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */ if (ampdu_ba_wsize != 0) { bcm_mkiovar("ampdu_ba_wsize", (char *)&du_ba_wsize, 4, iovbuf, sizeof(iovbuf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, @@ -6456,8 +8753,8 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) __FUNCTION__, ampdu_ba_wsize, ret)); } } -#endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */ - dhd_conf_set_fw_string_cmd(dhd, "ampdu_ba_wsize", dhd->conf->ampdu_ba_wsize, 1, FALSE); +#endif + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_ba_wsize", dhd->conf->ampdu_ba_wsize, 1, FALSE); iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL); if (iov_buf == NULL) { @@ -6465,44 +8762,20 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) ret = BCME_NOMEM; goto done; } -#ifdef WLAIBSS - /* Configure custom IBSS beacon transmission */ - if (dhd->op_mode & DHD_FLAG_IBSS_MODE) - { - aibss = 1; - bcm_mkiovar("aibss", (char *)&aibss, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { - DHD_ERROR(("%s Set aibss to %d failed %d\n", - __FUNCTION__, aibss, ret)); - } -#ifdef WLAIBSS_PS - aibss_ps = 1; - bcm_mkiovar("aibss_ps", (char *)&aibss_ps, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { - DHD_ERROR(("%s Set aibss PS to %d failed %d\n", - __FUNCTION__, aibss, ret)); +#ifdef ENABLE_TEMP_THROTTLING + if (dhd->op_mode & DHD_FLAG_STA_MODE) { + memset(&temp_control, 0, sizeof(temp_control)); + temp_control.enable = 1; + temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT; + bcm_mkiovar("temp_throttle_control", (char *)&temp_control, + sizeof(wl_temp_control_t), iov_buf, WLC_IOCTL_SMLEN); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf, WLC_IOCTL_SMLEN, TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s Set 
temp_throttle_control to %d failed \n", + __FUNCTION__, ret)); } -#endif /* WLAIBSS_PS */ - } - memset(&bcn_config, 0, sizeof(bcn_config)); - bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR; - bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR; - bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR; - bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0; - bcn_config.len = sizeof(bcn_config); - - bcm_mkiovar("aibss_bcn_force_config", (char *)&bcn_config, - sizeof(aibss_bcn_force_config_t), iov_buf, WLC_IOCTL_SMLEN); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf, - WLC_IOCTL_SMLEN, TRUE, 0)) < 0) { - DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n", - __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR, - AIBSS_BCN_FLOOD_DUR, ret)); } -#endif /* WLAIBSS */ - +#endif /* ENABLE_TEMP_THROTTLING */ #if defined(CUSTOM_AMPDU_MPDU) ampdu_mpdu = CUSTOM_AMPDU_MPDU; if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) { @@ -6531,33 +8804,14 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) amsdu_aggsf = CUSTOM_AMSDU_AGGSF; if (amsdu_aggsf != 0) { bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { - DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n", + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n", __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret)); } } #endif /* CUSTOM_AMSDU_AGGSF */ -#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X) - /* Read 4-way handshake requirements */ - if (dhd_use_idsup == 1) { - bcm_mkiovar("sup_wpa", (char *)&sup_wpa, 4, iovbuf, sizeof(iovbuf)); - ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0); - /* sup_wpa iovar returns NOTREADY status on some platforms using modularized - * in-dongle supplicant. 
- */ - if (ret >= 0 || ret == BCME_NOTREADY) - dhd->fw_4way_handshake = TRUE; - DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake)); - } -#endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */ -#ifdef SUPPORT_2G_VHT - bcm_mkiovar("vht_features", (char *)&vht_features, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { - DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret)); - } -#endif /* SUPPORT_2G_VHT */ #ifdef CUSTOM_PSPRETEND_THR /* Turn off MPC in AP mode */ bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4, @@ -6591,7 +8845,8 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) setbit(eventmask, WLC_E_ASSOC); setbit(eventmask, WLC_E_REASSOC); setbit(eventmask, WLC_E_REASSOC_IND); - setbit(eventmask, WLC_E_DEAUTH); + if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE)) + setbit(eventmask, WLC_E_DEAUTH); setbit(eventmask, WLC_E_DEAUTH_IND); setbit(eventmask, WLC_E_DISASSOC_IND); setbit(eventmask, WLC_E_DISASSOC); @@ -6600,7 +8855,6 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) setbit(eventmask, WLC_E_ASSOC_IND); setbit(eventmask, WLC_E_PSK_SUP); setbit(eventmask, WLC_E_LINK); - setbit(eventmask, WLC_E_NDIS_LINK); setbit(eventmask, WLC_E_MIC_ERROR); setbit(eventmask, WLC_E_ASSOC_REQ_IE); setbit(eventmask, WLC_E_ASSOC_RESP_IE); @@ -6610,6 +8864,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #endif setbit(eventmask, WLC_E_JOIN_START); // setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event +#ifdef DHD_DEBUG + setbit(eventmask, WLC_E_SCAN_CONFIRM_IND); +#endif #ifdef WLMEDIA_HTSF setbit(eventmask, WLC_E_HTSFSYNC); #endif /* WLMEDIA_HTSF */ @@ -6622,28 +8879,44 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) /* enable dongle roaming event */ setbit(eventmask, WLC_E_ROAM); setbit(eventmask, WLC_E_BSSID); -#ifdef BCMCCX - setbit(eventmask, WLC_E_ADDTS_IND); - setbit(eventmask, WLC_E_DELTS_IND); -#endif /* BCMCCX */ #ifdef WLTDLS setbit(eventmask, WLC_E_TDLS_PEER_EVENT); #endif 
/* WLTDLS */ +#ifdef WL_ESCAN + setbit(eventmask, WLC_E_ESCAN_RESULT); +#endif #ifdef WL_CFG80211 setbit(eventmask, WLC_E_ESCAN_RESULT); + setbit(eventmask, WLC_E_AP_STARTED); if (dhd->op_mode & DHD_FLAG_P2P_MODE) { setbit(eventmask, WLC_E_ACTION_FRAME_RX); setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE); } #endif /* WL_CFG80211 */ -#ifdef WLAIBSS - setbit(eventmask, WLC_E_AIBSS_TXFAIL); -#endif /* WLAIBSS */ -#ifdef CUSTOMER_HW10 - clrbit(eventmask, WLC_E_TRACE); -#else + +#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) + if (dhd_logtrace_from_file(dhd)) { + setbit(eventmask, WLC_E_TRACE); + } else { + clrbit(eventmask, WLC_E_TRACE); + } +#elif defined(SHOW_LOGTRACE) setbit(eventmask, WLC_E_TRACE); +#else + clrbit(eventmask, WLC_E_TRACE); +#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */ + + setbit(eventmask, WLC_E_CSA_COMPLETE_IND); +#ifdef DHD_LOSSLESS_ROAMING + setbit(eventmask, WLC_E_ROAM_PREP); #endif +#ifdef CUSTOM_EVENT_PM_WAKE + setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT); +#endif /* CUSTOM_EVENT_PM_WAKE */ +#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) + dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP); +#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */ + /* Write updated Event mask */ bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { @@ -6666,11 +8939,13 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) /* Read event_msgs_ext mask */ bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN); ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0); - if (ret2 != BCME_UNSUPPORTED) - ret = ret2; if (ret2 == 0) { /* event_msgs_ext must be supported */ bcopy(iov_buf, eventmask_msg, msglen); - +#ifdef GSCAN_SUPPORT + setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT); + setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE); 
+ setbit(eventmask_msg->mask, WLC_E_PFN_SWC); +#endif /* GSCAN_SUPPORT */ #ifdef BT_WIFI_HANDOVER setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ); #endif /* BT_WIFI_HANDOVER */ @@ -6686,10 +8961,15 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret)); goto done; } - } else if (ret2 < 0 && ret2 != BCME_UNSUPPORTED) { + } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) { + /* Skip for BCME_UNSUPPORTED or BCME_VERSION */ + DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n", + __FUNCTION__, ret2)); + } else { DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2)); + ret = ret2; goto done; - } /* unsupported is ok */ + } dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time, sizeof(scan_assoc_time), TRUE, 0); @@ -6717,17 +8997,31 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #ifdef PKT_FILTER_SUPPORT /* Setup default defintions for pktfilter , enable in suspend */ - dhd->pktfilter_count = 6; - /* Setup filter to allow only unicast */ if (dhd_master_mode) { - dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00"; + dhd->pktfilter_count = 6; dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL; dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL; dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL; - /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */ - dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB"; /* apply APP pktfilter */ dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806"; + + /* Setup filter to allow only unicast */ + dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00"; + + /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */ + dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL; + +#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER + dhd->pktfilter_count = 4; + /* Setup filter to block broadcast and NAT Keepalive packets */ + /* discard all broadcast packets */ + 
dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff"; + /* discard NAT Keepalive packets */ + dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009"; + /* discard NAT Keepalive packets */ + dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009"; + dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL; +#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */ } else dhd_conf_discard_pkt_filter(dhd); dhd_conf_add_pkt_filter(dhd); @@ -6745,45 +9039,10 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret)); #endif /* DISABLE_11N */ -#ifdef AMPDU_VO_ENABLE - tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */ - tid.enable = TRUE; - bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); - - tid.tid = PRIO_8021D_NC; /* Enable TID(7) for voice */ - tid.enable = TRUE; - bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf)); +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); -#endif -#if defined(SOFTAP_TPUT_ENHANCE) - if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { - dhd_bus_setidletime(dhd, (int)100); -#ifdef DHDTCPACK_SUPPRESS - dhd->tcpack_sup_enabled = FALSE; -#endif -#if defined(DHD_TCP_WINSIZE_ADJUST) - dhd_use_tcp_window_size_adjust = TRUE; -#endif - - memset(buf, 0, sizeof(buf)); - bcm_mkiovar("bus:txglom_auto_control", 0, 0, buf, sizeof(buf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) { - glom = 0; - bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); - } - else { - if (buf[0] == 0) { - glom = 1; - bcm_mkiovar("bus:txglom_auto_control", (char *)&glom, 4, iovbuf, - sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, 
iovbuf, sizeof(iovbuf), TRUE, 0); - } - } - } -#endif /* SOFTAP_TPUT_ENHANCE */ - +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ /* query for 'ver' to get version info from firmware */ memset(buf, 0, sizeof(buf)); ptr = buf; @@ -6794,16 +9053,41 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) bcmstrtok(&ptr, "\n", 0); /* Print fw version info */ DHD_ERROR(("Firmware version = %s\n", buf)); + strncpy(fw_version, buf, FW_VER_STR_LEN); dhd_set_version_info(dhd, buf); +#ifdef WRITE_WLANINFO + sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path); +#endif /* WRITE_WLANINFO */ + } + /* query for 'clmver' to get clm version info from firmware */ + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("clmver", (char *)&buf, 4, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + else { + char *clmver_temp_buf = NULL; + + if ((clmver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) { + DHD_ERROR(("Couldn't find \"Data:\"\n")); + } else { + ptr = (clmver_temp_buf + strlen("Data:")); + if ((clmver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) { + DHD_ERROR(("Couldn't find New line character\n")); + } else { + memset(clm_version, 0, CLM_VER_STR_LEN); + strncpy(clm_version, clmver_temp_buf, + MIN(strlen(clmver_temp_buf), CLM_VER_STR_LEN - 1)); + DHD_ERROR((" clm = %s\n", clm_version)); + } + } } #if defined(BCMSDIO) dhd_txglom_enable(dhd, dhd->conf->bus_rxglom); // terence 20151210: set bus:txglom after dhd_txglom_enable since it's possible changed in dhd_conf_set_txglom_params - dhd_conf_set_fw_string_cmd(dhd, "bus:txglom", dhd->conf->bus_txglom, 1, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bus:txglom", dhd->conf->bus_txglom, 0, FALSE); #endif /* defined(BCMSDIO) */ - dhd_conf_set_disable_proptx(dhd); #if defined(BCMSDIO) #ifdef PROP_TXSTATUS if (disable_proptx || @@ -6815,6 +9099,23 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) FALSE) { wlfc_enable = FALSE; } + ret = dhd_conf_get_disable_proptx(dhd); + 
if (ret == 0){ + disable_proptx = 0; + wlfc_enable = TRUE; + } else if (ret >= 1) { + disable_proptx = 1; + wlfc_enable = FALSE; + /* terence 20161229: we should set ampdu_hostreorder=0 when disalbe_proptx=1 */ + hostreorder = 0; + } + +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) { + DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx)); + wlfc_enable = proptx; + } +#endif /* USE_WFA_CERT_CONF */ #ifndef DISABLE_11N ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0); @@ -6823,23 +9124,42 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2)); if (ret2 != BCME_UNSUPPORTED) ret = ret2; + + if (ret == BCME_NOTDOWN) { + uint wl_down = 1; + ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, + sizeof(wl_down), TRUE, 0); + DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n", + __FUNCTION__, ret2, hostreorder)); + + bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, + iovbuf, sizeof(iovbuf)); + ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + DHD_ERROR(("%s wl ampdu_hostreorder. 
ret --> %d\n", __FUNCTION__, ret2)); + if (ret2 != BCME_UNSUPPORTED) + ret = ret2; + } if (ret2 != BCME_OK) hostreorder = 0; } #endif /* DISABLE_11N */ -#ifdef READ_CONFIG_FROM_FILE - dhd_preinit_config(dhd, 0); -#endif /* READ_CONFIG_FROM_FILE */ - if (wlfc_enable) + if (wlfc_enable) { dhd_wlfc_init(dhd); + /* terence 20161229: enable ampdu_hostreorder if tlv enabled */ + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE); + } #ifndef DISABLE_11N else if (hostreorder) dhd_wlfc_hostreorder_init(dhd); #endif /* DISABLE_11N */ - +#else + /* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */ + printf("%s: not define PROP_TXSTATUS\n", __FUNCTION__); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE); #endif /* PROP_TXSTATUS */ + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", dhd->conf->ampdu_hostreorder, 0, TRUE); #endif /* BCMSDIO || BCMBUS */ #ifdef PCIE_FULL_DONGLE /* For FD we need all the packets at DHD to handle intra-BSS forwarding */ @@ -6862,6 +9182,20 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0); #endif +#ifdef SUPPORT_SENSORHUB + bcm_mkiovar("shub", (char *)&shub_enable, 4, iovbuf, sizeof(iovbuf)); + if ((dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), + FALSE, 0)) < 0) { + DHD_ERROR(("%s failed to get shub hub enable information %d\n", + __FUNCTION__, ret)); + dhd->info->shub_enable = 0; + } else { + memcpy(&shub_enable, iovbuf, sizeof(uint32)); + dhd->info->shub_enable = shub_enable; + DHD_ERROR(("%s: checking sensorhub enable %d\n", + __FUNCTION__, dhd->info->shub_enable)); + } +#endif /* SUPPORT_SENSORHUB */ done: if (eventmask_msg) @@ -7014,16 +9348,15 @@ static int dhd_inetaddr_notifier_call(struct notifier_block *this, if (dhd_pub->arp_version == 1) { idx = 0; - } - else { + } else { for (idx = 0; idx < DHD_MAX_IFS; idx++) { if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev) break; } 
- if (idx < DHD_MAX_IFS) + if (idx < DHD_MAX_IFS) { DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net, dhd->iflist[idx]->name, dhd->iflist[idx]->idx)); - else { + } else { DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label)); idx = 0; } @@ -7074,7 +9407,7 @@ static int dhd_inetaddr_notifier_call(struct notifier_block *this, } #endif /* ARP_OFFLOAD_SUPPORT */ -#ifdef CONFIG_IPV6 +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) /* Neighbor Discovery Offload: defered handler */ static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event) @@ -7105,7 +9438,7 @@ dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event) switch (ndo_work->event) { case NETDEV_UP: - DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n", __FUNCTION__)); + DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__)); ret = dhd_ndo_enable(pub, TRUE); if (ret < 0) { DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret)); @@ -7173,6 +9506,7 @@ static int dhd_inet6addr_notifier_call(struct notifier_block *this, if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev) return NOTIFY_DONE; dhd_pub = &dhd->pub; + if (!FW_SUPPORTED(dhd_pub, ndoe)) return NOTIFY_DONE; @@ -7191,7 +9525,7 @@ static int dhd_inet6addr_notifier_call(struct notifier_block *this, dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW); return NOTIFY_DONE; } -#endif /* #ifdef CONFIG_IPV6 */ +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ int dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) @@ -7209,7 +9543,6 @@ dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) net = ifp->net; ASSERT(net && (ifp->idx == ifidx)); -#ifndef P2PONEINT #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) ASSERT(!net->open); net->get_stats = dhd_get_stats; @@ -7222,9 +9555,6 @@ dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) ASSERT(!net->netdev_ops); net->netdev_ops = &dhd_ops_virt; #endif /* LINUX_VERSION_CODE < 
KERNEL_VERSION(2, 6, 31) */ -#else - net->netdev_ops = &dhd_cfgp2p_ops_virt; -#endif /* P2PONEINT */ /* Ok, link into the network layer... */ if (ifidx == 0) { @@ -7277,6 +9607,11 @@ dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) if (ifidx == 0) printf("%s\n", dhd_version); +#ifdef WL_EXT_IAPSTA + else if (!strncmp(net->name, "wl0.", strlen("wl0."))) { + wl_android_ext_attach_netdev(net, ifidx); + } +#endif if (need_rtnl_lock) err = register_netdev(net); @@ -7288,28 +9623,35 @@ dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) goto fail; } -#ifdef SET_RPS_CPUS - err = custom_rps_map_set(net->_rx, RPS_CPUS_MASK, strlen(RPS_CPUS_MASK)); - if (err < 0) - DHD_ERROR(("%s : custom_rps_map_set done. error : %d\n", __FUNCTION__, err)); -#endif /* SET_RPS_CPUS */ - printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name, +#if defined(CUSTOMER_HW4_DEBUG) + MAC2STRDBG(dhd->pub.mac.octet)); +#else MAC2STRDBG(net->dev_addr)); +#endif /* CUSTOMER_HW4_DEBUG */ #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211) // wl_iw_iscan_set_scan_broadcast_prep(net, 1); #endif -#if 1 && (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \ +#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \ KERNEL_VERSION(2, 6, 27)))) if (ifidx == 0) { -#ifdef BCMLXSDMMC +#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD) up(&dhd_registration_sem); -#endif +#endif /* BCMLXSDMMC */ if (!dhd_download_fw_on_driverload) { +#ifdef WL_CFG80211 + wl_terminate_event_handler(); +#endif /* WL_CFG80211 */ +#if defined(DHD_LB) && defined(DHD_LB_RXP) + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB && DHD_LB_RXP */ +#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); +#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ dhd_net_bus_devreset(net, TRUE); #ifdef BCMLXSDMMC dhd_net_bus_suspend(net); @@ -7354,7 +9696,7 @@ dhd_bus_detach(dhd_pub_t *dhdp) #if defined(OOB_INTR_ONLY) 
|| defined(BCMPCIE_OOB_HOST_WAKE) dhd_bus_oob_intr_unregister(dhdp); -#endif +#endif } } } @@ -7365,6 +9707,7 @@ void dhd_detach(dhd_pub_t *dhdp) dhd_info_t *dhd; unsigned long flags; int timer_valid = FALSE; + struct net_device *dev; if (!dhdp) return; @@ -7373,9 +9716,21 @@ void dhd_detach(dhd_pub_t *dhdp) if (!dhd) return; -#if defined(CUSTOMER_HW20) && defined(WLANAUDIO) - dhd_global = NULL; -#endif /* CUSTOMER_HW20 && WLANAUDIO */ + dev = dhd->iflist[0]->net; + + if (dev) { + rtnl_lock(); + if (dev->flags & IFF_UP) { + /* If IFF_UP is still up, it indicates that + * "ifconfig wlan0 down" hasn't been called. + * So invoke dev_close explicitly here to + * bring down the interface. + */ + DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n")); + dev_close(dev); + } + rtnl_unlock(); + } DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state)); @@ -7387,14 +9742,37 @@ void dhd_detach(dhd_pub_t *dhdp) OSL_SLEEP(100); } +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + +#ifdef PROP_TXSTATUS +#ifdef DHD_WLFC_THREAD + if (dhd->pub.wlfc_thread) { + kthread_stop(dhd->pub.wlfc_thread); + dhdp->wlfc_thread_go = TRUE; + wake_up_interruptible(&dhdp->wlfc_wqhead); + } + dhd->pub.wlfc_thread = NULL; +#endif /* DHD_WLFC_THREAD */ +#endif /* PROP_TXSTATUS */ + if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) { - dhd_bus_detach(dhdp); -#ifdef PCIE_FULL_DONGLE - dhd_flow_rings_deinit(dhdp); -#endif + dhd_bus_detach(dhdp); +#ifdef BCMPCIE + if (is_reboot == SYS_RESTART) { + extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata; + if (dhd_wifi_platdata && !dhdp->dongle_reset) { + dhdpcie_bus_clock_stop(dhdp->bus); + wifi_platform_set_power(dhd_wifi_platdata->adapters, + FALSE, WIFI_TURNOFF_DELAY); + } + } +#endif /* BCMPCIE */ +#ifndef PCIE_FULL_DONGLE if (dhdp->prot) dhd_prot_detach(dhdp); +#endif } #ifdef ARP_OFFLOAD_SUPPORT @@ -7403,13 +9781,12 @@ void dhd_detach(dhd_pub_t 
*dhdp) unregister_inetaddr_notifier(&dhd_inetaddr_notifier); } #endif /* ARP_OFFLOAD_SUPPORT */ -#ifdef CONFIG_IPV6 +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) if (dhd_inet6addr_notifier_registered) { dhd_inet6addr_notifier_registered = FALSE; unregister_inet6addr_notifier(&dhd_inet6addr_notifier); } -#endif - +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) { if (dhd->early_suspend.suspend) @@ -7422,6 +9799,9 @@ void dhd_detach(dhd_pub_t *dhdp) /* Detatch and unlink in the iw */ wl_iw_detach(); } +#ifdef WL_ESCAN + wl_escan_detach(); +#endif #endif /* defined(WL_WIRELESS_EXT) */ /* delete all interfaces, start with virtual */ @@ -7448,18 +9828,25 @@ void dhd_detach(dhd_pub_t *dhdp) /* in unregister_netdev case, the interface gets freed by net->destructor * (which is set to free_netdev) */ - if (ifp->net->reg_state == NETREG_UNINITIALIZED) + if (ifp->net->reg_state == NETREG_UNINITIALIZED) { free_netdev(ifp->net); - else { + } else { #ifdef SET_RPS_CPUS custom_rps_map_clear(ifp->net->_rx); #endif /* SET_RPS_CPUS */ + netif_tx_disable(ifp->net); unregister_netdev(ifp->net); } ifp->net = NULL; #ifdef DHD_WMF dhd_wmf_cleanup(dhdp, 0); #endif /* DHD_WMF */ +#ifdef DHD_L2_FILTER + bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, + NULL, FALSE, dhdp->tickcnt); + deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table); + ifp->phnd_arp_table = NULL; +#endif /* DHD_L2_FILTER */ dhd_if_del_sta_list(ifp); @@ -7475,8 +9862,14 @@ void dhd_detach(dhd_pub_t *dhdp) DHD_GENERAL_UNLOCK(&dhd->pub, flags); if (timer_valid) del_timer_sync(&dhd->timer); + DHD_DISABLE_RUNTIME_PM(&dhd->pub); if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) { +#ifdef DHD_PCIE_RUNTIMEPM + if (dhd->thr_rpm_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_rpm_ctl); + } +#endif /* DHD_PCIE_RUNTIMEPM */ if (dhd->thr_wdt_ctl.thr_pid >= 0) { 
PROC_STOP(&dhd->thr_wdt_ctl); } @@ -7487,9 +9880,32 @@ void dhd_detach(dhd_pub_t *dhdp) if (dhd->thr_dpc_ctl.thr_pid >= 0) { PROC_STOP(&dhd->thr_dpc_ctl); - } else + } else { tasklet_kill(&dhd->tasklet); - } +#ifdef DHD_LB_RXP + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ + } + } + +#if defined(DHD_LB) + /* Kill the Load Balancing Tasklets */ +#if defined(DHD_LB_TXC) + tasklet_disable(&dhd->tx_compl_tasklet); + tasklet_kill(&dhd->tx_compl_tasklet); +#endif /* DHD_LB_TXC */ +#if defined(DHD_LB_RXC) + tasklet_disable(&dhd->rx_compl_tasklet); + tasklet_kill(&dhd->rx_compl_tasklet); +#endif /* DHD_LB_RXC */ + if (dhd->cpu_notifier.notifier_call != NULL) + unregister_cpu_notifier(&dhd->cpu_notifier); + dhd_cpumasks_deinit(dhd); +#endif /* DHD_LB */ + +#ifdef DHD_LOG_DUMP + dhd_log_dump_deinit(&dhd->pub); +#endif /* DHD_LOG_DUMP */ #ifdef WL_CFG80211 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) { wl_cfg80211_detach(NULL); @@ -7505,6 +9921,8 @@ void dhd_detach(dhd_pub_t *dhdp) kfree(dhd->event_data.fmts); if (dhd->event_data.raw_fmts) kfree(dhd->event_data.raw_fmts); + if (dhd->event_data.raw_sstr) + kfree(dhd->event_data.raw_sstr); #endif /* SHOW_LOGTRACE */ #ifdef PNO_SUPPORT @@ -7513,10 +9931,11 @@ void dhd_detach(dhd_pub_t *dhdp) #endif #if defined(CONFIG_PM_SLEEP) if (dhd_pm_notifier_registered) { - unregister_pm_notifier(&dhd_pm_notifier); + unregister_pm_notifier(&dhd->pm_notifier); dhd_pm_notifier_registered = FALSE; } #endif /* CONFIG_PM_SLEEP */ + #ifdef DEBUG_CPU_FREQ if (dhd->new_freq) free_percpu(dhd->new_freq); @@ -7526,27 +9945,30 @@ void dhd_detach(dhd_pub_t *dhdp) if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) { DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter)); #ifdef CONFIG_HAS_WAKELOCK - dhd->wakelock_counter = 0; dhd->wakelock_wd_counter = 0; - dhd->wakelock_rx_timeout_enable = 0; - dhd->wakelock_ctrl_timeout_enable = 0; - wake_lock_destroy(&dhd->wl_wifi); - wake_lock_destroy(&dhd->wl_rxwake); - 
wake_lock_destroy(&dhd->wl_ctrlwake); wake_lock_destroy(&dhd->wl_wdwake); -#ifdef BCMPCIE_OOB_HOST_WAKE - wake_lock_destroy(&dhd->wl_intrwake); -#endif /* BCMPCIE_OOB_HOST_WAKE */ + // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry + wake_lock_destroy(&dhd->wl_wifi); #endif /* CONFIG_HAS_WAKELOCK */ + DHD_OS_WAKE_LOCK_DESTROY(dhd); } - #ifdef DHDTCPACK_SUPPRESS /* This will free all MEM allocated for TCPACK SUPPRESS */ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); #endif /* DHDTCPACK_SUPPRESS */ + +#ifdef PCIE_FULL_DONGLE + dhd_flow_rings_deinit(dhdp); + if (dhdp->prot) + dhd_prot_detach(dhdp); +#endif + + + dhd_sysfs_exit(dhd); + dhd->pub.is_fw_download_done = FALSE; dhd_conf_detach(dhdp); } @@ -7578,6 +10000,25 @@ dhd_free(dhd_pub_t *dhdp) dhd_sta_pool_fini(dhdp, DHD_MAX_STA); dhd = (dhd_info_t *)dhdp->info; + if (dhdp->soc_ram) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); +#else + MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + dhdp->soc_ram = NULL; + } +#ifdef CACHE_FW_IMAGES + if (dhdp->cached_fw) { + MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize); + dhdp->cached_fw = NULL; + } + + if (dhdp->cached_nvram) { + MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE); + dhdp->cached_nvram = NULL; + } +#endif /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */ if (dhd && dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE)) @@ -7593,6 +10034,10 @@ dhd_clear(dhd_pub_t *dhdp) if (dhdp) { int i; +#ifdef DHDTCPACK_SUPPRESS + /* Clean up timer/data structure for any remaining/pending packet or timer. 
*/ + dhd_tcpack_info_tbl_clean(dhdp); +#endif /* DHDTCPACK_SUPPRESS */ for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) { if (dhdp->reorder_bufs[i]) { reorder_info_t *ptr; @@ -7610,6 +10055,15 @@ dhd_clear(dhd_pub_t *dhdp) } dhd_sta_pool_clear(dhdp, DHD_MAX_STA); + + if (dhdp->soc_ram) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); +#else + MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + dhdp->soc_ram = NULL; + } } } @@ -7623,6 +10077,7 @@ dhd_module_cleanup(void) wl_android_exit(); dhd_wifi_platform_unregister_drv(); + #ifdef CUSTOMER_HW_AMLOGIC #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) wifi_teardown_dt(); @@ -7634,6 +10089,7 @@ dhd_module_cleanup(void) static void __exit dhd_module_exit(void) { + dhd_buzzz_detach(); dhd_module_cleanup(); unregister_reboot_notifier(&dhd_reboot_notifier); } @@ -7644,7 +10100,7 @@ dhd_module_init(void) int err; int retry = POWERUP_MAX_RETRY; - printf("%s: in\n", __FUNCTION__); + printf("%s: in %s\n", __FUNCTION__, dhd_version); #ifdef CUSTOMER_HW_AMLOGIC #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) if (wifi_setup_dt()) { @@ -7653,8 +10109,11 @@ dhd_module_init(void) #endif #endif + dhd_buzzz_attach(); + DHD_PERIM_RADIO_INIT(); + if (firmware_path[0] != '\0') { strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN); fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0'; @@ -7688,6 +10147,10 @@ dhd_module_init(void) #endif #endif DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__)); + } else { + if (!dhd_download_fw_on_driverload) { + dhd_driver_init_done = TRUE; + } } printf("%s: Exit err=%d\n", __FUNCTION__, err); @@ -7699,15 +10162,24 @@ dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unuse { DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code)); if (code == SYS_RESTART) { +#ifdef BCMPCIE + is_reboot = code; 
+#endif /* BCMPCIE */ } - return NOTIFY_DONE; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) #if defined(CONFIG_DEFERRED_INITCALLS) +#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \ + defined(CONFIG_ARCH_MSM8996) +deferred_module_init_sync(dhd_module_init); +#else deferred_module_init(dhd_module_init); +#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 || + * CONFIG_ARCH_MSM8996 + */ #elif defined(USE_LATE_INITCALL_SYNC) late_initcall_sync(dhd_module_init); #else @@ -7752,6 +10224,26 @@ dhd_os_proto_unblock(dhd_pub_t *pub) return 0; } +void +dhd_os_dhdiovar_lock(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + mutex_lock(&dhd->dhd_iovar_mutex); + } +} + +void +dhd_os_dhdiovar_unlock(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + mutex_unlock(&dhd->dhd_iovar_mutex); + } +} + unsigned int dhd_os_get_ioctl_resp_timeout(void) { @@ -7765,7 +10257,7 @@ dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec) } int -dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending) +dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition) { dhd_info_t * dhd = (dhd_info_t *)(pub->info); int timeout; @@ -7795,6 +10287,69 @@ dhd_os_ioctl_resp_wake(dhd_pub_t *pub) return 0; } +int +dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Convert timeout in millsecond to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec); +#else + timeout = dhd_ioctl_timeout_msec * HZ / 1000; +#endif + + DHD_PERIM_UNLOCK(pub); + + timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout); + + DHD_PERIM_LOCK(pub); + + return timeout; +} + +int +dhd_os_d3ack_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + wake_up(&dhd->d3ack_wait); + return 0; +} + +int +dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint 
*condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Wait for bus usage contexts to gracefully exit within some timeout value + * Set time out to little higher than dhd_ioctl_timeout_msec, + * so that IOCTL timeout should not get affected. + */ + /* Convert timeout in millsecond to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT); +#else + timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000; +#endif + + timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout); + + return timeout; +} + +int INLINE +dhd_os_busbusy_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + /* Call wmb() to make sure before waking up the other event value gets updated */ + OSL_SMP_WMB(); + wake_up(&dhd->dhd_bus_busy_state_wait); + return 0; +} + void dhd_os_wd_timer_extend(void *bus, bool extend) { @@ -7822,6 +10377,7 @@ dhd_os_wd_timer(void *bus, uint wdtick) return; } + DHD_OS_WD_WAKE_LOCK(pub); DHD_GENERAL_LOCK(pub, flags); /* don't start the wd until fw is loaded */ @@ -7836,25 +10392,78 @@ dhd_os_wd_timer(void *bus, uint wdtick) if (!wdtick && dhd->wd_timer_valid == TRUE) { dhd->wd_timer_valid = FALSE; DHD_GENERAL_UNLOCK(pub, flags); - del_timer_sync(&dhd->timer); - DHD_OS_WD_WAKE_UNLOCK(pub); + del_timer_sync(&dhd->timer); + DHD_OS_WD_WAKE_UNLOCK(pub); + return; + } + + if (wdtick) { + DHD_OS_WD_WAKE_LOCK(pub); + dhd_watchdog_ms = (uint)wdtick; + /* Re arm the timer, at last watchdog period */ + mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); + dhd->wd_timer_valid = TRUE; + } + DHD_GENERAL_UNLOCK(pub, flags); + DHD_OS_WD_WAKE_UNLOCK(pub); +} + +#ifdef DHD_PCIE_RUNTIMEPM +void +dhd_os_runtimepm_timer(void *bus, uint tick) +{ + dhd_pub_t *pub = bus; + dhd_info_t *dhd = (dhd_info_t *)pub->info; + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + 
return; + } + + DHD_GENERAL_LOCK(pub, flags); + + /* don't start the RPM until fw is loaded */ + if (pub->busstate == DHD_BUS_DOWN || + pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_GENERAL_UNLOCK(pub, flags); return; } - if (wdtick) { - DHD_OS_WD_WAKE_LOCK(pub); - dhd_watchdog_ms = (uint)wdtick; - /* Re arm the timer, at last watchdog period */ - mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); - dhd->wd_timer_valid = TRUE; + /* If tick is non-zero, the request is to start the timer */ + if (tick) { + /* Start the timer only if its not already running */ + if (dhd->rpm_timer_valid == FALSE) { + mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms)); + dhd->rpm_timer_valid = TRUE; + } + } else { + /* tick is zero, we have to stop the timer */ + /* Stop the timer only if its running, otherwise we don't have to do anything */ + if (dhd->rpm_timer_valid == TRUE) { + dhd->rpm_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(pub, flags); + del_timer_sync(&dhd->rpm_timer); + /* we have already released the lock, so just go to exit */ + goto exit; + } } + DHD_GENERAL_UNLOCK(pub, flags); +exit: + return; + } +#endif /* DHD_PCIE_RUNTIMEPM */ + void * dhd_os_open_image(char *filename) { struct file *fp; + int size; fp = filp_open(filename, O_RDONLY, 0); /* @@ -7863,9 +10472,27 @@ dhd_os_open_image(char *filename) * fp = open_namei(AT_FDCWD, filename, O_RD, 0); * ??? 
*/ - if (IS_ERR(fp)) + if (IS_ERR(fp)) { + fp = NULL; + goto err; + } + + if (!S_ISREG(file_inode(fp)->i_mode)) { + DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename)); + fp = NULL; + goto err; + } + + size = i_size_read(file_inode(fp)); + if (size <= 0) { + DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size)); fp = NULL; + goto err; + } + + DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size)); +err: return fp; } @@ -7874,17 +10501,38 @@ dhd_os_get_image_block(char *buf, int len, void *image) { struct file *fp = (struct file *)image; int rdlen; + int size; if (!image) return 0; - rdlen = kernel_read(fp, fp->f_pos, buf, len); + size = i_size_read(file_inode(fp)); + rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size)); + + if (len >= size && size != rdlen) { + return -EIO; + } + if (rdlen > 0) fp->f_pos += rdlen; return rdlen; } +int +dhd_os_get_image_size(void *image) +{ + struct file *fp = (struct file *)image; + int size; + if (!image) { + return 0; + } + + size = i_size_read(file_inode(fp)); + + return size; +} + void dhd_os_close_image(void *image) { @@ -7966,23 +10614,43 @@ dhd_os_rxfunlock(dhd_pub_t *pub) } #ifdef DHDTCPACK_SUPPRESS -void +unsigned long dhd_os_tcpacklock(dhd_pub_t *pub) { dhd_info_t *dhd; + unsigned long flags = 0; dhd = (dhd_info_t *)(pub->info); - spin_lock_bh(&dhd->tcpack_lock); + if (dhd) { +#ifdef BCMSDIO + spin_lock_bh(&dhd->tcpack_lock); +#else + spin_lock_irqsave(&dhd->tcpack_lock, flags); +#endif /* BCMSDIO */ + } + + return flags; } void -dhd_os_tcpackunlock(dhd_pub_t *pub) +dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags) { dhd_info_t *dhd; +#ifdef BCMSDIO + BCM_REFERENCE(flags); +#endif /* BCMSDIO */ + dhd = (dhd_info_t *)(pub->info); - spin_unlock_bh(&dhd->tcpack_lock); + + if (dhd) { +#ifdef BCMSDIO + spin_unlock_bh(&dhd->tcpack_lock); // terence 20160519 +#else + spin_unlock_irqrestore(&dhd->tcpack_lock, flags); +#endif /* BCMSDIO */ + } } #endif /* 
DHDTCPACK_SUPPRESS */ @@ -7992,12 +10660,8 @@ uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_i gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC; buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size); - if (buf == NULL) { - DHD_ERROR(("%s: failed to alloc memory, section: %d," - " size: %dbytes\n", __FUNCTION__, section, size)); - if (kmalloc_if_fail) - buf = kmalloc(size, flags); - } + if (buf == NULL && kmalloc_if_fail) + buf = kmalloc(size, flags); return buf; } @@ -8026,128 +10690,17 @@ dhd_get_wireless_stats(struct net_device *dev) } #endif /* defined(WL_WIRELESS_EXT) */ -#if defined(CUSTOMER_HW20) && defined(WLANAUDIO) -static int -dhd_wlanaudio_event(dhd_info_t *dhd, int *ifidx, void *pktdata, - wl_event_msg_t *event, void **data) -{ - int cnt; - char eabuf[ETHER_ADDR_STR_LEN]; - struct ether_addr *addr = &event->addr; - uint32 type = ntoh32_ua((void *)&event->event_type); - - switch (type) { - case WLC_E_TXFAIL: - if (addr != NULL) - bcm_ether_ntoa(addr, eabuf); - else - return (BCME_ERROR); - - for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) { - if (dhd->wlanaudio_blist[cnt].is_blacklist) - break; - - if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr, - addr, ETHER_ADDR_LEN)) { - /* Mac address is Same */ - dhd->wlanaudio_blist[cnt].cnt++; - - if (dhd->wlanaudio_blist[cnt].cnt < 15) { - /* black list is false */ - if ((dhd->wlanaudio_blist[cnt].cnt > 10) && - (jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies - < 100)) { - dhd->wlanaudio_blist[cnt].is_blacklist = true; - dhd->is_wlanaudio_blist = true; - } - } else { - if ((!dhd->wlanaudio_blist[cnt].is_blacklist) && - (jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies - > 100)) { - - bzero(&dhd->wlanaudio_blist[cnt], - sizeof(struct wlanaudio_blacklist)); - } - } - break; - } else if ((!dhd->wlanaudio_blist[cnt].is_blacklist) && - (!dhd->wlanaudio_blist[cnt].cnt)) { - bcopy(addr, - (char*)&dhd->wlanaudio_blist[cnt].blacklist_addr, - 
ETHER_ADDR_LEN); - dhd->wlanaudio_blist[cnt].cnt++; - dhd->wlanaudio_blist[cnt].txfail_jiffies = jiffies; - - bcm_ether_ntoa(&dhd->wlanaudio_blist[cnt].blacklist_addr, eabuf); - break; - } - } - break; - case WLC_E_AUTH : - case WLC_E_AUTH_IND : - case WLC_E_DEAUTH : - case WLC_E_DEAUTH_IND : - case WLC_E_ASSOC: - case WLC_E_ASSOC_IND: - case WLC_E_REASSOC: - case WLC_E_REASSOC_IND: - case WLC_E_DISASSOC: - case WLC_E_DISASSOC_IND: - { - int bl_cnt = 0; - - if (addr != NULL) - bcm_ether_ntoa(addr, eabuf); - else - return (BCME_ERROR); - - for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) { - if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr, - addr, ETHER_ADDR_LEN)) { - /* Mac address is Same */ - if (dhd->wlanaudio_blist[cnt].is_blacklist) { - /* black list is true */ - bzero(&dhd->wlanaudio_blist[cnt], - sizeof(struct wlanaudio_blacklist)); - } - } - } - - for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) { - if (dhd->wlanaudio_blist[cnt].is_blacklist) - bl_cnt++; - } - - if (!bl_cnt) - { - dhd->is_wlanaudio_blist = false; - } - - break; - } - } - return BCME_OK; -} -#endif /* CUSTOMER_HW20 && WLANAUDIO */ static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, wl_event_msg_t *event, void **data) { int bcmerror = 0; - ASSERT(dhd != NULL); -#if defined(CUSTOMER_HW20) && defined(WLANAUDIO) - bcmerror = dhd_wlanaudio_event(dhd, ifidx, pktdata, event, data); - - if (bcmerror != BCME_OK) - return (bcmerror); -#endif /* CUSTOMER_HW20 && WLANAUDIO */ - #ifdef SHOW_LOGTRACE - bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data); + bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data); #else - bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL); + bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL); #endif /* SHOW_LOGTRACE */ if (bcmerror != BCME_OK) @@ -8163,7 +10716,7 @@ dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, 
ASSERT(dhd->iflist[*ifidx]->net != NULL); if (dhd->iflist[*ifidx]->net) { - wl_iw_event(dhd->iflist[*ifidx]->net, event, *data); + wl_iw_event(dhd->iflist[*ifidx]->net, event, *data); } } #endif /* defined(WL_WIRELESS_EXT) */ @@ -8183,102 +10736,6 @@ void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data) { switch (ntoh32(event->event_type)) { -#ifdef WLBTAMP - /* Send up locally generated AMP HCI Events */ - case WLC_E_BTA_HCI_EVENT: { - struct sk_buff *p, *skb; - bcm_event_t *msg; - wl_event_msg_t *p_bcm_event; - char *ptr; - uint32 len; - uint32 pktlen; - dhd_if_t *ifp; - dhd_info_t *dhd; - uchar *eth; - int ifidx; - - len = ntoh32(event->datalen); - pktlen = sizeof(bcm_event_t) + len + 2; - dhd = dhdp->info; - ifidx = dhd_ifname2idx(dhd, event->ifname); - - if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) { - ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32))); - - msg = (bcm_event_t *) PKTDATA(dhdp->osh, p); - - bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN); - bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN); - ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost); - - msg->eth.ether_type = hton16(ETHER_TYPE_BRCM); - - /* BCM Vendor specific header... 
*/ - msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG); - msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION; - bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN); - - /* vendor spec header length + pvt data length (private indication - * hdr + actual message itself) - */ - msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH + - BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len); - msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT); - - PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2)); - - /* copy wl_event_msg_t into sk_buf */ - - /* pointer to wl_event_msg_t in sk_buf */ - p_bcm_event = &msg->event; - bcopy(event, p_bcm_event, sizeof(wl_event_msg_t)); - - /* copy hci event into sk_buf */ - bcopy(data, (p_bcm_event + 1), len); - - msg->bcm_hdr.length = hton16(sizeof(wl_event_msg_t) + - ntoh16(msg->bcm_hdr.length)); - PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2)); - - ptr = (char *)(msg + 1); - /* Last 2 bytes of the message are 0x00 0x00 to signal that there - * are no ethertypes which are following this - */ - ptr[len+0] = 0x00; - ptr[len+1] = 0x00; - - skb = PKTTONATIVE(dhdp->osh, p); - eth = skb->data; - len = skb->len; - - ifp = dhd->iflist[ifidx]; - if (ifp == NULL) - ifp = dhd->iflist[0]; - - ASSERT(ifp); - skb->dev = ifp->net; - skb->protocol = eth_type_trans(skb, skb->dev); - - skb->data = eth; - skb->len = len; - - /* Strip header, count, deliver upward */ - skb_pull(skb, ETH_HLEN); - - /* Send the packet */ - if (in_interrupt()) { - netif_rx(skb); - } else { - netif_rx_ni(skb); - } - } - else { - /* Could not allocate a sk_buf */ - DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__)); - } - break; - } /* case WLC_E_BTA_HCI_EVENT */ -#endif /* WLBTAMP */ default: break; @@ -8329,6 +10786,8 @@ dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len) /* Strip header, count, deliver upward */ skb_pull(skb, ETH_HLEN); + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); /* Send 
the packet */ if (in_interrupt()) { netif_rx(skb); @@ -8375,7 +10834,8 @@ void dhd_wait_event_wakeup(dhd_pub_t *dhd) int dhd_net_bus_devreset(struct net_device *dev, uint8 flag) { - int ret = 0; + int ret; + dhd_info_t *dhd = DHD_DEV_INFO(dev); if (flag == TRUE) { @@ -8398,7 +10858,7 @@ dhd_net_bus_devreset(struct net_device *dev, uint8 flag) dhd_update_fw_nv_path(dhd); /* update firmware and nvram path to sdio bus */ dhd_bus_update_fw_nv_path(dhd->pub.bus, - dhd->fw_path, dhd->nv_path, dhd->conf_path); + dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path); } #endif /* BCMSDIO */ @@ -8447,6 +10907,8 @@ int net_os_set_suspend(struct net_device *dev, int val, int force) dhd_info_t *dhd = DHD_DEV_INFO(dev); if (dhd) { +#ifdef CONFIG_MACH_UNIVERSAL7420 +#endif /* CONFIG_MACH_UNIVERSAL7420 */ #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) ret = dhd_set_suspend(val, &dhd->pub); #else @@ -8472,6 +10934,9 @@ int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val) #ifdef PKT_FILTER_SUPPORT int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num) { +#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER + return 0; +#else dhd_info_t *dhd = DHD_DEV_INFO(dev); char *filterp = NULL; int filter_id = 0; @@ -8479,9 +10944,8 @@ int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num) if (!dhd_master_mode) add_remove = !add_remove; - - if (!dhd || (num == DHD_UNICAST_FILTER_NUM) || - (num == DHD_MDNS_FILTER_NUM)) + DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num)); + if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) return ret; if (num >= dhd->pub.pktfilter_count) return -EINVAL; @@ -8498,6 +10962,10 @@ int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num) filterp = "103 0 0 0 0xFFFF 0x3333"; filter_id = 103; break; + case DHD_MDNS_FILTER_NUM: + filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB"; + filter_id = 104; + break; default: return -EINVAL; } @@ -8513,6 
+10981,7 @@ int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num) } } return ret; +#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */ } int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val) @@ -8539,6 +11008,7 @@ int net_os_enable_packet_filter(struct net_device *dev, int val) { dhd_info_t *dhd = DHD_DEV_INFO(dev); + DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val)); return dhd_os_enable_packet_filter(&dhd->pub, val); } #endif /* PKT_FILTER_SUPPORT */ @@ -8556,6 +11026,127 @@ dhd_dev_init_ioctl(struct net_device *dev) return ret; } +int +dhd_dev_get_feature_set(struct net_device *dev) +{ + dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhd = (&ptr->pub); + int feature_set = 0; + +#ifdef DYNAMIC_SWOOB_DURATION +#ifndef CUSTOM_INTR_WIDTH +#define CUSTOM_INTR_WIDTH 100 + int intr_width = 0; +#endif /* CUSTOM_INTR_WIDTH */ +#endif /* DYNAMIC_SWOOB_DURATION */ + if (!dhd) + return feature_set; + + if (FW_SUPPORTED(dhd, sta)) + feature_set |= WIFI_FEATURE_INFRA; + if (FW_SUPPORTED(dhd, dualband)) + feature_set |= WIFI_FEATURE_INFRA_5G; + if (FW_SUPPORTED(dhd, p2p)) + feature_set |= WIFI_FEATURE_P2P; + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) + feature_set |= WIFI_FEATURE_SOFT_AP; + if (FW_SUPPORTED(dhd, tdls)) + feature_set |= WIFI_FEATURE_TDLS; + if (FW_SUPPORTED(dhd, vsdb)) + feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL; + if (FW_SUPPORTED(dhd, nan)) { + feature_set |= WIFI_FEATURE_NAN; + /* NAN is essentail for d2d rtt */ + if (FW_SUPPORTED(dhd, rttd2d)) + feature_set |= WIFI_FEATURE_D2D_RTT; + } +#ifdef RTT_SUPPORT + feature_set |= WIFI_FEATURE_D2AP_RTT; +#endif /* RTT_SUPPORT */ +#ifdef LINKSTAT_SUPPORT + feature_set |= WIFI_FEATURE_LINKSTAT; +#endif /* LINKSTAT_SUPPORT */ + /* Supports STA + STA always */ + feature_set |= WIFI_FEATURE_ADDITIONAL_STA; +#ifdef PNO_SUPPORT + if (dhd_is_pno_supported(dhd)) { + feature_set |= WIFI_FEATURE_PNO; + feature_set |= WIFI_FEATURE_BATCH_SCAN; +#ifdef GSCAN_SUPPORT + feature_set |= 
WIFI_FEATURE_GSCAN; +#endif /* GSCAN_SUPPORT */ + } +#endif /* PNO_SUPPORT */ +#ifdef WL11U + feature_set |= WIFI_FEATURE_HOTSPOT; +#endif /* WL11U */ + return feature_set; +} + + +int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num) +{ + int feature_set_full, mem_needed; + int *ret; + + *num = 0; + mem_needed = sizeof(int) * MAX_FEATURE_SET_CONCURRRENT_GROUPS; + ret = (int *) kmalloc(mem_needed, GFP_KERNEL); + if (!ret) { + DHD_ERROR(("%s: failed to allocate %d bytes\n", __FUNCTION__, + mem_needed)); + return ret; + } + + feature_set_full = dhd_dev_get_feature_set(dev); + + ret[0] = (feature_set_full & WIFI_FEATURE_INFRA) | + (feature_set_full & WIFI_FEATURE_INFRA_5G) | + (feature_set_full & WIFI_FEATURE_NAN) | + (feature_set_full & WIFI_FEATURE_D2D_RTT) | + (feature_set_full & WIFI_FEATURE_D2AP_RTT) | + (feature_set_full & WIFI_FEATURE_PNO) | + (feature_set_full & WIFI_FEATURE_BATCH_SCAN) | + (feature_set_full & WIFI_FEATURE_GSCAN) | + (feature_set_full & WIFI_FEATURE_HOTSPOT) | + (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA) | + (feature_set_full & WIFI_FEATURE_EPR); + + ret[1] = (feature_set_full & WIFI_FEATURE_INFRA) | + (feature_set_full & WIFI_FEATURE_INFRA_5G) | + /* Not yet verified NAN with P2P */ + /* (feature_set_full & WIFI_FEATURE_NAN) | */ + (feature_set_full & WIFI_FEATURE_P2P) | + (feature_set_full & WIFI_FEATURE_D2AP_RTT) | + (feature_set_full & WIFI_FEATURE_D2D_RTT) | + (feature_set_full & WIFI_FEATURE_EPR); + + ret[2] = (feature_set_full & WIFI_FEATURE_INFRA) | + (feature_set_full & WIFI_FEATURE_INFRA_5G) | + (feature_set_full & WIFI_FEATURE_NAN) | + (feature_set_full & WIFI_FEATURE_D2D_RTT) | + (feature_set_full & WIFI_FEATURE_D2AP_RTT) | + (feature_set_full & WIFI_FEATURE_TDLS) | + (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL) | + (feature_set_full & WIFI_FEATURE_EPR); + *num = MAX_FEATURE_SET_CONCURRRENT_GROUPS; + + return ret; +} +#ifdef CUSTOM_FORCE_NODFS_FLAG +int +dhd_dev_set_nodfs(struct net_device *dev, u32 
nodfs) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (nodfs) + dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG; + else + dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG; + dhd->pub.force_country_change = TRUE; + return 0; +} +#endif /* CUSTOM_FORCE_NODFS_FLAG */ #ifdef PNO_SUPPORT /* Linux wrapper to call common dhd_pno_stop_for_ssid */ int @@ -8567,7 +11158,7 @@ dhd_dev_pno_stop_for_ssid(struct net_device *dev) } /* Linux wrapper to call common dhd_pno_set_for_ssid */ int -dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid, +dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid, uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan) { dhd_info_t *dhd = DHD_DEV_INFO(dev); @@ -8600,23 +11191,194 @@ dhd_dev_pno_stop_for_batch(struct net_device *dev) dhd_info_t *dhd = DHD_DEV_INFO(dev); return (dhd_pno_stop_for_batch(&dhd->pub)); } -/* Linux wrapper to call common dhd_dev_pno_set_for_batch */ +/* Linux wrapper to call common dhd_dev_pno_set_for_batch */ +int +dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_set_for_batch(&dhd->pub, batch_params)); +} +/* Linux wrapper to call common dhd_dev_pno_get_for_batch */ +int +dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL)); +} +/* Linux wrapper to call common dhd_pno_set_mac_oui */ +int +dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_set_mac_oui(&dhd->pub, oui)); +} +#endif /* PNO_SUPPORT */ + +#if defined(PNO_SUPPORT) +#ifdef GSCAN_SUPPORT +/* Linux wrapper to call common dhd_pno_set_cfg_gscan */ +int +dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *buf, uint8 flush) +{ + 
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush)); +} + +/* Linux wrapper to call common dhd_pno_get_gscan */ +void * +dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *info, uint32 *len) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_get_gscan(&dhd->pub, type, info, len)); +} + +/* Linux wrapper to call common dhd_wait_batch_results_complete */ +void +dhd_dev_wait_batch_results_complete(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_wait_batch_results_complete(&dhd->pub)); +} + +/* Linux wrapper to call common dhd_pno_lock_batch_results */ +void +dhd_dev_pno_lock_access_batch_results(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_lock_batch_results(&dhd->pub)); +} +/* Linux wrapper to call common dhd_pno_unlock_batch_results */ +void +dhd_dev_pno_unlock_access_batch_results(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_unlock_batch_results(&dhd->pub)); +} + +/* Linux wrapper to call common dhd_pno_initiate_gscan_request */ +int +dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush)); +} + +/* Linux wrapper to call common dhd_pno_enable_full_scan_result */ +int +dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag)); +} + +/* Linux wrapper to call common dhd_handle_swc_evt */ +void * +dhd_dev_swc_scan_event(struct net_device *dev, const void *data, int *send_evt_bytes) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_handle_swc_evt(&dhd->pub, data, send_evt_bytes)); +} + 
+/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */ +void * +dhd_dev_hotlist_scan_event(struct net_device *dev, + const void *data, int *send_evt_bytes, hotlist_type_t type) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type)); +} + +/* Linux wrapper to call common dhd_process_full_gscan_result */ +void * +dhd_dev_process_full_gscan_result(struct net_device *dev, +const void *data, int *send_evt_bytes) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_process_full_gscan_result(&dhd->pub, data, send_evt_bytes)); +} + +void +dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type); + + return; +} + +int +dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_gscan_batch_cache_cleanup(&dhd->pub)); +} + +/* Linux wrapper to call common dhd_retreive_batch_scan_results */ +int +dhd_dev_retrieve_batch_scan(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_retreive_batch_scan_results(&dhd->pub)); +} +#endif /* GSCAN_SUPPORT */ +#endif +#ifdef RTT_SUPPORT +/* Linux wrapper to call common dhd_pno_set_cfg_gscan */ +int +dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_set_cfg(&dhd->pub, buf)); +} +int +dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt)); +} +int +dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, 
noti_fn)); +} int -dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params) +dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - return (dhd_pno_set_for_batch(&dhd->pub, batch_params)); + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn)); } -/* Linux wrapper to call common dhd_dev_pno_get_for_batch */ + int -dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize) +dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL)); + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_capability(&dhd->pub, capa)); } -#endif /* PNO_SUPPORT */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1) +#endif /* RTT_SUPPORT */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) static void dhd_hang_process(void *dhd_info, void *event_info, u8 event) { dhd_info_t *dhd; @@ -8626,6 +11388,7 @@ static void dhd_hang_process(void *dhd_info, void *event_info, u8 event) dev = dhd->iflist[0]->net; if (dev) { + // terence 20161024: let wlan0 down when hang happened rtnl_lock(); dev_close(dev); rtnl_unlock(); @@ -8638,6 +11401,17 @@ static void dhd_hang_process(void *dhd_info, void *event_info, u8 event) } } +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +extern dhd_pub_t *link_recovery; +void dhd_host_recover_link(void) +{ + DHD_ERROR(("****** %s ******\n", __FUNCTION__)); + link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN; + dhd_bus_set_linkdown(link_recovery, TRUE); + dhd_os_send_hang_message(link_recovery); +} +EXPORT_SYMBOL(dhd_host_recover_link); +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ int dhd_os_send_hang_message(dhd_pub_t *dhdp) { @@ -8647,6 +11421,8 @@ int dhd_os_send_hang_message(dhd_pub_t *dhdp) dhdp->hang_was_sent = 
1; dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp, DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH); + DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__, + dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate)); } } return ret; @@ -8674,6 +11450,33 @@ int net_os_send_hang_message(struct net_device *dev) } return ret; } + +int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num) +{ + dhd_info_t *dhd = NULL; + dhd_pub_t *dhdp = NULL; + int reason; + + dhd = DHD_DEV_INFO(dev); + if (dhd) { + dhdp = &dhd->pub; + } + + if (!dhd || !dhdp) { + return 0; + } + + reason = bcm_strtoul(string_num, NULL, 0); + DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason)); + + if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) { + reason = 0; + } + + dhdp->hang_reason = reason; + + return net_os_send_hang_message(dev); +} #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */ @@ -8683,11 +11486,24 @@ int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned lo return wifi_platform_set_power(dhd->adapter, on, delay_msec); } +bool dhd_force_country_change(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd && dhd->pub.up) + return dhd->pub.force_country_change; + return FALSE; +} void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code, wl_country_t *cspec) { dhd_info_t *dhd = DHD_DEV_INFO(dev); +#ifdef CUSTOM_COUNTRY_CODE + get_customized_country_code(dhd->adapter, country_iso_code, cspec, + dhd->pub.dhd_cflags); +#else get_customized_country_code(dhd->adapter, country_iso_code, cspec); +#endif /* CUSTOM_COUNTRY_CODE */ } void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify) { @@ -8728,7 +11544,7 @@ int dhd_net_set_fw_path(struct net_device *dev, char *fw) DHD_INFO(("GOT STA FIRMWARE\n")); ap_fw_loaded = FALSE; } -#endif +#endif return 0; } @@ -8746,7 
+11562,7 @@ void dhd_net_if_unlock(struct net_device *dev) static void dhd_net_if_lock_local(dhd_info_t *dhd) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) if (dhd) mutex_lock(&dhd->dhd_net_if_mutex); #endif @@ -8754,7 +11570,7 @@ static void dhd_net_if_lock_local(dhd_info_t *dhd) static void dhd_net_if_unlock_local(dhd_info_t *dhd) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) if (dhd) mutex_unlock(&dhd->dhd_net_if_mutex); #endif @@ -8762,7 +11578,7 @@ static void dhd_net_if_unlock_local(dhd_info_t *dhd) static void dhd_suspend_lock(dhd_pub_t *pub) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) dhd_info_t *dhd = (dhd_info_t *)(pub->info); if (dhd) mutex_lock(&dhd->dhd_suspend_mutex); @@ -8771,7 +11587,7 @@ static void dhd_suspend_lock(dhd_pub_t *pub) static void dhd_suspend_unlock(dhd_pub_t *pub) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) dhd_info_t *dhd = (dhd_info_t *)(pub->info); if (dhd) mutex_unlock(&dhd->dhd_suspend_mutex); @@ -8812,7 +11628,8 @@ dhd_os_spin_lock_init(osl_t *osh) void dhd_os_spin_lock_deinit(osl_t *osh, void *lock) { - MFREE(osh, lock, sizeof(spinlock_t) + 4); + if (lock) + MFREE(osh, lock, sizeof(spinlock_t) + 4); } unsigned long dhd_os_spin_lock(void *lock) @@ -8867,23 +11684,103 @@ dhd_wait_pend8021x(struct net_device *dev) } #ifdef DHD_DEBUG +static void +dhd_convert_memdump_type_to_str(uint32 type, char *buf) +{ + char *type_str = NULL; + + switch (type) { + case DUMP_TYPE_RESUMED_ON_TIMEOUT: + type_str = "resumed_on_timeout"; + break; + case DUMP_TYPE_D3_ACK_TIMEOUT: + type_str = "D3_ACK_timeout"; + break; + case DUMP_TYPE_DONGLE_TRAP: + type_str = "Dongle_Trap"; + break; + case DUMP_TYPE_MEMORY_CORRUPTION: + type_str = "Memory_Corruption"; + break; + case 
DUMP_TYPE_PKTID_AUDIT_FAILURE: + type_str = "PKTID_AUDIT_Fail"; + break; + case DUMP_TYPE_SCAN_TIMEOUT: + type_str = "SCAN_timeout"; + break; + case DUMP_TYPE_SCAN_BUSY: + type_str = "SCAN_Busy"; + break; + case DUMP_TYPE_BY_SYSDUMP: + type_str = "BY_SYSDUMP"; + break; + case DUMP_TYPE_BY_LIVELOCK: + type_str = "BY_LIVELOCK"; + break; + case DUMP_TYPE_AP_LINKUP_FAILURE: + type_str = "BY_AP_LINK_FAILURE"; + break; + default: + type_str = "Unknown_type"; + break; + } + + strncpy(buf, type_str, strlen(type_str)); + buf[strlen(type_str)] = 0; +} + int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size) { int ret = 0; - struct file *fp; + struct file *fp = NULL; mm_segment_t old_fs; loff_t pos = 0; + char memdump_path[128]; + char memdump_type[32]; + struct timeval curtime; + uint32 file_mode; /* change to KERNEL_DS address limit */ old_fs = get_fs(); set_fs(KERNEL_DS); + /* Init file name */ + memset(memdump_path, 0, sizeof(memdump_path)); + memset(memdump_type, 0, sizeof(memdump_type)); + do_gettimeofday(&curtime); + dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type); +#ifdef CUSTOMER_HW4_DEBUG + snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld", + DHD_COMMON_DUMP_PATH "mem_dump", memdump_type, + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + file_mode = O_CREAT | O_WRONLY | O_SYNC; +#elif defined(CUSTOMER_HW2) + snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld", + "/data/misc/wifi/mem_dump", memdump_type, + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + file_mode = O_CREAT | O_WRONLY | O_SYNC; +#else + snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld", + "/installmedia/mem_dump", memdump_type, + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are + * calling BUG_ON immediately after collecting the socram dump. 
+ * So the file write operation should directly write the contents into the + * file instead of caching it. O_TRUNC flag ensures that file will be re-written + * instead of appending. + */ + file_mode = O_CREAT | O_WRONLY | O_DIRECT | O_SYNC | O_TRUNC; +#endif /* CUSTOMER_HW4_DEBUG */ + + /* print SOCRAM dump file path */ + DHD_ERROR(("%s: memdump_path = %s\n", __FUNCTION__, memdump_path)); + /* open file to write */ - fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640); - if (!fp) { - printf("%s: open file error\n", __FUNCTION__); - ret = -1; + fp = filp_open(memdump_path, file_mode, 0644); + if (IS_ERR(fp)) { + ret = PTR_ERR(fp); + printf("%s: open file error, err = %d\n", __FUNCTION__, ret); goto exit; } @@ -8891,14 +11788,20 @@ write_to_file(dhd_pub_t *dhd, uint8 *buf, int size) fp->f_op->write(fp, buf, size, &pos); exit: - /* free buf before return */ - MFREE(dhd->osh, buf, size); /* close file before return */ - if (fp) + if (!ret) filp_close(fp, current->files); + /* restore previous address limit */ set_fs(old_fs); + /* free buf before return */ +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhd, buf, size); +#else + MFREE(dhd->osh, buf, size); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + return ret; } #endif /* DHD_DEBUG */ @@ -9003,6 +11906,197 @@ int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val) return ret; } + +#if defined(DHD_TRACE_WAKE_LOCK) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) +#include +#else +#include +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) +/* Define 2^5 = 32 bucket size hash table */ +DEFINE_HASHTABLE(wklock_history, 5); +#else +/* Define 2^5 = 32 bucket size hash table */ +struct hlist_head wklock_history[32] = { [0 ... 
31] = HLIST_HEAD_INIT }; +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + +int trace_wklock_onoff = 1; + +typedef enum dhd_wklock_type { + DHD_WAKE_LOCK, + DHD_WAKE_UNLOCK, + DHD_WAIVE_LOCK, + DHD_RESTORE_LOCK +} dhd_wklock_t; + +struct wk_trace_record { + unsigned long addr; /* Address of the instruction */ + dhd_wklock_t lock_type; /* lock_type */ + unsigned long long counter; /* counter information */ + struct hlist_node wklock_node; /* hash node */ +}; + + +static struct wk_trace_record *find_wklock_entry(unsigned long addr) +{ + struct wk_trace_record *wklock_info; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr) +#else + struct hlist_node *entry; + int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history))); + hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + { + if (wklock_info->addr == addr) { + return wklock_info; + } + } + return NULL; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) +#define HASH_ADD(hashtable, node, key) \ + do { \ + hash_add(hashtable, node, key); \ + } while (0); +#else +#define HASH_ADD(hashtable, node, key) \ + do { \ + int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \ + hlist_add_head(node, &hashtable[index]); \ + } while (0); +#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */ + +#define STORE_WKLOCK_RECORD(wklock_type) \ + do { \ + struct wk_trace_record *wklock_info = NULL; \ + unsigned long func_addr = (unsigned long)__builtin_return_address(0); \ + wklock_info = find_wklock_entry(func_addr); \ + if (wklock_info) { \ + if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \ + wklock_info->counter = dhd->wakelock_counter; \ + } else { \ + wklock_info->counter++; \ + } \ + } else { \ + wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \ + if (!wklock_info) {\ + printk("Can't allocate wk_trace_record \n"); \ + } else { \ + 
wklock_info->addr = func_addr; \ + wklock_info->lock_type = wklock_type; \ + if (wklock_type == DHD_WAIVE_LOCK || \ + wklock_type == DHD_RESTORE_LOCK) { \ + wklock_info->counter = dhd->wakelock_counter; \ + } else { \ + wklock_info->counter++; \ + } \ + HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \ + } \ + } \ + } while (0); + +static inline void dhd_wk_lock_rec_dump(void) +{ + int bkt; + struct wk_trace_record *wklock_info; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each(wklock_history, bkt, wklock_info, wklock_node) +#else + struct hlist_node *entry = NULL; + int max_index = ARRAY_SIZE(wklock_history); + for (bkt = 0; bkt < max_index; bkt++) + hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + { + switch (wklock_info->lock_type) { + case DHD_WAKE_LOCK: + DHD_ERROR(("wakelock lock : %pS lock_counter : %llu\n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + case DHD_WAKE_UNLOCK: + DHD_ERROR(("wakelock unlock : %pS, unlock_counter : %llu\n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + case DHD_WAIVE_LOCK: + DHD_ERROR(("wakelock waive : %pS before_waive : %llu\n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + case DHD_RESTORE_LOCK: + DHD_ERROR(("wakelock restore : %pS, after_waive : %llu\n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + } + } +} + +static void dhd_wk_lock_trace_init(struct dhd_info *dhd) +{ + unsigned long flags; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + int i; +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_init(wklock_history); +#else + for (i = 0; i < ARRAY_SIZE(wklock_history); i++) + INIT_HLIST_HEAD(&wklock_history[i]); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + spin_unlock_irqrestore(&dhd->wakelock_spinlock, 
flags); +} + +static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd) +{ + int bkt; + struct wk_trace_record *wklock_info; + struct hlist_node *tmp; + unsigned long flags; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + struct hlist_node *entry = NULL; + int max_index = ARRAY_SIZE(wklock_history); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node) +#else + for (bkt = 0; bkt < max_index; bkt++) + hlist_for_each_entry_safe(wklock_info, entry, tmp, + &wklock_history[bkt], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_del(&wklock_info->wklock_node); +#else + hlist_del_init(&wklock_info->wklock_node); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ + kfree(wklock_info); + } + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); +} + +void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + unsigned long flags; + + DHD_ERROR((KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n")); + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + dhd_wk_lock_rec_dump(); + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + DHD_ERROR((KERN_ERR"Event wakelock counter %u\n", dhd->wakelock_event_counter)); +} +#else +#define STORE_WKLOCK_RECORD(wklock_type) +#endif /* ! 
DHD_TRACE_WAKE_LOCK */ + int dhd_os_wake_lock(dhd_pub_t *pub) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); @@ -9011,7 +12105,6 @@ int dhd_os_wake_lock(dhd_pub_t *pub) if (dhd) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); - if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { #ifdef CONFIG_HAS_WAKELOCK wake_lock(&dhd->wl_wifi); @@ -9019,10 +12112,39 @@ int dhd_os_wake_lock(dhd_pub_t *pub) dhd_bus_dev_pm_stay_awake(pub); #endif } +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_WAKE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ dhd->wakelock_counter++; ret = dhd->wakelock_counter; spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); } + + return ret; +} + +int dhd_event_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags); + if (dhd->wakelock_event_counter == 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock(&dhd->wl_evtwake); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_stay_awake(pub); +#endif + } + dhd->wakelock_event_counter++; + ret = dhd->wakelock_event_counter; + spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags); + } + return ret; } @@ -9045,8 +12167,14 @@ int dhd_os_wake_unlock(dhd_pub_t *pub) dhd_os_wake_lock_timeout(pub); if (dhd) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_counter > 0) { dhd->wakelock_counter--; +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { #ifdef CONFIG_HAS_WAKELOCK wake_unlock(&dhd->wl_wifi); @@ -9061,6 +12189,31 @@ int dhd_os_wake_unlock(dhd_pub_t *pub) return ret; } +int dhd_event_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + 
spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags); + + if (dhd->wakelock_event_counter > 0) { + dhd->wakelock_event_counter--; + if (dhd->wakelock_event_counter == 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_evtwake); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_relax(pub); +#endif + } + ret = dhd->wakelock_event_counter; + } + spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags); + } + return ret; +} + int dhd_os_check_wakelock(dhd_pub_t *pub) { #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \ @@ -9084,29 +12237,54 @@ int dhd_os_check_wakelock(dhd_pub_t *pub) return 0; } -int dhd_os_check_wakelock_all(dhd_pub_t *pub) +int +dhd_os_check_wakelock_all(dhd_pub_t *pub) { +#ifdef CONFIG_HAS_WAKELOCK + int l1, l2, l3, l4, l7; + int l5 = 0, l6 = 0; + int c, lock_active; +#endif /* CONFIG_HAS_WAKELOCK */ #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \ KERNEL_VERSION(2, 6, 36))) dhd_info_t *dhd; - if (!pub) + if (!pub) { return 0; + } dhd = (dhd_info_t *)(pub->info); + if (!dhd) { + return 0; + } #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */ #ifdef CONFIG_HAS_WAKELOCK - /* Indicate to the SD Host to avoid going to suspend if internal locks are up */ - if (dhd && (wake_lock_active(&dhd->wl_wifi) || - wake_lock_active(&dhd->wl_wdwake) || - wake_lock_active(&dhd->wl_rxwake) || - wake_lock_active(&dhd->wl_ctrlwake))) { + c = dhd->wakelock_counter; + l1 = wake_lock_active(&dhd->wl_wifi); + l2 = wake_lock_active(&dhd->wl_wdwake); + l3 = wake_lock_active(&dhd->wl_rxwake); + l4 = wake_lock_active(&dhd->wl_ctrlwake); +#ifdef BCMPCIE_OOB_HOST_WAKE + l5 = wake_lock_active(&dhd->wl_intrwake); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + l6 = wake_lock_active(&dhd->wl_scanwake); +#endif /* DHD_USE_SCAN_WAKELOCK */ + l7 = wake_lock_active(&dhd->wl_evtwake); + lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7); + + /* Indicate to 
the Host to avoid going to suspend if internal locks are up */ + if (dhd && lock_active) { + DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d " + "ctl-%d intr-%d scan-%d evt-%d\n", + __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7)); return 1; } #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) - if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) + if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) { return 1; -#endif + } +#endif /* CONFIG_HAS_WAKELOCK */ return 0; } @@ -9160,36 +12338,63 @@ int dhd_os_wd_wake_unlock(dhd_pub_t *pub) } #ifdef BCMPCIE_OOB_HOST_WAKE -int dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val) +void +dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val) { +#ifdef CONFIG_HAS_WAKELOCK dhd_info_t *dhd = (dhd_info_t *)(pub->info); - int ret = 0; if (dhd) { -#ifdef CONFIG_HAS_WAKELOCK wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val)); -#endif } - return ret; +#endif /* CONFIG_HAS_WAKELOCK */ } -int dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub) +void +dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub) { +#ifdef CONFIG_HAS_WAKELOCK dhd_info_t *dhd = (dhd_info_t *)(pub->info); - int ret = 0; if (dhd) { -#ifdef CONFIG_HAS_WAKELOCK /* if wl_intrwake is active, unlock it */ if (wake_lock_active(&dhd->wl_intrwake)) { wake_unlock(&dhd->wl_intrwake); } -#endif } - return ret; +#endif /* CONFIG_HAS_WAKELOCK */ } #endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK +void +dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +void +dhd_os_scan_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_scanwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_scanwake)) { + 
wake_unlock(&dhd->wl_scanwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} +#endif /* DHD_USE_SCAN_WAKELOCK */ + /* waive wakelocks for operations such as IOVARs in suspend function, must be closed * by a paired function call to dhd_wakelock_restore. returns current wakelock counter */ @@ -9201,8 +12406,14 @@ int dhd_os_wake_lock_waive(dhd_pub_t *pub) if (dhd) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ if (dhd->waive_wakelock == FALSE) { +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ /* record current lock status */ dhd->wakelock_before_waive = dhd->wakelock_counter; dhd->waive_wakelock = TRUE; @@ -9223,6 +12434,7 @@ int dhd_os_wake_lock_restore(dhd_pub_t *pub) return 0; spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ if (!dhd->waive_wakelock) goto exit; @@ -9232,6 +12444,12 @@ int dhd_os_wake_lock_restore(dhd_pub_t *pub) * we need to make it up by calling wake_lock or pm_stay_awake. 
or if somebody releases * the lock in between, do the same by calling wake_unlock or pm_relax */ +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) { #ifdef CONFIG_HAS_WAKELOCK wake_lock(&dhd->wl_wifi); @@ -9240,16 +12458,64 @@ int dhd_os_wake_lock_restore(dhd_pub_t *pub) #endif } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) { #ifdef CONFIG_HAS_WAKELOCK - wake_unlock(&dhd->wl_wifi); -#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) - dhd_bus_dev_pm_relax(&dhd->pub); -#endif - } - dhd->wakelock_before_waive = 0; -exit: - ret = dhd->wakelock_wd_counter; - spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); - return ret; + wake_unlock(&dhd->wl_wifi); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_relax(&dhd->pub); +#endif + } + dhd->wakelock_before_waive = 0; +exit: + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + return ret; +} + +void dhd_os_wake_lock_init(struct dhd_info *dhd) +{ + DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__)); + dhd->wakelock_event_counter = 0; + dhd->wakelock_counter = 0; + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; +#ifdef CONFIG_HAS_WAKELOCK + // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry + wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake"); + wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake"); + wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake"); +#ifdef BCMPCIE_OOB_HOST_WAKE + wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake"); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, 
"wlan_scan_wake"); +#endif /* DHD_USE_SCAN_WAKELOCK */ +#endif /* CONFIG_HAS_WAKELOCK */ +#ifdef DHD_TRACE_WAKE_LOCK + dhd_wk_lock_trace_init(dhd); +#endif /* DHD_TRACE_WAKE_LOCK */ +} + +void dhd_os_wake_lock_destroy(struct dhd_info *dhd) +{ + DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__)); +#ifdef CONFIG_HAS_WAKELOCK + dhd->wakelock_event_counter = 0; + dhd->wakelock_counter = 0; + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry + wake_lock_destroy(&dhd->wl_rxwake); + wake_lock_destroy(&dhd->wl_ctrlwake); + wake_lock_destroy(&dhd->wl_evtwake); +#ifdef BCMPCIE_OOB_HOST_WAKE + wake_lock_destroy(&dhd->wl_intrwake); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + wake_lock_destroy(&dhd->wl_scanwake); +#endif /* DHD_USE_SCAN_WAKELOCK */ +#ifdef DHD_TRACE_WAKE_LOCK + dhd_wk_lock_trace_deinit(dhd); +#endif /* DHD_TRACE_WAKE_LOCK */ +#endif /* CONFIG_HAS_WAKELOCK */ } bool dhd_os_check_if_up(dhd_pub_t *pub) @@ -9333,36 +12599,52 @@ int dhd_get_instance(dhd_pub_t *dhdp) void dhd_wlfc_plat_init(void *dhd) { +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ return; } void dhd_wlfc_plat_deinit(void *dhd) { +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ return; } bool dhd_wlfc_skip_fc(void) { +#ifdef SKIP_WLFC_ON_CONCURRENT +#ifdef WL_CFG80211 + + /* enable flow control in vsdb mode */ + return !(wl_cfg80211_is_concurrent_mode()); +#else + return TRUE; /* skip flow control */ +#endif /* WL_CFG80211 */ + +#else return FALSE; +#endif /* SKIP_WLFC_ON_CONCURRENT */ } #endif /* PROP_TXSTATUS */ #ifdef BCMDBGFS - #include -extern uint32 dhd_readregl(void *bp, uint32 addr); -extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 
data); - typedef struct dhd_dbgfs { struct dentry *debugfs_dir; struct dentry *debugfs_mem; - dhd_pub_t *dhdp; - uint32 size; + dhd_pub_t *dhdp; + uint32 size; } dhd_dbgfs_t; dhd_dbgfs_t g_dbgfs; +extern uint32 dhd_readregl(void *bp, uint32 addr); +extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data); + static int dhd_dbg_state_open(struct inode *inode, struct file *file) { @@ -9461,14 +12743,11 @@ static void dhd_dbg_create(void) void dhd_dbg_init(dhd_pub_t *dhdp) { - int err; - g_dbgfs.dhdp = dhdp; g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */ g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0); if (IS_ERR(g_dbgfs.debugfs_dir)) { - err = PTR_ERR(g_dbgfs.debugfs_dir); g_dbgfs.debugfs_dir = NULL; return; } @@ -9484,9 +12763,8 @@ void dhd_dbg_remove(void) debugfs_remove(g_dbgfs.debugfs_dir); bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs)); - } -#endif /* ifdef BCMDBGFS */ +#endif /* BCMDBGFS */ #ifdef WLMEDIA_HTSF @@ -9585,10 +12863,9 @@ void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf) if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) { memcpy(&old_magic, p1+78, 2); htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4); - } - else + } else { return; - + } if (htsf_ts->magic == HTSFMAGIC) { htsf_ts->tE0 = dhd_get_htsf(dhd, 0); htsf_ts->cE0 = get_cycles(); @@ -9631,9 +12908,9 @@ uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx) t = get_cycles(); cur_cycle = t; - if (cur_cycle > dhd->htsf.last_cycle) + if (cur_cycle > dhd->htsf.last_cycle) { delta = cur_cycle - dhd->htsf.last_cycle; - else { + } else { delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle); } @@ -9646,8 +12923,7 @@ uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx) baseval2 = (delta*10)/(factor+1); delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10); htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY; - } - else { + } else { DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n")); } @@ -9770,9 +13046,9 @@ void htsf_update(dhd_info_t *dhd, void *data) if 
(cur_tsf.high > prev_tsf.high) { tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low); DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta)); - } - else + } else { return; /* do not update */ + } } if (tsf_delta) { @@ -9789,13 +13065,12 @@ void htsf_update(dhd_info_t *dhd, void *data) if (dec1 == 9) { dec1 = 0; hfactor++; - } - else { + } else { dec1++; } - } - else + } else { dec2++; + } } } @@ -9806,8 +13081,7 @@ void htsf_update(dhd_info_t *dhd, void *data) dhd->htsf.last_tsf = cur_tsf.low; dhd->htsf.coefdec1 = dec1; dhd->htsf.coefdec2 = dec2; - } - else { + } else { htsf = prev_tsf.low; } } @@ -9866,51 +13140,390 @@ void dhd_set_cpucore(dhd_pub_t *dhd, int set) return; } #endif /* CUSTOM_SET_CPUCORE */ -#if defined(DHD_TCP_WINSIZE_ADJUST) -static int dhd_port_list_match(int port) + +/* Get interface specific ap_isolate configuration */ +int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx) { - int i; - for (i = 0; i < MAX_TARGET_PORTS; i++) { - if (target_ports[i] == port) - return 1; - } + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + return ifp->ap_isolate; +} + +/* Set interface specific ap_isolate configuration */ +int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ifp->ap_isolate = val; + return 0; } -static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb) + +#ifdef DHD_FW_COREDUMP + + +#ifdef CUSTOMER_HW4_DEBUG +#ifdef PLATFORM_SLP +#define MEMDUMPINFO "/opt/etc/.memdump.info" +#else +#define MEMDUMPINFO "/data/.memdump.info" +#endif /* PLATFORM_SLP */ +#elif defined(CUSTOMER_HW2) +#define MEMDUMPINFO "/data/misc/wifi/.memdump.info" +#else +#define MEMDUMPINFO "/installmedia/.memdump.info" +#endif /* CUSTOMER_HW4_DEBUG */ + +void dhd_get_memdump_info(dhd_pub_t *dhd) +{ + struct file *fp = NULL; + uint32 mem_val = DUMP_MEMFILE_MAX; + int 
ret = 0; + char *filepath = MEMDUMPINFO; + + /* Read memdump info from the file */ + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + goto done; + } else { + ret = kernel_read(fp, 0, (char *)&mem_val, 4); + if (ret < 0) { + DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); + filp_close(fp, NULL); + goto done; + } + + mem_val = bcm_atoi((char *)&mem_val); + + DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, mem_val)); + filp_close(fp, NULL); + } + +done: +#ifdef CUSTOMER_HW4_DEBUG + dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED; +#else + dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE_BUGON; +#endif /* CUSTOMER_HW4_DEBUG */ +} + + +void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size) +{ + dhd_dump_t *dump = NULL; + dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t)); + if (dump == NULL) { + DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__)); + return; + } + dump->buf = buf; + dump->bufsize = size; + +#if defined(CONFIG_ARM64) + DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__, + (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size)); +#elif defined(__ARM_ARCH_7A__) + DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__, + (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size)); +#endif /* __ARM_ARCH_7A__ */ + if (dhdp->memdump_enabled == DUMP_MEMONLY) { + BUG_ON(1); + } + +#ifdef DHD_LOG_DUMP + if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) { + dhd_schedule_log_dump(dhdp); + } +#endif /* DHD_LOG_DUMP */ + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump, + DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WORK_PRIORITY_HIGH); +} +static void +dhd_mem_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_dump_t *dump = event_info; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", 
__FUNCTION__)); + return; + } + + if (!dump) { + DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__)); + return; + } + + if (write_to_file(&dhd->pub, dump->buf, dump->bufsize)) { + DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__)); + } + + if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON && +#ifdef DHD_LOG_DUMP + dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP && +#endif + TRUE) { + BUG_ON(1); + } + MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t)); +} +#endif /* DHD_FW_COREDUMP */ + +#ifdef DHD_LOG_DUMP +static void +dhd_log_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + if (do_dhd_log_dump(&dhd->pub)) { + DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__)); + return; + } +} + +void dhd_schedule_log_dump(dhd_pub_t *dhdp) +{ + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + (void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP, + dhd_log_dump, DHD_WORK_PRIORITY_HIGH); +} + +static int +do_dhd_log_dump(dhd_pub_t *dhdp) +{ + int ret = 0; + struct file *fp = NULL; + mm_segment_t old_fs; + loff_t pos = 0; + char dump_path[128]; + char common_info[1024]; + struct timeval curtime; + uint32 file_mode; + unsigned long flags = 0; + + if (!dhdp) { + return -1; + } + + /* Building the additional information like DHD, F/W version */ + memset(common_info, 0, sizeof(common_info)); + snprintf(common_info, sizeof(common_info), + "---------- Common information ----------\n" + "DHD version: %s\n" + "F/W version: %s\n" + "----------------------------------------\n", + dhd_version, fw_version); + + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* Init file name */ + memset(dump_path, 0, sizeof(dump_path)); + do_gettimeofday(&curtime); + snprintf(dump_path, sizeof(dump_path), "%s_%ld.%ld", + DHD_COMMON_DUMP_PATH "debug_dump", + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + file_mode 
= O_CREAT | O_WRONLY | O_SYNC; + + DHD_ERROR(("debug_dump_path = %s\n", dump_path)); + fp = filp_open(dump_path, file_mode, 0644); + if (IS_ERR(fp)) { + ret = PTR_ERR(fp); + DHD_ERROR(("open file error, err = %d\n", ret)); + ret = -1; + goto exit; + } + + fp->f_op->write(fp, common_info, strlen(common_info), &pos); + if (dhdp->dld_buf.wraparound) { + fp->f_op->write(fp, dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE, &pos); + } else { + fp->f_op->write(fp, dhdp->dld_buf.buffer, + (int)(dhdp->dld_buf.present - dhdp->dld_buf.front), &pos); + } + + /* re-init dhd_log_dump_buf structure */ + spin_lock_irqsave(&dhdp->dld_buf.lock, flags); + dhdp->dld_buf.wraparound = 0; + dhdp->dld_buf.present = dhdp->dld_buf.front; + dhdp->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE; + bzero(dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE); + spin_unlock_irqrestore(&dhdp->dld_buf.lock, flags); +exit: + if (!ret) { + filp_close(fp, NULL); + } + set_fs(old_fs); + + return ret; +} +#endif /* DHD_LOG_DUMP */ + +#ifdef BCMASSERT_LOG +#ifdef CUSTOMER_HW4_DEBUG +#ifdef PLATFORM_SLP +#define ASSERTINFO "/opt/etc/.assert.info" +#else +#define ASSERTINFO "/data/.assert.info" +#endif /* PLATFORM_SLP */ +#elif defined(CUSTOMER_HW2) +#define ASSERTINFO "/data/misc/wifi/.assert.info" +#else +#define ASSERTINFO "/installmedia/.assert.info" +#endif /* CUSTOMER_HW4_DEBUG */ +void dhd_get_assert_info(dhd_pub_t *dhd) +{ + struct file *fp = NULL; + char *filepath = ASSERTINFO; + + /* + * Read assert info from the file + * 0: Trigger Kernel crash by panic() + * 1: Print out the logs and don't trigger Kernel panic. (default) + * 2: Trigger Kernel crash by BUG() + * File doesn't exist: Keep default value (1). 
+ */ + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + } else { + int mem_val = 0; + int ret = kernel_read(fp, 0, (char *)&mem_val, 4); + if (ret < 0) { + DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); + } else { + mem_val = bcm_atoi((char *)&mem_val); + DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val)); + g_assert_type = mem_val; + } + filp_close(fp, NULL); + } +} +#endif /* BCMASSERT_LOG */ + + +#ifdef DHD_WMF +/* Returns interface specific WMF configuration */ +dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + return &ifp->wmf; +} +#endif /* DHD_WMF */ + + +#if defined(DHD_L2_FILTER) +bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac) +{ + return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE; +} +#endif + +#ifdef DHD_L2_FILTER +arp_table_t* +dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(bssidx < DHD_MAX_IFS); + + ifp = dhd->iflist[bssidx]; + return ifp->phnd_arp_table; +} + +int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + if (ifp) + return ifp->parp_enable; + else + return FALSE; +} + +/* Set interface specific proxy arp configuration */ +int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + if (!ifp) + return BCME_ERROR; + + /* At present all 3 variables are being + * handled at once + */ + ifp->parp_enable = val; + ifp->parp_discard = val; + ifp->parp_allnode = !val; + + /* Flush ARP entries when disabled */ + if (val == FALSE) { + bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, 
NULL, + FALSE, dhdp->tickcnt); + } + return BCME_OK; +} + +bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + return ifp->parp_discard; +} + +bool +dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx) { - struct iphdr *ipheader; - struct tcphdr *tcpheader; - uint16 win_size; - int32 incremental_checksum; + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; - if (!(op_mode & DHD_FLAG_HOSTAP_MODE)) - return; - if (skb == NULL || skb->data == NULL) - return; + ASSERT(idx < DHD_MAX_IFS); - ipheader = (struct iphdr*)(skb->data); - - if (ipheader->protocol == IPPROTO_TCP) { - tcpheader = (struct tcphdr*) skb_pull(skb, (ipheader->ihl)<<2); - if (tcpheader) { - win_size = ntoh16(tcpheader->window); - if (win_size < MIN_TCP_WIN_SIZE && - dhd_port_list_match(ntoh16(tcpheader->dest))) { - incremental_checksum = ntoh16(tcpheader->check); - incremental_checksum += win_size - win_size*WIN_SIZE_SCALE_FACTOR; - if (incremental_checksum < 0) - --incremental_checksum; - tcpheader->window = hton16(win_size*WIN_SIZE_SCALE_FACTOR); - tcpheader->check = hton16((unsigned short)incremental_checksum); - } - } - skb_push(skb, (ipheader->ihl)<<2); - } + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->parp_allnode; } -#endif /* DHD_TCP_WINSIZE_ADJUST */ -/* Get interface specific ap_isolate configuration */ -int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx) +int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx) { dhd_info_t *dhd = dhdp->info; dhd_if_t *ifp; @@ -9919,27 +13532,25 @@ int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx) ifp = dhd->iflist[idx]; - return ifp->ap_isolate; + ASSERT(ifp); + + return ifp->dhcp_unicast; } -/* Set interface specific ap_isolate configuration */ -int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val) +int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val) { dhd_info_t *dhd = 
dhdp->info; dhd_if_t *ifp; - ASSERT(idx < DHD_MAX_IFS); - ifp = dhd->iflist[idx]; - ifp->ap_isolate = val; + ASSERT(ifp); - return 0; + ifp->dhcp_unicast = val; + return BCME_OK; } -#ifdef DHD_WMF -/* Returns interface specific WMF configuration */ -dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx) +int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx) { dhd_info_t *dhd = dhdp->info; dhd_if_t *ifp; @@ -9947,201 +13558,101 @@ dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx) ASSERT(idx < DHD_MAX_IFS); ifp = dhd->iflist[idx]; - return &ifp->wmf; -} -#endif /* DHD_WMF */ + ASSERT(ifp); -#ifdef DHD_UNICAST_DHCP -static int -dhd_get_pkt_ether_type(dhd_pub_t *pub, void *pktbuf, - uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr) -{ - uint8 *frame = PKTDATA(pub->osh, pktbuf); - int length = PKTLEN(pub->osh, pktbuf); - uint8 *pt; /* Pointer to type field */ - uint16 ethertype; - bool snap = FALSE; - /* Process Ethernet II or SNAP-encapsulated 802.3 frames */ - if (length < ETHER_HDR_LEN) { - DHD_ERROR(("dhd: %s: short eth frame (%d)\n", - __FUNCTION__, length)); - return BCME_ERROR; - } else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) { - /* Frame is Ethernet II */ - pt = frame + ETHER_TYPE_OFFSET; - } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN && - !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) { - pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN; - snap = TRUE; - } else { - DHD_INFO(("DHD: %s: non-SNAP 802.3 frame\n", - __FUNCTION__)); - return BCME_ERROR; - } - - ethertype = ntoh16_ua(pt); + return ifp->block_ping; +} - /* Skip VLAN tag, if any */ - if (ethertype == ETHER_TYPE_8021Q) { - pt += VLAN_TAG_LEN; +int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; - if ((pt + ETHER_TYPE_LEN) > (frame + length)) { - DHD_ERROR(("dhd: %s: short VLAN frame (%d)\n", - __FUNCTION__, length)); - 
return BCME_ERROR; - } + ASSERT(ifp); - ethertype = ntoh16_ua(pt); - } + ifp->block_ping = val; - *data_ptr = pt + ETHER_TYPE_LEN; - *len_ptr = length - (pt + ETHER_TYPE_LEN - frame); - *et_ptr = ethertype; - *snap_ptr = snap; return BCME_OK; } -static int -dhd_get_pkt_ip_type(dhd_pub_t *pub, void *pktbuf, - uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr) -{ - struct ipv4_hdr *iph; /* IP frame pointer */ - int iplen; /* IP frame length */ - uint16 ethertype, iphdrlen, ippktlen; - uint16 iph_frag; - uint8 prot; - bool snap; - - if (dhd_get_pkt_ether_type(pub, pktbuf, (uint8 **)&iph, - &iplen, ðertype, &snap) != 0) - return BCME_ERROR; - - if (ethertype != ETHER_TYPE_IP) { - return BCME_ERROR; - } +int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; - /* We support IPv4 only */ - if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) { - return BCME_ERROR; - } + ASSERT(idx < DHD_MAX_IFS); - /* Header length sanity */ - iphdrlen = IPV4_HLEN(iph); + ifp = dhd->iflist[idx]; - /* - * Packet length sanity; sometimes we receive eth-frame size bigger - * than the IP content, which results in a bad tcp chksum - */ - ippktlen = ntoh16(iph->tot_len); - if (ippktlen < iplen) { - - DHD_INFO(("%s: extra frame length ignored\n", - __FUNCTION__)); - iplen = ippktlen; - } else if (ippktlen > iplen) { - DHD_ERROR(("dhd: %s: truncated IP packet (%d)\n", - __FUNCTION__, ippktlen - iplen)); - return BCME_ERROR; - } + ASSERT(ifp); - if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) { - DHD_ERROR(("DHD: %s: IP-header-len (%d) out of range (%d-%d)\n", - __FUNCTION__, iphdrlen, IPV4_OPTIONS_OFFSET, iplen)); - return BCME_ERROR; - } + return ifp->grat_arp; +} - /* - * We don't handle fragmented IP packets. A first frag is indicated by the MF - * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset. 
- */ - iph_frag = ntoh16(iph->frag); +int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; - if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) { - DHD_INFO(("DHD:%s: IP fragment not handled\n", - __FUNCTION__)); - return BCME_ERROR; - } + ASSERT(ifp); - prot = IPV4_PROT(iph); + ifp->grat_arp = val; - *data_ptr = (((uint8 *)iph) + iphdrlen); - *len_ptr = iplen - iphdrlen; - *prot_ptr = prot; return BCME_OK; } +#endif /* DHD_L2_FILTER */ -/** check the packet type, if it is DHCP ACK/REPLY, convert into unicast packet */ -static -int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx) -{ - dhd_sta_t* stainfo; - uint8 *eh = PKTDATA(pub->osh, pktbuf); - uint8 *udph; - uint8 *dhcp; - uint8 *chaddr; - int udpl; - int dhcpl; - uint16 port; - uint8 prot; - - if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET)) - return BCME_ERROR; - if (dhd_get_pkt_ip_type(pub, pktbuf, &udph, &udpl, &prot) != 0) - return BCME_ERROR; - if (prot != IP_PROT_UDP) - return BCME_ERROR; - /* check frame length, at least UDP_HDR_LEN */ - if (udpl < UDP_HDR_LEN) { - DHD_ERROR(("DHD: %s: short UDP frame, ignored\n", - __FUNCTION__)); - return BCME_ERROR; - } - port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET); - /* only process DHCP packets from server to client */ - if (port != DHCP_PORT_CLIENT) - return BCME_ERROR; - dhcp = udph + UDP_HDR_LEN; - dhcpl = udpl - UDP_HDR_LEN; +#if defined(SET_RPS_CPUS) +int dhd_rps_cpus_enable(struct net_device *net, int enable) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_if_t *ifp; + int ifidx; + char * RPS_CPU_SETBUF; - if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) { - DHD_ERROR(("DHD: %s: short DHCP frame, ignored\n", - __FUNCTION__)); - return BCME_ERROR; + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; } - /* only 
process DHCP reply(offer/ack) packets */ - if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY) - return BCME_ERROR; - chaddr = dhcp + DHCP_CHADDR_OFFSET; - stainfo = dhd_find_sta(pub, ifidx, chaddr); - if (stainfo) { - bcopy(chaddr, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN); - return BCME_OK; + + if (ifidx == PRIMARY_INF) { + if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) { + DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__)); + RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS; + } else { + DHD_INFO(("%s : set for BSS.\n", __FUNCTION__)); + RPS_CPU_SETBUF = RPS_CPUS_MASK; + } + } else if (ifidx == VIRTUAL_INF) { + DHD_INFO(("%s : set for P2P.\n", __FUNCTION__)); + RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P; + } else { + DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx)); + return -EINVAL; } - return BCME_ERROR; -} -#endif /* DHD_UNICAST_DHD */ -#ifdef DHD_L2_FILTER -/* Check if packet type is ICMP ECHO */ -static -int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx) -{ - struct bcmicmp_hdr *icmph; - int udpl; - uint8 prot; - if (dhd_get_pkt_ip_type(pub, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0) - return BCME_ERROR; - if (prot == IP_PROT_ICMP) { - if (icmph->type == ICMP_TYPE_ECHO_REQUEST) - return BCME_OK; + ifp = dhd->iflist[ifidx]; + if (ifp) { + if (enable) { + DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF)); + custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF)); + } else { + custom_rps_map_clear(ifp->net->_rx); + } + } else { + DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__)); + return -ENODEV; } - return BCME_ERROR; + return BCME_OK; } -#endif /* DHD_L2_FILTER */ -#ifdef SET_RPS_CPUS int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len) { struct rps_map *old_map, *map; @@ -10173,15 +13684,18 @@ int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len) } i = 0; - for_each_cpu(cpu, mask) + for_each_cpu(cpu, mask) { map->cpus[i++] = cpu; + } - if (i) + if (i) { map->len = 
i; - else { + } else { kfree(map); - DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__)); map = NULL; + free_cpumask_var(mask); + DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__)); + return -1; } spin_lock(&rps_map_lock); @@ -10190,8 +13704,9 @@ int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len) rcu_assign_pointer(queue->rps_map, map); spin_unlock(&rps_map_lock); - if (map) + if (map) { static_key_slow_inc(&rps_needed); + } if (old_map) { kfree_rcu(old_map, rcu); static_key_slow_dec(&rps_needed); @@ -10215,187 +13730,352 @@ void custom_rps_map_clear(struct netdev_rx_queue *queue) DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__)); } } -#endif /* SET_RPS_CPUS */ +#endif + + + +#ifdef DHD_DEBUG_PAGEALLOC -#if defined(CUSTOMER_HW20) && defined(WLANAUDIO) void -SDA_setSharedMemory4Send(unsigned int buffer_id, - unsigned char *buffer, unsigned int buffer_size, - unsigned int packet_size, unsigned int headroom_size) +dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len) { - dhd_info_t *dhd = dhd_global; + dhd_pub_t *dhdp = (dhd_pub_t *)handle; - sda_packet_length = packet_size; + DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n", + __FUNCTION__, addr_corrupt, (uint32)len)); - ASSERT(dhd); - if (dhd == NULL) - return; + DHD_OS_WAKE_LOCK(dhdp); + prhex("Page Corruption:", addr_corrupt, len); + dhd_dump_to_kernelog(dhdp); +#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + /* Load the dongle side dump to host memory and then BUG_ON() */ + dhdp->memdump_enabled = DUMP_MEMONLY; + dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION; + dhd_bus_mem_dump(dhdp); +#endif /* BCMPCIE && DHD_FW_COREDUMP */ + DHD_OS_WAKE_UNLOCK(dhdp); } +EXPORT_SYMBOL(dhd_page_corrupt_cb); +#endif /* DHD_DEBUG_PAGEALLOC */ +#ifdef DHD_PKTID_AUDIT_ENABLED void -SDA_registerCallback4SendDone(SDA_SendDoneCallBack packet_cb) +dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp) { - dhd_info_t *dhd = dhd_global; - - ASSERT(dhd); - if (dhd == NULL) - return; + 
DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__)); + DHD_OS_WAKE_LOCK(dhdp); + dhd_dump_to_kernelog(dhdp); +#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + /* Load the dongle side dump to host memory and then BUG_ON() */ + dhdp->memdump_enabled = DUMP_MEMFILE_BUGON; + dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE; + dhd_bus_mem_dump(dhdp); +#endif /* BCMPCIE && DHD_FW_COREDUMP */ + DHD_OS_WAKE_UNLOCK(dhdp); } +#endif /* DHD_PKTID_AUDIT_ENABLED */ +/* ---------------------------------------------------------------------------- + * Infrastructure code for sysfs interface support for DHD + * + * What is sysfs interface? + * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt + * + * Why sysfs interface? + * This is the Linux standard way of changing/configuring Run Time parameters + * for a driver. We can use this interface to control "linux" specific driver + * parameters. + * + * ----------------------------------------------------------------------------- + */ -unsigned long long -SDA_getTsf(unsigned char vif_id) -{ - dhd_info_t *dhd = dhd_global; - uint64 tsf_val; - char buf[WLC_IOCTL_SMLEN]; - int ifidx = 0; +#include +#include - struct tsf { - uint32 low; - uint32 high; - } tsf_buf; +#if defined(DHD_TRACE_WAKE_LOCK) - memset(buf, 0, sizeof(buf)); +/* Function to show the history buffer */ +static ssize_t +show_wklock_trace(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + dhd_info_t *dhd = (dhd_info_t *)dev; - if (vif_id == 0) /* wlan0 tsf */ - ifidx = dhd_ifname2idx(dhd, "wlan0"); - else if (vif_id == 1) /* p2p0 tsf */ - ifidx = dhd_ifname2idx(dhd, "p2p0"); + buf[ret] = '\n'; + buf[ret+1] = 0; - bcm_mkiovar("tsf_bss", 0, 0, buf, sizeof(buf)); + dhd_wk_lock_stats_dump(&dhd->pub); + return ret+1; +} - if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifidx) < 0) { - DHD_ERROR(("%s wl ioctl error\n", __FUNCTION__)); - return 0; +/* Function to enable/disable wakelock trace */ +static ssize_t 
+wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + unsigned long flags; + dhd_info_t *dhd = (dhd_info_t *)dev; + + onoff = bcm_strtoul(buf, NULL, 10); + if (onoff != 0 && onoff != 1) { + return -EINVAL; } - memcpy(&tsf_buf, buf, sizeof(tsf_buf)); - tsf_val = (uint64)tsf_buf.high; - DHD_TRACE(("%s tsf high 0x%08x, low 0x%08x\n", - __FUNCTION__, tsf_buf.high, tsf_buf.low)); + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + trace_wklock_onoff = onoff; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + if (trace_wklock_onoff) { + printk("ENABLE WAKLOCK TRACE\n"); + } else { + printk("DISABLE WAKELOCK TRACE\n"); + } - return ((tsf_val << 32) | tsf_buf.low); + return (ssize_t)(onoff+1); } -EXPORT_SYMBOL(SDA_getTsf); +#endif /* DHD_TRACE_WAKE_LOCK */ -unsigned int -SDA_syncTsf(void) +/* + * Generic Attribute Structure for DHD. + * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have + * to instantiate an object of type dhd_attr, populate it with + * the required show/store functions (ex:- dhd_attr_cpumask_primary) + * and add the object to default_attrs[] array, that gets registered + * to the kobject of dhd (named bcm-dhd). 
+ */ + +struct dhd_attr { + struct attribute attr; + ssize_t(*show)(struct dhd_info *, char *); + ssize_t(*store)(struct dhd_info *, const char *, size_t count); +}; + +#if defined(DHD_TRACE_WAKE_LOCK) +static struct dhd_attr dhd_attr_wklock = + __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff); +#endif /* defined(DHD_TRACE_WAKE_LOCK */ + +/* Attribute object that gets registered with "bcm-dhd" kobject tree */ +static struct attribute *default_attrs[] = { +#if defined(DHD_TRACE_WAKE_LOCK) + &dhd_attr_wklock.attr, +#endif + NULL +}; + +#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj) +#define to_attr(a) container_of(a, struct dhd_attr, attr) + +/* + * bcm-dhd kobject show function, the "attr" attribute specifices to which + * node under "bcm-dhd" the show function is called. + */ +static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf) { - dhd_info_t *dhd = dhd_global; - int tsf_sync = 1; - char iovbuf[WLC_IOCTL_SMLEN]; + dhd_info_t *dhd = to_dhd(kobj); + struct dhd_attr *d_attr = to_attr(attr); + int ret; - bcm_mkiovar("wa_tsf_sync", (char *)&tsf_sync, 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + if (d_attr->show) + ret = d_attr->show(dhd, buf); + else + ret = -EIO; - DHD_TRACE(("%s\n", __FUNCTION__)); - return 0; + return ret; } -extern struct net_device *wl0dot1_dev; -void -BCMFASTPATH SDA_function4Send(uint buffer_id, void *packet, uint packet_size) +/* + * bcm-dhd kobject show function, the "attr" attribute specifices to which + * node under "bcm-dhd" the store function is called. 
+ */ +static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) { - struct sk_buff *skb; - sda_packet_t *shm_packet = packet; - dhd_info_t *dhd = dhd_global; - int cnt; + dhd_info_t *dhd = to_dhd(kobj); + struct dhd_attr *d_attr = to_attr(attr); + int ret; - static unsigned int cnt_t = 1; + if (d_attr->store) + ret = d_attr->store(dhd, buf, count); + else + ret = -EIO; - ASSERT(dhd); - if (dhd == NULL) - return; + return ret; - if (dhd->is_wlanaudio_blist) { - for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) { - if (dhd->wlanaudio_blist[cnt].is_blacklist == true) { - if (!bcmp(dhd->wlanaudio_blist[cnt].blacklist_addr.octet, - shm_packet->headroom.ether_dhost, ETHER_ADDR_LEN)) - return; - } - } - } +} + +static struct sysfs_ops dhd_sysfs_ops = { + .show = dhd_show, + .store = dhd_store, +}; - if ((cnt_t % 10000) == 0) - cnt_t = 0; +static struct kobj_type dhd_ktype = { + .sysfs_ops = &dhd_sysfs_ops, + .default_attrs = default_attrs, +}; - cnt_t++; +/* Create a kobject and attach to sysfs interface */ +static int dhd_sysfs_init(dhd_info_t *dhd) +{ + int ret = -1; - /* packet_size may be smaller than SDA_SHM_PKT_SIZE, remaining will be garbage */ -#define TXOFF 26 - skb = __dev_alloc_skb(TXOFF + sda_packet_length - SDA_PKT_HEADER_SIZE, GFP_ATOMIC); + if (dhd == NULL) { + DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__)); + return ret; + } - skb_reserve(skb, TXOFF - SDA_HEADROOM_SIZE); - skb_put(skb, sda_packet_length - SDA_PKT_HEADER_SIZE + SDA_HEADROOM_SIZE); - skb->priority = PRIO_8021D_VO; /* PRIO_8021D_VO or PRIO_8021D_VI */ + /* Initialize the kobject */ + ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd"); + if (ret) { + kobject_put(&dhd->dhd_kobj); + DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__)); + return ret; + } - /* p2p_net */ - skb->dev = wl0dot1_dev; - shm_packet->txTsf = 0x0; - shm_packet->rxTsf = 0x0; - memcpy(skb->data, &shm_packet->headroom, - sda_packet_length 
- OFFSETOF(sda_packet_t, headroom)); - shm_packet->desc.ready_to_copy = 0; + /* + * We are always responsible for sending the uevent that the kobject + * was added to the system. + */ + kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD); - dhd_start_xmit(skb, skb->dev); + return ret; } -void -SDA_registerCallback4Recv(unsigned char *pBufferTotal, - unsigned int BufferTotalSize) +/* Done with the kobject and detach the sysfs interface */ +static void dhd_sysfs_exit(dhd_info_t *dhd) { - dhd_info_t *dhd = dhd_global; - - ASSERT(dhd); - if (dhd == NULL) + if (dhd == NULL) { + DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__)); return; -} + } + /* Releae the kobject */ + kobject_put(&dhd->dhd_kobj); +} +#ifdef DHD_LOG_DUMP void -SDA_setSharedMemory4Recv(unsigned char *pBufferTotal, - unsigned int BufferTotalSize, - unsigned int BufferUnitSize, - unsigned int Headroomsize) +dhd_log_dump_init(dhd_pub_t *dhd) { - dhd_info_t *dhd = dhd_global; + spin_lock_init(&dhd->dld_buf.lock); +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + dhd->dld_buf.buffer = DHD_OS_PREALLOC(dhd, + DHD_PREALLOC_DHD_LOG_DUMP_BUF, DHD_LOG_DUMP_BUFFER_SIZE); +#else + dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ - ASSERT(dhd); - if (dhd == NULL) - return; + if (!dhd->dld_buf.buffer) { + dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL); + DHD_ERROR(("Try to allocate memory using kmalloc().\n")); + + if (!dhd->dld_buf.buffer) { + DHD_ERROR(("Failed to allocate memory for dld_buf.\n")); + return; + } + } + + dhd->dld_buf.wraparound = 0; + dhd->dld_buf.max = (unsigned long)dhd->dld_buf.buffer + DHD_LOG_DUMP_BUFFER_SIZE; + dhd->dld_buf.present = dhd->dld_buf.buffer; + dhd->dld_buf.front = dhd->dld_buf.buffer; + dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE; + dhd->dld_enable = 1; } +void +dhd_log_dump_deinit(dhd_pub_t *dhd) +{ + dhd->dld_enable = 0; +#if 
defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhd, + dhd->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE); +#else + kfree(dhd->dld_buf.buffer); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ +} void -SDA_function4RecvDone(unsigned char * pBuffer, unsigned int BufferSize) +dhd_log_dump_print(const char *fmt, ...) { - dhd_info_t *dhd = dhd_global; + int len = 0; + char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, }; + va_list args; + dhd_pub_t *dhd = NULL; + unsigned long flags = 0; - ASSERT(dhd); - if (dhd == NULL) + if (wl_get_bcm_cfg80211_ptr()) { + dhd = (dhd_pub_t*)(wl_get_bcm_cfg80211_ptr()->pub); + } + + if (!dhd || dhd->dld_enable != 1) { + return; + } + + va_start(args, fmt); + + len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args); + if (len < 0) { return; + } + + /* make a critical section to eliminate race conditions */ + spin_lock_irqsave(&dhd->dld_buf.lock, flags); + if (dhd->dld_buf.remain < len) { + dhd->dld_buf.wraparound = 1; + dhd->dld_buf.present = dhd->dld_buf.front; + dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE; + } + + strncpy(dhd->dld_buf.present, tmp_buf, len); + dhd->dld_buf.remain -= len; + dhd->dld_buf.present += len; + spin_unlock_irqrestore(&dhd->dld_buf.lock, flags); + + /* double check invalid memory operation */ + ASSERT((unsigned long)dhd->dld_buf.present <= dhd->dld_buf.max); + va_end(args); +} + +char* +dhd_log_dump_get_timestamp(void) +{ + static char buf[16]; + u64 ts_nsec; + unsigned long rem_nsec; + + ts_nsec = local_clock(); + rem_nsec = do_div(ts_nsec, 1000000000); + snprintf(buf, sizeof(buf), "%5lu.%06lu", + (unsigned long)ts_nsec, rem_nsec / 1000); + + return buf; } -EXPORT_SYMBOL(SDA_setSharedMemory4Send); -EXPORT_SYMBOL(SDA_registerCallback4SendDone); -EXPORT_SYMBOL(SDA_syncTsf); -EXPORT_SYMBOL(SDA_function4Send); -EXPORT_SYMBOL(SDA_registerCallback4Recv); -EXPORT_SYMBOL(SDA_setSharedMemory4Recv); -EXPORT_SYMBOL(SDA_function4RecvDone); +#endif 
/* DHD_LOG_DUMP */ -#endif /* CUSTOMER_HW20 && WLANAUDIO */ +/* ---------------------------- End of sysfs implementation ------------------------------------- */ void *dhd_get_pub(struct net_device *dev) { dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev); if (dhdinfo) return (void *)&dhdinfo->pub; - else + else { + printf("%s: null dhdinfo\n", __FUNCTION__); + return NULL; + } +} + +void *dhd_get_conf(struct net_device *dev) +{ + dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev); + if (dhdinfo) + return (void *)dhdinfo->pub.conf; + else { + printf("%s: null dhdinfo\n", __FUNCTION__); return NULL; + } } bool dhd_os_wd_timer_enabled(void *bus) diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.h b/drivers/amlogic/wifi/bcmdhd/dhd_linux.h similarity index 60% rename from drivers/net/wireless/bcmdhd/dhd_linux.h rename to drivers/amlogic/wifi/bcmdhd/dhd_linux.h index f0291ae828405..79290db34dd27 100644 --- a/drivers/net/wireless/bcmdhd/dhd_linux.h +++ b/drivers/amlogic/wifi/bcmdhd/dhd_linux.h @@ -1,9 +1,30 @@ /* * DHD Linux header file (dhd_linux exports for cfg80211 and other components) * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_linux.h 399301 2013-04-29 21:41:52Z $ + * + * <> + * + * $Id: dhd_linux.h 591285 2015-10-07 11:56:29Z $ */ /* wifi platform functions for power, interrupt and pre-alloc, either @@ -32,29 +53,48 @@ #define DHD_REGISTRATION_TIMEOUT 12000 /* msec : allowed time to finished dhd registration */ -#if defined(CUSTOMER_HW) -struct wifi_platform_data { - int (*set_power)(bool val); - int (*set_carddetect)(bool val); - void *(*mem_prealloc)(int section, unsigned long size); - int (*get_mac_addr)(unsigned char *buf); - void *(*get_country_code)(char *ccode); -}; -#endif - typedef struct wifi_adapter_info { const char *name; uint irq_num; uint intr_flags; const char *fw_path; const char *nv_path; + const char *clm_path; const char *conf_path; void *wifi_plat_data; /* wifi ctrl func, for backward compatibility */ uint bus_type; uint bus_num; uint slot_num; +#ifdef BUS_POWER_RESTORE +#if defined(BCMSDIO) + struct sdio_func *sdio_func; +#endif /* BCMSDIO */ +#if defined(BCMPCIE) + struct pci_dev *pci_dev; + struct pci_saved_state *pci_saved_state; +#endif /* BCMPCIE */ +#endif } wifi_adapter_info_t; +#define WLAN_PLAT_NODFS_FLAG 0x01 +#define WLAN_PLAT_AP_FLAG 0x02 +struct wifi_platform_data { +#ifdef BUS_POWER_RESTORE + int (*set_power)(bool val, wifi_adapter_info_t *adapter); +#else + int (*set_power)(bool val); +#endif + int (*set_reset)(int val); + int (*set_carddetect)(bool val); + void *(*mem_prealloc)(int section, unsigned long size); + int (*get_mac_addr)(unsigned char *buf); +#if defined(CUSTOM_COUNTRY_CODE) + void *(*get_country_code)(char *ccode, u32 flags); +#else /* defined (CUSTOM_COUNTRY_CODE) */ + void *(*get_country_code)(char *ccode); 
+#endif +}; + typedef struct bcmdhd_wifi_platdata { uint num_adapters; wifi_adapter_info_t *adapters; @@ -62,6 +102,7 @@ typedef struct bcmdhd_wifi_platdata { /** Per STA params. A list of dhd_sta objects are managed in dhd_if */ typedef struct dhd_sta { + cumm_ctr_t cumm_ctr; /* cummulative queue length of child flowrings */ uint16 flowid[NUMPRIO]; /* allocated flow ring ids (by priority) */ void * ifp; /* associated dhd_if */ struct ether_addr ea; /* stations ethernet mac address */ @@ -79,7 +120,12 @@ int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present); int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr); int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf); +#ifdef CUSTOM_COUNTRY_CODE +void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode, + u32 flags); +#else void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode); +#endif /* CUSTOM_COUNTRY_CODE */ void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size); void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter); diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c b/drivers/amlogic/wifi/bcmdhd/dhd_linux_platdev.c similarity index 90% rename from drivers/net/wireless/bcmdhd/dhd_linux_platdev.c rename to drivers/amlogic/wifi/bcmdhd/dhd_linux_platdev.c index 463191cb91aaa..a154cb3c2e12e 100644 --- a/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_linux_platdev.c @@ -1,9 +1,30 @@ /* * Linux platform device for DHD WLAN adapter * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of 
the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_linux_platdev.c 401742 2013-05-13 15:03:21Z $ + * + * <> + * + * $Id: dhd_linux_platdev.c 591285 2015-10-07 11:56:29Z $ */ #include #include @@ -27,11 +48,8 @@ #endif /* CONFIG_DTS */ #if defined(CUSTOMER_HW) -#if defined(CUSTOMER_OOB) -extern uint bcm_wlan_get_oob_irq(void); -extern uint bcm_wlan_get_oob_irq_flags(void); -#endif -extern int bcm_wlan_set_plat_data(void); +extern int dhd_wlan_init_plat_data(void); +extern void dhd_wlan_deinit_plat_data(wifi_adapter_info_t *adapter); #endif /* CUSTOMER_HW */ #define WIFI_PLAT_NAME "bcmdhd_wlan" @@ -49,13 +67,12 @@ static bool is_power_on = FALSE; #if !defined(CONFIG_DTS) #if defined(DHD_OF_SUPPORT) static bool dts_enabled = TRUE; +extern struct resource dhd_wlan_resources; extern struct wifi_platform_data dhd_wlan_control; #else static bool dts_enabled = FALSE; struct resource dhd_wlan_resources = {0}; -#ifdef CUSTOMER_HW -struct wifi_platform_data dhd_wlan_control = {0}; -#endif +extern struct wifi_platform_data dhd_wlan_control; #endif /* !defind(DHD_OF_SUPPORT) */ #endif /* !defind(CONFIG_DTS) */ @@ -166,7 
+183,11 @@ int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long } #endif /* ENABLE_4335BT_WAR */ +#ifdef BUS_POWER_RESTORE + err = plat_data->set_power(on, adapter); +#else err = plat_data->set_power(on); +#endif } if (msec && !err) @@ -213,7 +234,12 @@ int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf) return -EOPNOTSUPP; } -void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode) +void * +#ifdef CUSTOM_COUNTRY_CODE +wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode, u32 flags) +#else +wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode) +#endif /* CUSTOM_COUNTRY_CODE */ { /* get_country_code was added after 2.6.39 */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) @@ -225,7 +251,11 @@ void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode) DHD_TRACE(("%s\n", __FUNCTION__)); if (plat_data->get_country_code) { +#ifdef CUSTOM_COUNTRY_CODE + return plat_data->get_country_code(ccode, flags); +#else return plat_data->get_country_code(ccode); +#endif /* CUSTOM_COUNTRY_CODE */ } #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */ @@ -402,6 +432,10 @@ static int wifi_ctrlfunc_register_drv(void) * DHD (either SDIO, USB or PCIe) */ adapter = kzalloc(sizeof(wifi_adapter_info_t), GFP_KERNEL); + if (adapter == NULL) { + DHD_ERROR(("%s:adapter alloc failed", __FUNCTION__)); + return -ENOMEM; + } adapter->name = "DHD generic adapter"; adapter->bus_type = -1; adapter->bus_num = -1; @@ -434,19 +468,16 @@ static int wifi_ctrlfunc_register_drv(void) #if !defined(CONFIG_DTS) if (dts_enabled) { -#ifdef CUSTOMER_HW - adapter->wifi_plat_data = (void *)&dhd_wlan_control; - bcm_wlan_set_plat_data(); -#ifdef CUSTOMER_OOB - adapter->irq_num = bcm_wlan_get_oob_irq(); - adapter->intr_flags = bcm_wlan_get_oob_irq_flags(); -#endif -#else struct resource *resource; + adapter->wifi_plat_data = (void *)&dhd_wlan_control; resource = 
&dhd_wlan_resources; +#ifdef CUSTOMER_HW + wifi_plat_dev_probe_ret = dhd_wlan_init_plat_data(); + if (wifi_plat_dev_probe_ret) + return wifi_plat_dev_probe_ret; +#endif adapter->irq_num = resource->start; adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK; -#endif wifi_plat_dev_probe_ret = dhd_wifi_platform_load(); } #endif /* !defined(CONFIG_DTS) */ @@ -462,6 +493,7 @@ static int wifi_ctrlfunc_register_drv(void) void wifi_ctrlfunc_unregister_drv(void) { + wifi_adapter_info_t *adapter; #if defined(CONFIG_DTS) && !defined(CUSTOMER_HW) DHD_ERROR(("unregister wifi platform drivers\n")); @@ -483,7 +515,6 @@ void wifi_ctrlfunc_unregister_drv(void) platform_driver_unregister(&wifi_platform_dev_driver_legacy); #endif if (dts_enabled) { - wifi_adapter_info_t *adapter; adapter = &dhd_wifi_platdata->adapters[0]; if (is_power_on) { wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); @@ -492,6 +523,10 @@ void wifi_ctrlfunc_unregister_drv(void) } #endif /* !defined(CONFIG_DTS) */ +#if defined(CUSTOMER_HW) + dhd_wlan_deinit_plat_data(adapter); +#endif + kfree(dhd_wifi_platdata->adapters); dhd_wifi_platdata->adapters = NULL; dhd_wifi_platdata->num_adapters = 0; @@ -671,7 +706,7 @@ extern int dhd_dpc_prio; extern uint dhd_deferred_tx; #if defined(BCMLXSDMMC) extern struct semaphore dhd_registration_sem; -#endif +#endif #ifdef BCMSDIO static int dhd_wifi_platform_load_sdio(void) @@ -690,7 +725,7 @@ static int dhd_wifi_platform_load_sdio(void) !(dhd_watchdog_prio >= 0 && dhd_dpc_prio >= 0 && dhd_deferred_tx)) return -EINVAL; -#if defined(BCMLXSDMMC) +#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD) if (dhd_wifi_platdata == NULL) { DHD_ERROR(("DHD wifi platform data is required for Android build\n")); return -EINVAL; @@ -793,7 +828,7 @@ static int dhd_wifi_platform_load_sdio(void) /* x86 bring-up PC needs no power-up operations */ err = dhd_bus_register(); -#endif +#endif return err; } diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_linux_sched.c 
b/drivers/amlogic/wifi/bcmdhd/dhd_linux_sched.c new file mode 100644 index 0000000000000..66eb8940ba3fb --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_linux_sched.c @@ -0,0 +1,51 @@ +/* + * Expose some of the kernel scheduler routines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_linux_sched.c 514727 2014-11-12 03:02:48Z $ + */ +#include +#include +#include +#include +#include + +int setScheduler(struct task_struct *p, int policy, struct sched_param *param) +{ + int rc = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + rc = sched_setscheduler(p, policy, param); +#endif /* LinuxVer */ + return rc; +} + +int get_scheduler_policy(struct task_struct *p) +{ + int rc = SCHED_NORMAL; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + rc = p->policy; +#endif /* LinuxVer */ + return rc; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_wq.c b/drivers/amlogic/wifi/bcmdhd/dhd_linux_wq.c similarity index 86% rename from drivers/net/wireless/bcmdhd/dhd_linux_wq.c rename to drivers/amlogic/wifi/bcmdhd/dhd_linux_wq.c index 1df1c92db1d97..d2513cc4ab0db 100644 --- a/drivers/net/wireless/bcmdhd/dhd_linux_wq.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_linux_wq.c @@ -2,9 +2,30 @@ * Broadcom Dongle Host Driver (DHD), Generic work queue framework * Generic interface to handle dhd deferred work events * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_linux_wq.c 449578 2014-01-17 13:53:20Z $ + * + * <> + * + * $Id: dhd_linux_wq.c 514727 2014-11-12 03:02:48Z $ */ #include diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_linux_wq.h b/drivers/amlogic/wifi/bcmdhd/dhd_linux_wq.h new file mode 100644 index 0000000000000..e6197b26f2115 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_linux_wq.h @@ -0,0 +1,69 @@ +/* + * Broadcom Dongle Host Driver (DHD), Generic work queue framework + * Generic interface to handle dhd deferred work events + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_linux_wq.h 597512 2015-11-05 11:37:36Z $ + */ +#ifndef _dhd_linux_wq_h_ +#define _dhd_linux_wq_h_ +/* + * Work event definitions + */ +enum _wq_event { + DHD_WQ_WORK_IF_ADD = 1, + DHD_WQ_WORK_IF_DEL, + DHD_WQ_WORK_SET_MAC, + DHD_WQ_WORK_SET_MCAST_LIST, + DHD_WQ_WORK_IPV6_NDO, + DHD_WQ_WORK_HANG_MSG, + DHD_WQ_WORK_SOC_RAM_DUMP, + DHD_WQ_WORK_DHD_LOG_DUMP, + + DHD_MAX_WQ_EVENTS +}; + +/* + * Work event priority + */ +#define DHD_WORK_PRIORITY_LOW 0 +#define DHD_WORK_PRIORITY_HIGH 1 + +/* + * Error definitions + */ +#define DHD_WQ_STS_OK 0 +#define DHD_WQ_STS_FAILED -1 /* General failure */ +#define DHD_WQ_STS_UNINITIALIZED -2 +#define DHD_WQ_STS_SCHED_FAILED -3 +#define DHD_WQ_STS_UNKNOWN_EVENT -4 + +typedef void (*event_handler_t)(void *handle, void *event_data, u8 event); + +void *dhd_deferred_work_init(void *dhd); +void dhd_deferred_work_deinit(void *workq); +int dhd_deferred_schedule_work(void *workq, void *event_data, u8 event, + event_handler_t evt_handler, u8 priority); +#endif /* _dhd_linux_wq_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_msgbuf.c b/drivers/amlogic/wifi/bcmdhd/dhd_msgbuf.c new file mode 100644 index 0000000000000..3bd9a45d7906b --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_msgbuf.c @@ -0,0 +1,6413 @@ +/** + * @file definition of host message ring functionality + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_msgbuf.c 605475 2015-12-10 12:49:49Z $ + */ + + +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + + +#include + +#include +#include +#include + +#if defined(DHD_LB) +#include +#include +#define DHD_LB_WORKQ_SZ (8192) +#define DHD_LB_WORKQ_SYNC (16) +#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2) +#endif /* DHD_LB */ + + +/** + * Host configures a soft doorbell for d2h rings, by specifying a 32bit host + * address where a value must be written. Host may also interrupt coalescing + * on this soft doorbell. + * Use Case: Hosts with network processors, may register with the dongle the + * network processor's thread wakeup register and a value corresponding to the + * core/thread context. 
Dongle will issue a write transaction + * to the PCIE RC which will need to be routed to the mapped register space, by + * the host. + */ +/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */ + +/* Dependency Check */ +#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF) +#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF" +#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */ + +#define RETRIES 2 /* # of retries to retrieve matching ioctl response */ + +#define DEFAULT_RX_BUFFERS_TO_POST 256 +#define RXBUFPOST_THRESHOLD 32 +#define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */ + +#define DHD_STOP_QUEUE_THRESHOLD 200 +#define DHD_START_QUEUE_THRESHOLD 100 + +#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 */ +#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN) +#define FLOWRING_SIZE (H2DRING_TXPOST_MAX_ITEM * H2DRING_TXPOST_ITEMSIZE) + +/* flags for ioctl pending status */ +#define MSGBUF_IOCTL_ACK_PENDING (1<<0) +#define MSGBUF_IOCTL_RESP_PENDING (1<<1) + +#define DMA_ALIGN_LEN 4 + +#define DMA_D2H_SCRATCH_BUF_LEN 8 +#define DMA_XFER_LEN_LIMIT 0x400000 + +#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192 + +#define DHD_FLOWRING_MAX_EVENTBUF_POST 8 +#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8 + +#define DHD_PROT_FUNCS 37 + +/* Length of buffer in host for bus throughput measurement */ +#define DHD_BUS_TPUT_BUF_LEN 2048 + +#define TXP_FLUSH_NITEMS + +/* optimization to write "n" tx items at a time to ring */ +#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48 + +#define RING_NAME_MAX_LENGTH 24 + + +struct msgbuf_ring; /* ring context for common and flow rings */ + +/** + * PCIE D2H DMA Complete Sync Modes + * + * Firmware may interrupt the host, prior to the D2H Mem2Mem DMA completes into + * Host system memory. A WAR using one of 3 approaches is needed: + * 1. Dongle places a modulo-253 seqnum in last word of each D2H message + * 2. XOR Checksum, with epoch# in each work item. 
Dongle builds an XOR checksum + * writes in the last word of each work item. Each work item has a seqnum + * number = sequence num % 253. + * + * 3. Read Barrier: Dongle does a host memory read access prior to posting an + * interrupt, ensuring that D2H data transfer indeed completed. + * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing + * ring contents before the indices. + * + * Host does not sync for DMA to complete with option #3 or #4, and a noop sync + * callback (see dhd_prot_d2h_sync_none) may be bound. + * + * Dongle advertizes host side sync mechanism requirements. + */ +#define PCIE_D2H_SYNC + +#if defined(PCIE_D2H_SYNC) +#define PCIE_D2H_SYNC_WAIT_TRIES (512UL) +#define PCIE_D2H_SYNC_NUM_OF_STEPS (3UL) +#define PCIE_D2H_SYNC_DELAY (50UL) /* in terms of usecs */ + +/** + * Custom callback attached based upon D2H DMA Sync mode advertized by dongle. + * + * On success: return cmn_msg_hdr_t::msg_type + * On failure: return 0 (invalid msg_type) + */ +typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +#endif /* PCIE_D2H_SYNC */ + + +/* + * +---------------------------------------------------------------------------- + * + * RingIds and FlowId are not equivalent as ringids include D2H rings whereas + * flowids do not. + * + * Dongle advertizes the max H2D rings, as max_sub_queues = 'N' which includes + * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings + * + * Here is a sample mapping for (based on PCIE Full Dongle Rev5) where, + * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings, + * BCMPCIE_COMMON_MSGRINGS = 5, i.e. include 3 D2H common rings. 
+ * + * H2D Control Submit RingId = 0 FlowId = 0 reserved never allocated + * H2D RxPost Submit RingId = 1 FlowId = 1 reserved never allocated + * + * D2H Control Complete RingId = 2 + * D2H Transmit Complete RingId = 3 + * D2H Receive Complete RingId = 4 + * + * H2D TxPost FLOWRING RingId = 5 FlowId = 2 (1st flowring) + * H2D TxPost FLOWRING RingId = 6 FlowId = 3 (2nd flowring) + * H2D TxPost FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring) + * + * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are + * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS. + * + * Example: when a system supports 4 bc/mc and 128 uc flowrings, with + * BCMPCIE_H2D_COMMON_MSGRINGS = 2, and BCMPCIE_H2D_COMMON_MSGRINGS = 5, and the + * FlowId values would be in the range [2..133] and the corresponding + * RingId values would be in the range [5..136]. + * + * The flowId allocator, may chose to, allocate Flowids: + * bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS)) + * X# of uc flowids in consecutive ranges (per station Id), where X is the + * packet's access category (e.g. 4 uc flowids per station). + * + * CAUTION: + * When DMA indices array feature is used, RingId=5, corresponding to the 0th + * FLOWRING, will actually use the FlowId as index into the H2D DMA index, + * since the FlowId truly represents the index in the H2D DMA indices array. + * + * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS, + * will represent the index in the D2H DMA indices array. 
+ * + * +---------------------------------------------------------------------------- + */ + +/* First TxPost Flowring Id */ +#define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS + +/* Determine whether a ringid belongs to a TxPost flowring */ +#define DHD_IS_FLOWRING(ringid) \ + ((ringid) >= BCMPCIE_COMMON_MSGRINGS) + +/* Convert a H2D TxPost FlowId to a MsgBuf RingId */ +#define DHD_FLOWID_TO_RINGID(flowid) \ + (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)) + +/* Convert a MsgBuf RingId to a H2D TxPost FlowId */ +#define DHD_RINGID_TO_FLOWID(ringid) \ + (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS)) + +/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array + * This may be used for the H2D DMA WR index array or H2D DMA RD index array or + * any array of H2D rings. + */ +#define DHD_H2D_RING_OFFSET(ringid) \ + ((DHD_IS_FLOWRING(ringid)) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid)) + +/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array + * This may be used for the D2H DMA WR index array or D2H DMA RD index array or + * any array of D2H rings. + */ +#define DHD_D2H_RING_OFFSET(ringid) \ + ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS) + +/* Convert a D2H DMA Indices Offset to a RingId */ +#define DHD_D2H_RINGID(offset) \ + ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS) + + +#define DHD_DMAH_NULL ((void*)NULL) + +/* + * Pad a DMA-able buffer by an additional cachline. If the end of the DMA-able + * buffer does not occupy the entire cacheline, and another object is placed + * following the DMA-able buffer, data corruption may occur if the DMA-able + * buffer is used to DMAing into (e.g. D2H direction), when HW cache coherency + * is not available. 
+ */ +#if defined(L1_CACHE_BYTES) +#define DHD_DMA_PAD (L1_CACHE_BYTES) +#else +#define DHD_DMA_PAD (128) +#endif + +/* Used in loopback tests */ +typedef struct dhd_dmaxfer { + dhd_dma_buf_t srcmem; + dhd_dma_buf_t dstmem; + uint32 srcdelay; + uint32 destdelay; + uint32 len; + bool in_progress; +} dhd_dmaxfer_t; + +/** + * msgbuf_ring : This object manages the host side ring that includes a DMA-able + * buffer, the WR and RD indices, ring parameters such as max number of items + * an length of each items, and other miscellaneous runtime state. + * A msgbuf_ring may be used to represent a H2D or D2H common ring or a + * H2D TxPost ring as specified in the PCIE FullDongle Spec. + * Ring parameters are conveyed to the dongle, which maintains its own peer end + * ring state. Depending on whether the DMA Indices feature is supported, the + * host will update the WR/RD index in the DMA indices array in host memory or + * directly in dongle memory. + */ +typedef struct msgbuf_ring { + bool inited; + uint16 idx; /* ring id */ + uint16 rd; /* read index */ + uint16 curr_rd; /* read index for debug */ + uint16 wr; /* write index */ + uint16 max_items; /* maximum number of items in ring */ + uint16 item_len; /* length of each item in the ring */ + sh_addr_t base_addr; /* LITTLE ENDIAN formatted: base address */ + dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */ + uint32 seqnum; /* next expected item's sequence number */ +#ifdef TXP_FLUSH_NITEMS + void *start_addr; + /* # of messages on ring not yet announced to dongle */ + uint16 pend_items_count; +#endif /* TXP_FLUSH_NITEMS */ + uchar name[RING_NAME_MAX_LENGTH]; +} msgbuf_ring_t; + +#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va) +#define DHD_RING_END_VA(ring) \ + ((uint8 *)(DHD_RING_BGN_VA((ring))) + \ + (((ring)->max_items - 1) * (ring)->item_len)) + + + +/** DHD protocol handle. Is an opaque type to other DHD software layers. 
*/ +typedef struct dhd_prot { + osl_t *osh; /* OSL handle */ + uint16 rxbufpost; + uint16 max_rxbufpost; + uint16 max_eventbufpost; + uint16 max_ioctlrespbufpost; + uint16 cur_event_bufs_posted; + uint16 cur_ioctlresp_bufs_posted; + + /* Flow control mechanism based on active transmits pending */ + uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */ + uint16 max_tx_count; + uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */ + + /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */ + msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */ + msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */ + msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */ + msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */ + msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */ + + msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowings */ + dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */ + uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */ + + uint32 rx_dataoffset; + + dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */ + + /* ioctl related resources */ + uint8 ioctl_state; + int16 ioctl_status; /* status returned from dongle */ + uint16 ioctl_resplen; + dhd_ioctl_recieved_status_t ioctl_received; + uint curr_ioctl_cmd; + dhd_dma_buf_t retbuf; /* For holding ioctl response */ + dhd_dma_buf_t ioctbuf; /* For holding ioctl request */ + + dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */ + + /* DMA-able arrays for holding WR and RD indices */ + uint32 rw_index_sz; /* Size of a RD or WR index in dongle */ + dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */ + dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */ + dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */ + dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of 
D2H RD indices */ + + dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */ + + dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */ + uint32 flowring_num; + +#if defined(PCIE_D2H_SYNC) + d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */ + ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */ + ulong d2h_sync_wait_tot; /* total wait loops */ +#endif /* PCIE_D2H_SYNC */ + + dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */ + + uint16 ioctl_seq_no; + uint16 data_seq_no; + uint16 ioctl_trans_id; + void *pktid_map_handle; /* a pktid maps to a packet and its metadata */ + bool metadata_dbg; + void *pktid_map_handle_ioctl; + + /* Applications/utilities can read tx and rx metadata using IOVARs */ + uint16 rx_metadata_offset; + uint16 tx_metadata_offset; + + +#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT) + /* Host's soft doorbell configuration */ + bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS]; +#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */ +#if defined(DHD_LB) + /* Work Queues to be used by the producer and the consumer, and threshold + * when the WRITE index must be synced to consumer's workq + */ +#if defined(DHD_LB_TXC) + uint32 tx_compl_prod_sync ____cacheline_aligned; + bcm_workq_t tx_compl_prod, tx_compl_cons; +#endif /* DHD_LB_TXC */ +#if defined(DHD_LB_RXC) + uint32 rx_compl_prod_sync ____cacheline_aligned; + bcm_workq_t rx_compl_prod, rx_compl_cons; +#endif /* DHD_LB_RXC */ +#endif /* DHD_LB */ +} dhd_prot_t; + +/* Convert a dmaaddr_t to a base_addr with htol operations */ +static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa); + +/* APIs for managing a DMA-able buffer */ +static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); +static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len); +static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); +static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t 
*dma_buf); + +/* msgbuf ring management */ +static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, + const char *name, uint16 max_items, uint16 len_item, uint16 ringid); +static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring); +static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring); +static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring); + +/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */ +static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd); +static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd); +static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd); + +/* Fetch and Release a flowring msgbuf_ring from flowring pool */ +static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, + uint16 flowid); +/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */ + +/* Producer: Allocate space in a msgbuf ring */ +static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint16 nitems, uint16 *alloced, bool exactly_nitems); +static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, + uint16 *alloced, bool exactly_nitems); + +/* Consumer: Determine the location where the next message may be consumed */ +static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint32 *available_len); + +/* Producer (WR index update) or Consumer (RD index update) indication */ +static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring, + void *p, uint16 len); +static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring); + +/* Allocate DMA-able memory for saving H2D/D2H WR/RD indices */ +static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type, + dhd_dma_buf_t *dma_buf, uint32 bufsz); + +/* Set/Get a RD or WR index in the array of indices */ +/* See also: dhd_prot_dma_indx_init() */ +static void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, + uint16 ringid); +static 
uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid); + +/* Locate a packet given a pktid */ +static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, + bool free_pktid); +/* Locate a packet given a PktId and free it. */ +static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send); + +static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, + void *buf, uint len, uint8 action); +static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, + void *buf, uint len, uint8 action); +static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf); +static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, + void *buf, int ifidx); + +/* Post buffers for Rx, control ioctl response and events */ +static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post); +static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub); +static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub); +static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid); +static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid); + +static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt); + +/* D2H Message handling */ +static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len); + +/* D2H Message handlers */ +static void dhd_prot_noop(dhd_pub_t *dhd, void *msg); +static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg); + +/* Loopback test with 
dongle */ +static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma); +static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay, + uint destdelay, dhd_dmaxfer_t *dma); +static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg); + +/* Flowring management communication with dongle */ +static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg); + +/* Configure a soft doorbell per D2H ring */ +static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd); +static void dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg); + +typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg); + +/** callback functions for messages generated by the dongle */ +#define MSG_TYPE_INVALID 0 + +static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = { + dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */ + dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */ + dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */ + NULL, + dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */ + NULL, + dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */ + NULL, + dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */ + NULL, + dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */ + NULL, + dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */ + NULL, + dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */ + NULL, + dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */ + NULL, + dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */ + NULL, + dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */ + NULL, /* MSG_TYPE_FLOW_RING_RESUME */ + NULL, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */ + NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */ + NULL, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */ + NULL, /* 
MSG_TYPE_INFO_BUF_POST */ + NULL, /* MSG_TYPE_INFO_BUF_CMPLT */ + NULL, /* MSG_TYPE_H2D_RING_CREATE */ + NULL, /* MSG_TYPE_D2H_RING_CREATE */ + NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */ + NULL, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */ + NULL, /* MSG_TYPE_H2D_RING_CONFIG */ + NULL, /* MSG_TYPE_D2H_RING_CONFIG */ + NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */ + dhd_prot_d2h_ring_config_cmplt_process, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */ + NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */ + NULL, /* MSG_TYPE_D2H_MAILBOX_DATA */ +}; + + +#ifdef DHD_RX_CHAINING + +#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \ + (!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \ + !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \ + !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \ + !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \ + ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \ + ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \ + (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))) && \ + dhd_l2_filter_chainable((dhd), (evh), (ifidx))) + +static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain); +static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx); +static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd); + +#define DHD_PKT_CTF_MAX_CHAIN_LEN 64 + +#endif /* DHD_RX_CHAINING */ + +static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd); + +#if defined(PCIE_D2H_SYNC) /* avoids problems related to host CPU cache */ + +/** + * D2H DMA to completion callback handlers. Based on the mode advertised by the + * dongle through the PCIE shared region, the appropriate callback will be + * registered in the proto layer to be invoked prior to precessing any message + * from a D2H DMA ring. 
If the dongle uses a read barrier or another mode that + * does not require host participation, then a noop callback handler will be + * bound that simply returns the msg_type. + */ +static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint32 tries, uchar *msg, int msglen); +static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd); + +void dhd_prot_collect_memdump(dhd_pub_t *dhd) +{ + DHD_ERROR(("%s(): Collecting mem dump now \r\n", __FUNCTION__)); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK; + dhd_bus_mem_dump(dhd); + } +#endif /* DHD_FW_COREDUMP */ +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + dhd->bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ + dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK; + dhd_os_send_hang_message(dhd); +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +} + +/** + * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has + * not completed, a livelock condition occurs. Host will avert this livelock by + * dropping this message and moving to the next. This dropped message can lead + * to a packet leak, or even something disastrous in the case the dropped + * message happens to be a control response. + * Here we will log this condition. One may choose to reboot the dongle. 
+ * + */ +static void +dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 tries, + uchar *msg, int msglen) +{ + uint32 seqnum = ring->seqnum; + + DHD_ERROR(("LIVELOCK DHD<%p> name<%s> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu>" + "dma_buf va<%p> msg<%p> curr_rd<%d>\n", + dhd, ring->name, seqnum, seqnum% D2H_EPOCH_MODULO, tries, + dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot, + ring->dma_buf.va, msg, ring->curr_rd)); + prhex("D2H MsgBuf Failure", (uchar *)msg, msglen); + dhd_dump_to_kernelog(dhd); + +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK; + dhd_bus_mem_dump(dhd); + } +#endif /* DHD_FW_COREDUMP */ +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + dhd->bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ + dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK; + dhd_os_send_hang_message(dhd); +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +} + +/** + * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM + * mode. Sequence number is always in the last word of a message. + */ +static uint8 BCMFASTPATH +dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen) +{ + uint32 tries; + uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; + int num_words = msglen / sizeof(uint32); /* num of 32bit words */ + volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */ + dhd_prot_t *prot = dhd->prot; + uint32 step = 0; + uint32 delay = PCIE_D2H_SYNC_DELAY; + uint32 total_tries = 0; + + ASSERT(msglen == ring->item_len); + + BCM_REFERENCE(delay); + /* + * For retries we have to make some sort of stepper algorithm. + * We see that every time when the Dongle comes out of the D3 + * Cold state, the first D2H mem2mem DMA takes more time to + * complete, leading to livelock issues. 
+ * Case 1 - Apart from the Host CPU, some other bus master is + * accessing the DDR port, probably a page close to the ring, + * so PCIE does not get a chance to update the memory. + * Solution - Increase the number of tries. + * + * Case 2 - The 50usec delay given by the Host CPU is not + * sufficient for the PCIe RC to start its work. + * In this case the breathing time of 50usec given by + * the Host CPU is not sufficient. + * Solution: Increase the delay in a stepper fashion. + * This is done to ensure that there is no + * unwanted extra delay introduced in normal conditions.
Dongle will also + * place a seqnum in the epoch field of the cmn_msg_hdr. + */ +static uint8 BCMFASTPATH +dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen) +{ + uint32 tries; + uint32 prot_checksum = 0; /* computed checksum */ + int num_words = msglen / sizeof(uint32); /* num of 32bit words */ + uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; + dhd_prot_t *prot = dhd->prot; + uint32 step = 0; + uint32 delay = PCIE_D2H_SYNC_DELAY; + uint32 total_tries = 0; + + ASSERT(msglen == ring->item_len); + + BCM_REFERENCE(delay); + + /* + * For retries we have to make some sort of stepper algorithm. + * We see that every time when the Dongle comes out of the D3 + * Cold state, the first D2H mem2mem DMA takes more time to + * complete, leading to livelock issues. + * + * Case 1 - Apart from Host CPU some other bus master is + * accessing the DDR port, probably page close to the ring + * so, PCIE does not get a change to update the memory. + * Solution - Increase the number of tries. + * + * Case 2 - The 50usec delay given by the Host CPU is not + * sufficient for the PCIe RC to start its work. + * In this case the breathing time of 50usec given by + * the Host CPU is not sufficient. + * Solution: Increase the delay in a stepper fashion. + * This is done to ensure that there are no + * unwanted extra delay introdcued in normal conditions. 
+ */ + for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) { + for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) { + prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words); + if (prot_checksum == 0U) { /* checksum is OK */ + if (msg->epoch == ring_seqnum) { + ring->seqnum++; /* next expected sequence number */ + goto dma_completed; + } + } + + total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries; + + if (total_tries > prot->d2h_sync_wait_max) + prot->d2h_sync_wait_max = total_tries; + + OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ + OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */ +#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) + /* For ARM there is no pause in cpu_relax, so add extra delay */ + OSL_DELAY(delay * step); +#endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */ + + } /* for PCIE_D2H_SYNC_WAIT_TRIES */ + } /* for number of steps */ + + dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen); + + ring->seqnum++; /* skip this message ... leak of a pktid */ + return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */ + +dma_completed: + + prot->d2h_sync_wait_tot += total_tries; + return msg->msg_type; +} + +/** + * dhd_prot_d2h_sync_none - Dongle ensure that the DMA will complete and host + * need to try to sync. This noop sync handler will be bound when the dongle + * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required. + */ +static uint8 BCMFASTPATH +dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen) +{ + return msg->msg_type; +} + +/** + * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what + * dongle advertizes. 
+ */ +static void +dhd_prot_d2h_sync_init(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + prot->d2h_sync_wait_max = 0UL; + prot->d2h_sync_wait_tot = 0UL; + + prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL; + + if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) { + prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum; + } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) { + prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum; + } else { + prot->d2h_sync_cb = dhd_prot_d2h_sync_none; + } +} + +#endif /* PCIE_D2H_SYNC */ + +int INLINE +dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason) +{ + /* To synchronize with the previous memory operations call wmb() */ + OSL_SMP_WMB(); + dhd->prot->ioctl_received = reason; + /* Call another wmb() to make sure before waking up the other event value gets updated */ + OSL_SMP_WMB(); + dhd_os_ioctl_resp_wake(dhd); + return 0; +} + +/** + * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum + */ +static void +dhd_prot_h2d_sync_init(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL; + prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL; +} + +/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */ + + +/* + * +---------------------------------------------------------------------------+ + * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the + * virtual and physical address, the buffer lenght and the DMA handler. + * A secdma handler is also included in the dhd_dma_buf object. 
+ * +---------------------------------------------------------------------------+ + */ + +static INLINE void +dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa) +{ + base_addr->low_addr = htol32(PHYSADDRLO(pa)); + base_addr->high_addr = htol32(PHYSADDRHI(pa)); +} + + +/** + * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer. + */ +static int +dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf) +{ + uint32 base, end; /* dongle uses 32bit ptr arithmetic */ + + ASSERT(dma_buf); + base = PHYSADDRLO(dma_buf->pa); + ASSERT(base); + ASSERT(ISALIGNED(base, DMA_ALIGN_LEN)); + ASSERT(dma_buf->len != 0); + + /* test 32bit offset arithmetic over dma buffer for loss of carry-over */ + end = (base + dma_buf->len); /* end address */ + + if ((end & 0xFFFFFFFF) < (base & 0xFFFFFFFF)) { /* exclude carryover */ + DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n", + __FUNCTION__, base, dma_buf->len)); + return BCME_ERROR; + } + + return BCME_OK; +} + +/** + * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer. + * returns BCME_OK=0 on success + * returns non-zero negative error value on failure. + */ +static int +dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len) +{ + uint32 dma_pad = 0; + osl_t *osh = dhd->osh; + + ASSERT(dma_buf != NULL); + ASSERT(dma_buf->va == NULL); + ASSERT(dma_buf->len == 0); + + /* Pad the buffer length by one extra cacheline size. + * Required for D2H direction. + */ + dma_pad = (buf_len % DHD_DMA_PAD) ? 
DHD_DMA_PAD : 0; + dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad, + DMA_ALIGN_LEN, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah); + + if (dma_buf->va == NULL) { + DHD_ERROR(("%s: buf_len %d, no memory available\n", + __FUNCTION__, buf_len)); + return BCME_NOMEM; + } + + dma_buf->len = buf_len; /* not including padded len */ + + if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */ + dhd_dma_buf_free(dhd, dma_buf); + return BCME_ERROR; + } + + dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */ + + return BCME_OK; +} + +/** + * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer. + */ +static void +dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf) +{ + if ((dma_buf == NULL) || (dma_buf->va == NULL)) { + return; + } + + (void)dhd_dma_buf_audit(dhd, dma_buf); + + /* Zero out the entire buffer and cache flush */ + memset((void*)dma_buf->va, 0, dma_buf->len); + OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len); +} + +/** + * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using + * dhd_dma_buf_alloc(). + */ +static void +dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf) +{ + osl_t *osh = dhd->osh; + + ASSERT(dma_buf); + + if (dma_buf->va == NULL) { + return; /* Allow for free invocation, when alloc failed */ + } + + /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */ + (void)dhd_dma_buf_audit(dhd, dma_buf); + + /* dma buffer may have been padded at allocation */ + DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced, + dma_buf->pa, dma_buf->dmah); + + memset(dma_buf, 0, sizeof(dhd_dma_buf_t)); +} + +/** + * dhd_dma_buf_init - Initialize a dhd_dma_buf with speicifed values. + * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0. 
+ */ +void +dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf, + void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma) +{ + dhd_dma_buf_t *dma_buf; + ASSERT(dhd_dma_buf); + dma_buf = (dhd_dma_buf_t *)dhd_dma_buf; + dma_buf->va = va; + dma_buf->len = len; + dma_buf->pa = pa; + dma_buf->dmah = dmah; + dma_buf->secdma = secdma; + + /* Audit user defined configuration */ + (void)dhd_dma_buf_audit(dhd, dma_buf); +} + +/* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */ + +/* + * +---------------------------------------------------------------------------+ + * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping. + * Main purpose is to save memory on the dongle, has other purposes as well. + * The packet id map, also includes storage for some packet parameters that + * may be saved. A native packet pointer along with the parameters may be saved + * and a unique 32bit pkt id will be returned. Later, the saved packet pointer + * and the metadata may be retrieved using the previously allocated packet id. + * +---------------------------------------------------------------------------+ + */ +#define DHD_PCIE_PKTID +#define MAX_PKTID_ITEMS (3072) /* Maximum number of pktids supported */ + +/* On Router, the pktptr serves as a pktid. 
*/ + + +#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID) +#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC" +#endif + +/* Enum for marking the buffer color based on usage */ +typedef enum dhd_pkttype { + PKTTYPE_DATA_TX = 0, + PKTTYPE_DATA_RX, + PKTTYPE_IOCTL_RX, + PKTTYPE_EVENT_RX, + /* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */ + PKTTYPE_NO_CHECK +} dhd_pkttype_t; + +#define DHD_PKTID_INVALID (0U) +#define DHD_IOCTL_REQ_PKTID (0xFFFE) +#define DHD_FAKE_PKTID (0xFACE) + +#define DHD_PKTID_FREE_LOCKER (FALSE) +#define DHD_PKTID_RSV_LOCKER (TRUE) + +typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */ + +/* Construct a packet id mapping table, returning an opaque map handle */ +static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index); + +/* Destroy a packet id mapping table, freeing all packets active in the table */ +static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map); + +#define PKTID_MAP_HANDLE (0) +#define PKTID_MAP_HANDLE_IOCTL (1) + +#define DHD_NATIVE_TO_PKTID_INIT(dhd, items, index) dhd_pktid_map_init((dhd), (items), (index)) +#define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map)) + +#if defined(DHD_PCIE_PKTID) + + +/* Determine number of pktids that are available */ +static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle); + +/* Allocate a unique pktid against which a pkt and some metadata is saved */ +static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, + void *pkt); +static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, + void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma, + void *dmah, void *secdma, dhd_pkttype_t pkttype); +static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map, + void *pkt, dmaaddr_t pa, uint32 len, uint8 dma, + void *dmah, void *secdma, dhd_pkttype_t pkttype); + 
+/* Return an allocated pktid, retrieving previously saved pkt and metadata */ +static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map, + uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah, + void **secdma, dhd_pkttype_t pkttype, bool rsv_locker); + +/* + * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees + * + * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator + * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation + * + * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined, + * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected. + */ +#if defined(DHD_PKTID_AUDIT_ENABLED) +#define USE_DHD_PKTID_AUDIT_LOCK 1 +/* Audit the pktidmap allocator */ +/* #define DHD_PKTID_AUDIT_MAP */ + +/* Audit the pktid during production/consumption of workitems */ +#define DHD_PKTID_AUDIT_RING + +#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING) +#error "May only enabled audit of MAP or RING, at a time." 
+#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */ + +#define DHD_DUPLICATE_ALLOC 1 +#define DHD_DUPLICATE_FREE 2 +#define DHD_TEST_IS_ALLOC 3 +#define DHD_TEST_IS_FREE 4 + +#ifdef USE_DHD_PKTID_AUDIT_LOCK +#define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh) +#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock) +#define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock) +#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags) +#else +#define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1) +#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0) +#define DHD_PKTID_AUDIT_LOCK(lock) 0 +#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0) +#endif /* !USE_DHD_PKTID_AUDIT_LOCK */ + +#endif /* DHD_PKTID_AUDIT_ENABLED */ + +/* #define USE_DHD_PKTID_LOCK 1 */ + +#ifdef USE_DHD_PKTID_LOCK +#define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh) +#define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock) +#define DHD_PKTID_LOCK(lock) dhd_os_spin_lock(lock) +#define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags) +#else +#define DHD_PKTID_LOCK_INIT(osh) (void *)(1) +#define DHD_PKTID_LOCK_DEINIT(osh, lock) \ + do { \ + BCM_REFERENCE(osh); \ + BCM_REFERENCE(lock); \ + } while (0) +#define DHD_PKTID_LOCK(lock) 0 +#define DHD_PKTID_UNLOCK(lock, flags) \ + do { \ + BCM_REFERENCE(lock); \ + BCM_REFERENCE(flags); \ + } while (0) +#endif /* !USE_DHD_PKTID_LOCK */ + +/* Packet metadata saved in packet id mapper */ + +/* The Locker can be 3 states + * LOCKER_IS_FREE - Locker is free and can be allocated + * LOCKER_IS_BUSY - Locker is assigned and is being used, values in the + * locker (buffer address, len, phy addr etc) are populated + * with valid values + * LOCKER_IS_RSVD - The locker is reserved for future use, but the values + * in the locker are not valid. Especially pkt should be + * NULL in this state. 
When the user wants to re-use the + * locker dhd_pktid_map_free can be called with a flag + * to reserve the pktid for future use, which will clear + * the contents of the locker. When the user calls + * dhd_pktid_map_save the locker would move to LOCKER_IS_BUSY + */ +typedef enum dhd_locker_state { + LOCKER_IS_FREE, + LOCKER_IS_BUSY, + LOCKER_IS_RSVD +} dhd_locker_state_t; + +typedef struct dhd_pktid_item { + dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */ + uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */ + dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */ + uint16 len; /* length of mapped packet's buffer */ + void *pkt; /* opaque native pointer to a packet */ + dmaaddr_t pa; /* physical address of mapped packet's buffer */ + void *dmah; /* handle to OS specific DMA map */ + void *secdma; +} dhd_pktid_item_t; + +typedef struct dhd_pktid_map { + uint32 items; /* total items in map */ + uint32 avail; /* total available items */ + int failures; /* lockers unavailable count */ + /* Spinlock to protect dhd_pktid_map in process/tasklet context */ + void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */ + +#if defined(DHD_PKTID_AUDIT_ENABLED) + void *pktid_audit_lock; + struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */ +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */ + dhd_pktid_item_t lockers[0]; /* metadata storage */ +} dhd_pktid_map_t; + +/* + * PktId (Locker) #0 is never allocated and is considered invalid. + * + * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a + * depleted pktid pool and must not be used by the caller. + * + * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID. 
+ */ + +#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t)) +#define DHD_PKIDMAP_ITEMS(items) (items) +#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \ + (DHD_PKTID_ITEM_SZ * ((items) + 1))) + +#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, map) dhd_pktid_map_fini_ioctl((dhd), (map)) + +/* Convert a packet to a pktid, and save pkt pointer in busy locker */ +#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) dhd_pktid_map_reserve((dhd), (map), (pkt)) + +/* Reuse a previously reserved locker to save packet params */ +#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \ + dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \ + (uint8)(dir), (void *)(dmah), (void *)(secdma), \ + (dhd_pkttype_t)(pkttype)) + +/* Convert a packet to a pktid, and save packet params in locker */ +#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \ + dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \ + (uint8)(dir), (void *)(dmah), (void *)(secdma), \ + (dhd_pkttype_t)(pkttype)) + +/* Convert pktid to a packet, and free the locker */ +#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER) + +/* Convert the pktid to a packet, empty locker, but keep it reserved */ +#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER) + +#define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map) + +#if defined(DHD_PKTID_AUDIT_ENABLED) + +static int dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, + const int test_for, const char *errmsg); + +/** +* 
dhd_pktid_audit - Use the mwbmap to audit validity of a pktid. +*/ +static int +dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, + const int test_for, const char *errmsg) +{ +#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: " + + const uint32 max_pktid_items = (MAX_PKTID_ITEMS); + struct bcm_mwbmap *handle; + uint32 flags; + bool ignore_audit; + + if (pktid_map == (dhd_pktid_map_t *)NULL) { + DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg)); + return BCME_OK; + } + + flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock); + + handle = pktid_map->pktid_audit; + if (handle == (struct bcm_mwbmap *)NULL) { + DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg)); + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + return BCME_OK; + } + + /* Exclude special pktids from audit */ + ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID); + if (ignore_audit) { + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + return BCME_OK; + } + + if ((pktid == DHD_PKTID_INVALID) || (pktid > max_pktid_items)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid)); + /* lock is released in "error" */ + goto error; + } + + /* Perform audit */ + switch (test_for) { + case DHD_DUPLICATE_ALLOC: + if (!bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n", + errmsg, pktid)); + goto error; + } + bcm_mwbmap_force(handle, pktid); + break; + + case DHD_DUPLICATE_FREE: + if (bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n", + errmsg, pktid)); + goto error; + } + bcm_mwbmap_free(handle, pktid); + break; + + case DHD_TEST_IS_ALLOC: + if (bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n", + errmsg, pktid)); + goto error; + } + break; + + case DHD_TEST_IS_FREE: + if (!bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free", + 
errmsg, pktid)); + goto error; + } + break; + + default: + goto error; + } + + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + return BCME_OK; + +error: + + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + /* May insert any trap mechanism here ! */ + dhd_pktid_audit_fail_cb(dhd); + + return BCME_ERROR; +} + +#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \ + dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__) + +#endif /* DHD_PKTID_AUDIT_ENABLED */ + +/* +------------------ End of PCIE DHD PKTID AUDIT ------------------------+ */ + + +/** + * +---------------------------------------------------------------------------+ + * Packet to Packet Id mapper using a paradigm. + * + * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS]. + * + * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique + * packet id is returned. This unique packet id may be used to retrieve the + * previously saved packet metadata, using dhd_pktid_map_free(). On invocation + * of dhd_pktid_map_free(), the unique packet id is essentially freed. A + * subsequent call to dhd_pktid_map_alloc() may reuse this packet id. + * + * Implementation Note: + * Convert this into a abstraction and place into bcmutils ! + * Locker abstraction should treat contents as opaque storage, and a + * callback should be registered to handle busy lockers on destructor. 
+ * + * +---------------------------------------------------------------------------+ + */ + +/** Allocate and initialize a mapper of num_items */ + +static dhd_pktid_map_handle_t * +dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index) +{ + void *osh; + uint32 nkey; + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + uint32 map_items; +#ifdef DHD_USE_STATIC_PKTIDMAP + uint32 section; +#endif /* DHD_USE_STATIC_PKTIDMAP */ + osh = dhd->osh; + + ASSERT((num_items >= 1) && (num_items <= MAX_PKTID_ITEMS)); + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items); + +#ifdef DHD_USE_STATIC_PKTIDMAP + if (index == PKTID_MAP_HANDLE) { + section = DHD_PREALLOC_PKTID_MAP; + } else { + section = DHD_PREALLOC_PKTID_MAP_IOCTL; + } + + map = (dhd_pktid_map_t *)DHD_OS_PREALLOC(dhd, section, dhd_pktid_map_sz); +#else + map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz); +#endif /* DHD_USE_STATIC_PKTIDMAP */ + + if (map == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for size %d\n", + __FUNCTION__, __LINE__, dhd_pktid_map_sz)); + goto error; + } + + bzero(map, dhd_pktid_map_sz); + + /* Initialize the lock that protects this structure */ + map->pktid_lock = DHD_PKTID_LOCK_INIT(osh); + if (map->pktid_lock == NULL) { + DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__)); + goto error; + } + + map->items = num_items; + map->avail = num_items; + + map_items = DHD_PKIDMAP_ITEMS(map->items); + +#if defined(DHD_PKTID_AUDIT_ENABLED) + /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */ + map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1); + if (map->pktid_audit == (struct bcm_mwbmap *)NULL) { + DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__)); + goto error; + } else { + DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n", + __FUNCTION__, __LINE__, map_items + 1)); + } + + map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh); + +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + for (nkey = 1; nkey <= map_items; nkey++) { 
/* locker #0 is reserved */ + map->keys[nkey] = nkey; /* populate with unique keys */ + map->lockers[nkey].state = LOCKER_IS_FREE; + map->lockers[nkey].pkt = NULL; /* bzero: redundant */ + map->lockers[nkey].len = 0; + } + + /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be busy */ + map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; + map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */ + map->lockers[DHD_PKTID_INVALID].len = 0; + +#if defined(DHD_PKTID_AUDIT_ENABLED) + /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */ + bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID); +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + return (dhd_pktid_map_handle_t *)map; /* opaque handle */ + +error: + + if (map) { + +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ + map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) + DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + if (map->pktid_lock) + DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock); + + MFREE(osh, map, dhd_pktid_map_sz); + } + + return (dhd_pktid_map_handle_t *)NULL; +} + +/** + * Retrieve all allocated keys and free all . + * Freeing implies: unmapping the buffers and freeing the native packet + * This could have been a callback registered with the pktid mapper. 
+ */ + +static void +dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + void *osh; + uint32 nkey; + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + dhd_pktid_item_t *locker; + uint32 map_items; + uint32 flags; + + if (handle == NULL) { + return; + } + + map = (dhd_pktid_map_t *)handle; + flags = DHD_PKTID_LOCK(map->pktid_lock); + osh = dhd->osh; + + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); + + nkey = 1; /* skip reserved KEY #0, and start from 1 */ + locker = &map->lockers[nkey]; + + map_items = DHD_PKIDMAP_ITEMS(map->items); + + for (; nkey <= map_items; nkey++, locker++) { + + if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */ + + locker->state = LOCKER_IS_FREE; /* force open the locker */ + +#if defined(DHD_PKTID_AUDIT_ENABLED) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + { /* This could be a callback registered with dhd_pktid_map */ + DMA_UNMAP(osh, locker->pa, locker->len, + locker->dir, 0, DHD_DMAH_NULL); + dhd_prot_packet_free(dhd, (ulong*)locker->pkt, + locker->pkttype, TRUE); + } + } +#if defined(DHD_PKTID_AUDIT_ENABLED) + else { + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + locker->pkt = NULL; /* clear saved pkt */ + locker->len = 0; + } + +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ + map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) { + DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); + } + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock); + +#ifdef DHD_USE_STATIC_PKTIDMAP + DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz); +#else + MFREE(osh, handle, dhd_pktid_map_sz); +#endif /* DHD_USE_STATIC_PKTIDMAP */ +} + +#ifdef IOCTLRESP_USE_CONSTMEM +/** 
Called in detach scenario. Releasing IOCTL buffers. */ +static void +dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + uint32 nkey; + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + dhd_pktid_item_t *locker; + uint32 map_items; + uint32 flags; + osl_t *osh = dhd->osh; + + if (handle == NULL) { + return; + } + + map = (dhd_pktid_map_t *)handle; + flags = DHD_PKTID_LOCK(map->pktid_lock); + + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); + + nkey = 1; /* skip reserved KEY #0, and start from 1 */ + locker = &map->lockers[nkey]; + + map_items = DHD_PKIDMAP_ITEMS(map->items); + + for (; nkey <= map_items; nkey++, locker++) { + + if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */ + + locker->state = LOCKER_IS_FREE; /* force open the locker */ + +#if defined(DHD_PKTID_AUDIT_ENABLED) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + { + dhd_dma_buf_t retbuf; + retbuf.va = locker->pkt; + retbuf.len = locker->len; + retbuf.pa = locker->pa; + retbuf.dmah = locker->dmah; + retbuf.secdma = locker->secdma; + + /* This could be a callback registered with dhd_pktid_map */ + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + free_ioctl_return_buffer(dhd, &retbuf); + flags = DHD_PKTID_LOCK(map->pktid_lock); + } + } +#if defined(DHD_PKTID_AUDIT_ENABLED) + else { + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + locker->pkt = NULL; /* clear saved pkt */ + locker->len = 0; + } + +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ + map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) { + DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); + } + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock); + +#ifdef 
DHD_USE_STATIC_PKTIDMAP + DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz); +#else + MFREE(osh, handle, dhd_pktid_map_sz); +#endif /* DHD_USE_STATIC_PKTIDMAP */ +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +/** Get the pktid free count */ +static INLINE uint32 BCMFASTPATH +dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle) +{ + dhd_pktid_map_t *map; + uint32 flags; + uint32 avail; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + + flags = DHD_PKTID_LOCK(map->pktid_lock); + avail = map->avail; + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + + return avail; +} + +/** + * Allocate locker, save pkt contents, and return the locker's numbered key. + * dhd_pktid_map_alloc() is not reentrant, and is the caller's responsibility. + * Caller must treat a returned value DHD_PKTID_INVALID as a failure case, + * implying a depleted pool of pktids. + */ + +static INLINE uint32 +__dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt) +{ + uint32 nkey; + dhd_pktid_map_t *map; + dhd_pktid_item_t *locker; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + + if (map->avail <= 0) { /* no more pktids to allocate */ + map->failures++; + DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__)); + return DHD_PKTID_INVALID; /* failed alloc request */ + } + + ASSERT(map->avail <= map->items); + nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */ + locker = &map->lockers[nkey]; /* save packet metadata in locker */ + map->avail--; + locker->pkt = pkt; /* pkt is saved, other params not yet saved. */ + locker->len = 0; + locker->state = LOCKER_IS_BUSY; /* reserve this locker */ + +#if defined(DHD_PKTID_AUDIT_MAP) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_ALLOC); /* Audit duplicate alloc */ +#endif /* DHD_PKTID_AUDIT_MAP */ + + ASSERT(nkey != DHD_PKTID_INVALID); + return nkey; /* return locker's numbered key */ +} + + +/** + * dhd_pktid_map_reserve - reserve a unique numbered key. 
Reserved locker is not + * yet populated. Invoke the pktid save api to populate the packet parameters + * into the locker. + * Wrapper that takes the required lock when called directly. + */ +static INLINE uint32 +dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt) +{ + dhd_pktid_map_t *map; + uint32 flags; + uint32 ret; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + flags = DHD_PKTID_LOCK(map->pktid_lock); + ret = __dhd_pktid_map_reserve(dhd, handle, pkt); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + + return ret; +} + +static INLINE void +__dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, + uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma, + dhd_pkttype_t pkttype) +{ + dhd_pktid_map_t *map; + dhd_pktid_item_t *locker; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + + ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items))); + + locker = &map->lockers[nkey]; + + ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) || + ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL))); + +#if defined(DHD_PKTID_AUDIT_MAP) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */ +#endif /* DHD_PKTID_AUDIT_MAP */ + + /* store contents in locker */ + locker->dir = dir; + locker->pa = pa; + locker->len = (uint16)len; /* 16bit len */ + locker->dmah = dmah; /* 16bit len */ + locker->secdma = secdma; + locker->pkttype = pkttype; + locker->pkt = pkt; + locker->state = LOCKER_IS_BUSY; /* make this locker busy */ +} + +/** + * dhd_pktid_map_save - Save a packet's parameters into a locker corresponding + * to a previously reserved unique numbered key. + * Wrapper that takes the required lock when called directly. 
+ */ +static INLINE void +dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, + uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma, + dhd_pkttype_t pkttype) +{ + dhd_pktid_map_t *map; + uint32 flags; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + flags = DHD_PKTID_LOCK(map->pktid_lock); + __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len, + dir, dmah, secdma, pkttype); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); +} + +/** + * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet + * contents into the corresponding locker. Return the numbered key. + */ +static uint32 BCMFASTPATH +dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, + dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma, + dhd_pkttype_t pkttype) +{ + uint32 nkey; + uint32 flags; + dhd_pktid_map_t *map; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + + flags = DHD_PKTID_LOCK(map->pktid_lock); + + nkey = __dhd_pktid_map_reserve(dhd, handle, pkt); + if (nkey != DHD_PKTID_INVALID) { + __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, + len, dir, dmah, secdma, pkttype); +#if defined(DHD_PKTID_AUDIT_MAP) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */ +#endif /* DHD_PKTID_AUDIT_MAP */ + } + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + + return nkey; +} + +/** + * dhd_pktid_map_free - Given a numbered key, return the locker contents. + * dhd_pktid_map_free() is not reentrant, and is the caller's responsibility. + * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid + * value. Only a previously allocated pktid may be freed. 
+ */ +static void * BCMFASTPATH +dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey, + dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, + dhd_pkttype_t pkttype, bool rsv_locker) +{ + dhd_pktid_map_t *map; + dhd_pktid_item_t *locker; + void * pkt; + uint32 flags; + + ASSERT(handle != NULL); + + map = (dhd_pktid_map_t *)handle; + + flags = DHD_PKTID_LOCK(map->pktid_lock); + + ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items))); + + locker = &map->lockers[nkey]; + +#if defined(DHD_PKTID_AUDIT_MAP) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */ +#endif /* DHD_PKTID_AUDIT_MAP */ + + if (locker->state == LOCKER_IS_FREE) { /* Debug check for cloned numbered key */ + DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n", + __FUNCTION__, __LINE__, nkey)); + ASSERT(locker->state != LOCKER_IS_FREE); + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + return NULL; + } + + /* Check for the colour of the buffer i.e The buffer posted for TX, + * should be freed for TX completion. Similarly the buffer posted for + * IOCTL should be freed for IOCT completion etc. + */ + if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) { + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + + DHD_ERROR(("%s:%d: Error! 
Invalid Buffer Free for pktid<%u> \n", + __FUNCTION__, __LINE__, nkey)); + ASSERT(locker->pkttype == pkttype); + + return NULL; + } + + if (rsv_locker == DHD_PKTID_FREE_LOCKER) { + map->avail++; + map->keys[map->avail] = nkey; /* make this numbered key available */ + locker->state = LOCKER_IS_FREE; /* open and free Locker */ + } else { + /* pktid will be reused, but the locker does not have a valid pkt */ + locker->state = LOCKER_IS_RSVD; + } + +#if defined(DHD_PKTID_AUDIT_MAP) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); +#endif /* DHD_PKTID_AUDIT_MAP */ + + *pa = locker->pa; /* return contents of locker */ + *len = (uint32)locker->len; + *dmah = locker->dmah; + *secdma = locker->secdma; + + pkt = locker->pkt; + locker->pkt = NULL; /* Clear pkt */ + locker->len = 0; + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + return pkt; +} + +#else /* ! DHD_PCIE_PKTID */ + + +typedef struct pktlist { + PKT_LIST *tx_pkt_list; /* list for tx packets */ + PKT_LIST *rx_pkt_list; /* list for rx packets */ + PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */ +} pktlists_t; + +/* + * Given that each workitem only uses a 32bit pktid, only 32bit hosts may avail + * of a one to one mapping 32bit pktptr and a 32bit pktid. + * + * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail. + * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by + * a lock. + * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined. 
+ */ +#define DHD_PKTID32(pktptr32) ((uint32)(pktptr32)) +#define DHD_PKTPTR32(pktid32) ((void *)(pktid32)) + + +static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32, + dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma, + dhd_pkttype_t pkttype); +static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32, + dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma, + dhd_pkttype_t pkttype); + +static dhd_pktid_map_handle_t * +dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index) +{ + osl_t *osh = dhd->osh; + pktlists_t *handle = NULL; + + if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(pktlists_t))); + goto error_done; + } + + if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(PKT_LIST))); + goto error; + } + + if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(PKT_LIST))); + goto error; + } + + if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(PKT_LIST))); + goto error; + } + + PKTLIST_INIT(handle->tx_pkt_list); + PKTLIST_INIT(handle->rx_pkt_list); + PKTLIST_INIT(handle->ctrl_pkt_list); + + return (dhd_pktid_map_handle_t *) handle; + +error: + if (handle->ctrl_pkt_list) { + MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->rx_pkt_list) { + MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->tx_pkt_list) { + MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle) { + MFREE(osh, handle, sizeof(pktlists_t)); + } + 
+error_done: + return (dhd_pktid_map_handle_t *)NULL; +} + +static void +dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map) +{ + osl_t *osh = dhd->osh; + pktlists_t *handle = (pktlists_t *) map; + + ASSERT(handle != NULL); + if (handle == (pktlists_t *)NULL) { + return; + } + + if (handle->ctrl_pkt_list) { + PKTLIST_FINI(handle->ctrl_pkt_list); + MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->rx_pkt_list) { + PKTLIST_FINI(handle->rx_pkt_list); + MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->tx_pkt_list) { + PKTLIST_FINI(handle->tx_pkt_list); + MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle) { + MFREE(osh, handle, sizeof(pktlists_t)); + } +} + +/** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */ +static INLINE uint32 +dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32, + dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma, + dhd_pkttype_t pkttype) +{ + pktlists_t *handle = (pktlists_t *) map; + ASSERT(pktptr32 != NULL); + DHD_PKT_SET_DMA_LEN(pktptr32, dma_len); + DHD_PKT_SET_DMAH(pktptr32, dmah); + DHD_PKT_SET_PA(pktptr32, pa); + DHD_PKT_SET_SECDMA(pktptr32, secdma); + + if (pkttype == PKTTYPE_DATA_TX) { + PKTLIST_ENQ(handle->tx_pkt_list, pktptr32); + } else if (pkttype == PKTTYPE_DATA_RX) { + PKTLIST_ENQ(handle->rx_pkt_list, pktptr32); + } else { + PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32); + } + + return DHD_PKTID32(pktptr32); +} + +/** Convert a pktid to pktptr and retrieve saved dma parameters from packet */ +static INLINE void * +dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32, + dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma, + dhd_pkttype_t pkttype) +{ + pktlists_t *handle = (pktlists_t *) map; + void *pktptr32; + + ASSERT(pktid32 != 0U); + pktptr32 = DHD_PKTPTR32(pktid32); + *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32); + *dmah = DHD_PKT_GET_DMAH(pktptr32); + *pa = DHD_PKT_GET_PA(pktptr32); + *secdma 
= DHD_PKT_GET_SECDMA(pktptr32); + + if (pkttype == PKTTYPE_DATA_TX) { + PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32); + } else if (pkttype == PKTTYPE_DATA_RX) { + PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32); + } else { + PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32); + } + + return pktptr32; +} + +#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) DHD_PKTID32(pkt) + +#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \ + ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \ + dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \ + (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \ + }) + +#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \ + ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \ + dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \ + (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \ + }) + +#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \ + dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **)&secdma, (dhd_pkttype_t)(pkttype)); \ + }) + +#define DHD_PKTID_AVAIL(map) (~0) + +#endif /* ! DHD_PCIE_PKTID */ + +/* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */ + + +/** + * The PCIE FD protocol layer is constructed in two phases: + * Phase 1. dhd_prot_attach() + * Phase 2. dhd_prot_init() + * + * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields. + * All Common rings are allose attached (msgbuf_ring_t objects are allocated + * with DMA-able buffers). + * All dhd_dma_buf_t objects are also allocated here. + * + * As dhd_prot_attach is invoked prior to the pcie_shared object is read, any + * initialization of objects that requires information advertized by the dongle + * may not be performed here. + * E.g. 
the number of TxPost flowrings is not known at this point, neither do + * we know which form of D2H DMA sync mechanism is advertized by the dongle, or
+ D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n", + __FUNCTION__)); + goto fail; + + } + + /* Ring 4: D2H Receive Complete */ + if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl", + D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n", + __FUNCTION__)); + goto fail; + + } + + /* + * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able + * buffers for flowrings will be instantiated, in dhd_prot_init() . + * See dhd_prot_flowrings_pool_attach() + */ + /* ioctl response buffer */ + if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) { + goto fail; + } + + /* IOCTL request buffer */ + if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) { + goto fail; + } + + /* Scratch buffer for dma rx offset */ + if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) { + goto fail; + } + + /* scratch buffer bus throughput measurement */ + if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) { + goto fail; + } + +#ifdef DHD_RX_CHAINING + dhd_rxchain_reset(&prot->rxchain); +#endif + +#if defined(DHD_LB) + + /* Initialize the work queues to be used by the Load Balancing logic */ +#if defined(DHD_LB_TXC) + { + void *buffer; + buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ); + bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons, + buffer, DHD_LB_WORKQ_SZ); + prot->tx_compl_prod_sync = 0; + DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n", + __FUNCTION__, buffer, DHD_LB_WORKQ_SZ)); + } +#endif /* DHD_LB_TXC */ + +#if defined(DHD_LB_RXC) + { + void *buffer; + buffer = MALLOC(dhd->osh, sizeof(uint32) * DHD_LB_WORKQ_SZ); + bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons, + buffer, DHD_LB_WORKQ_SZ); + prot->rx_compl_prod_sync = 0; + 
DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n", + __FUNCTION__, buffer, DHD_LB_WORKQ_SZ)); + } +#endif /* DHD_LB_RXC */ + +#endif /* DHD_LB */ + + return BCME_OK; + +fail: + +#ifndef CONFIG_DHD_USE_STATIC_BUF + if (prot != NULL) { + dhd_prot_detach(dhd); + } +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + + return BCME_NOMEM; +} /* dhd_prot_attach */ + + +/** + * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has + * completed it's initialization of the pcie_shared structure, we may now fetch + * the dongle advertized features and adjust the protocol layer accordingly. + * + * dhd_prot_init() may be invoked again after a dhd_prot_reset(). + */ +int +dhd_prot_init(dhd_pub_t *dhd) +{ + sh_addr_t base_addr; + dhd_prot_t *prot = dhd->prot; + + /* PKTID handle INIT */ + if (prot->pktid_map_handle != NULL) { + DHD_ERROR(("%s: pktid_map_handle already set!\n", __FUNCTION__)); + ASSERT(0); + return BCME_ERROR; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (prot->pktid_map_handle_ioctl != NULL) { + DHD_ERROR(("%s: pktid_map_handle_ioctl already set!\n", __FUNCTION__)); + ASSERT(0); + return BCME_ERROR; + } +#endif /* IOCTLRESP_USE_CONSTMEM */ + + prot->pktid_map_handle = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_ITEMS, PKTID_MAP_HANDLE); + if (prot->pktid_map_handle == NULL) { + DHD_ERROR(("%s: Unable to map packet id's\n", __FUNCTION__)); + ASSERT(0); + return BCME_NOMEM; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd, + DHD_FLOWRING_MAX_IOCTLRESPBUF_POST, PKTID_MAP_HANDLE_IOCTL); + if (prot->pktid_map_handle_ioctl == NULL) { + DHD_ERROR(("%s: Unable to map ioctl response buffers\n", __FUNCTION__)); + ASSERT(0); + return BCME_NOMEM; + } +#endif /* IOCTLRESP_USE_CONSTMEM */ + + /* Max pkts in ring */ + prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM; + + DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count)); + + /* Read max rx packets supported by dongle */ + 
dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0); + if (prot->max_rxbufpost == 0) { + /* This would happen if the dongle firmware is not */ + /* using the latest shared structure template */ + prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST; + } + DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost)); + + /* Initialize. bzero() would blow away the dma pointers. */ + prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST; + prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST; + + prot->cur_ioctlresp_bufs_posted = 0; + prot->active_tx_count = 0; + prot->data_seq_no = 0; + prot->ioctl_seq_no = 0; + prot->rxbufpost = 0; + prot->cur_event_bufs_posted = 0; + prot->ioctl_state = 0; + prot->curr_ioctl_cmd = 0; + prot->ioctl_received = IOCTL_WAIT; + + prot->dmaxfer.srcmem.va = NULL; + prot->dmaxfer.dstmem.va = NULL; + prot->dmaxfer.in_progress = FALSE; + + prot->metadata_dbg = FALSE; + prot->rx_metadata_offset = 0; + prot->tx_metadata_offset = 0; + prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT; + + prot->ioctl_trans_id = 0; + + /* Register the interrupt function upfront */ + /* remove corerev checks in data path */ + prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus); + + /* Initialize Common MsgBuf Rings */ + + dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn); + dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn); + dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln); + dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln); + dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln); + +#if defined(PCIE_D2H_SYNC) + dhd_prot_d2h_sync_init(dhd); +#endif /* PCIE_D2H_SYNC */ + + dhd_prot_h2d_sync_init(dhd); + + /* init the scratch buffer */ + dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + D2H_DMA_SCRATCH_BUF, 0); + dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len, + sizeof(prot->d2h_dma_scratch_buf.len), 
D2H_DMA_SCRATCH_BUF_LEN, 0); + + /* If supported by the host, indicate the memory block + * for completion writes / submission reads to shared space + */ + if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + D2H_DMA_INDX_WR_BUF, 0); + dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + H2D_DMA_INDX_RD_BUF, 0); + } + + if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + H2D_DMA_INDX_WR_BUF, 0); + dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + D2H_DMA_INDX_RD_BUF, 0); + } + + /* + * If the DMA-able buffers for flowring needs to come from a specific + * contiguous memory region, then setup prot->flowrings_dma_buf here. + * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from + * this contiguous memory region, for each of the flowrings. + */ + + /* Pre-allocate pool of msgbuf_ring for flowrings */ + if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) { + return BCME_ERROR; + } + + /* Host should configure soft doorbells if needed ... here */ + + /* Post to dongle host configured soft doorbells */ + dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd); + + /* Post buffers for packet reception and ioctl/event responses */ + dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ + dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); + dhd_msgbuf_rxbuf_post_event_bufs(dhd); + + return BCME_OK; +} /* dhd_prot_init */ + + +/** + * dhd_prot_detach - PCIE FD protocol layer destructor. 
+ * Unlink, frees allocated protocol memory (including dhd_prot) + */ +void +dhd_prot_detach(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + + /* Stop the protocol module */ + if (prot) { + + /* free up all DMA-able buffers allocated during prot attach/init */ + + dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf); + dhd_dma_buf_free(dhd, &prot->retbuf); /* ioctl return buffer */ + dhd_dma_buf_free(dhd, &prot->ioctbuf); + dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf); + + /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */ + dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf); + dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf); + dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf); + dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf); + + /* Common MsgBuf Rings */ + dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn); + dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn); + dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln); + dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln); + dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln); + + /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */ + dhd_prot_flowrings_pool_detach(dhd); + + DHD_NATIVE_TO_PKTID_FINI(dhd, dhd->prot->pktid_map_handle); + +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t)); +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + +#if defined(DHD_LB) +#if defined(DHD_LB_TXC) + if (prot->tx_compl_prod.buffer) { + MFREE(dhd->osh, prot->tx_compl_prod.buffer, + sizeof(void*) * DHD_LB_WORKQ_SZ); + } +#endif /* DHD_LB_TXC */ +#if defined(DHD_LB_RXC) + if (prot->rx_compl_prod.buffer) { + MFREE(dhd->osh, prot->rx_compl_prod.buffer, + sizeof(void*) * DHD_LB_WORKQ_SZ); + } +#endif /* DHD_LB_RXC */ +#endif /* DHD_LB */ + + dhd->prot = NULL; + } +} /* dhd_prot_detach */ + + +/** + * dhd_prot_reset - Reset the protocol layer without freeing any objects. 
This + * may be invoked to soft reboot the dongle, without having to detach and attach + * the entire protocol layer. + * + * After dhd_prot_reset(), dhd_prot_init() may be invoked without going through + * a dhd_prot_attach() phase. + */ +void +dhd_prot_reset(dhd_pub_t *dhd) +{ + struct dhd_prot *prot = dhd->prot; + + DHD_TRACE(("%s\n", __FUNCTION__)); + + if (prot == NULL) { + return; + } + + dhd_prot_flowrings_pool_reset(dhd); + + dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn); + dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn); + dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln); + dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln); + dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln); + + dhd_dma_buf_reset(dhd, &prot->retbuf); + dhd_dma_buf_reset(dhd, &prot->ioctbuf); + dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf); + dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf); + dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf); + dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf); + dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf); + + + prot->rx_metadata_offset = 0; + prot->tx_metadata_offset = 0; + + prot->rxbufpost = 0; + prot->cur_event_bufs_posted = 0; + prot->cur_ioctlresp_bufs_posted = 0; + + prot->active_tx_count = 0; + prot->data_seq_no = 0; + prot->ioctl_seq_no = 0; + prot->ioctl_state = 0; + prot->curr_ioctl_cmd = 0; + prot->ioctl_received = IOCTL_WAIT; + prot->ioctl_trans_id = 0; + + /* dhd_flow_rings_init is located at dhd_bus_start, + * so when stopping bus, flowrings shall be deleted + */ + if (dhd->flow_rings_inited) { + dhd_flow_rings_deinit(dhd); + } + + if (prot->pktid_map_handle) { + DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_map_handle); + prot->pktid_map_handle = NULL; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (prot->pktid_map_handle_ioctl) { + DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl); + prot->pktid_map_handle_ioctl = NULL; + } +#endif /* IOCTLRESP_USE_CONSTMEM */ +} /* dhd_prot_reset */ + + +void 
+dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset) +{ + dhd_prot_t *prot = dhd->prot; + prot->rx_dataoffset = rx_offset; +} + +/** + * Initialize protocol: sync w/dongle state. + * Sets dongle media info (iswl, drv_version, mac address). + */ +int +dhd_sync_with_dongle(dhd_pub_t *dhd) +{ + int ret = 0; + wlc_rev_info_t revinfo; + + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); + + + +#ifdef DHD_FW_COREDUMP + /* Check the memdump capability */ + dhd_get_memdump_info(dhd); +#endif /* DHD_FW_COREDUMP */ +#ifdef BCMASSERT_LOG + dhd_get_assert_info(dhd); +#endif /* BCMASSERT_LOG */ + + /* Get the device rev info */ + memset(&revinfo, 0, sizeof(revinfo)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0); + if (ret < 0) { + DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__)); + goto done; + } + DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__, + revinfo.deviceid, revinfo.vendorid, revinfo.chipnum)); + + dhd_process_cid_mac(dhd, TRUE); + + ret = dhd_preinit_ioctls(dhd); + + if (!ret) { + dhd_process_cid_mac(dhd, FALSE); + } + + /* Always assumes wl for now */ + dhd->iswl = TRUE; +done: + return ret; +} /* dhd_sync_with_dongle */ + +#if defined(DHD_LB) + +/* DHD load balancing: deferral of work to another online CPU */ + +/* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */ +extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp); +extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp); +extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp); + +extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx); + +/** + * dhd_lb_dispatch - load balance by dispatch work to other CPU cores + * Note: rx_compl_tasklet is dispatched explicitly. 
+ */ +static INLINE void +dhd_lb_dispatch(dhd_pub_t *dhdp, uint16 ring_idx) +{ + switch (ring_idx) { + +#if defined(DHD_LB_TXC) + case BCMPCIE_D2H_MSGRING_TX_COMPLETE: + bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */ + dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */ + break; +#endif /* DHD_LB_TXC */ + + case BCMPCIE_D2H_MSGRING_RX_COMPLETE: + { +#if defined(DHD_LB_RXC) + dhd_prot_t *prot = dhdp->prot; + /* Schedule the takslet only if we have to */ + if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) { + /* flush WR index */ + bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod); + dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */ + } +#endif /* DHD_LB_RXC */ +#if defined(DHD_LB_RXP) + dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */ +#endif /* DHD_LB_RXP */ + break; + } + default: + break; + } +} + + +#if defined(DHD_LB_TXC) +/** + * DHD load balanced tx completion tasklet handler, that will perform the + * freeing of packets on the selected CPU. Packet pointers are delivered to + * this tasklet via the tx complete workq. 
+ */ +void +dhd_lb_tx_compl_handler(unsigned long data) +{ + int elem_ix; + void *pkt, **elem; + dmaaddr_t pa; + uint32 pa_len; + dhd_pub_t *dhd = (dhd_pub_t *)data; + dhd_prot_t *prot = dhd->prot; + bcm_workq_t *workq = &prot->tx_compl_cons; + uint32 count = 0; + + DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd); + + while (1) { + elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + + if (elem_ix == BCM_RING_EMPTY) { + break; + } + + elem = WORKQ_ELEMENT(void *, workq, elem_ix); + pkt = *elem; + + DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt)); + + OSL_PREFETCH(PKTTAG(pkt)); + OSL_PREFETCH(pkt); + + pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt)); + pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt)); + + DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0); + +#if defined(BCMPCIE) + dhd_txcomplete(dhd, pkt, true); +#endif + + PKTFREE(dhd->osh, pkt, TRUE); + count++; + } + + /* smp_wmb(); */ + bcm_workq_cons_sync(workq); + DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count); +} +#endif /* DHD_LB_TXC */ + +#if defined(DHD_LB_RXC) +void +dhd_lb_rx_compl_handler(unsigned long data) +{ + dhd_pub_t *dhd = (dhd_pub_t *)data; + bcm_workq_t *workq = &dhd->prot->rx_compl_cons; + + DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd); + + dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */ + bcm_workq_cons_sync(workq); +} +#endif /* DHD_LB_RXC */ + +#endif /* DHD_LB */ + +#define DHD_DBG_SHOW_METADATA 0 + +#if DHD_DBG_SHOW_METADATA +static void BCMFASTPATH +dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len) +{ + uint8 tlv_t; + uint8 tlv_l; + uint8 *tlv_v = (uint8 *)ptr; + + if (len <= BCMPCIE_D2H_METADATA_HDRLEN) + return; + + len -= BCMPCIE_D2H_METADATA_HDRLEN; + tlv_v += BCMPCIE_D2H_METADATA_HDRLEN; + + while (len > TLV_HDR_LEN) { + tlv_t = tlv_v[TLV_TAG_OFF]; + tlv_l = tlv_v[TLV_LEN_OFF]; + + len -= TLV_HDR_LEN; + tlv_v += TLV_HDR_LEN; + if (len < tlv_l) + break; + if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER)) + break; + + switch (tlv_t) { + case 
WLFC_CTL_TYPE_TXSTATUS: { + uint32 txs; + memcpy(&txs, tlv_v, sizeof(uint32)); + if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) { + printf("METADATA TX_STATUS: %08x\n", txs); + } else { + wl_txstatus_additional_info_t tx_add_info; + memcpy(&tx_add_info, tlv_v + sizeof(uint32), + sizeof(wl_txstatus_additional_info_t)); + printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]" + " rate = %08x tries = %d - %d\n", txs, + tx_add_info.seq, tx_add_info.entry_ts, + tx_add_info.enq_ts, tx_add_info.last_ts, + tx_add_info.rspec, tx_add_info.rts_cnt, + tx_add_info.tx_cnt); + } + } break; + + case WLFC_CTL_TYPE_RSSI: { + if (tlv_l == 1) + printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v); + else + printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n", + (*(tlv_v + 3) << 8) | *(tlv_v + 2), + (int8)(*tlv_v), *(tlv_v + 1)); + } break; + + case WLFC_CTL_TYPE_FIFO_CREDITBACK: + bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l); + break; + + case WLFC_CTL_TYPE_TX_ENTRY_STAMP: + bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l); + break; + + case WLFC_CTL_TYPE_RX_STAMP: { + struct { + uint32 rspec; + uint32 bus_time; + uint32 wlan_time; + } rx_tmstamp; + memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp)); + printf("METADATA RX TIMESTMAP: WLFCTS[%08x - %08x] rate = %08x\n", + rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec); + } break; + + case WLFC_CTL_TYPE_TRANS_ID: + bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l); + break; + + case WLFC_CTL_TYPE_COMP_TXSTATUS: + bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l); + break; + + default: + bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l); + break; + } + + len -= tlv_l; + tlv_v += tlv_l; + } +} +#endif /* DHD_DBG_SHOW_METADATA */ + +static INLINE void BCMFASTPATH +dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send) +{ + if (pkt) { + if (pkttype == PKTTYPE_IOCTL_RX || + pkttype == PKTTYPE_EVENT_RX) { +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhd->osh, 
pkt, send); +#else + PKTFREE(dhd->osh, pkt, send); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } else { + PKTFREE(dhd->osh, pkt, send); + } + } +} + +static INLINE void * BCMFASTPATH +dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid) +{ + void *PKTBUF; + dmaaddr_t pa; + uint32 len; + void *dmah; + void *secdma; + +#ifdef DHD_PCIE_PKTID + if (free_pktid) { + PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, + pktid, pa, len, dmah, secdma, pkttype); + } else { + PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_map_handle, + pktid, pa, len, dmah, secdma, pkttype); + } +#else + PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, pa, + len, dmah, secdma, pkttype); +#endif /* DHD_PCIE_PKTID */ + + if (PKTBUF) { + { + if (SECURE_DMA_ENAB(dhd->osh)) { + SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah, + secdma, 0); + } else { + DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); + } + } + } + + return PKTBUF; +} + +#ifdef IOCTLRESP_USE_CONSTMEM +static INLINE void BCMFASTPATH +dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf) +{ + memset(retbuf, 0, sizeof(dhd_dma_buf_t)); + retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid, + retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX); + + return; +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +static void BCMFASTPATH +dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid) +{ + dhd_prot_t *prot = dhd->prot; + int16 fillbufs; + uint16 cnt = 256; + int retcount = 0; + + fillbufs = prot->max_rxbufpost - prot->rxbufpost; + while (fillbufs >= RX_BUF_BURST) { + cnt--; + if (cnt == 0) { + /* find a better way to reschedule rx buf post if space not available */ + DHD_ERROR(("h2d rx post ring not available to post host buffers \n")); + DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost)); + break; + } + + /* Post in a burst of 32 buffers at a time */ + fillbufs = 
MIN(fillbufs, RX_BUF_BURST); + + /* Post buffers */ + retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid); + + if (retcount >= 0) { + prot->rxbufpost += (uint16)retcount; +#ifdef DHD_LB_RXC + /* dhd_prot_rxbuf_post returns the number of buffers posted */ + DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount); +#endif /* DHD_LB_RXC */ + /* how many more to post */ + fillbufs = prot->max_rxbufpost - prot->rxbufpost; + } else { + /* Make sure we don't run loop any further */ + fillbufs = 0; + } + } +} + +/** Post 'count' no of rx buffers to dongle */ +static int BCMFASTPATH +dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid) +{ + void *p; + uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + uint8 *rxbuf_post_tmp; + host_rxbuf_post_t *rxbuf_post; + void *msg_start; + dmaaddr_t pa; + uint32 pktlen; + uint8 i = 0; + uint16 alloced = 0; + unsigned long flags; + uint32 pktid; + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->h2dring_rxp_subn; + + DHD_GENERAL_LOCK(dhd, flags); + + /* Claim space for exactly 'count' no of messages, for mitigation purpose */ + msg_start = (void *) + dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE); + + DHD_GENERAL_UNLOCK(dhd, flags); + + if (msg_start == NULL) { + DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__)); + return -1; + } + /* if msg_start != NULL, we should have alloced space for atleast 1 item */ + ASSERT(alloced > 0); + + rxbuf_post_tmp = (uint8*)msg_start; + + /* loop through each allocated message in the rxbuf post msgbuf_ring */ + for (i = 0; i < alloced; i++) { + rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp; + /* Create a rx buffer */ + if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) { + DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__)); + dhd->rx_pktgetfail++; + break; + } + + pktlen = PKTLEN(dhd->osh, p); + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), 
pktlen, + DMA_RX, p, 0, ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); + } + + if (PHYSADDRISZERO(pa)) { + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + + PKTFREE(dhd->osh, p, FALSE); + DHD_ERROR(("Invalid phyaddr 0\n")); + ASSERT(0); + break; + } + + PKTPULL(dhd->osh, p, prot->rx_metadata_offset); + pktlen = PKTLEN(dhd->osh, p); + + /* Common msg header */ + rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST; + rxbuf_post->cmn_hdr.if_id = 0; + rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + +#if defined(DHD_LB_RXC) + if (use_rsv_pktid == TRUE) { + bcm_workq_t *workq = &prot->rx_compl_cons; + int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + if (elem_ix == BCM_RING_EMPTY) { + DHD_ERROR(("%s rx_compl_cons ring is empty\n", __FUNCTION__)); + pktid = DHD_PKTID_INVALID; + goto alloc_pkt_id; + } else { + uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix); + pktid = *elem; + } + + /* Now populate the previous locker with valid information */ + if (pktid != DHD_PKTID_INVALID) { + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, p, pktid, + pa, pktlen, DMA_RX, NULL, ring->dma_buf.secdma, + PKTTYPE_DATA_RX); + } + } else +#endif /* DHD_LB_RXC */ + { +#if defined(DHD_LB_RXC) +alloc_pkt_id: +#endif +#if defined(DHD_PCIE_PKTID) + /* get the lock before calling DHD_NATIVE_TO_PKTID */ + DHD_GENERAL_LOCK(dhd, flags); +#endif + pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_map_handle, p, pa, + pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX); + +#if defined(DHD_PCIE_PKTID) + /* free lock */ + DHD_GENERAL_UNLOCK(dhd, flags); + + if (pktid == 
DHD_PKTID_INVALID) { + + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + + PKTFREE(dhd->osh, p, FALSE); + DHD_ERROR(("Pktid pool depleted.\n")); + break; + } +#endif /* DHD_PCIE_PKTID */ + } + + rxbuf_post->data_buf_len = htol16((uint16)pktlen); + rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->data_buf_addr.low_addr = + htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset); + + if (prot->rx_metadata_offset) { + rxbuf_post->metadata_buf_len = prot->rx_metadata_offset; + rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + } else { + rxbuf_post->metadata_buf_len = 0; + rxbuf_post->metadata_buf_addr.high_addr = 0; + rxbuf_post->metadata_buf_addr.low_addr = 0; + } + +#if defined(DHD_PKTID_AUDIT_RING) + DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + + /* Move rxbuf_post_tmp to next item */ + rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len; + } + + if (i < alloced) { + if (ring->wr < (alloced - i)) { + ring->wr = ring->max_items - (alloced - i); + } else { + ring->wr -= (alloced - i); + } + + alloced = i; + } + + /* Update ring's WR index and ring doorbell to dongle */ + if (alloced > 0) { + dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced); + } + + return alloced; +} /* dhd_prot_rxbuf_post */ + +#ifdef IOCTLRESP_USE_CONSTMEM +static int +alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) +{ + int err; + memset(retbuf, 0, sizeof(dhd_dma_buf_t)); + + if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) { + DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err)); + ASSERT(0); + 
return BCME_NOMEM; + } + + return BCME_OK; +} + +static void +free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) +{ + /* retbuf (declared on stack) not fully populated ... */ + if (retbuf->va) { + uint32 dma_pad; + dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0; + retbuf->len = IOCT_RETBUF_SIZE; + retbuf->_alloced = retbuf->len + dma_pad; + /* JIRA:SWWLAN-70021 The pa value would be overwritten by the dongle. + * Need to reassign before free to pass the check in dhd_dma_buf_audit(). + */ + retbuf->pa = DMA_MAP(dhd->osh, retbuf->va, retbuf->len, DMA_RX, NULL, NULL); + } + + dhd_dma_buf_free(dhd, retbuf); + return; +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +static int +dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) +{ + void *p; + uint16 pktsz; + ioctl_resp_evt_buf_post_msg_t *rxbuf_post; + dmaaddr_t pa; + uint32 pktlen; + dhd_prot_t *prot = dhd->prot; + uint16 alloced = 0; + unsigned long flags; + dhd_dma_buf_t retbuf; + void *dmah = NULL; + uint32 pktid; + void *map_handle; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); + return -1; + } + + memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); + + if (event_buf) { + /* Allocate packet for event buffer post */ + pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + } else { + /* Allocate packet for ctrl/ioctl buffer post */ + pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (!event_buf) { + if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) { + DHD_ERROR(("Could not allocate IOCTL response buffer\n")); + return -1; + } + ASSERT(retbuf.len == IOCT_RETBUF_SIZE); + p = retbuf.va; + pktlen = retbuf.len; + pa = retbuf.pa; + dmah = retbuf.dmah; + } else +#endif /* IOCTLRESP_USE_CONSTMEM */ + { +#ifdef DHD_USE_STATIC_CTRLBUF + p = PKTGET_STATIC(dhd->osh, pktsz, FALSE); +#else + p = PKTGET(dhd->osh, pktsz, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + if (p == NULL) { + 
DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n", + __FUNCTION__, __LINE__, event_buf ? + "EVENT" : "IOCTL RESP")); + dhd->rx_pktgetfail++; + return -1; + } + + pktlen = PKTLEN(dhd->osh, p); + + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, + DMA_RX, p, 0, ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); + } + + if (PHYSADDRISZERO(pa)) { + DHD_ERROR(("Invalid physaddr 0\n")); + ASSERT(0); + goto free_pkt_return; + } + } + + DHD_GENERAL_LOCK(dhd, flags); + + rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (rxbuf_post == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n", + __FUNCTION__, __LINE__)); + +#ifdef IOCTLRESP_USE_CONSTMEM + if (event_buf) +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + } + goto free_pkt_return; + } + + /* CMN msg header */ + if (event_buf) { + rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST; + } else { + rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (!event_buf) { + map_handle = dhd->prot->pktid_map_handle_ioctl; + pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, + DMA_RX, dmah, ring->dma_buf.secdma, PKTTYPE_IOCTL_RX); + } else +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + map_handle = dhd->prot->pktid_map_handle; + pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, + p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma, + event_buf ? 
PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX); + } + + if (pktid == DHD_PKTID_INVALID) { + if (ring->wr == 0) { + ring->wr = ring->max_items - 1; + } else { + ring->wr--; + } + DHD_GENERAL_UNLOCK(dhd, flags); + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + goto free_pkt_return; + } + +#if defined(DHD_PKTID_AUDIT_RING) + DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + rxbuf_post->cmn_hdr.if_id = 0; + rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + +#if defined(DHD_PCIE_PKTID) + if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) { + if (ring->wr == 0) { + ring->wr = ring->max_items - 1; + } else { + ring->wr--; + } + DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef IOCTLRESP_USE_CONSTMEM + if (event_buf) +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + } + goto free_pkt_return; + } +#endif /* DHD_PCIE_PKTID */ + + rxbuf_post->cmn_hdr.flags = 0; +#ifndef IOCTLRESP_USE_CONSTMEM + rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p)); +#else + rxbuf_post->host_buf_len = htol16((uint16)pktlen); +#endif /* IOCTLRESP_USE_CONSTMEM */ + rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return 1; + +free_pkt_return: +#ifdef IOCTLRESP_USE_CONSTMEM + if (!event_buf) { + free_ioctl_return_buffer(dhd, &retbuf); + } else +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + dhd_prot_packet_free(dhd, p, + event_buf ? 
PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX, + FALSE); + } + + return -1; +} /* dhd_prot_rxbufpost_ctrl */ + +static uint16 +dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post) +{ + uint32 i = 0; + int32 ret_val; + + DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf)); + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); + return 0; + } + + while (i < max_to_post) { + ret_val = dhd_prot_rxbufpost_ctrl(dhd, event_buf); + if (ret_val < 0) { + break; + } + i++; + } + DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", i, event_buf)); + return (uint16)i; +} + +static void +dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int max_to_post; + + DHD_INFO(("ioctl resp buf post\n")); + max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted; + if (max_to_post <= 0) { + DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n", + __FUNCTION__)); + return; + } + prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, + FALSE, max_to_post); +} + +static void +dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int max_to_post; + + max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted; + if (max_to_post <= 0) { + DHD_INFO(("%s: Cannot post more than max event buffers\n", + __FUNCTION__)); + return; + } + prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, + TRUE, max_to_post); +} + +/** called when DHD needs to check for 'receive complete' messages from the dongle */ +bool BCMFASTPATH +dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) +{ + bool more = TRUE; + uint n = 0; + msgbuf_ring_t *ring = &dhd->prot->d2hring_rx_cpln; + + /* Process all the messages - DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd->hang_was_sent) { + more = FALSE; + break; + } + + /* Get the address of 
the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + if (msg_addr == NULL) { + more = FALSE; + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: process %s msg addr %p len %d\n", + __FUNCTION__, ring->name, msg_addr, msg_len)); + } + + /* Update read pointer */ + dhd_prot_upd_read_idx(dhd, ring); + + /* After batch processing, check RX bound */ + n += msg_len / ring->item_len; + if (n >= bound) { + break; + } + } + + return more; +} + +/** + * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring) + */ +void +dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring) +{ + msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring; + + /* Update read pointer */ + if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + } + + DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n", + ring->idx, flowid, ring->wr, ring->rd)); + + /* Need more logic here, but for now use it directly */ + dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */ +} + +/** called when DHD needs to check for 'transmit complete' messages from the dongle */ +bool BCMFASTPATH +dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound) +{ + bool more = TRUE; + uint n = 0; + msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln; + + /* Process all the messages - DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd->hang_was_sent) { + more = FALSE; + break; + } + + /* Get the address of the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + if (msg_addr == NULL) { + more = FALSE; + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + if (dhd_prot_process_msgtype(dhd, 
ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: process %s msg addr %p len %d\n", + __FUNCTION__, ring->name, msg_addr, msg_len)); + } + + /* Write to dngl rd ptr */ + dhd_prot_upd_read_idx(dhd, ring); + + /* After batch processing, check bound */ + n += msg_len / ring->item_len; + if (n >= bound) { + break; + } + } + + return more; +} + +/** called when DHD needs to check for 'ioctl complete' messages from the dongle */ +int BCMFASTPATH +dhd_prot_process_ctrlbuf(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln; + + /* Process all the messages - DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd->hang_was_sent) { + break; + } + + /* Get the address of the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + if (msg_addr == NULL) { + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: process %s msg addr %p len %d\n", + __FUNCTION__, ring->name, msg_addr, msg_len)); + } + + /* Write to dngl rd ptr */ + dhd_prot_upd_read_idx(dhd, ring); + } + + return 0; +} + +/** + * Consume messages out of the D2H ring. Ensure that the message's DMA to host + * memory has completed, before invoking the message handler via a table lookup + * of the cmn_msg_hdr::msg_type. 
+ */ +static int BCMFASTPATH +dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len) +{ + int buf_len = len; + uint16 item_len; + uint8 msg_type; + cmn_msg_hdr_t *msg = NULL; + int ret = BCME_OK; + + ASSERT(ring); + item_len = ring->item_len; + if (item_len == 0) { + DHD_ERROR(("%s: ringidx %d item_len %d buf_len %d\n", + __FUNCTION__, ring->idx, item_len, buf_len)); + return BCME_ERROR; + } + + while (buf_len > 0) { + if (dhd->hang_was_sent) { + ret = BCME_ERROR; + goto done; + } + + msg = (cmn_msg_hdr_t *)buf; + + /* + * Update the curr_rd to the current index in the ring, from where + * the work item is fetched. This way if the fetched work item + * fails in LIVELOCK, we can print the exact read index in the ring + * that shows up the corrupted work item. + */ + if ((ring->curr_rd + 1) >= ring->max_items) { + ring->curr_rd = 0; + } else { + ring->curr_rd += 1; + } + +#if defined(PCIE_D2H_SYNC) + /* Wait until DMA completes, then fetch msg_type */ + msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len); +#else + msg_type = msg->msg_type; +#endif /* !PCIE_D2H_SYNC */ + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(buf + item_len); + + DHD_INFO(("msg_type %d item_len %d buf_len %d\n", + msg_type, item_len, buf_len)); + + if (msg_type == MSG_TYPE_LOOPBACK) { + bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len); + DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len)); + } + + ASSERT(msg_type < DHD_PROT_FUNCS); + if (msg_type >= DHD_PROT_FUNCS) { + DHD_ERROR(("%s: msg_type %d item_len %d buf_len %d\n", + __FUNCTION__, msg_type, item_len, buf_len)); + ret = BCME_ERROR; + goto done; + } + + if (table_lookup[msg_type]) { + table_lookup[msg_type](dhd, buf); + } + + if (buf_len < item_len) { + ret = BCME_ERROR; + goto done; + } + buf_len = buf_len - item_len; + buf = buf + item_len; + } + +done: + +#ifdef DHD_RX_CHAINING + dhd_rxchain_commit(dhd); +#endif +#if defined(DHD_LB) + dhd_lb_dispatch(dhd, ring->idx); +#endif + 
return ret; +} /* dhd_prot_process_msgtype */ + +static void +dhd_prot_noop(dhd_pub_t *dhd, void *msg) +{ + return; +} + +/** called on MSG_TYPE_RING_STATUS message received from dongle */ +static void +dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg) +{ + pcie_ring_status_t *ring_status = (pcie_ring_status_t *)msg; + DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n", + ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status, + ring_status->compl_hdr.flow_ring_id, ring_status->write_idx)); + /* How do we track this to pair it with ??? */ + return; +} + +/** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */ +static void +dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg) +{ + pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg; + DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n", + gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status, + gen_status->compl_hdr.flow_ring_id)); + + /* How do we track this to pair it with ??? */ + return; +} + +/** + * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the + * dongle received the ioctl message in dongle memory. 
+ */ +static void +dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg) +{ + uint32 pktid; + ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg; + unsigned long flags; + + pktid = ltoh32(ioct_ack->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + /* Skip DHD_IOCTL_REQ_PKTID = 0xFFFE */ + if (pktid != DHD_IOCTL_REQ_PKTID) { + if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid, + DHD_TEST_IS_ALLOC) == BCME_ERROR) { + prhex("dhd_prot_ioctack_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + } + } +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_GENERAL_LOCK(dhd, flags); + if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) && + (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { + dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING; + } else { + DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n", + __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); + prhex("dhd_prot_ioctack_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + } + DHD_GENERAL_UNLOCK(dhd, flags); + + DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n", + ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status, + ioct_ack->compl_hdr.flow_ring_id)); + if (ioct_ack->compl_hdr.status != 0) { + DHD_ERROR(("got an error status for the ioctl request...need to handle that\n")); + } +} + +/** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */ +static void +dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg) +{ + dhd_prot_t *prot = dhd->prot; + uint32 pkt_id, xt_id; + ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg; + void *pkt; + unsigned long flags; + dhd_dma_buf_t retbuf; + + memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); + + pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + { + int ret; +#ifndef IOCTLRESP_USE_CONSTMEM + ret = DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pkt_id, + DHD_DUPLICATE_FREE); +#else + ret = DHD_PKTID_AUDIT(dhd, 
prot->pktid_map_handle_ioctl, pkt_id, + DHD_DUPLICATE_FREE); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + if (ret == BCME_ERROR) { + prhex("dhd_prot_ioctcmplt_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + } + } +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_GENERAL_LOCK(dhd, flags); + if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) || + !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { + DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n", + __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); + prhex("dhd_prot_ioctcmplt_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + DHD_GENERAL_UNLOCK(dhd, flags); + return; + } +#ifndef IOCTLRESP_USE_CONSTMEM + pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE); +#else + dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf); + pkt = retbuf.va; +#endif /* !IOCTLRESP_USE_CONSTMEM */ + if (!pkt) { + prot->ioctl_state = 0; + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__)); + return; + } + DHD_GENERAL_UNLOCK(dhd, flags); + + prot->ioctl_resplen = ltoh16(ioct_resp->resp_len); + prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status); + xt_id = ltoh16(ioct_resp->trans_id); + if (xt_id != prot->ioctl_trans_id) { + ASSERT(0); + goto exit; + } + + DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n", + pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen)); + + if (prot->ioctl_resplen > 0) { +#ifndef IOCTLRESP_USE_CONSTMEM + bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen); +#else + bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + } + + /* wake up any dhd_os_ioctl_resp_wait() */ + dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS); + +exit: +#ifndef IOCTLRESP_USE_CONSTMEM + dhd_prot_packet_free(dhd, pkt, + PKTTYPE_IOCTL_RX, FALSE); +#else + free_ioctl_return_buffer(dhd, &retbuf); +#endif /* !IOCTLRESP_USE_CONSTMEM */ +} + +/** called on 
MSG_TYPE_TX_STATUS message received from dongle */ +static void BCMFASTPATH +dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg) +{ + dhd_prot_t *prot = dhd->prot; + host_txbuf_cmpl_t * txstatus; + unsigned long flags; + uint32 pktid; + void *pkt = NULL; + dmaaddr_t pa; + uint32 len; + void *dmah; + void *secdma; + + /* locks required to protect circular buffer accesses */ + DHD_GENERAL_LOCK(dhd, flags); + + txstatus = (host_txbuf_cmpl_t *)msg; + pktid = ltoh32(txstatus->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid, + DHD_DUPLICATE_FREE) == BCME_ERROR) { + prhex("dhd_prot_txstatus_process:", + (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE); + } +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_INFO(("txstatus for pktid 0x%04x\n", pktid)); + if (prot->active_tx_count) { + prot->active_tx_count--; + + /* Release the Lock when no more tx packets are pending */ + if (prot->active_tx_count == 0) + DHD_OS_WAKE_UNLOCK(dhd); + + } else { + DHD_ERROR(("Extra packets are freed\n")); + } + + ASSERT(pktid != 0); + +#if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA) + { + int elem_ix; + void **elem; + bcm_workq_t *workq; + + pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, + pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX); + + workq = &prot->tx_compl_prod; + /* + * Produce the packet into the tx_compl workq for the tx compl tasklet + * to consume. 
+ */ + OSL_PREFETCH(PKTTAG(pkt)); + + /* fetch next available slot in workq */ + elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + + DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa); + DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len); + + if (elem_ix == BCM_RING_FULL) { + DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n")); + goto workq_ring_full; + } + + elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix); + *elem = pkt; + + smp_wmb(); + + /* Sync WR index to consumer if the SYNC threshold has been reached */ + if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) { + bcm_workq_prod_sync(workq); + prot->tx_compl_prod_sync = 0; + } + + DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n", + __FUNCTION__, pkt, prot->tx_compl_prod_sync)); + + DHD_GENERAL_UNLOCK(dhd, flags); + return; + } + +workq_ring_full: + +#endif /* !DHD_LB_TXC */ + + /* + * We can come here if no DHD_LB_TXC is enabled and in case where DHD_LB_TXC is + * defined but the tx_compl queue is full. + */ + if (pkt == NULL) { + pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, + pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX); + } + + if (pkt) { + if (SECURE_DMA_ENAB(dhd->osh)) { + int offset = 0; + BCM_REFERENCE(offset); + + if (dhd->prot->tx_metadata_offset) + offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN; + SECURE_DMA_UNMAP(dhd->osh, (uint) pa, + (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah, + secdma, offset); + } else { + DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); + } +#if defined(BCMPCIE) + dhd_txcomplete(dhd, pkt, true); +#endif + +#if DHD_DBG_SHOW_METADATA + if (dhd->prot->metadata_dbg && + dhd->prot->tx_metadata_offset && txstatus->metadata_len) { + uchar *ptr; + /* The Ethernet header of TX frame was copied and removed. + * Here, move the data pointer forward by Ethernet header size. 
+ */ + PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN); + ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset); + bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len); + dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len); + } +#endif /* DHD_DBG_SHOW_METADATA */ + PKTFREE(dhd->osh, pkt, TRUE); + DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id, + txstatus->tx_status); + } + + DHD_GENERAL_UNLOCK(dhd, flags); + + return; +} /* dhd_prot_txstatus_process */ + +/** called on MSG_TYPE_WL_EVENT message received from dongle */ +static void +dhd_prot_event_process(dhd_pub_t *dhd, void *msg) +{ + wlevent_req_msg_t *evnt; + uint32 bufid; + uint16 buflen; + int ifidx = 0; + void* pkt; + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + + /* Event complete header */ + evnt = (wlevent_req_msg_t *)msg; + bufid = ltoh32(evnt->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, bufid, + DHD_DUPLICATE_FREE) == BCME_ERROR) { + prhex("dhd_prot_event_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + } +#endif /* DHD_PKTID_AUDIT_RING */ + + buflen = ltoh16(evnt->event_data_len); + + ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr); + + /* Post another rxbuf to the device */ + if (prot->cur_event_bufs_posted) { + prot->cur_event_bufs_posted--; + } + dhd_msgbuf_rxbuf_post_event_bufs(dhd); + + /* locks required to protect pktid_map */ + DHD_GENERAL_LOCK(dhd, flags); + pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE); + DHD_GENERAL_UNLOCK(dhd, flags); + + if (!pkt) { + return; + } + + /* DMA RX offset updated through shared area */ + if (dhd->prot->rx_dataoffset) { + PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); + } + + PKTSETLEN(dhd->osh, pkt, buflen); + + dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); +} + +/** called on MSG_TYPE_RX_CMPLT message received from dongle */ +static void BCMFASTPATH +dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg) +{ + 
host_rxbuf_cmpl_t *rxcmplt_h; + uint16 data_offset; /* offset at which data starts */ + void *pkt; + unsigned long flags; + uint ifidx; + uint32 pktid; +#if defined(DHD_LB_RXC) + const bool free_pktid = FALSE; +#else + const bool free_pktid = TRUE; +#endif /* DHD_LB_RXC */ + + /* RXCMPLT HDR */ + rxcmplt_h = (host_rxbuf_cmpl_t *)msg; + + /* offset from which data starts is populated in rxstatus0 */ + data_offset = ltoh16(rxcmplt_h->data_offset); + + pktid = ltoh32(rxcmplt_h->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid, + DHD_DUPLICATE_FREE) == BCME_ERROR) { + prhex("dhd_prot_rxcmplt_process:", + (uchar *)msg, D2HRING_RXCMPLT_ITEMSIZE); + } +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_GENERAL_LOCK(dhd, flags); + pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_DATA_RX, free_pktid); + DHD_GENERAL_UNLOCK(dhd, flags); + + if (!pkt) { + return; + } + + /* Post another set of rxbufs to the device */ + dhd_prot_return_rxbuf(dhd, pktid, 1); + + DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n", + ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len), + rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt), + ltoh16(rxcmplt_h->metadata_len))); +#if DHD_DBG_SHOW_METADATA + if (dhd->prot->metadata_dbg && + dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) { + uchar *ptr; + ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset); + /* header followed by data */ + bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len); + dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len); + } +#endif /* DHD_DBG_SHOW_METADATA */ + + if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) { + DHD_INFO(("D11 frame rxed \n")); + } + + /* data_offset from buf start */ + if (data_offset) { + /* data offset given from dongle after split rx */ + PKTPULL(dhd->osh, pkt, data_offset); /* data offset */ + } else { + /* DMA RX 
offset updated through shared area */ + if (dhd->prot->rx_dataoffset) { + PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); + } + } + /* Actual length of the packet */ + PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len)); + + ifidx = rxcmplt_h->cmn_hdr.if_id; + +#if defined(DHD_LB_RXP) + dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx); +#else /* ! DHD_LB_RXP */ +#ifdef DHD_RX_CHAINING + /* Chain the packets */ + dhd_rxchain_frame(dhd, pkt, ifidx); +#else /* ! DHD_RX_CHAINING */ + /* offset from which data starts is populated in rxstatus0 */ + dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); +#endif /* ! DHD_RX_CHAINING */ +#endif /* ! DHD_LB_RXP */ +} /* dhd_prot_rxcmplt_process */ + +/** Stop protocol: sync w/dongle state. */ +void dhd_prot_stop(dhd_pub_t *dhd) +{ + ASSERT(dhd); + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +} + +/* Add any protocol-specific data header. + * Caller must reserve prot_hdrlen prepend space. + */ +void BCMFASTPATH +dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF) +{ + return; +} + +uint +dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF) +{ + return 0; +} + + +#define PKTBUF pktbuf + +/** + * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in + * the corresponding flow ring. 
+ */ +int BCMFASTPATH +dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) +{ + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + host_txbuf_post_t *txdesc = NULL; + dmaaddr_t pa, meta_pa; + uint8 *pktdata; + uint32 pktlen; + uint32 pktid; + uint8 prio; + uint16 flowid = 0; + uint16 alloced = 0; + uint16 headroom; + msgbuf_ring_t *ring; + flow_ring_table_t *flow_ring_table; + flow_ring_node_t *flow_ring_node; + + if (dhd->flow_ring_table == NULL) { + return BCME_NORESOURCE; + } + + flowid = DHD_PKT_GET_FLOWID(PKTBUF); + + flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; + + ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + + + DHD_GENERAL_LOCK(dhd, flags); + + /* Create a unique 32-bit packet id */ + pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_map_handle, PKTBUF); +#if defined(DHD_PCIE_PKTID) + if (pktid == DHD_PKTID_INVALID) { + DHD_ERROR(("Pktid pool depleted.\n")); + /* + * If we return error here, the caller would queue the packet + * again. So we'll just free the skb allocated in DMA Zone. + * Since we have not freed the original SKB yet the caller would + * requeue the same. + */ + goto err_no_res_pktfree; + } +#endif /* DHD_PCIE_PKTID */ + + /* Reserve space in the circular buffer */ + txdesc = (host_txbuf_post_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + if (txdesc == NULL) { +#if defined(DHD_PCIE_PKTID) + void *dmah; + void *secdma; + /* Free up the PKTID. physaddr and pktlen will be garbage. 
*/ + DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, + pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK); +#endif /* DHD_PCIE_PKTID */ + DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n", + __FUNCTION__, __LINE__, prot->active_tx_count)); + goto err_no_res_pktfree; + } + + /* Extract the data pointer and length information */ + pktdata = PKTDATA(dhd->osh, PKTBUF); + pktlen = PKTLEN(dhd->osh, PKTBUF); + + /* Ethernet header: Copy before we cache flush packet using DMA_MAP */ + bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN); + + /* Extract the ethernet header and adjust the data pointer and length */ + pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN); + pktlen -= ETHER_HDR_LEN; + + /* Map the data pointer to a DMA-able address */ + if (SECURE_DMA_ENAB(dhd->osh)) { + int offset = 0; + BCM_REFERENCE(offset); + + if (prot->tx_metadata_offset) { + offset = prot->tx_metadata_offset + ETHER_HDR_LEN; + } + + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, + DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset); + } else { + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0); + } + + if ((PHYSADDRHI(pa) == 0) && (PHYSADDRLO(pa) == 0)) { + DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n")); + ASSERT(0); + } + + /* No need to lock. 
Save the rest of the packet's metadata */ + DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, PKTBUF, pktid, + pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX); + +#ifdef TXP_FLUSH_NITEMS + if (ring->pend_items_count == 0) { + ring->start_addr = (void *)txdesc; + } + ring->pend_items_count++; +#endif + + /* Form the Tx descriptor message buffer */ + + /* Common message hdr */ + txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST; + txdesc->cmn_hdr.if_id = ifidx; + + txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3; + prio = (uint8)PKTPRIO(PKTBUF); + + + txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT; + txdesc->seg_cnt = 1; + + txdesc->data_len = htol16((uint16) pktlen); + txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + + /* Move data pointer to keep ether header in local PKTBUF for later reference */ + PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN); + + /* Handle Tx metadata */ + headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF); + if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) { + DHD_ERROR(("No headroom for Metadata tx %d %d\n", + prot->tx_metadata_offset, headroom)); + } + + if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) { + DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset)); + + /* Adjust the data pointer to account for meta data in DMA_MAP */ + PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset); + + if (SECURE_DMA_ENAB(dhd->osh)) { + meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF), + prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF, + 0, ring->dma_buf.secdma); + } else { + meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), + prot->tx_metadata_offset, DMA_RX, PKTBUF, 0); + } + + if (PHYSADDRISZERO(meta_pa)) { + DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n")); + ASSERT(0); + } + + /* Adjust the data pointer back to original value */ + PKTPULL(dhd->osh, PKTBUF, 
prot->tx_metadata_offset); + + txdesc->metadata_buf_len = prot->tx_metadata_offset; + txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa)); + txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa)); + } else { + txdesc->metadata_buf_len = htol16(0); + txdesc->metadata_buf_addr.high_addr = 0; + txdesc->metadata_buf_addr.low_addr = 0; + } + +#if defined(DHD_PKTID_AUDIT_RING) + DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid, + DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + txdesc->cmn_hdr.request_id = htol32(pktid); + + DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len, + txdesc->cmn_hdr.request_id)); + + /* Update the write pointer in TCM & ring bell */ +#ifdef TXP_FLUSH_NITEMS + /* Flush if we have either hit the txp_threshold or if this msg is */ + /* occupying the last slot in the flow_ring - before wrap around. */ + if ((ring->pend_items_count == prot->txp_threshold) || + ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) { + dhd_prot_txdata_write_flush(dhd, flowid, TRUE); + } +#else + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, txdesc, 1); +#endif + + prot->active_tx_count++; + + /* + * Take a wake lock, do not sleep if we have atleast one packet + * to finish. 
+ */ + if (prot->active_tx_count == 1) + DHD_OS_WAKE_LOCK(dhd); + + DHD_GENERAL_UNLOCK(dhd, flags); + + return BCME_OK; + +err_no_res_pktfree: + + + + DHD_GENERAL_UNLOCK(dhd, flags); + return BCME_NORESOURCE; +} /* dhd_prot_txdata */ + +/* called with a lock */ +/** optimization to write "n" tx items at a time to ring */ +void BCMFASTPATH +dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock) +{ +#ifdef TXP_FLUSH_NITEMS + unsigned long flags = 0; + flow_ring_table_t *flow_ring_table; + flow_ring_node_t *flow_ring_node; + msgbuf_ring_t *ring; + + if (dhd->flow_ring_table == NULL) { + return; + } + + if (!in_lock) { + DHD_GENERAL_LOCK(dhd, flags); + } + + flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; + ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + + if (ring->pend_items_count) { + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ring->start_addr, + ring->pend_items_count); + ring->pend_items_count = 0; + ring->start_addr = NULL; + } + + if (!in_lock) { + DHD_GENERAL_UNLOCK(dhd, flags); + } +#endif /* TXP_FLUSH_NITEMS */ +} + +#undef PKTBUF /* Only defined in the above routine */ + +int BCMFASTPATH +dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len) +{ + return 0; +} + +/** post a set of receive buffers to the dongle */ +static void BCMFASTPATH +dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt) +{ + dhd_prot_t *prot = dhd->prot; +#if defined(DHD_LB_RXC) + int elem_ix; + uint32 *elem; + bcm_workq_t *workq; + + workq = &prot->rx_compl_prod; + + /* Produce the work item */ + elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + if (elem_ix == BCM_RING_FULL) { + DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__)); + ASSERT(0); + return; + } + + elem = WORKQ_ELEMENT(uint32, workq, elem_ix); + *elem = pktid; + + smp_wmb(); + + /* Sync WR index to consumer if the 
SYNC threshold has been reached */ + if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) { + bcm_workq_prod_sync(workq); + prot->rx_compl_prod_sync = 0; + } + + DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n", + __FUNCTION__, pktid, prot->rx_compl_prod_sync)); + +#endif /* DHD_LB_RXC */ + + + if (prot->rxbufpost >= rxcnt) { + prot->rxbufpost -= rxcnt; + } else { + /* ASSERT(0); */ + prot->rxbufpost = 0; + } + +#if !defined(DHD_LB_RXC) + if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) { + dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ + } +#endif /* !DHD_LB_RXC */ +} + +/* called before an ioctl is sent to the dongle */ +static void +dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf) +{ + dhd_prot_t *prot = dhd->prot; + + if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) { + int slen = 0; + pcie_bus_tput_params_t *tput_params; + + slen = strlen("pcie_bus_tput") + 1; + tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen); + bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr, + sizeof(tput_params->host_buf_addr)); + tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN; + } +} + + +/** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */ +int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) +{ + int ret = -1; + uint8 action; + + if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { + DHD_ERROR(("%s : bus is down. 
we have nothing to do\n", __FUNCTION__)); + goto done; + } + + if (dhd->busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__)); + goto done; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (ioc->cmd == WLC_SET_PM) { + DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf)); + } + + ASSERT(len <= WLC_IOCTL_MAXLEN); + + if (len > WLC_IOCTL_MAXLEN) { + goto done; + } + + action = ioc->set; + + dhd_prot_wlioctl_intercept(dhd, ioc, buf); + + if (action & WL_IOCTL_ACTION_SET) { + ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + } else { + ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + if (ret > 0) { + ioc->used = ret; + } + } + + /* Too many programs assume ioctl() returns 0 on success */ + if (ret >= 0) { + ret = 0; + } else { + DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret)); + dhd->dongle_error = ret; + } + + if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) { + /* Intercept the wme_dp ioctl here */ + if (!strcmp(buf, "wme_dp")) { + int slen, val = 0; + + slen = strlen("wme_dp") + 1; + if (len >= (int)(slen + sizeof(int))) { + bcopy(((char *)buf + slen), &val, sizeof(int)); + } + dhd->wme_dp = (uint8) ltoh32(val); + } + + } + +done: + return ret; + +} /* dhd_prot_ioctl */ + +/** test / loopback */ + +int +dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len) +{ + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + uint16 alloced = 0; + + ioct_reqst_hdr_t *ioct_rqst; + + uint16 hdrlen = sizeof(ioct_reqst_hdr_t); + uint16 msglen = len + hdrlen; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN); + msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE); + + DHD_GENERAL_LOCK(dhd, flags); + + ioct_rqst = (ioct_reqst_hdr_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (ioct_rqst == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + return 0; + } + + { + uint8 *ptr; + uint16 i; + + ptr = (uint8 *)ioct_rqst; + 
for (i = 0; i < msglen; i++) { + ptr[i] = i % 256; + } + } + + /* Common msg buf hdr */ + ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK; + ioct_rqst->msg.if_id = 0; + + bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return 0; +} + +/** test / loopback */ +void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer) +{ + if (dmaxfer == NULL) { + return; + } + + dhd_dma_buf_free(dhd, &dmaxfer->srcmem); + dhd_dma_buf_free(dhd, &dmaxfer->dstmem); +} + +/** test / loopback */ +int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, + uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer) +{ + uint i; + if (!dmaxfer) { + return BCME_ERROR; + } + + /* First free up existing buffers */ + dmaxfer_free_dmaaddr(dhd, dmaxfer); + + if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) { + return BCME_NOMEM; + } + + if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) { + dhd_dma_buf_free(dhd, &dmaxfer->srcmem); + return BCME_NOMEM; + } + + dmaxfer->len = len; + + /* Populate source with a pattern */ + for (i = 0; i < dmaxfer->len; i++) { + ((uint8*)dmaxfer->srcmem.va)[i] = i % 256; + } + OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len); + + dmaxfer->srcdelay = srcdelay; + dmaxfer->destdelay = destdelay; + + return BCME_OK; +} /* dmaxfer_prepare_dmaaddr */ + +static void +dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg) +{ + dhd_prot_t *prot = dhd->prot; + + OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len); + if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) { + if (memcmp(prot->dmaxfer.srcmem.va, + prot->dmaxfer.dstmem.va, prot->dmaxfer.len)) { + bcm_print_bytes("XFER SRC: ", + prot->dmaxfer.srcmem.va, prot->dmaxfer.len); + bcm_print_bytes("XFER DST: ", + prot->dmaxfer.dstmem.va, prot->dmaxfer.len); + } else { + 
DHD_INFO(("DMA successful\n")); + } + } + dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer); + dhd->prot->dmaxfer.in_progress = FALSE; +} + +/** Test functionality. + * Transfers bytes from host to dongle and to host again using DMA + * This function is not reentrant, as prot->dmaxfer.in_progress is not protected + * by a spinlock. + */ +int +dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay) +{ + unsigned long flags; + int ret = BCME_OK; + dhd_prot_t *prot = dhd->prot; + pcie_dma_xfer_params_t *dmap; + uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT); + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + if (prot->dmaxfer.in_progress) { + DHD_ERROR(("DMA is in progress...\n")); + return ret; + } + + prot->dmaxfer.in_progress = TRUE; + if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay, + &prot->dmaxfer)) != BCME_OK) { + prot->dmaxfer.in_progress = FALSE; + return ret; + } + + DHD_GENERAL_LOCK(dhd, flags); + + dmap = (pcie_dma_xfer_params_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (dmap == NULL) { + dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer); + prot->dmaxfer.in_progress = FALSE; + DHD_GENERAL_UNLOCK(dhd, flags); + return BCME_NOMEM; + } + + /* Common msg buf hdr */ + dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER; + dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID); + dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa)); + dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa)); + dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa)); + dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa)); + dmap->xfer_len = htol32(prot->dmaxfer.len); + dmap->srcdelay = htol32(prot->dmaxfer.srcdelay); + dmap->destdelay = htol32(prot->dmaxfer.destdelay); + + /* update ring's WR index and ring doorbell to dongle */ + 
dhd_prot_ring_write_complete(dhd, ring, dmap, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + DHD_ERROR(("DMA Started...\n")); + + return BCME_OK; +} /* dhdmsgbuf_dmaxfer_req */ + +/** Called in the process of submitting an ioctl to the dongle */ +static int +dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + int ret = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Respond "bcmerror" and "bcmerrorstr" with local cache */ + if (cmd == WLC_GET_VAR && buf) + { + if (!strcmp((char *)buf, "bcmerrorstr")) + { + strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN); + goto done; + } + else if (!strcmp((char *)buf, "bcmerror")) + { + *(int *)buf = dhd->dongle_error; + goto done; + } + } + + ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx); + + DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n", + action, ifidx, cmd, len)); + + /* wait for IOCTL completion message from dongle and get first fragment */ + ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf); + +done: + return ret; +} + +/** + * Waits for IOCTL completion message from the dongle, copies this into caller + * provided parameter 'buf'. 
+ */ +static int +dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf) +{ + dhd_prot_t *prot = dhd->prot; + int timeleft; + unsigned long flags; + int ret = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhd->dongle_reset) { + ret = -EIO; + goto out; + } + + if (prot->cur_ioctlresp_bufs_posted) { + prot->cur_ioctlresp_bufs_posted--; + } + + dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); + + timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received); + if (timeleft == 0) { + dhd->rxcnt_timeout++; + dhd->rx_ctlerrs++; + DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d " + "trans_id %d state %d busstate=%d ioctl_received=%d\n", + __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd, + prot->ioctl_trans_id, prot->ioctl_state, + dhd->busstate, prot->ioctl_received)); + + dhd_prot_debug_info_print(dhd); + +#ifdef DHD_FW_COREDUMP + /* As soon as FW TRAP occurs, FW dump will be collected from dhdpcie_checkdied */ + if (dhd->memdump_enabled && !dhd->dongle_trap_occured) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT; + dhd_bus_mem_dump(dhd); + } +#endif /* DHD_FW_COREDUMP */ + if (dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) { +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + dhd->bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + DHD_ERROR(("%s: timeout > MAX_CNTL_TX_TIMEOUT\n", __FUNCTION__)); + } + ret = -ETIMEDOUT; + goto out; + } else { + if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) { + DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n", + __FUNCTION__, prot->ioctl_received)); + ret = -ECONNABORTED; + goto out; + } + dhd->rxcnt_timeout = 0; + dhd->rx_ctlpkts++; + DHD_CTL(("%s: ioctl resp resumed, got %d\n", + __FUNCTION__, prot->ioctl_resplen)); + } + + if (dhd->dongle_trap_occured) { +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + dhd->bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ +#endif /* 
SUPPORT_LINKDOWN_RECOVERY */ + DHD_ERROR(("%s: TRAP occurred!!\n", __FUNCTION__)); + ret = -EREMOTEIO; + goto out; + } + + if (dhd->prot->ioctl_resplen > len) { + dhd->prot->ioctl_resplen = (uint16)len; + } + if (buf) { + bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen); + } + + ret = (int)(dhd->prot->ioctl_status); +out: + DHD_GENERAL_LOCK(dhd, flags); + dhd->prot->ioctl_state = 0; + dhd->prot->ioctl_resplen = 0; + dhd->prot->ioctl_received = IOCTL_WAIT; + dhd->prot->curr_ioctl_cmd = 0; + DHD_GENERAL_UNLOCK(dhd, flags); + + return ret; +} /* dhd_msgbuf_wait_ioctl_cmplt */ + +static int +dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + int ret = 0; + + DHD_TRACE(("%s: Enter \n", __FUNCTION__)); + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return -EIO; + } + + /* don't talk to the dongle if fw is about to be reloaded */ + if (dhd->hang_was_sent) { + DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n", + __FUNCTION__)); + return -EIO; + } + + /* Fill up msgbuf for ioctl req */ + ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx); + + DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n", + action, ifidx, cmd, len)); + + ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf); + + return ret; +} + +/** Called by upper DHD layer. Handles a protocol control response asynchronously. */ +int dhd_prot_ctl_complete(dhd_pub_t *dhd) +{ + return 0; +} + +/** Called by upper DHD layer. 
Check for and handle local prot-specific iovar commands */ +int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + return BCME_UNSUPPORTED; +} + +/** Add prot dump output to a buffer */ +void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) +{ + +#if defined(PCIE_D2H_SYNC) + if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) + bcm_bprintf(b, "\nd2h_sync: SEQNUM:"); + else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) + bcm_bprintf(b, "\nd2h_sync: XORCSUM:"); + else + bcm_bprintf(b, "\nd2h_sync: NONE:"); + bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n", + dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot); +#endif /* PCIE_D2H_SYNC */ + + bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n", + DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support), + DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support), + dhd->prot->rw_index_sz); +} + +/* Update local copy of dongle statistics */ +void dhd_prot_dstats(dhd_pub_t *dhd) +{ + return; +} + +/** Called by upper DHD layer */ +int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, + uint reorder_info_len, void **pkt, uint32 *free_buf_count) +{ + return 0; +} + +/** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. 
*/ +int +dhd_post_dummy_msg(dhd_pub_t *dhd) +{ + unsigned long flags; + hostevent_hdr_t *hevent = NULL; + uint16 alloced = 0; + + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + DHD_GENERAL_LOCK(dhd, flags); + + hevent = (hostevent_hdr_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (hevent == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + return -1; + } + + /* CMN msg header */ + hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + hevent->msg.msg_type = MSG_TYPE_HOST_EVNT; + hevent->msg.if_id = 0; + + /* Event payload */ + hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD); + + /* Since, we are filling the data directly into the bufptr obtained + * from the msgbuf, we can directly call the write_complete + */ + dhd_prot_ring_write_complete(dhd, ring, hevent, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return 0; +} + +/** + * If exactly_nitems is true, this function will allocate space for nitems or fail + * If exactly_nitems is false, this function will allocate space for nitems or less + */ +static void * BCMFASTPATH +dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint16 nitems, uint16 * alloced, bool exactly_nitems) +{ + void * ret_buf; + + /* Alloc space for nitems in the ring */ + ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems); + + if (ret_buf == NULL) { + /* if alloc failed , invalidate cached read ptr */ + if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + } else { + dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx); + } + + /* Try allocating once more */ + ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems); + + if (ret_buf == NULL) { + DHD_INFO(("%s: Ring space not available \n", ring->name)); + return NULL; + } + } + + /* Return alloced space */ + return ret_buf; +} + +/** + * Non inline ioct request. 
+ * Form a ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer + * Form a separate request buffer where a 4 byte cmn header is added in the front + * buf contents from parent function is copied to remaining section of this buffer + */ +static int +dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx) +{ + dhd_prot_t *prot = dhd->prot; + ioctl_req_msg_t *ioct_rqst; + void * ioct_buf; /* For ioctl payload */ + uint16 rqstlen, resplen; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + rqstlen = len; + resplen = len; + + /* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */ + /* 8K allocation of dongle buffer fails */ + /* dhd doesnt give separate input & output buf lens */ + /* so making the assumption that input length can never be more than 1.5k */ + rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE); + + DHD_GENERAL_LOCK(dhd, flags); + + if (prot->ioctl_state) { + DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state)); + DHD_GENERAL_UNLOCK(dhd, flags); + return BCME_BUSY; + } else { + prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING; + } + + /* Request for cbuf space */ + ioct_rqst = (ioctl_req_msg_t*) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + if (ioct_rqst == NULL) { + DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n")); + prot->ioctl_state = 0; + prot->curr_ioctl_cmd = 0; + prot->ioctl_received = IOCTL_WAIT; + DHD_GENERAL_UNLOCK(dhd, flags); + return -1; + } + + /* Common msg buf hdr */ + ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ; + ioct_rqst->cmn_hdr.if_id = (uint8)ifidx; + ioct_rqst->cmn_hdr.flags = 0; + ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID); + ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + ioct_rqst->cmd = htol32(cmd); + prot->curr_ioctl_cmd = cmd; + ioct_rqst->output_buf_len = htol16(resplen); + 
prot->ioctl_trans_id++; + ioct_rqst->trans_id = prot->ioctl_trans_id; + + /* populate ioctl buffer info */ + ioct_rqst->input_buf_len = htol16(rqstlen); + ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa)); + ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa)); + /* copy ioct payload */ + ioct_buf = (void *) prot->ioctbuf.va; + + if (buf) { + memcpy(ioct_buf, buf, len); + } + + OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len); + + if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) { + DHD_ERROR(("host ioct address unaligned !!!!! \n")); + } + + DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n", + ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len, + ioct_rqst->trans_id)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return 0; +} /* dhd_fillup_ioct_reqst */ + + +/** + * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a + * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring + * information is posted to the dongle. + * + * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for + * each flowring in pool of flowrings. + * + * returns BCME_OK=0 on success + * returns non-zero negative error value on failure. 
+ */ +static int +dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name, + uint16 max_items, uint16 item_len, uint16 ringid) +{ + int dma_buf_alloced = BCME_NOMEM; + uint32 dma_buf_len = max_items * item_len; + dhd_prot_t *prot = dhd->prot; + + ASSERT(ring); + ASSERT(name); + ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF)); + + /* Init name */ + strncpy(ring->name, name, RING_NAME_MAX_LENGTH); + ring->name[RING_NAME_MAX_LENGTH - 1] = '\0'; + + ring->idx = ringid; + + ring->max_items = max_items; + ring->item_len = item_len; + + /* A contiguous space may be reserved for all flowrings */ + if (DHD_IS_FLOWRING(ringid) && (prot->flowrings_dma_buf.va)) { + /* Carve out from the contiguous DMA-able flowring buffer */ + uint16 flowid; + uint32 base_offset; + + dhd_dma_buf_t *dma_buf = &ring->dma_buf; + dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf; + + flowid = DHD_RINGID_TO_FLOWID(ringid); + base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len; + + ASSERT(base_offset + dma_buf_len <= rsv_buf->len); + + dma_buf->len = dma_buf_len; + dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset); + PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa)); + PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset); + + /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */ + ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa)); + + dma_buf->dmah = rsv_buf->dmah; + dma_buf->secdma = rsv_buf->secdma; + + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + } else { + /* Allocate a dhd_dma_buf */ + dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len); + if (dma_buf_alloced != BCME_OK) { + return BCME_NOMEM; + } + } + + /* CAUTION: Save ring::base_addr in little endian format! 
*/ + dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa); + +#ifdef BCM_SECURE_DMA + if (SECURE_DMA_ENAB(prot->osh)) { + ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t)); + if (ring->dma_buf.secdma == NULL) { + goto free_dma_buf; + } + } +#endif /* BCM_SECURE_DMA */ + + DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d " + "ring start %p buf phys addr %x:%x \n", + ring->name, ring->max_items, ring->item_len, + dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr))); + + return BCME_OK; + +#ifdef BCM_SECURE_DMA +free_dma_buf: + if (dma_buf_alloced == BCME_OK) { + dhd_dma_buf_free(dhd, &ring->dma_buf); + } +#endif /* BCM_SECURE_DMA */ + + return BCME_NOMEM; + +} /* dhd_prot_ring_attach */ + + +/** + * dhd_prot_ring_init - Post the common ring information to dongle. + * + * Used only for common rings. + * + * The flowrings information is passed via the create flowring control message + * (tx_flowring_create_request_t) sent over the H2D control submission common + * ring. + */ +static void +dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + ring->wr = 0; + ring->rd = 0; + ring->curr_rd = 0; + + /* CAUTION: ring::base_addr already in Little Endian */ + dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr, + sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items, + sizeof(uint16), RING_MAX_ITEMS, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len, + sizeof(uint16), RING_ITEM_LEN, ring->idx); + + dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), + sizeof(uint16), RING_WR_UPD, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), + sizeof(uint16), RING_RD_UPD, ring->idx); + + /* ring inited */ + ring->inited = TRUE; + +} /* dhd_prot_ring_init */ + + +/** + * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush + * Reset WR and RD indices to 0. 
+ */ +static void +dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + DHD_TRACE(("%s\n", __FUNCTION__)); + + dhd_dma_buf_reset(dhd, &ring->dma_buf); + + ring->rd = ring->wr = 0; + ring->curr_rd = 0; +} + + +/** + * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects + * hanging off the msgbuf_ring. + */ +static void +dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + dhd_prot_t *prot = dhd->prot; + ASSERT(ring); + + ring->inited = FALSE; + /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */ + +#ifdef BCM_SECURE_DMA + if (SECURE_DMA_ENAB(prot->osh)) { + SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma); + if (ring->dma_buf.secdma) { + MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t)); + } + ring->dma_buf.secdma = NULL; + } +#endif /* BCM_SECURE_DMA */ + + /* If the DMA-able buffer was carved out of a pre-reserved contiguous + * memory, then simply stop using it. + */ + if (DHD_IS_FLOWRING(ring->idx) && (prot->flowrings_dma_buf.va)) { + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t)); + } else { + dhd_dma_buf_free(dhd, &ring->dma_buf); + } + +} /* dhd_prot_ring_detach */ + + +/* + * +---------------------------------------------------------------------------- + * Flowring Pool + * + * Unlike common rings, which are attached very early on (dhd_prot_attach), + * flowrings are dynamically instantiated. Moreover, flowrings may require a + * larger DMA-able buffer. To avoid issues with fragmented cache coherent + * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once. + * The DMA-able buffers are attached to these pre-allocated msgbuf_ring. + * + * Each DMA-able buffer may be allocated independently, or may be carved out + * of a single large contiguous region that is registered with the protocol + * layer into flowrings_dma_buf. 
On a 64bit platform, this contiguous region + * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic). + * + * No flowring pool action is performed in dhd_prot_attach(), as the number + * of h2d rings is not yet known. + * + * In dhd_prot_init(), the dongle advertized number of h2d rings is used to + * determine the number of flowrings required, and a pool of msgbuf_rings are + * allocated and a DMA-able buffer (carved or allocated) is attached. + * See: dhd_prot_flowrings_pool_attach() + * + * A flowring msgbuf_ring object may be fetched from this pool during flowring + * creation, using the flowid. Likewise, flowrings may be freed back into the + * pool on flowring deletion. + * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release() + * + * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers + * are detached (returned back to the carved region or freed), and the pool of + * msgbuf_ring and any objects allocated against it are freed. + * See: dhd_prot_flowrings_pool_detach() + * + * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a + * state as-if upon an attach. All DMA-able buffers are retained. + * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring + * pool attach will notice that the pool persists and continue to use it. This + * will avoid the case of a fragmented DMA-able region. 
+ * + * +---------------------------------------------------------------------------- + */ + +/* Fetch number of H2D flowrings given the total number of h2d rings */ +#define DHD_FLOWRINGS_POOL_TOTAL(h2d_rings_total) \ + ((h2d_rings_total) - BCMPCIE_H2D_COMMON_MSGRINGS) + +/* Conversion of a flowid to a flowring pool index */ +#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \ + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS) + +/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */ +#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \ + (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + DHD_FLOWRINGS_POOL_OFFSET(flowid) + +/* Traverse each flowring in the flowring pool, assigning ring and flowid */ +#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) \ + for ((flowid) = DHD_FLOWRING_START_FLOWID, \ + (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \ + (flowid) < (prot)->h2d_rings_total; \ + (flowid)++, (ring)++) + +/** + * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t. + * + * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings. + * Dongle includes common rings when it advertizes the number of H2D rings. + * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to + * allocate the DMA-able buffer and initialize each msgbuf_ring_t object. + * + * dhd_prot_ring_attach is invoked to perform the actual initialization and + * attaching the DMA-able buffer. + * + * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and + * initialized msgbuf_ring_t object. + * + * returns BCME_OK=0 on success + * returns non-zero negative error value on failure. 
+ */ +static int +dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd) +{ + uint16 flowid; + msgbuf_ring_t *ring; + uint16 h2d_flowrings_total; /* exclude H2D common rings */ + dhd_prot_t *prot = dhd->prot; + char ring_name[RING_NAME_MAX_LENGTH]; + + if (prot->h2d_flowrings_pool != NULL) { + return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */ + } + + ASSERT(prot->h2d_rings_total == 0); + + /* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */ + prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus); + + if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) { + DHD_ERROR(("%s: h2d_rings_total advertized as %u\n", + __FUNCTION__, prot->h2d_rings_total)); + return BCME_ERROR; + } + + /* Subtract number of H2D common rings, to determine number of flowrings */ + h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total); + + DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total)); + + /* Allocate pool of msgbuf_ring_t objects for all flowrings */ + prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh, + (h2d_flowrings_total * sizeof(msgbuf_ring_t))); + + if (prot->h2d_flowrings_pool == NULL) { + DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n", + __FUNCTION__, h2d_flowrings_total)); + goto fail; + } + + /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */ + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) { + snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid); + ring_name[RING_NAME_MAX_LENGTH - 1] = '\0'; + if (dhd_prot_ring_attach(dhd, ring, ring_name, + H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE, + DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) { + goto attach_fail; + } + } + + return BCME_OK; + +attach_fail: + dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */ + +fail: + prot->h2d_rings_total = 0; + return BCME_NOMEM; + +} /* dhd_prot_flowrings_pool_attach */ + + +/** + * dhd_prot_flowrings_pool_reset - Reset all 
msgbuf_ring_t objects in the pool. + * Invokes dhd_prot_ring_reset to perform the actual reset. + * + * The DMA-able buffer is not freed during reset and neither is the flowring + * pool freed. + * + * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following + * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool + * from a previous flowring pool instantiation will be reused. + * + * This will avoid a fragmented DMA-able memory condition, if multiple + * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach + * cycle. + */ +static void +dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd) +{ + uint16 flowid; + msgbuf_ring_t *ring; + dhd_prot_t *prot = dhd->prot; + + if (prot->h2d_flowrings_pool == NULL) { + ASSERT(prot->h2d_rings_total == 0); + return; + } + + /* Reset each flowring in the flowring pool */ + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) { + dhd_prot_ring_reset(dhd, ring); + ring->inited = FALSE; + } + + /* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */ +} + + +/** + * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with + * DMA-able buffers for flowrings. + * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any + * de-initialization of each msgbuf_ring_t. 
+ */ +static void +dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd) +{ + int flowid; + msgbuf_ring_t *ring; + int h2d_flowrings_total; /* exclude H2D common rings */ + dhd_prot_t *prot = dhd->prot; + + if (prot->h2d_flowrings_pool == NULL) { + ASSERT(prot->h2d_rings_total == 0); + return; + } + + /* Detach the DMA-able buffer for each flowring in the flowring pool */ + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) { + dhd_prot_ring_detach(dhd, ring); + } + + h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total); + + MFREE(prot->osh, prot->h2d_flowrings_pool, + (h2d_flowrings_total * sizeof(msgbuf_ring_t))); + + prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL; + prot->h2d_rings_total = 0; + +} /* dhd_prot_flowrings_pool_detach */ + + +/** + * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized + * msgbuf_ring from the flowring pool, and assign it. + * + * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common + * ring information to the dongle, a flowring's information is passed via a + * flowring create control message. + * + * Only the ring state (WR, RD) index are initialized. + */ +static msgbuf_ring_t * +dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid) +{ + msgbuf_ring_t *ring; + dhd_prot_t *prot = dhd->prot; + + ASSERT(flowid >= DHD_FLOWRING_START_FLOWID); + ASSERT(flowid < prot->h2d_rings_total); + ASSERT(prot->h2d_flowrings_pool != NULL); + + ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); + + /* ASSERT flow_ring->inited == FALSE */ + + ring->wr = 0; + ring->rd = 0; + ring->curr_rd = 0; + ring->inited = TRUE; + + return ring; +} + + +/** + * dhd_prot_flowrings_pool_release - release a previously fetched flowring's + * msgbuf_ring back to the flow_ring pool. 
+ */ +void +dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring) +{ + msgbuf_ring_t *ring; + dhd_prot_t *prot = dhd->prot; + + ASSERT(flowid >= DHD_FLOWRING_START_FLOWID); + ASSERT(flowid < prot->h2d_rings_total); + ASSERT(prot->h2d_flowrings_pool != NULL); + + ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); + + ASSERT(ring == (msgbuf_ring_t*)flow_ring); + /* ASSERT flow_ring->inited == TRUE */ + + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + + ring->wr = 0; + ring->rd = 0; + ring->inited = FALSE; + + ring->curr_rd = 0; +} + + +/* Assumes only one index is updated at a time */ +/* If exactly_nitems is true, this function will allocate space for nitems or fail */ +/* Exception: when wrap around is encountered, to prevent hangup (last nitems of ring buffer) */ +/* If exactly_nitems is false, this function will allocate space for nitems or less */ +static void *BCMFASTPATH +dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced, + bool exactly_nitems) +{ + void *ret_ptr = NULL; + uint16 ring_avail_cnt; + + ASSERT(nitems <= ring->max_items); + + ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items); + + if ((ring_avail_cnt == 0) || + (exactly_nitems && (ring_avail_cnt < nitems) && + ((ring->max_items - ring->wr) >= nitems))) { + DHD_INFO(("Space not available: ring %s items %d write %d read %d\n", + ring->name, nitems, ring->wr, ring->rd)); + return NULL; + } + *alloced = MIN(nitems, ring_avail_cnt); + + /* Return next available space */ + ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len); + + /* Update write index */ + if ((ring->wr + *alloced) == ring->max_items) { + ring->wr = 0; + } else if ((ring->wr + *alloced) < ring->max_items) { + ring->wr += *alloced; + } else { + /* Should never hit this */ + ASSERT(0); + return NULL; + } + + return ret_ptr; +} /* dhd_prot_get_ring_space */ + + +/** + * dhd_prot_ring_write_complete - Host updates the new WR index on producing + * new 
messages in a H2D ring. The messages are flushed from cache prior to + * posting the new WR index. The new WR index will be updated in the DMA index + * array or directly in the dongle's ring state memory. + * A PCIE doorbell will be generated to wake up the dongle. + */ +static void BCMFASTPATH +dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, + uint16 nitems) +{ + dhd_prot_t *prot = dhd->prot; + + /* cache flush */ + OSL_CACHE_FLUSH(p, ring->item_len * nitems); + + if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + dhd_prot_dma_indx_set(dhd, ring->wr, + H2D_DMA_INDX_WR_UPD, ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), + sizeof(uint16), RING_WR_UPD, ring->idx); + } + + /* raise h2d interrupt */ + prot->mb_ring_fn(dhd->bus, ring->wr); +} + + +/** + * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages + * from a D2H ring. The new RD index will be updated in the DMA Index array or + * directly in dongle's ring state memory. + */ +static void +dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) +{ + /* update read index */ + /* If dma'ing h2d indices supported + * update the r -indices in the + * host memory o/w in TCM + */ + if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + dhd_prot_dma_indx_set(dhd, ring->rd, + D2H_DMA_INDX_RD_UPD, ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), + sizeof(uint16), RING_RD_UPD, ring->idx); + } +} + + +/** + * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array. + * Dongle will DMA the entire array (if DMA_INDX feature is enabled). 
+ * See dhd_prot_dma_indx_init() + */ +static void +dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid) +{ + uint8 *ptr; + uint16 offset; + dhd_prot_t *prot = dhd->prot; + + switch (type) { + case H2D_DMA_INDX_WR_UPD: + ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va); + offset = DHD_H2D_RING_OFFSET(ringid); + break; + + case D2H_DMA_INDX_RD_UPD: + ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va); + offset = DHD_D2H_RING_OFFSET(ringid); + break; + + default: + DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", + __FUNCTION__)); + return; + } + + ASSERT(prot->rw_index_sz != 0); + ptr += offset * prot->rw_index_sz; + + *(uint16*)ptr = htol16(new_index); + + OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz); + + DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n", + __FUNCTION__, new_index, type, ringid, ptr, offset)); + +} /* dhd_prot_dma_indx_set */ + + +/** + * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index + * array. + * Dongle DMAes an entire array to host memory (if the feature is enabled). 
+ * See dhd_prot_dma_indx_init() + */ +static uint16 +dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid) +{ + uint8 *ptr; + uint16 data; + uint16 offset; + dhd_prot_t *prot = dhd->prot; + + switch (type) { + case H2D_DMA_INDX_WR_UPD: + ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va); + offset = DHD_H2D_RING_OFFSET(ringid); + break; + + case H2D_DMA_INDX_RD_UPD: + ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va); + offset = DHD_H2D_RING_OFFSET(ringid); + break; + + case D2H_DMA_INDX_WR_UPD: + ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va); + offset = DHD_D2H_RING_OFFSET(ringid); + break; + + case D2H_DMA_INDX_RD_UPD: + ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va); + offset = DHD_D2H_RING_OFFSET(ringid); + break; + + default: + DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", + __FUNCTION__)); + return 0; + } + + ASSERT(prot->rw_index_sz != 0); + ptr += offset * prot->rw_index_sz; + + OSL_CACHE_INV((void *)ptr, prot->rw_index_sz); + + data = LTOH16(*((uint16*)ptr)); + + DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n", + __FUNCTION__, data, type, ringid, ptr, offset)); + + return (data); + +} /* dhd_prot_dma_indx_get */ + +/** + * An array of DMA read/write indices, containing information about host rings, can be maintained + * either in host memory or in device memory, dependent on preprocessor options. This function is, + * dependent on these options, called during driver initialization. It reserves and initializes + * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The physical + * address of these host memory blocks are communicated to the dongle later on. By reading this host + * memory, the dongle learns about the state of the host rings. 
+ */ + +static INLINE int +dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type, + dhd_dma_buf_t *dma_buf, uint32 bufsz) +{ + int rc; + + if ((dma_buf->len == bufsz) || (dma_buf->va != NULL)) + return BCME_OK; + + rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz); + + return rc; +} + +int +dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length) +{ + uint32 bufsz; + dhd_prot_t *prot = dhd->prot; + dhd_dma_buf_t *dma_buf; + + if (prot == NULL) { + DHD_ERROR(("prot is not inited\n")); + return BCME_ERROR; + } + + /* Dongle advertizes 2B or 4B RW index size */ + ASSERT(rw_index_sz != 0); + prot->rw_index_sz = rw_index_sz; + + bufsz = rw_index_sz * length; + + switch (type) { + case H2D_DMA_INDX_WR_BUF: + dma_buf = &prot->h2d_dma_indx_wr_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) { + goto ret_no_mem; + } + DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + + case H2D_DMA_INDX_RD_BUF: + dma_buf = &prot->h2d_dma_indx_rd_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) { + goto ret_no_mem; + } + DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + + case D2H_DMA_INDX_WR_BUF: + dma_buf = &prot->d2h_dma_indx_wr_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) { + goto ret_no_mem; + } + DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + + case D2H_DMA_INDX_RD_BUF: + dma_buf = &prot->d2h_dma_indx_rd_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) { + goto ret_no_mem; + } + DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + + default: + DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__)); + return BCME_BADOPTION; + } + + return BCME_OK; + +ret_no_mem: + DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n", + __FUNCTION__, type, bufsz)); + return 
BCME_NOMEM; + +} /* dhd_prot_dma_indx_init */ + + +/** + * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read + * from, or NULL if there are no more messages to read. + */ +static uint8* +dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len) +{ + uint16 wr; + uint16 rd; + uint16 depth; + uint16 items; + void *read_addr = NULL; /* address of next msg to be read in ring */ + uint16 d2h_wr = 0; + + DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n", + __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va), + (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va))); + + /* Remember the read index in a variable. + * This is becuase ring->rd gets updated in the end of this function + * So if we have to print the exact read index from which the + * message is read its not possible. + */ + ring->curr_rd = ring->rd; + + /* update write pointer */ + if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + /* DMAing write/read indices supported */ + d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); + ring->wr = d2h_wr; + } else { + dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx); + } + + wr = ring->wr; + rd = ring->rd; + depth = ring->max_items; + + /* check for avail space, in number of ring items */ + items = READ_AVAIL_SPACE(wr, rd, depth); + if (items == 0) { + return NULL; + } + + ASSERT(items < ring->max_items); + + /* + * Note that there are builds where Assert translates to just printk + * so, even if we had hit this condition we would never halt. Now + * dhd_prot_process_msgtype can get into an big loop if this + * happens. 
+ */ + if (items >= ring->max_items) { + DHD_ERROR(("\r\n======================= \r\n")); + DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n", + __FUNCTION__, ring, ring->name, ring->max_items, items)); + DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth)); + DHD_ERROR(("dhd->busstate %d bus->suspended %d bus->wait_for_d3_ack %d \r\n", + dhd->busstate, dhd->bus->suspended, dhd->bus->wait_for_d3_ack)); + DHD_ERROR(("\r\n======================= \r\n")); + + *available_len = 0; + return NULL; + } + + /* if space is available, calculate address to be read */ + read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len); + + /* update read pointer */ + if ((ring->rd + items) >= ring->max_items) { + ring->rd = 0; + } else { + ring->rd += items; + } + + ASSERT(ring->rd < ring->max_items); + + /* convert items to bytes : available_len must be 32bits */ + *available_len = (uint32)(items * ring->item_len); + + OSL_CACHE_INV(read_addr, *available_len); + + /* return read address */ + return read_addr; + +} /* dhd_prot_get_read_addr */ + +/** Creates a flow ring and informs dongle of this event */ +int +dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_flowring_create_request_t *flow_create_rqst; + msgbuf_ring_t *flow_ring; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + + /* Fetch a pre-initialized msgbuf_ring from the flowring pool */ + flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid); + if (flow_ring == NULL) { + DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n", + __FUNCTION__, flow_ring_node->flowid)); + return BCME_NOMEM; + } + + DHD_GENERAL_LOCK(dhd, flags); + + /* Request for ctrl_ring buffer space */ + flow_create_rqst = (tx_flowring_create_request_t *) + dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE); + + if (flow_create_rqst == NULL) { + 
dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring); + DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n", + __FUNCTION__, flow_ring_node->flowid)); + DHD_GENERAL_UNLOCK(dhd, flags); + return BCME_NOMEM; + } + + flow_ring_node->prot_info = (void *)flow_ring; + + /* Common msg buf hdr */ + flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE; + flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_create_rqst->msg.request_id = htol32(0); /* TBD */ + + flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + /* Update flow create message */ + flow_create_rqst->tid = flow_ring_node->flow_info.tid; + flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa)); + memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da)); + /* CAUTION: ring::base_addr already in Little Endian */ + flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr; + flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr; + flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM); + flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE); + DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG + " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid, + MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, + flow_ring_node->flow_info.ifindex)); + + /* Update the flow_ring's WRITE index */ + if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + dhd_prot_dma_indx_set(dhd, flow_ring->wr, + H2D_DMA_INDX_WR_UPD, flow_ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr), + sizeof(uint16), RING_WR_UPD, flow_ring->idx); + } + + /* update control subn ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1); + + 
DHD_GENERAL_UNLOCK(dhd, flags); + + return BCME_OK; +} /* dhd_prot_flow_ring_create */ + +/** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */ +static void +dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg; + + DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__, + ltoh16(flow_create_resp->cmplt.status), + ltoh16(flow_create_resp->cmplt.flow_ring_id))); + + dhd_bus_flow_ring_create_response(dhd->bus, + ltoh16(flow_create_resp->cmplt.flow_ring_id), + ltoh16(flow_create_resp->cmplt.status)); +} + +/** called on e.g. flow ring delete */ +void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info) +{ + msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; + dhd_prot_ring_detach(dhd, flow_ring); + DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__)); +} + +void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, + struct bcmstrbuf *strbuf, const char * fmt) +{ + const char *default_fmt = "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d\n"; + msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; + uint16 rd, wr; + uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len; + + if (fmt == NULL) { + fmt = default_fmt; + } + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx); + bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va, + ltoh32(flow_ring->base_addr.high_addr), + ltoh32(flow_ring->base_addr.low_addr), dma_buf_len); +} + +void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) +{ + dhd_prot_t *prot = dhd->prot; + bcm_bprintf(strbuf, "CtrlPost: "); + dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf, NULL); + bcm_bprintf(strbuf, "CtrlCpl: "); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf, NULL); + + bcm_bprintf(strbuf, "RxPost: "); + 
bcm_bprintf(strbuf, "RBP %d ", prot->rxbufpost); + dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf, NULL); + bcm_bprintf(strbuf, "RxCpl: "); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf, NULL); + + bcm_bprintf(strbuf, "TxCpl: "); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf, NULL); + bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail %d\n", + dhd->prot->active_tx_count, + DHD_PKTID_AVAIL(dhd->prot->pktid_map_handle)); +} + +int +dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_flowring_delete_request_t *flow_delete_rqst; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + DHD_GENERAL_LOCK(dhd, flags); + + /* Request for ring buffer space */ + flow_delete_rqst = (tx_flowring_delete_request_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (flow_delete_rqst == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Common msg buf hdr */ + flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE; + flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_delete_rqst->msg.request_id = htol32(0); /* TBD */ + + flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + /* Update Delete info */ + flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + flow_delete_rqst->reason = htol16(BCME_OK); + + DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG + " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid, + MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, + flow_ring_node->flow_info.ifindex)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return BCME_OK; +} + +static 
void +dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg; + + DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__, + flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id)); + + dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id, + flow_delete_resp->cmplt.status); +} + +int +dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_flowring_flush_request_t *flow_flush_rqst; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + DHD_GENERAL_LOCK(dhd, flags); + + /* Request for ring buffer space */ + flow_flush_rqst = (tx_flowring_flush_request_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + if (flow_flush_rqst == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Common msg buf hdr */ + flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH; + flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_flush_rqst->msg.request_id = htol32(0); /* TBD */ + + flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + flow_flush_rqst->reason = htol16(BCME_OK); + + DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return BCME_OK; +} /* dhd_prot_flow_ring_flush */ + +static void +dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg; + + DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__, + 
flow_flush_resp->cmplt.status)); + + dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id, + flow_flush_resp->cmplt.status); +} + +/** + * Request dongle to configure soft doorbells for D2H rings. Host populated soft + * doorbell information is transferred to dongle via the d2h ring config control + * message. + */ +void +dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd) +{ +#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT) + uint16 ring_idx; + uint8 *msg_next; + void *msg_start; + uint16 alloced = 0; + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + ring_config_req_t *ring_config_req; + bcmpcie_soft_doorbell_t *soft_doorbell; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS; + + /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */ + DHD_GENERAL_LOCK(dhd, flags); + msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE); + + if (msg_start == NULL) { + DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n", + __FUNCTION__, d2h_rings)); + DHD_GENERAL_UNLOCK(dhd, flags); + return; + } + + msg_next = (uint8*)msg_start; + + for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) { + + /* position the ring_config_req into the ctrl subm ring */ + ring_config_req = (ring_config_req_t *)msg_next; + + /* Common msg header */ + ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG; + ring_config_req->msg.if_id = 0; + ring_config_req->msg.flags = 0; + + ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */ + + /* Ring Config subtype and d2h ring_id */ + ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL); + ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx)); + + /* Host soft doorbell configuration */ + soft_doorbell = &prot->soft_doorbell[ring_idx]; + + 
ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value); + ring_config_req->soft_doorbell.haddr.high = + htol32(soft_doorbell->haddr.high); + ring_config_req->soft_doorbell.haddr.low = + htol32(soft_doorbell->haddr.low); + ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items); + ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs); + + DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n", + __FUNCTION__, ring_config_req->soft_doorbell.haddr.high, + ring_config_req->soft_doorbell.haddr.low, + ring_config_req->soft_doorbell.value)); + + msg_next = msg_next + ctrl_ring->item_len; + } + + /* update control subn ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings); + DHD_GENERAL_UNLOCK(dhd, flags); +#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */ +} + +static void +dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg) +{ + DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n", + __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status), + ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id))); +} + +int +dhd_prot_debug_info_print(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring; + uint16 rd, wr; + uint32 intstatus = 0; + uint32 intmask = 0; + uint32 mbintstatus = 0; + uint32 d2h_mb_data = 0; + uint32 dma_buf_len; + + DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n")); + + ring = &prot->h2dring_ctrl_subn; + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("CtrlPost: From Shared Mem: 
RD: %d WR %d \r\n", rd, wr)); + + ring = &prot->d2hring_ctrl_cpln; + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum)); + + intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); + mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0); + dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); + + DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n")); + DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n,", + intstatus, intmask, mbintstatus)); + DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, dhd->bus->def_intmask)); + + return 0; +} + +int +dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) +{ + uint32 *ptr; + uint32 value; + uint32 i; + uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus); + + OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va, + dhd->prot->d2h_dma_indx_wr_buf.len); + + ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va); + + bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues); + + bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr); + value = ltoh32(*ptr); + bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value); + + ptr++; + bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr); + for (i = 
BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) { + value = ltoh32(*ptr); + bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value); + ptr++; + } + + OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va, + dhd->prot->h2d_dma_indx_rd_buf.len); + + ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va); + + bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr); + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value); + + return 0; +} + +uint32 +dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val) +{ + dhd_prot_t *prot = dhd->prot; +#if DHD_DBG_SHOW_METADATA + prot->metadata_dbg = val; +#endif + return (uint32)prot->metadata_dbg; +} + +uint32 +dhd_prot_metadata_dbg_get(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + return (uint32)prot->metadata_dbg; +} + +uint32 +dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx) +{ + dhd_prot_t *prot = dhd->prot; + if (rx) + prot->rx_metadata_offset = (uint16)val; + else + prot->tx_metadata_offset = (uint16)val; + return dhd_prot_metadatalen_get(dhd, rx); +} + +uint32 +dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx) +{ + dhd_prot_t *prot = dhd->prot; + if (rx) + return prot->rx_metadata_offset; + else + return prot->tx_metadata_offset; +} + +/** optimization to write "n" tx items at a time to ring */ +uint32 +dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val) +{ + dhd_prot_t *prot = dhd->prot; + if (set) + prot->txp_threshold = (uint16)val; + val = prot->txp_threshold; + return val; +} + +#ifdef DHD_RX_CHAINING + +static INLINE void BCMFASTPATH +dhd_rxchain_reset(rxchain_info_t *rxchain) +{ + rxchain->pkt_count = 0; +} + +static void BCMFASTPATH +dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx) +{ + uint8 *eh; + uint8 prio; + dhd_prot_t *prot = dhd->prot; + rxchain_info_t 
*rxchain = &prot->rxchain; + + ASSERT(!PKTISCHAINED(pkt)); + ASSERT(PKTCLINK(pkt) == NULL); + ASSERT(PKTCGETATTR(pkt) == 0); + + eh = PKTDATA(dhd->osh, pkt); + prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT; + + if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa, + rxchain->h_da, rxchain->h_prio))) { + /* Different flow - First release the existing chain */ + dhd_rxchain_commit(dhd); + } + + /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */ + /* so that the chain can be handed off to CTF bridge as is. */ + if (rxchain->pkt_count == 0) { + /* First packet in chain */ + rxchain->pkthead = rxchain->pkttail = pkt; + + /* Keep a copy of ptr to ether_da, ether_sa and prio */ + rxchain->h_da = ((struct ether_header *)eh)->ether_dhost; + rxchain->h_sa = ((struct ether_header *)eh)->ether_shost; + rxchain->h_prio = prio; + rxchain->ifidx = ifidx; + rxchain->pkt_count++; + } else { + /* Same flow - keep chaining */ + PKTSETCLINK(rxchain->pkttail, pkt); + rxchain->pkttail = pkt; + rxchain->pkt_count++; + } + + if ((!ETHER_ISMULTI(rxchain->h_da)) && + ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) || + (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) { + PKTSETCHAINED(dhd->osh, pkt); + PKTCINCRCNT(rxchain->pkthead); + PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt)); + } else { + dhd_rxchain_commit(dhd); + return; + } + + /* If we have hit the max chain length, dispatch the chain and reset */ + if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) { + dhd_rxchain_commit(dhd); + } +} + +static void BCMFASTPATH +dhd_rxchain_commit(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + rxchain_info_t *rxchain = &prot->rxchain; + + if (rxchain->pkt_count == 0) + return; + + /* Release the packets to dhd_linux */ + dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count); + + /* Reset the chain */ + dhd_rxchain_reset(rxchain); +} + +#endif /* 
DHD_RX_CHAINING */ diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie.c b/drivers/amlogic/wifi/bcmdhd/dhd_pcie.c similarity index 63% rename from drivers/net/wireless/bcmdhd/dhd_pcie.c rename to drivers/amlogic/wifi/bcmdhd/dhd_pcie.c index eb7638d83661e..fb29fd8ad9df4 100644 --- a/drivers/net/wireless/bcmdhd/dhd_pcie.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_pcie.c @@ -1,9 +1,30 @@ /* * DHD Bus Module for PCIE * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd_pcie.c 506043 2014-10-02 12:29:45Z $ + * + * <> + * + * $Id: dhd_pcie.c 609007 2015-12-30 07:44:52Z $ */ @@ -15,8 +36,8 @@ #include #include #include -#if defined(DHD_DEBUG) #include +#if defined(DHD_DEBUG) #include #endif /* defined(DHD_DEBUG) */ #include @@ -42,8 +63,12 @@ #include BCMEMBEDIMAGE #endif /* BCMEMBEDIMAGE */ +#ifdef PCIE_OOB +#include "ftdi_sio_external.h" +#endif /* PCIE_OOB */ + #define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ -#define MAX_NVRAMBUF_SIZE 6144 /* max nvram buf size */ +#define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */ #define ARMCR4REG_BANKIDX (0x40/sizeof(uint32)) #define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32)) @@ -55,10 +80,14 @@ int dhd_dongle_memsize; int dhd_dongle_ramsize; -#ifdef DHD_DEBUG static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size); +#ifdef DHD_DEBUG static int dhdpcie_bus_readconsole(dhd_bus_t *bus); -#endif +#endif /* DHD_DEBUG */ +#if defined(DHD_FW_COREDUMP) +static int dhdpcie_mem_dump(dhd_bus_t *bus); +#endif /* DHD_FW_COREDUMP */ + static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size); static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, void *params, @@ -75,7 +104,6 @@ static bool dhdpci_bus_read_frames(dhd_bus_t *bus); static int dhdpcie_readshared(dhd_bus_t *bus); static void dhdpcie_init_shared_addr(dhd_bus_t *bus); static bool dhdpcie_dongle_attach(dhd_bus_t *bus); -static void dhdpcie_bus_intr_enable(dhd_bus_t *bus); static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size); static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag); @@ -90,10 +118,10 @@ static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset); static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data); static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong 
offset); static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data); -#ifdef CONFIG_ARCH_MSM8994 +#if defined(DHD_PCIE_BAR1_WIN_BASE_FIX) static void dhdpcie_bus_cfg_set_bar1_win(dhd_bus_t *bus, uint32 data); static ulong dhd_bus_cmn_check_offset(dhd_bus_t *bus, ulong offset); -#endif +#endif /* defined(DHD_PCIE_BAR1_WIN_BASE_FIX) */ static void dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size); static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b); static void dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data); @@ -105,9 +133,32 @@ static int dhdpcie_download_code_array(dhd_bus_t *bus); #endif /* BCMEMBEDIMAGE */ +#ifdef EXYNOS_PCIE_DEBUG +extern void exynos_pcie_register_dump(int ch_num); +#endif /* EXYNOS_PCIE_DEBUG */ #define PCI_VENDOR_ID_BROADCOM 0x14e4 +static void dhd_bus_set_device_wake(struct dhd_bus *bus, bool val); +extern void wl_nddbg_wpp_log(const char *format, ...); +#ifdef PCIE_OOB +static void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus); + +#define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */ +static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT; + +#define HOST_WAKE 4 /* GPIO_0 (HOST_WAKE) - Output from WLAN */ +#define DEVICE_WAKE 5 /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */ +#define BIT_WL_REG_ON 6 +#define BIT_BT_REG_ON 7 + +int gpio_handle_val = 0; +unsigned char gpio_port = 0; +unsigned char gpio_direction = 0; +#define OOB_PORT "ttyUSB0" +#endif /* PCIE_OOB */ +static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version); + /* IOVar table */ enum { IOV_INTR = 1, @@ -132,6 +183,7 @@ enum { IOV_SBREG, IOV_DONGLEISOLATION, IOV_LTRSLEEPON_UNLOOAD, + IOV_METADATA_DBG, IOV_RX_METADATALEN, IOV_TX_METADATALEN, IOV_TXP_THRESHOLD, @@ -140,8 +192,16 @@ enum { IOV_DMA_RINGINDICES, IOV_DB1_FOR_MB, IOV_FLOW_PRIO_MAP, +#ifdef DHD_PCIE_RUNTIMEPM + IOV_IDLETIME, +#endif /* DHD_PCIE_RUNTIMEPM */ IOV_RXBOUND, - IOV_TXBOUND + IOV_TXBOUND, + IOV_HANGREPORT, +#ifdef PCIE_OOB + 
IOV_OOB_BT_REG_ON, + IOV_OOB_ENABLE +#endif /* PCIE_OOB */ }; @@ -160,26 +220,36 @@ const bcm_iovar_t dhdpcie_iovars[] = { {"pciecfgreg", IOV_PCIECFGREG, 0, IOVT_BUFFER, 2 * sizeof(int32) }, {"pciecorereg", IOV_PCIECOREREG, 0, IOVT_BUFFER, 2 * sizeof(int32) }, {"pcieserdesreg", IOV_PCIESERDESREG, 0, IOVT_BUFFER, 3 * sizeof(int32) }, - {"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, IOVT_BUFFER, 2 * sizeof(int32) }, + {"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, IOVT_BUFFER, 3 * sizeof(int32) }, {"pcie_suspend", IOV_PCIE_SUSPEND, 0, IOVT_UINT32, 0 }, +#ifdef PCIE_OOB + {"oob_bt_reg_on", IOV_OOB_BT_REG_ON, 0, IOVT_UINT32, 0 }, + {"oob_enable", IOV_OOB_ENABLE, 0, IOVT_UINT32, 0 }, +#endif /* PCIE_OOB */ {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, IOVT_BOOL, 0 }, {"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 }, {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, IOVT_UINT32, 0 }, {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, IOVT_BUFFER, 0 }, {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, IOVT_UINT32, 0}, + {"metadata_dbg", IOV_METADATA_DBG, 0, IOVT_BOOL, 0 }, {"rx_metadata_len", IOV_RX_METADATALEN, 0, IOVT_UINT32, 0 }, {"tx_metadata_len", IOV_TX_METADATALEN, 0, IOVT_UINT32, 0 }, {"db1_for_mb", IOV_DB1_FOR_MB, 0, IOVT_UINT32, 0 }, {"txp_thresh", IOV_TXP_THRESHOLD, 0, IOVT_UINT32, 0 }, {"buzzz_dump", IOV_BUZZZ_DUMP, 0, IOVT_UINT32, 0 }, {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, IOVT_UINT32, 0 }, +#ifdef DHD_PCIE_RUNTIMEPM + {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 }, +#endif /* DHD_PCIE_RUNTIMEPM */ {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 }, {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 }, + {"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 }, {NULL, 0, 0, 0, 0 } }; + #define MAX_READ_TIMEOUT 5 * 1000 * 1000 #ifndef DHD_RXBOUND @@ -235,33 +305,43 @@ dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size) * * 'tcm' is the *host* virtual address at 
which tcm is mapped. */ -dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tcm, uint32 tcm_size) +#if defined(DHD_PCIE_BAR1_WIN_BASE_FIX) +dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, + volatile char *regs, volatile char *tcm, uint32 tcm_size, void *pci_dev) +#else +dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, + volatile char *regs, volatile char *tcm, void *pci_dev) +#endif /* DHD_PCIE_BAR1_WIN_BASE_FIX */ { dhd_bus_t *bus; DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); do { - if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) { + if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) { DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); break; } - bzero(bus, sizeof(dhd_bus_t)); + bus->regs = regs; bus->tcm = tcm; +#if defined(DHD_PCIE_BAR1_WIN_BASE_FIX) bus->tcm_size = tcm_size; +#endif /* defined(DHD_PCIE_BAR1_WIN_BASE_FIX) */ bus->osh = osh; + /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */ + bus->dev = (struct pci_dev *)pci_dev; dll_init(&bus->const_flowring); /* Attach pcie shared structure */ - bus->pcie_sh = MALLOC(osh, sizeof(pciedev_shared_t)); - if (!bus->pcie_sh) { + if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) { DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__)); break; } /* dhd_common_init(osh); */ + if (dhdpcie_dongle_attach(bus)) { DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__)); break; @@ -275,7 +355,10 @@ dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tc } bus->dhd->busstate = DHD_BUS_DOWN; bus->db1_for_mb = TRUE; - bus->dhd->hang_report = TRUE; + bus->dhd->hang_report = TRUE; + bus->irq_registered = FALSE; + + bus->d3_ack_war_cnt = 0; DHD_TRACE(("%s: EXIT SUCCESS\n", __FUNCTION__)); @@ -285,12 +368,13 @@ dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tc DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__)); - if (bus && bus->pcie_sh) + if (bus && bus->pcie_sh) { MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t)); + 
} - if (bus) + if (bus) { MFREE(osh, bus, sizeof(dhd_bus_t)); - + } return NULL; } @@ -327,93 +411,148 @@ dhd_bus_txq(struct dhd_bus *bus) return &bus->txq; } -/* Get Chip ID version */ +/** Get Chip ID version */ uint dhd_bus_chip_id(dhd_pub_t *dhdp) { dhd_bus_t *bus = dhdp->bus; return bus->sih->chip; } -/* Get Chip Rev ID version */ +/** Get Chip Rev ID version */ uint dhd_bus_chiprev_id(dhd_pub_t *dhdp) { dhd_bus_t *bus = dhdp->bus; return bus->sih->chiprev; } -/* Get Chip Pkg ID version */ +/** Get Chip Pkg ID version */ uint dhd_bus_chippkg_id(dhd_pub_t *dhdp) { dhd_bus_t *bus = dhdp->bus; return bus->sih->chippkg; } +/** Read and clear intstatus. This should be called with interupts disabled or inside isr */ +uint32 +dhdpcie_bus_intstatus(dhd_bus_t *bus) +{ + uint32 intstatus = 0; + uint32 intmask = 0; -/* - -Name: dhdpcie_bus_isr - -Parametrs: - -1: IN int irq -- interrupt vector -2: IN void *arg -- handle to private data structure + if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) || + (bus->sih->buscorerev == 2)) { + intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus); + intstatus &= I_MB; + } else { + /* this is a PCIE core register..not a config register... */ + intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); -Return value: + /* this is a PCIE core register..not a config register... */ + intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); -Status (TRUE or FALSE) + /* + * The fourth argument to si_corereg is the "mask" fields of the register to update + * and the fifth field is the "value" to update. Now if we are interested in only + * few fields of the "mask" bit map, we should not be writing back what we read + * By doing so, we might clear/ack interrupts that are not handled yet. 
+ */ + si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask, + intstatus); + + intstatus &= intmask; + + /* Is device removed. intstatus & intmask read 0xffffffff */ + if (intstatus == (uint32)-1) { + DHD_ERROR(("%s: !!!!!!Device Removed or dead chip.\n", __FUNCTION__)); + intstatus = 0; +#ifdef CUSTOMER_HW4_DEBUG +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN; + dhd_os_send_hang_message(bus->dhd); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */ +#endif /* CUSTOMER_HW4_DEBUG */ + } -Description: -Interrupt Service routine checks for the status register, -disable interrupt and queue DPC if mail box interrupts are raised. -*/ + intstatus &= bus->def_intmask; + } + return intstatus; +} +/** + * Name: dhdpcie_bus_isr + * Parameters: + * 1: IN int irq -- interrupt vector + * 2: IN void *arg -- handle to private data structure + * Return value: + * Status (TRUE or FALSE) + * + * Description: + * Interrupt Service routine checks for the status register, + * disable interrupt and queue DPC if mail box interrupts are raised. + */ int32 dhdpcie_bus_isr(dhd_bus_t *bus) { + uint32 intstatus = 0; do { - DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - /* verify argument */ - if (!bus) { - DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__)); - break; - } + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + /* verify argument */ + if (!bus) { + DHD_ERROR(("%s : bus is null pointer, exit \n", __FUNCTION__)); + break; + } - if (bus->dhd->busstate == DHD_BUS_DOWN) { - DHD_TRACE(("%s : bus is down. 
we have nothing to do\n", - __FUNCTION__)); - break; - } + if (bus->dhd->dongle_reset) { + break; + } - /* Overall operation: - * - Mask further interrupts - * - Read/ack intstatus - * - Take action based on bits and state - * - Reenable interrupts (as per state) - */ + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: BUS is down, not processing the interrupt \r\n", + __FUNCTION__)); + break; + } - /* Count the interrupt call */ - bus->intrcount++; + intstatus = dhdpcie_bus_intstatus(bus); - /* read interrupt status register!! Status bits will be cleared in DPC !! */ - bus->ipend = TRUE; - dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */ - bus->intdis = TRUE; + /* Check if the interrupt is ours or not */ + if (intstatus == 0) { + break; + } + + /* save the intstatus */ + bus->intstatus = intstatus; + + /* Overall operation: + * - Mask further interrupts + * - Read/ack intstatus + * - Take action based on bits and state + * - Reenable interrupts (as per state) + */ + + /* Count the interrupt call */ + bus->intrcount++; + + /* read interrupt status register!! Status bits will be cleared in DPC !! */ + bus->ipend = TRUE; + dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */ + bus->intdis = TRUE; #if defined(PCIE_ISR_THREAD) - DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__)); - DHD_OS_WAKE_LOCK(bus->dhd); - while (dhd_bus_dpc(bus)); - DHD_OS_WAKE_UNLOCK(bus->dhd); + DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK(bus->dhd); + while (dhd_bus_dpc(bus)); + DHD_OS_WAKE_UNLOCK(bus->dhd); #else - bus->dpc_sched = TRUE; - dhd_sched_dpc(bus->dhd); /* queue DPC now!! */ + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); /* queue DPC now!! 
*/ #endif /* defined(SDIO_ISR_THREAD) */ - DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__)); - return TRUE; + DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__)); + return TRUE; } while (0); @@ -421,6 +560,9 @@ dhdpcie_bus_isr(dhd_bus_t *bus) return FALSE; } +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +dhd_pub_t *link_recovery = NULL; +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ static bool dhdpcie_dongle_attach(dhd_bus_t *bus) { @@ -431,9 +573,11 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) uint32 val; sbpcieregs_t *sbpcieregs; - DHD_TRACE(("%s: ENTER\n", - __FUNCTION__)); + DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY + link_recovery = bus->dhd; +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ bus->alp_only = TRUE; bus->sih = NULL; @@ -441,11 +585,18 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) /* Set bar0 window to si_enum_base */ dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE); -#ifdef CONFIG_ARCH_MSM8994 +#if defined(DHD_PCIE_BAR1_WIN_BASE_FIX) /* Read bar1 window */ bus->bar1_win_base = OSL_PCI_READ_CONFIG(bus->osh, PCI_BAR1_WIN, 4); DHD_ERROR(("%s: PCI_BAR1_WIN = %x\n", __FUNCTION__, bus->bar1_win_base)); -#endif +#else + /* Checking PCIe bus status with reading configuration space */ + val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32)); + if ((val & 0xFFFF) != VENDOR_BROADCOM) { + DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__)); + goto fail; + } +#endif /* defined(DHD_PCIE_BAR1_WIN_BASE_FIX) */ /* si_attach() will provide an SI handle and scan the backplane */ if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus, @@ -461,24 +612,36 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) /* WAR where the BAR1 window may not be sized properly */ W_REG(osh, &sbpcieregs->configaddr, 0x4e0); val = R_REG(osh, &sbpcieregs->configdata); -#ifdef CONFIG_ARCH_MSM8994 +#if defined(DHD_PCIE_BAR1_WIN_BASE_FIX) + /* Fix to 2M */ + val = 22; /* 1M: 21, 2M: 22, 4M: 23 */ bus->bar1_win_mask = 0xffffffff - 
(bus->tcm_size - 1); - DHD_ERROR(("%s: BAR1 window val=%d mask=%x\n", __FUNCTION__, val, bus->bar1_win_mask)); -#endif + DHD_ERROR(("%s: BAR1 window val=%d, base=%d mask=%x, tcm=%x(%x)\n", __FUNCTION__, val, + bus->bar1_win_base, bus->bar1_win_mask, + bus->tcm_size, DHD_PCIE_BAR1_WIN_BASE_FIX)); +#endif /* defined(DHD_PCIE_BAR1_WIN_BASE_FIX) */ W_REG(osh, &sbpcieregs->configdata, val); /* Get info on the ARM and SOCRAM cores... */ /* Should really be qualified by device id */ if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) || (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) || - (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { + (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { bus->armrev = si_corerev(bus->sih); } else { DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__)); goto fail; } - if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { + if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__)); + goto fail; + } + /* also populate base address */ + bus->dongle_ram_base = CA7_4365_RAM_BASE; + } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { if (!(bus->orig_ramsize = si_socram_size(bus->sih))) { DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__)); goto fail; @@ -507,15 +670,17 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) case BCM4360_CHIP_ID: bus->dongle_ram_base = CR4_4360_RAM_BASE; break; - case BCM4345_CHIP_ID: + CASE_BCM4345_CHIP: bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */ ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE; break; - case BCM43602_CHIP_ID: + CASE_BCM43602_CHIP: bus->dongle_ram_base = CR4_43602_RAM_BASE; break; case BCM4349_CHIP_GRPID: - bus->dongle_ram_base = CR4_4349_RAM_BASE; + /* RAM base changed from 4349c0(revid=9) onwards */ + bus->dongle_ram_base = ((bus->sih->chiprev < 9) ? 
+ CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9); break; default: bus->dongle_ram_base = 0; @@ -540,15 +705,42 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) bus->wait_for_d3_ack = 1; bus->suspended = FALSE; - DHD_TRACE(("%s: EXIT: SUCCESS\n", - __FUNCTION__)); + +#ifdef PCIE_OOB + gpio_handle_val = get_handle(OOB_PORT); + if (gpio_handle_val < 0) + { + DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__)); + ASSERT(FALSE); + } + + gpio_direction = 0; + ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG); + + /* Note BT core is also enabled here */ + gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE; + gpio_write_port(gpio_handle_val, gpio_port); + + gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE; + ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG); + + bus->oob_enabled = TRUE; + + /* drive the Device_Wake GPIO low on startup */ + bus->device_wake_state = TRUE; + dhd_bus_set_device_wake(bus, FALSE); + dhd_bus_doorbell_timeout_reset(bus); +#endif /* PCIE_OOB */ + + DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__)); return 0; fail: - if (bus->sih != NULL) + if (bus->sih != NULL) { si_detach(bus->sih); - DHD_TRACE(("%s: EXIT: FAILURE\n", - __FUNCTION__)); + bus->sih = NULL; + } + DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__)); return -1; } @@ -569,17 +761,18 @@ void dhdpcie_bus_intr_enable(dhd_bus_t *bus) { DHD_TRACE(("%s: enable interrupts\n", __FUNCTION__)); - - if (!bus || !bus->sih) - return; - - if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || - (bus->sih->buscorerev == 4)) { - dhpcie_bus_unmask_interrupt(bus); - } - else if (bus->sih) { + if (bus && bus->sih && !bus->is_linkdown) { + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + dhpcie_bus_unmask_interrupt(bus); + } else { si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, bus->def_intmask, bus->def_intmask); + } + } else { + DHD_ERROR(("****** %s: failed 
******\n", __FUNCTION__)); + DHD_ERROR(("bus: %p sih: %p bus->is_linkdown %d\n", + bus, bus ? bus->sih : NULL, bus ? bus->is_linkdown: -1)); } } @@ -589,41 +782,74 @@ dhdpcie_bus_intr_disable(dhd_bus_t *bus) DHD_TRACE(("%s Enter\n", __FUNCTION__)); - if (!bus || !bus->sih) - return; - - if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || - (bus->sih->buscorerev == 4)) { - dhpcie_bus_mask_interrupt(bus); - } - else if (bus->sih) { - si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, - bus->def_intmask, 0); + if (bus && bus->sih && !bus->is_linkdown) { + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + dhpcie_bus_mask_interrupt(bus); + } else { + si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, + bus->def_intmask, 0); + } + } else { + DHD_ERROR(("****** %s: failed ******\n", __FUNCTION__)); + DHD_ERROR(("bus: %p sih: %p bus->is_linkdown %d\n", + bus, bus ? bus->sih : NULL, bus ? bus->is_linkdown: -1)); } DHD_TRACE(("%s Exit\n", __FUNCTION__)); } -void +/* + * dhdpcie_advertise_bus_cleanup advertises that clean up is under progress + * to other bus user contexts like Tx, Rx, IOVAR, WD etc and it waits for other contexts + * to gracefully exit. All the bus usage contexts before marking busstate as busy, will check for + * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS, if so + * they will exit from there itself without marking dhd_bus_busy_state as BUSY. 
+ */ +static void +dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + DHD_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS; + DHD_GENERAL_UNLOCK(dhdp, flags); + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); + if (timeleft == 0) { + DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + BUG_ON(1); + } + + return; +} + +static void dhdpcie_bus_remove_prep(dhd_bus_t *bus) { + unsigned long flags; DHD_TRACE(("%s Enter\n", __FUNCTION__)); + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + dhd_os_sdlock(bus->dhd); - bus->dhd->busstate = DHD_BUS_DOWN; dhdpcie_bus_intr_disable(bus); - // terence 20150406: fix for null pointer handle - if (bus->sih) + // terence 20150406: fix for null pointer handle when doing remove driver + if (!bus->dhd->dongle_isolation && bus->sih) { pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs)); + } dhd_os_sdunlock(bus->dhd); DHD_TRACE(("%s Exit\n", __FUNCTION__)); } - -/* Detach and free everything */ +/** Detach and free everything */ void dhdpcie_bus_release(dhd_bus_t *bus) { @@ -638,13 +864,16 @@ dhdpcie_bus_release(dhd_bus_t *bus) ASSERT(osh); if (bus->dhd) { + dhdpcie_advertise_bus_cleanup(bus->dhd); dongle_isolation = bus->dhd->dongle_isolation; + dhdpcie_bus_remove_prep(bus); + if (bus->intr) { dhdpcie_bus_intr_disable(bus); dhdpcie_free_irq(bus); } - dhd_detach(bus->dhd); dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE); + dhd_detach(bus->dhd); dhd_free(bus->dhd); bus->dhd = NULL; } @@ -655,14 +884,16 @@ dhdpcie_bus_release(dhd_bus_t *bus) bus->regs = NULL; } if (bus->tcm) { - dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, bus->tcm_size); + dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, DONGLE_TCM_MAP_SIZE); bus->tcm = NULL; } dhdpcie_bus_release_malloc(bus, osh); /* Detach pcie shared structure */ - if 
(bus->pcie_sh) + if (bus->pcie_sh) { MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t)); + bus->pcie_sh = NULL; + } #ifdef DHD_DEBUG @@ -677,14 +908,12 @@ dhdpcie_bus_release(dhd_bus_t *bus) } DHD_TRACE(("%s: Exit\n", __FUNCTION__)); - -} +} /* dhdpcie_bus_release */ void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag) { - DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__, bus->dhd, bus->dhd->dongle_reset)); @@ -702,9 +931,14 @@ dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bo si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0); } - si_detach(bus->sih); - // terence 20150420: fix for sih incorrectly handled in other function - bus->sih = NULL; + + if (bus->sih->buscorerev == 13) + pcie_serdes_iddqdisable(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs)); + + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } if (bus->vars && bus->varsz) MFREE(osh, bus->vars, bus->varsz); bus->vars = NULL; @@ -720,7 +954,7 @@ dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size) return data; } -/* 32 bit config write */ +/** 32 bit config write */ void dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data) { @@ -733,13 +967,13 @@ dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data) OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data); } -#ifdef CONFIG_ARCH_MSM8994 -void +#if defined(DHD_PCIE_BAR1_WIN_BASE_FIX) +static void dhdpcie_bus_cfg_set_bar1_win(dhd_bus_t *bus, uint32 data) { OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, 4, data); } -#endif +#endif /* defined(DHD_PCIE_BAR1_WIN_BASE_FIX) */ void dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size) @@ -771,10 +1005,11 @@ dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh) } -/* Stop bus module: clear pending frames, disable data flow */ +/** Stop bus module: clear pending frames, disable data flow */ 
void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) { uint32 status; + unsigned long flags; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); @@ -786,28 +1021,49 @@ void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) goto done; } + DHD_DISABLE_RUNTIME_PM(bus->dhd); + + DHD_GENERAL_LOCK(bus->dhd, flags); bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + dhdpcie_bus_intr_disable(bus); status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status); - if (!dhd_download_fw_on_driverload) + + if (!dhd_download_fw_on_driverload) { dhd_dpc_kill(bus->dhd); + } /* Clear rx control and wake any waiters */ - bus->rxlen = 0; - dhd_os_ioctl_resp_wake(bus->dhd); + dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT); + dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP); done: return; } -/* Watchdog timer function */ +/** Watchdog timer function */ bool dhd_bus_watchdog(dhd_pub_t *dhd) { + unsigned long flags; #ifdef DHD_DEBUG dhd_bus_t *bus; bus = dhd->bus; + DHD_GENERAL_LOCK(dhd, flags); + if (dhd->busstate == DHD_BUS_DOWN || + dhd->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_GENERAL_UNLOCK(dhd, flags); + return FALSE; + } + dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD; + DHD_GENERAL_UNLOCK(dhd, flags); + +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + /* Poll for console output periodically */ @@ -822,47 +1078,237 @@ bool dhd_bus_watchdog(dhd_pub_t *dhd) } #endif /* DHD_DEBUG */ - return FALSE; +#ifdef PCIE_OOB + /* If haven't communicated with device for a while, deassert the Device_Wake GPIO */ + if (dhd_doorbell_timeout != 0 && !(bus->dhd->busstate == DHD_BUS_SUSPEND) && + dhd_timeout_expired(&bus->doorbell_timer)) { + dhd_bus_set_device_wake(bus, FALSE); + } +#endif /* PCIE_OOB */ + + DHD_GENERAL_LOCK(dhd, flags); + dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD; + DHD_GENERAL_UNLOCK(dhd, flags); + 
+ return TRUE; +} /* dhd_bus_watchdog */ + + +#define DEADBEEF_PATTERN 0xADDEADDE // "DeadDead" +#define MEMCHECKINFO "/data/.memcheck.info" + +static int +dhd_get_memcheck_info(void) +{ + struct file *fp = NULL; + uint32 mem_val = 0; + int ret = 0; + char *filepath = MEMCHECKINFO; + + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("[WIFI_SEC] %s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + goto done; + } else { + ret = kernel_read(fp, 0, (char *)&mem_val, 4); + if (ret < 0) { + DHD_ERROR(("[WIFI_SEC] %s: File read error, ret=%d\n", __FUNCTION__, ret)); + filp_close(fp, NULL); + goto done; + } + + mem_val = bcm_atoi((char *)&mem_val); + + DHD_ERROR(("[WIFI_SEC]%s: MEMCHECK ENABLED = %d\n", __FUNCTION__, mem_val)); + filp_close(fp, NULL); + } +done: + return mem_val; } +static int +dhdpcie_mem_check(struct dhd_bus *bus) +{ + int bcmerror = BCME_OK; + int offset = 0; + int len = 0; + uint8 *memblock = NULL, *memptr; + int size = bus->ramsize; + int i; + uint32 memcheck_enabled; + + /* Read memcheck info from the file */ + /* 0 : Disable */ + /* 1 : "Dead Beef" pattern write */ + /* 2 : "Dead Beef" pattern write and checking the pattern value */ + + memcheck_enabled = dhd_get_memcheck_info(); + + DHD_ERROR(("%s: memcheck_enabled: %d \n", __FUNCTION__, memcheck_enabled)); + if (memcheck_enabled == 0) { + return bcmerror; + } + + memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + + if ((ulong)memblock % DHD_SDALIGN) { + memptr += (DHD_SDALIGN - ((ulong)memblock % DHD_SDALIGN)); + } + + for (i = 0; i < MEMBLOCK; i = i + 4) { + *(ulong*)(memptr + i) = DEADBEEF_PATTERN; + } + + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + if (offset == 0) { + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + } + } + + 
/* Write "DeadBeef" pattern with MEMBLOCK size */ + while (size) { + len = MIN(MEMBLOCK, size); + + bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + if (memcheck_enabled == 2) { + bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, (uint8 *)memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on read %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } else { + for (i = 0; i < len; i = i+4) { + if ((*(uint32*)(memptr + i)) != DEADBEEF_PATTERN) { + DHD_ERROR(("%s: error on reading pattern at " + "0x%08x\n", __FUNCTION__, (offset + i))); + bcmerror = BCME_ERROR; + goto err; + } + } + } + } + offset += MEMBLOCK; + size -= MEMBLOCK; + } + + DHD_ERROR(("%s: Writing the Dead Beef pattern is Done \n", __FUNCTION__)); + +err: + if (memblock) { + MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); + } + + return bcmerror; +} /* Download firmware image and nvram image */ int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, - char *pfw_path, char *pnv_path, char *pconf_path) + char *pfw_path, char *pnv_path, + char *pclm_path, char *pconf_path) { int ret; bus->fw_path = pfw_path; bus->nv_path = pnv_path; + bus->dhd->clm_path = pclm_path; bus->dhd->conf_path = pconf_path; + DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n", + __FUNCTION__, bus->fw_path, bus->nv_path)); + + dhdpcie_mem_check(bus); + ret = dhdpcie_download_firmware(bus, osh); return ret; } -static int -dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh) +void +dhd_set_path_params(struct dhd_bus *bus) { - int ret = 0; - - DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n", - __FUNCTION__, bus->fw_path, bus->nv_path)); - - DHD_OS_WAKE_LOCK(bus->dhd); - /* External conf takes precedence if specified */ dhd_conf_preinit(bus->dhd); + + if (bus->dhd->clm_path[0] == '\0') { + 
dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path); + } + dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path); + if (bus->dhd->conf_path[0] == '\0') { + dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path); + } +#ifdef CONFIG_PATH_AUTO_SELECT + dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path); +#endif + dhd_conf_read_config(bus->dhd, bus->dhd->conf_path); + dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path); dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path); + dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path); printf("Final fw_path=%s\n", bus->fw_path); printf("Final nv_path=%s\n", bus->nv_path); + printf("Final clm_path=%s\n", bus->dhd->clm_path); printf("Final conf_path=%s\n", bus->dhd->conf_path); +} + +static int +dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh) +{ + int ret = 0; +#if defined(BCM_REQUEST_FW) + uint chipid = bus->sih->chip; + uint revid = bus->sih->chiprev; + char fw_path[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */ + char nv_path[64]; /* path to nvram vars file */ + bus->fw_path = fw_path; + bus->nv_path = nv_path; + switch (chipid) { + case BCM43570_CHIP_ID: + bcmstrncat(fw_path, "43570", 5); + switch (revid) { + case 0: + bcmstrncat(fw_path, "a0", 2); + break; + case 2: + bcmstrncat(fw_path, "a2", 2); + break; + default: + DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__, + revid)); + break; + } + break; + default: + DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__, + chipid)); + return 0; + } + /* load board specific nvram file */ + snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path); + /* load firmware */ + snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path); +#endif /* BCM_REQUEST_FW */ + + DHD_OS_WAKE_LOCK(bus->dhd); + + dhd_set_path_params(bus); + ret = _dhdpcie_download_firmware(bus); DHD_OS_WAKE_UNLOCK(bus->dhd); @@ -872,19 +1318,22 @@ dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh) 
static int dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) { - int bcmerror = -1; + int bcmerror = BCME_ERROR; int offset = 0; - int len; - void *image = NULL; + int len = 0; + char *imgbuf = NULL; uint8 *memblock = NULL, *memptr; + uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct + + int offset_end = bus->ramsize; DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path)); /* Should succeed in opening image if it is actually given through registry * entry or in module param. */ - image = dhd_os_open_image(pfw_path); - if (image == NULL) { + imgbuf = dhd_os_open_image(pfw_path); + if (imgbuf == NULL) { printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path); goto err; } @@ -894,97 +1343,156 @@ dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); goto err; } + if (dhd_msg_level & DHD_TRACE_VAL) { + memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memptr_tmp == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + } if ((uint32)(uintptr)memblock % DHD_SDALIGN) memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); - /* Download image */ - while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) { + DHD_INFO_HW4(("%s: dongle_ram_base: 0x%x ramsize: 0x%x tcm: %p\n", + __FUNCTION__, bus->dongle_ram_base, bus->ramsize, bus->tcm)); + /* Download image with MEMBLOCK size */ + while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) { if (len < 0) { DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); bcmerror = BCME_ERROR; goto err; } - /* check if CR4 */ - if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + /* check if CR4/CA7 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { /* if address is 0, store the reset instruction to be written in 0 
*/ - if (offset == 0) { bus->resetinstr = *(((uint32*)memptr)); /* Add start of RAM address to the address given by user */ offset += bus->dongle_ram_base; + offset_end += offset; } } - - bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, memptr, len); + bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len); if (bcmerror) { DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", - __FUNCTION__, bcmerror, MEMBLOCK, offset)); + __FUNCTION__, bcmerror, MEMBLOCK, offset)); goto err; } + if (dhd_msg_level & DHD_TRACE_VAL) { + bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + if (memcmp(memptr_tmp, memptr, len)) { + DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__)); + goto err; + } else + DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__)); + } offset += MEMBLOCK; + + if (offset >= offset_end) { + DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n", + __FUNCTION__, offset, offset_end)); + bcmerror = BCME_ERROR; + goto err; + } } err: if (memblock) MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); + if (dhd_msg_level & DHD_TRACE_VAL) { + if (memptr_tmp) + MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN); + } - if (image) - dhd_os_close_image(image); + if (imgbuf) + dhd_os_close_image(imgbuf); return bcmerror; -} +} /* dhdpcie_download_code_file */ +#ifdef CUSTOMER_HW4_DEBUG +#define MIN_NVRAMVARS_SIZE 128 +#endif /* CUSTOMER_HW4_DEBUG */ static int dhdpcie_download_nvram(struct dhd_bus *bus) { - int bcmerror = -1; + int bcmerror = BCME_ERROR; uint len; - void * image = NULL; char * memblock = NULL; char *bufp; char *pnv_path; bool nvram_file_exists; - + bool nvram_uefi_exists = FALSE; + bool local_alloc = FALSE; pnv_path = bus->nv_path; nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); - if 
(!nvram_file_exists && (bus->nvram_params == NULL)) - return (0); - if (nvram_file_exists) { - image = dhd_os_open_image(pnv_path); - if (image == NULL) { - printf("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path); - goto err; + /* First try UEFI */ + len = MAX_NVRAMBUF_SIZE; + dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, &len); + + /* If UEFI empty, then read from file system */ + if ((len == 0) || (memblock[0] == '\0')) { + + if (nvram_file_exists) { + len = MAX_NVRAMBUF_SIZE; + dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, &len); + if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) { + goto err; + } } + else { + /* For SROM OTP no external file or UEFI required */ + bcmerror = BCME_OK; + } + } else { + nvram_uefi_exists = TRUE; } - memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE); - if (memblock == NULL) { - DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", - __FUNCTION__, MAX_NVRAMBUF_SIZE)); - goto err; - } + DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len)); - /* Download variables */ - if (nvram_file_exists) { - len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image); - } - else { + if (len > 0 && len <= MAX_NVRAMBUF_SIZE) { + bufp = (char *) memblock; - /* nvram is string with null terminated. 
cannot use strlen */ - len = bus->nvram_params_len; - ASSERT(len <= MAX_NVRAMBUF_SIZE); - memcpy(memblock, bus->nvram_params, len); - } - if (len > 0 && len < MAX_NVRAMBUF_SIZE) { - bufp = (char *)memblock; - bufp[len] = 0; +#ifdef CACHE_FW_IMAGES + if (bus->processed_nvram_params_len) { + len = bus->processed_nvram_params_len; + } - if (nvram_file_exists) - len = process_nvram_vars(bufp, len); + if (!bus->processed_nvram_params_len) { + bufp[len] = 0; + if (nvram_uefi_exists || nvram_file_exists) { + len = process_nvram_vars(bufp, len); + bus->processed_nvram_params_len = len; + } + } else +#else + { + bufp[len] = 0; + if (nvram_uefi_exists || nvram_file_exists) { + len = process_nvram_vars(bufp, len); + } + } +#endif /* CACHE_FW_IMAGES */ + + DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len)); +#ifdef CUSTOMER_HW4_DEBUG + if (len < MIN_NVRAMVARS_SIZE) { + DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n", + __FUNCTION__)); + bcmerror = BCME_ERROR; + goto err; + } +#endif /* CUSTOMER_HW4_DEBUG */ if (len % 4) { len += 4 - (len % 4); @@ -995,21 +1503,19 @@ dhdpcie_download_nvram(struct dhd_bus *bus) bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1); if (bcmerror) { DHD_ERROR(("%s: error downloading vars: %d\n", - __FUNCTION__, bcmerror)); + __FUNCTION__, bcmerror)); } } - else { - DHD_ERROR(("%s: error reading nvram file: %d\n", - __FUNCTION__, len)); - bcmerror = BCME_ERROR; - } -err: - if (memblock) - MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE); - if (image) - dhd_os_close_image(image); +err: + if (memblock) { + if (local_alloc) { + MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE); + } else { + dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE); + } + } return bcmerror; } @@ -1057,8 +1563,9 @@ dhdpcie_download_code_array(struct dhd_bus *bus) len = remaining_len; memcpy(memptr, (p_dlarray + downloded_len), len); - /* check if CR4 */ - if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + /* check if CR4/CA7 */ + if 
(si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { /* if address is 0, store the reset instruction to be written in 0 */ if (offset == 0) { bus->resetinstr = *(((uint32*)memptr)); @@ -1126,7 +1633,7 @@ dhdpcie_download_code_array(struct dhd_bus *bus) MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); return bcmerror; -} +} /* dhdpcie_download_code_array */ #endif /* BCMEMBEDIMAGE */ @@ -1163,8 +1670,7 @@ _dhdpcie_download_firmware(struct dhd_bus *bus) #else goto err; #endif - } - else { + } else { embed = FALSE; dlok = TRUE; } @@ -1175,8 +1681,7 @@ _dhdpcie_download_firmware(struct dhd_bus *bus) if (dhdpcie_download_code_array(bus)) { DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__)); goto err; - } - else { + } else { dlok = TRUE; } } @@ -1209,57 +1714,7 @@ _dhdpcie_download_firmware(struct dhd_bus *bus) err: return bcmerror; -} - -int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) -{ - int timeleft; - uint rxlen = 0; - bool pending; - - DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - - if (bus->dhd->dongle_reset) - return -EIO; - - /* Wait until control frame is available */ - timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending); - rxlen = bus->rxlen; - bcopy(&bus->ioct_resp, msg, MIN(rxlen, sizeof(ioctl_comp_resp_msg_t))); - bus->rxlen = 0; - - if (rxlen) { - DHD_CTL(("%s: resumed on rxctl frame, got %d\n", __FUNCTION__, rxlen)); - } else if (timeleft == 0) { - DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__)); - bus->ioct_resp.cmn_hdr.request_id = 0; - bus->ioct_resp.compl_hdr.status = 0xffff; - bus->dhd->rxcnt_timeout++; - DHD_ERROR(("%s: rxcnt_timeout=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout)); - } else if (pending == TRUE) { - DHD_CTL(("%s: canceled\n", __FUNCTION__)); - return -ERESTARTSYS; - } else { - DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__)); - } - - if (timeleft != 0) - bus->dhd->rxcnt_timeout = 0; - - if (rxlen) - bus->dhd->rx_ctlpkts++; - 
else - bus->dhd->rx_ctlerrs++; - - if (bus->dhd->rxcnt_timeout >= MAX_CNTL_TX_TIMEOUT) - return -ETIMEDOUT; - - if (bus->dhd->dongle_trap_occured) - return -EREMOTEIO; - - return rxlen ? (int)rxlen : -EIO; - -} +} /* _dhdpcie_download_firmware */ #define CONSOLE_LINE_MAX 192 @@ -1327,12 +1782,14 @@ dhdpcie_bus_readconsole(dhd_bus_t *bus) n--; line[n] = 0; printf("CONSOLE: %s\n", line); + } } break2: return BCME_OK; -} +} /* dhdpcie_bus_readconsole */ +#endif /* DHD_DEBUG */ static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) @@ -1353,8 +1810,9 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - if (DHD_NOCHECKDIED_ON()) + if (DHD_NOCHECKDIED_ON()) { return 0; + } if (data == NULL) { /* @@ -1377,8 +1835,9 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) goto done; } - if ((bcmerror = dhdpcie_readshared(bus)) < 0) + if ((bcmerror = dhdpcie_readshared(bus)) < 0) { goto done; + } bcm_binit(&strbuf, data, size); @@ -1406,9 +1865,10 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) if (bus->pcie_sh->assert_exp_addr != 0) { str[0] = '\0'; if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, - bus->pcie_sh->assert_exp_addr, - (uint8 *)str, maxstrlen)) < 0) + bus->pcie_sh->assert_exp_addr, + (uint8 *)str, maxstrlen)) < 0) { goto done; + } str[maxstrlen - 1] = '\0'; bcm_bprintf(&strbuf, " expr \"%s\"", str); @@ -1417,9 +1877,10 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) if (bus->pcie_sh->assert_file_addr != 0) { str[0] = '\0'; if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, - bus->pcie_sh->assert_file_addr, - (uint8 *)str, maxstrlen)) < 0) + bus->pcie_sh->assert_file_addr, + (uint8 *)str, maxstrlen)) < 0) { goto done; + } str[maxstrlen - 1] = '\0'; bcm_bprintf(&strbuf, " file \"%s\"", str); @@ -1431,14 +1892,14 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) { bus->dhd->dongle_trap_occured = TRUE; if ((bcmerror = 
dhdpcie_bus_membytes(bus, FALSE, - bus->pcie_sh->trap_addr, - (uint8*)&tr, sizeof(trap_t))) < 0) + bus->pcie_sh->trap_addr, (uint8*)&tr, sizeof(trap_t))) < 0) { goto done; + } bcm_bprintf(&strbuf, - "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," - "lp 0x%x, rpc 0x%x Trap offset 0x%x, " - "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " + "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," + " lp 0x%x, rpc 0x%x" + "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n", ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr), ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc), @@ -1448,30 +1909,35 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log); if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, - (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) + (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) { goto printbuf; + } addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size); if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, - (uint8 *)&console_size, sizeof(console_size))) < 0) + (uint8 *)&console_size, sizeof(console_size))) < 0) { goto printbuf; + } addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx); if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, - (uint8 *)&console_index, sizeof(console_index))) < 0) + (uint8 *)&console_index, sizeof(console_index))) < 0) { goto printbuf; + } console_ptr = ltoh32(console_ptr); console_size = ltoh32(console_size); console_index = ltoh32(console_index); if (console_size > CONSOLE_BUFFER_MAX || - !(console_buffer = MALLOC(bus->dhd->osh, console_size))) + !(console_buffer = MALLOC(bus->dhd->osh, console_size))) { goto printbuf; + } if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr, - (uint8 *)console_buffer, console_size)) < 0) + (uint8 *)console_buffer, console_size)) < 0) { goto printbuf; + } for (i = 0, n = 0; i < console_size; i += n + 1) { for (n = 0; n < 
CONSOLE_LINE_MAX - 2; n++) { @@ -1491,8 +1957,7 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) * will truncate a lot of the printfs */ - if (dhd_msg_level & DHD_ERROR_VAL) - printf("CONSOLE: %s\n", line); + printf("CONSOLE: %s\n", line); } } } @@ -1500,7 +1965,20 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) printbuf: if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) { - DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf)); + printf("%s: %s\n", __FUNCTION__, strbuf.origbuf); + + /* wake up IOCTL wait event */ + dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP); + +#if defined(DHD_FW_COREDUMP) + /* save core dump or write to a file */ + if (bus->dhd->memdump_enabled) { + bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP; + dhdpcie_mem_dump(bus); + } +#endif /* DHD_FW_COREDUMP */ + + } done: @@ -1513,10 +1991,132 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) MFREE(bus->dhd->osh, console_buffer, console_size); return bcmerror; +} /* dhdpcie_checkdied */ + + +/* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */ +void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf) +{ + int ret = 0; + int size; /* Full mem size */ + int start; /* Start address */ + int read_size = 0; /* Read size of each iteration */ + uint8 *databuf = buf; + + if (bus == NULL) { + return; + } + + start = bus->dongle_ram_base; + /* Get full mem size */ + size = bus->ramsize; + /* Read mem content */ + while (size) + { + read_size = MIN(MEMBLOCK, size); + if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) { + return; + } + + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } + bus->dhd->soc_ram = buf; + bus->dhd->soc_ram_length = bus->ramsize; + return; } -#endif /* DHD_DEBUG */ +#if defined(DHD_FW_COREDUMP) +static int +dhdpcie_mem_dump(dhd_bus_t *bus) +{ + int ret = 0; + int size; /* Full mem size */ + int start = 
bus->dongle_ram_base; /* Start address */ + int read_size = 0; /* Read size of each iteration */ + uint8 *buf = NULL, *databuf = NULL; + +#ifdef EXYNOS_PCIE_DEBUG + exynos_pcie_register_dump(1); +#endif /* EXYNOS_PCIE_DEBUG */ + +#ifdef SUPPORT_LINKDOWN_RECOVERY + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down so skip\n", __FUNCTION__)); + return BCME_ERROR; + } +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + + /* Get full mem size */ + size = bus->ramsize; +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + buf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_MEMDUMP_BUF, size); + bzero(buf, size); +#else + buf = MALLOC(bus->dhd->osh, size); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + if (!buf) { + DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size)); + return BCME_ERROR; + } + + /* Read mem content */ + DHD_TRACE_HW4(("Dump dongle memory")); + databuf = buf; + while (size) + { + read_size = MIN(MEMBLOCK, size); + if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) + { + DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret)); + if (buf) { + MFREE(bus->dhd->osh, buf, size); + } + return BCME_ERROR; + } + DHD_TRACE((".")); + + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } + + DHD_TRACE_HW4(("%s FUNC: Copy fw image to the embedded buffer \n", __FUNCTION__)); + + dhd_save_fwdump(bus->dhd, buf, bus->ramsize); + dhd_schedule_memdump(bus->dhd, buf, bus->ramsize); + + return ret; +} + +int +dhd_bus_mem_dump(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + if (bus->suspended) { + DHD_ERROR(("%s: Bus is suspend so skip\n", __FUNCTION__)); + return 0; + } + + return dhdpcie_mem_dump(bus); +} +#endif /* DHD_FW_COREDUMP */ + +int +dhd_socram_dump(dhd_bus_t *bus) +{ +#if defined(DHD_FW_COREDUMP) + return (dhdpcie_mem_dump(bus)); +#else + return -1; +#endif +} + /** * Transfers bytes from host to dongle using pio 
mode. * Parameter 'address' is a backplane address. @@ -1524,21 +2124,26 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size) { - int bcmerror = 0; uint dsize; int detect_endian_flag = 0x01; bool little_endian; -#ifdef CONFIG_ARCH_MSM8994 +#if defined(CONFIG_64BIT) || defined(DHD_PCIE_BAR1_WIN_BASE_FIX) bool is_64bit_unaligned; -#endif +#endif /* defined(CONFIG_64BIT) || defined(DHD_PCIE_BAR1_WIN_BASE_FIX) */ + + if (write && bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return BCME_ERROR; + } /* Detect endianness. */ little_endian = *(char *)&detect_endian_flag; -#ifdef CONFIG_ARCH_MSM8994 +#if defined(CONFIG_64BIT) || defined(DHD_PCIE_BAR1_WIN_BASE_FIX) /* Check 64bit aligned or not. */ is_64bit_unaligned = (address & 0x7); -#endif +#endif /* defined(CONFIG_64BIT) || defined(DHD_PCIE_BAR1_WIN_BASE_FIX) */ + /* In remap mode, adjust address beyond socram and redirect * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize * is not backplane accessible @@ -1551,7 +2156,7 @@ dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uin if (write) { while (size) { if (size >= sizeof(uint64) && little_endian) { -#ifdef CONFIG_ARCH_MSM8994 +#if defined(CONFIG_64BIT) || defined(DHD_PCIE_BAR1_WIN_BASE_FIX) if (is_64bit_unaligned) { DHD_INFO(("%s: write unaligned %lx\n", __FUNCTION__, address)); @@ -1579,7 +2184,7 @@ dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uin } else { while (size) { if (size >= sizeof(uint64) && little_endian) { -#ifdef CONFIG_ARCH_MSM8994 +#if defined(CONFIG_64BIT) || defined(DHD_PCIE_BAR1_WIN_BASE_FIX) if (is_64bit_unaligned) { DHD_INFO(("%s: read unaligned %lx\n", __FUNCTION__, address)); @@ -1605,16 +2210,23 @@ dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uin } } } - return bcmerror; -} + return BCME_OK; +} /* 
dhdpcie_bus_membytes */ +/** + * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue + * to the (non flow controlled) flow ring. + */ int BCMFASTPATH dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs) { flow_ring_node_t *flow_ring_node; int ret = BCME_OK; - +#ifdef DHD_LOSSLESS_ROAMING + dhd_pub_t *dhdp = bus->dhd; +#endif DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id)); + /* ASSERT on flow_id */ if (flow_id >= bus->max_sub_queues) { DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__, @@ -1624,10 +2236,22 @@ dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs) flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id); +#ifdef DHD_LOSSLESS_ROAMING + if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) { + DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n", + __FUNCTION__, flow_ring_node->flow_info.tid)); + return BCME_OK; + } +#endif /* DHD_LOSSLESS_ROAMING */ + { unsigned long flags; void *txp = NULL; flow_queue_t *queue; +#ifdef DHD_LOSSLESS_ROAMING + struct ether_header *eh; + uint8 *pktdata; +#endif /* DHD_LOSSLESS_ROAMING */ queue = &flow_ring_node->queue; /* queue associated with flow ring */ @@ -1639,15 +2263,39 @@ dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs) } while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { - PKTORPHAN(txp); + PKTORPHAN(txp, bus->dhd->conf->tsq); + + /* + * Modifying the packet length caused P2P cert failures. + * Specifically on test cases where a packet of size 52 bytes + * was injected, the sniffer capture showed 62 bytes because of + * which the cert tests failed. So making the below change + * only Router specific. 
+ */ #ifdef DHDTCPACK_SUPPRESS - if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) { - dhd_tcpack_check_xmit(bus->dhd, txp); - } + if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) { + ret = dhd_tcpack_check_xmit(bus->dhd, txp); + if (ret != BCME_OK) { + DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n", + __FUNCTION__)); + } + } #endif /* DHDTCPACK_SUPPRESS */ - /* Attempt to transfer packet over flow ring */ +#ifdef DHD_LOSSLESS_ROAMING + pktdata = (uint8 *)PKTDATA(OSH_NULL, txp); + eh = (struct ether_header *) pktdata; + if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { + uint8 prio = (uint8)PKTPRIO(txp); + + /* Restore to original priority for 802.1X packet */ + if (prio == PRIO_8021D_NC) { + PKTSETPRIO(txp, PRIO_8021D_BE); + } + } +#endif /* DHD_LOSSLESS_ROAMING */ + /* Attempt to transfer packet over flow ring */ ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex); if (ret != BCME_OK) { /* may not have resources in flow ring */ DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret)); @@ -1667,160 +2315,85 @@ dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs) } return ret; -} - -#ifndef PCIE_TX_DEFERRAL -/* Send a data frame to the dongle. Callee disposes of txp. 
*/ -int BCMFASTPATH -dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx) -{ - unsigned long flags; - int ret = BCME_OK; - void *txp_pend = NULL; - if (!bus->txmode_push) { - uint16 flowid; - flow_queue_t *queue; - flow_ring_node_t *flow_ring_node; - if (!bus->dhd->flowid_allocator) { - DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__)); - goto toss; - } - - flowid = DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(txp)); - - flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); - - DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n", - __FUNCTION__, flowid, flow_ring_node->status, - flow_ring_node->active)); - - if ((flowid >= bus->dhd->num_flow_rings) || - (!flow_ring_node->active) || - (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) { - DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n", - __FUNCTION__, flowid, flow_ring_node->status, - flow_ring_node->active)); - ret = BCME_ERROR; - goto toss; - } - - queue = &flow_ring_node->queue; /* queue associated with flow ring */ - - DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); - - if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) - txp_pend = txp; - - DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - - if (flow_ring_node->status) { - DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n", - __FUNCTION__, flowid, flow_ring_node->status, - flow_ring_node->active)); - if (txp_pend) { - txp = txp_pend; - goto toss; - } - return BCME_OK; - } - ret = dhd_bus_schedule_queue(bus, flowid, FALSE); - - /* If we have anything pending, try to push into q */ - if (txp_pend) { - DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); - - if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) { - DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - txp = txp_pend; - goto toss; - } - - DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - } - - return ret; - - } else { /* bus->txmode_push */ - return dhd_prot_txdata(bus->dhd, txp, ifidx); - } +} /* 
dhd_bus_schedule_queue */ -toss: - DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret)); - PKTCFREE(bus->dhd->osh, txp, TRUE); - return ret; -} -#else /* PCIE_TX_DEFERRAL */ +/** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */ int BCMFASTPATH dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx) { - unsigned long flags; - int ret = BCME_OK; uint16 flowid; flow_queue_t *queue; flow_ring_node_t *flow_ring_node; - uint8 *pktdata = (uint8 *)PKTDATA(bus->dhd->osh, txp); - struct ether_header *eh = (struct ether_header *)pktdata; + unsigned long flags; + int ret = BCME_OK; + void *txp_pend = NULL; if (!bus->dhd->flowid_allocator) { DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__)); goto toss; } - flowid = dhd_flowid_find(bus->dhd, ifidx, - bus->dhd->flow_prio_map[(PKTPRIO(txp))], - eh->ether_shost, eh->ether_dhost); - if (flowid == FLOWID_INVALID) { - DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), ifidx); - skb_queue_tail(&bus->orphan_list, txp); - queue_work(bus->tx_wq, &bus->create_flow_work); - return BCME_OK; - } + flowid = DHD_PKT_GET_FLOWID(txp); - DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), flowid); flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); - queue = &flow_ring_node->queue; /* queue associated with flow ring */ - DHD_DATA(("%s: pkt flowid %d, status %d active %d\n", + DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n", __FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active)); DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); if ((flowid >= bus->dhd->num_flow_rings) || (!flow_ring_node->active) || - (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) { + (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) || + (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING)) { DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - DHD_DATA(("%s: Dropping pkt flowid %d, status %d active %d\n", + DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n", __FUNCTION__, 
flowid, flow_ring_node->status, flow_ring_node->active)); ret = BCME_ERROR; goto toss; } - if (flow_ring_node->status == FLOW_RING_STATUS_PENDING) { - DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), ifidx); - skb_queue_tail(&bus->orphan_list, txp); - queue_work(bus->tx_wq, &bus->create_flow_work); - return BCME_OK; - } + queue = &flow_ring_node->queue; /* queue associated with flow ring */ if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) { - DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - goto toss; + txp_pend = txp; } DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - ret = dhd_bus_schedule_queue(bus, flowid, FALSE); + if (flow_ring_node->status) { + DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n", + __FUNCTION__, flowid, flow_ring_node->status, + flow_ring_node->active)); + if (txp_pend) { + txp = txp_pend; + goto toss; + } + return BCME_OK; + } + ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */ + + /* If we have anything pending, try to push into q */ + if (txp_pend) { + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + txp = txp_pend; + goto toss; + } + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + } return ret; toss: - DHD_DATA(("%s: Toss %d\n", __FUNCTION__, ret)); + DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret)); PKTCFREE(bus->dhd->osh, txp, TRUE); return ret; -} -#endif /* !PCIE_TX_DEFERRAL */ +} /* dhd_bus_txdata */ void @@ -1837,16 +2410,6 @@ dhd_bus_start_queue(struct dhd_bus *bus) bus->bus_flowctrl = TRUE; } -void -dhd_bus_update_retlen(dhd_bus_t *bus, uint32 retlen, uint32 pkt_id, uint16 status, - uint32 resp_len) -{ - bus->rxlen = retlen; - bus->ioct_resp.cmn_hdr.request_id = pkt_id; - bus->ioct_resp.compl_hdr.status = status; - bus->ioct_resp.resp_len = (uint16)resp_len; -} - #if 
defined(DHD_DEBUG) /* Device console input function */ int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) @@ -1881,21 +2444,24 @@ int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) goto done; - /* generate an interurpt to dongle to indicate that it needs to process cons command */ + /* generate an interrupt to dongle to indicate that it needs to process cons command */ dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT); done: return rv; -} +} /* dhd_bus_console_in */ #endif /* defined(DHD_DEBUG) */ -/* Process rx frame , Send up the layer to netif */ +/** + * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is + * contained in 'pkt'. Processes rx frame, forwards up the layer to netif. + */ void BCMFASTPATH dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count) { dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0); } -#ifdef CONFIG_ARCH_MSM8994 +#if defined(DHD_PCIE_BAR1_WIN_BASE_FIX) static ulong dhd_bus_cmn_check_offset(dhd_bus_t *bus, ulong offset) { uint new_bar1_wbase = 0; @@ -1910,12 +2476,11 @@ static ulong dhd_bus_cmn_check_offset(dhd_bus_t *bus, ulong offset) } address = offset - bus->bar1_win_base; - return address; } #else #define dhd_bus_cmn_check_offset(x, y) y -#endif /* CONFIG_ARCH_MSM8994 */ +#endif /* defined(DHD_PCIE_BAR1_WIN_BASE_FIX) */ /** 'offset' is a backplane address */ void @@ -1928,12 +2493,9 @@ uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset) { volatile uint8 data; -#ifdef BCM47XX_ACP_WAR - data = R_REG(bus->dhd->osh, - (volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset))); -#else + data = *(volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)); -#endif + return data; } @@ -1957,12 +2519,9 @@ uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset) { volatile uint16 data; -#ifdef BCM47XX_ACP_WAR - data = R_REG(bus->dhd->osh, - (volatile uint16 
*)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset))); -#else + data = *(volatile uint16 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)); -#endif + return data; } @@ -1970,12 +2529,9 @@ uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset) { volatile uint32 data; -#ifdef BCM47XX_ACP_WAR - data = R_REG(bus->dhd->osh, - (volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset))); -#else + data = *(volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)); -#endif + return data; } @@ -1983,44 +2539,49 @@ uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) { volatile uint64 data; -#ifdef BCM47XX_ACP_WAR - data = R_REG(bus->dhd->osh, - (volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset))); -#else + data = *(volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)); -#endif + return data; } +/** A snippet of dongle memory is shared between host and dongle */ void -dhd_bus_cmn_writeshared(dhd_bus_t *bus, void * data, uint32 len, uint8 type, uint16 ringid) +dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid) { uint64 long_data; ulong tcm_offset; - pciedev_shared_t *sh; - pciedev_shared_t *shmem = NULL; - sh = (pciedev_shared_t*)bus->shared_addr; + DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len)); - DHD_INFO(("%s: writing to msgbuf type %d, len %d\n", __FUNCTION__, type, len)); + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } switch (type) { - case DNGL_TO_HOST_DMA_SCRATCH_BUFFER: + case D2H_DMA_SCRATCH_BUF: + { + pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr; long_data = HTOL64(*(uint64 *)data); tcm_offset = (ulong)&(sh->host_dma_scratch_buffer); dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); prhex(__FUNCTION__, data, len); break; + } - case DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN : + case D2H_DMA_SCRATCH_BUF_LEN: + { + pciedev_shared_t *sh = 
(pciedev_shared_t*)bus->shared_addr; tcm_offset = (ulong)&(sh->host_dma_scratch_buffer_len); dhdpcie_bus_wtcm32(bus, tcm_offset, (uint32) HTOL32(*(uint32 *)data)); prhex(__FUNCTION__, data, len); break; + } - case HOST_TO_DNGL_DMA_WRITEINDX_BUFFER: - /* ring_info_ptr stored in pcie_sh */ - shmem = (pciedev_shared_t *)bus->pcie_sh; + case H2D_DMA_INDX_WR_BUF: + { + pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; long_data = HTOL64(*(uint64 *)data); tcm_offset = (ulong)shmem->rings_info_ptr; @@ -2028,53 +2589,54 @@ dhd_bus_cmn_writeshared(dhd_bus_t *bus, void * data, uint32 len, uint8 type, uin dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); prhex(__FUNCTION__, data, len); break; + } - case HOST_TO_DNGL_DMA_READINDX_BUFFER: - /* ring_info_ptr stored in pcie_sh */ - shmem = (pciedev_shared_t *)bus->pcie_sh; - + case H2D_DMA_INDX_RD_BUF: + { + pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; long_data = HTOL64(*(uint64 *)data); tcm_offset = (ulong)shmem->rings_info_ptr; tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr); dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); prhex(__FUNCTION__, data, len); break; + } - case DNGL_TO_HOST_DMA_WRITEINDX_BUFFER: - /* ring_info_ptr stored in pcie_sh */ - shmem = (pciedev_shared_t *)bus->pcie_sh; - + case D2H_DMA_INDX_WR_BUF: + { + pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; long_data = HTOL64(*(uint64 *)data); tcm_offset = (ulong)shmem->rings_info_ptr; tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr); dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); prhex(__FUNCTION__, data, len); break; + } - case DNGL_TO_HOST_DMA_READINDX_BUFFER: - /* ring_info_ptr stored in pcie_sh */ - shmem = (pciedev_shared_t *)bus->pcie_sh; - + case D2H_DMA_INDX_RD_BUF: + { + pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; long_data = HTOL64(*(uint64 *)data); tcm_offset = (ulong)shmem->rings_info_ptr; tcm_offset += 
OFFSETOF(ring_info_t, d2h_r_idx_hostaddr); dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); prhex(__FUNCTION__, data, len); break; + } - case RING_LEN_ITEMS : + case RING_ITEM_LEN: tcm_offset = bus->ring_sh[ringid].ring_mem_addr; tcm_offset += OFFSETOF(ring_mem_t, len_items); dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); break; - case RING_MAX_ITEM : + case RING_MAX_ITEMS: tcm_offset = bus->ring_sh[ringid].ring_mem_addr; tcm_offset += OFFSETOF(ring_mem_t, max_item); dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); break; - case RING_BUF_ADDR : + case RING_BUF_ADDR: long_data = HTOL64(*(uint64 *)data); tcm_offset = bus->ring_sh[ringid].ring_mem_addr; tcm_offset += OFFSETOF(ring_mem_t, base_addr); @@ -2082,61 +2644,66 @@ dhd_bus_cmn_writeshared(dhd_bus_t *bus, void * data, uint32 len, uint8 type, uin prhex(__FUNCTION__, data, len); break; - case RING_WRITE_PTR : + case RING_WR_UPD: tcm_offset = bus->ring_sh[ringid].ring_state_w; dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); break; - case RING_READ_PTR : + + case RING_RD_UPD: tcm_offset = bus->ring_sh[ringid].ring_state_r; dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); break; - case DTOH_MB_DATA: + case D2H_MB_DATA: dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr, (uint32) HTOL32(*(uint32 *)data)); break; - case HTOD_MB_DATA: + case H2D_MB_DATA: dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr, (uint32) HTOL32(*(uint32 *)data)); break; + default: break; } -} - +} /* dhd_bus_cmn_writeshared */ +/** A snippet of dongle memory is shared between host and dongle */ void dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid) { - pciedev_shared_t *sh; ulong tcm_offset; - sh = (pciedev_shared_t*)bus->shared_addr; - switch (type) { - case RING_WRITE_PTR : + case RING_WR_UPD: tcm_offset = bus->ring_sh[ringid].ring_state_w; *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset)); 
break; - case RING_READ_PTR : + case RING_RD_UPD: tcm_offset = bus->ring_sh[ringid].ring_state_r; *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset)); break; - case TOTAL_LFRAG_PACKET_CNT : + case TOTAL_LFRAG_PACKET_CNT: + { + pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr; *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, (ulong) &sh->total_lfrag_pkt_cnt)); break; - case HTOD_MB_DATA: + } + case H2D_MB_DATA: *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr)); break; - case DTOH_MB_DATA: + case D2H_MB_DATA: *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr)); break; - case MAX_HOST_RXBUFS : + case MAX_HOST_RXBUFS: + { + pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr; *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, (ulong) &sh->max_host_rxbufs)); break; + } default : break; } @@ -2203,18 +2770,22 @@ dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, exit: return bcmerror; -} +} /* dhd_bus_iovar_op */ #ifdef BCM_BUZZZ #include -int dhd_buzzz_dump_cntrs3(char *p, uint32 *core, uint32 * ovhd, uint32 *log) +int +dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log, + const int num_counters) { int bytes = 0; - uint32 ctr, curr[3], prev[3], delta[3]; + uint32 ctr; + uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX]; + uint32 delta[BCM_BUZZZ_COUNTERS_MAX]; /* Compute elapsed counter values per counter event type */ - for (ctr = 0U; ctr < 3; ctr++) { + for (ctr = 0U; ctr < num_counters; ctr++) { prev[ctr] = core[ctr]; curr[ctr] = *log++; core[ctr] = curr[ctr]; /* saved for next log */ @@ -2224,12 +2795,6 @@ int dhd_buzzz_dump_cntrs3(char *p, uint32 *core, uint32 * ovhd, uint32 *log) else delta[ctr] = (curr[ctr] - prev[ctr]); - /* Adjust for instrumentation overhead */ - if (delta[ctr] >= ovhd[ctr]) - delta[ctr] -= ovhd[ctr]; - else - delta[ctr] = 0; - bytes += sprintf(p + bytes, "%12u ", delta[ctr]); } @@ -2247,7 +2812,8 @@ typedef union cm3_cnts { /* export this in 
bcm_buzzz.h */ }; } cm3_cnts_t; -int dhd_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 * ovhd, uint32 *log) +int +dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log) { int bytes = 0; @@ -2262,10 +2828,6 @@ int dhd_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 * ovhd, uint32 *log) delta = curr + (~0U - prev); else delta = (curr - prev); - if (delta >= ovhd[0]) - delta -= ovhd[0]; - else - delta = 0; bytes += sprintf(p + bytes, "%12u ", delta); cyccnt = delta; @@ -2281,10 +2843,6 @@ int dhd_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 * ovhd, uint32 *log) delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]); else delta.u8[i] = (curr.u8[i] - prev.u8[i]); - if (delta.u8[i] >= ovhd[i + 1]) - delta.u8[i] -= ovhd[i + 1]; - else - delta.u8[i] = 0; bytes += sprintf(p + bytes, "%4u ", delta.u8[i]); } cm3_cnts.u32 = delta.u32; @@ -2292,16 +2850,12 @@ int dhd_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 * ovhd, uint32 *log) { /* Extract the foldcnt from arg0 */ uint8 curr, prev, delta, max8 = ~0; - buzzz_arg0_t arg0; arg0.u32 = *log; + bcm_buzzz_arg0_t arg0; arg0.u32 = *log; prev = core[2]; curr = arg0.klog.cnt; core[2] = curr; if (curr < prev) delta = curr + (max8 - prev); else delta = (curr - prev); - if (delta >= ovhd[5]) - delta -= ovhd[5]; - else - delta = 0; bytes += sprintf(p + bytes, "%4u ", delta); foldcnt = delta; } @@ -2315,18 +2869,19 @@ int dhd_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 * ovhd, uint32 *log) return bytes; } -int dhd_buzzz_dump_log(char * p, uint32 * core, uint32 * log, buzzz_t * buzzz) +int +dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz) { int bytes = 0; - buzzz_arg0_t arg0; - static uint8 * fmt[] = BUZZZ_FMT_STRINGS; + bcm_buzzz_arg0_t arg0; + static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS; if (buzzz->counters == 6) { - bytes += dhd_buzzz_dump_cntrs6(p, core, buzzz->ovhd, log); + bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log); log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */ } else { - bytes += 
dhd_buzzz_dump_cntrs3(p, core, buzzz->ovhd, log); - log += 3; /* (3 x 32bit) CR4 */ + bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters); + log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */ } /* Dump the logged arguments using the registered formats */ @@ -2342,23 +2897,47 @@ int dhd_buzzz_dump_log(char * p, uint32 * core, uint32 * log, buzzz_t * buzzz) bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1); break; } + case 2: + { + uint32 arg1, arg2; + arg1 = *log++; arg2 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2); + break; + } + case 3: + { + uint32 arg1, arg2, arg3; + arg1 = *log++; arg2 = *log++; arg3 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3); + break; + } + case 4: + { + uint32 arg1, arg2, arg3, arg4; + arg1 = *log++; arg2 = *log++; + arg3 = *log++; arg4 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4); + break; + } default: printf("%s: Maximum one argument supported\n", __FUNCTION__); break; } + bytes += sprintf(p + bytes, "\n"); return bytes; } -void dhd_buzzz_dump(buzzz_t * buzzz_p, void * buffer_p, char * p) +void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p) { int i; - uint32 total, part1, part2, log_sz, core[BUZZZ_COUNTERS_MAX]; + uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX]; void * log; - for (i = 0; i < BUZZZ_COUNTERS_MAX; i++) + for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) { core[i] = 0; + } log_sz = buzzz_p->log_sz; @@ -2366,17 +2945,17 @@ void dhd_buzzz_dump(buzzz_t * buzzz_p, void * buffer_p, char * p) if (buzzz_p->wrap == TRUE) { part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz; - total = (buzzz_p->buffer_sz - BUZZZ_LOGENTRY_MAXSZ) / log_sz; + total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz; } else { part2 = 0U; total = buzzz_p->count; } if (total == 0U) { - printf("%s: buzzz_dump total<%u> done\n", __FUNCTION__, total); + printf("%s: bcm_buzzz_dump total<%u> done\n", 
__FUNCTION__, total); return; } else { - printf("%s: buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__, + printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__, total, part2, part1); } @@ -2398,12 +2977,12 @@ void dhd_buzzz_dump(buzzz_t * buzzz_p, void * buffer_p, char * p) log = (void*)((size_t)log + buzzz_p->log_sz); } - printf("%s: buzzz_dump done.\n", __FUNCTION__); + printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__); } int dhd_buzzz_dump_dngl(dhd_bus_t *bus) { - buzzz_t * buzzz_p = NULL; + bcm_buzzz_t * buzzz_p = NULL; void * buffer_p = NULL; char * page_p = NULL; pciedev_shared_t *sh; @@ -2416,8 +2995,8 @@ int dhd_buzzz_dump_dngl(dhd_bus_t *bus) printf("%s: Page memory allocation failure\n", __FUNCTION__); goto done; } - if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(buzzz_t))) == NULL) { - printf("%s: Buzzz memory allocation failure\n", __FUNCTION__); + if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) { + printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__); goto done; } @@ -2432,36 +3011,56 @@ int dhd_buzzz_dump_dngl(dhd_bus_t *bus) DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzzz)); if (sh->buzzz != 0U) { /* Fetch and display dongle BUZZZ Trace */ + dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzzz, - (uint8 *)buzzz_p, sizeof(buzzz_t)); + (uint8 *)buzzz_p, sizeof(bcm_buzzz_t)); + + printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> " + "count<%u> status<%u> wrap<%u>\n" + "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n", + (int)sh->buzzz, + (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end, + buzzz_p->count, buzzz_p->status, buzzz_p->wrap, + buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group, + buzzz_p->buffer_sz, buzzz_p->log_sz); + if (buzzz_p->count == 0) { printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__); goto done; } - if (buzzz_p->counters != 3) { /* 3 counters for CR4 */ - printf("%s: Counters<%u> mismatch\n", __FUNCTION__, 
buzzz_p->counters); - goto done; - } + /* Allocate memory for trace buffer and format strings */ buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz); if (buffer_p == NULL) { printf("%s: Buffer memory allocation failure\n", __FUNCTION__); goto done; } - /* Fetch the trace and format strings */ + + /* Fetch the trace. format strings are exported via bcm_buzzz.h */ dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */ (uint8 *)buffer_p, buzzz_p->buffer_sz); + /* Process and display the trace using formatted output */ - printf("%s: <#cycle> <#instruction> <#ctr3> \n", __FUNCTION__); + + { + int ctr; + for (ctr = 0; ctr < buzzz_p->counters; ctr++) { + printf(" ", buzzz_p->eventid[ctr]); + } + printf("\n"); + } + dhd_buzzz_dump(buzzz_p, buffer_p, page_p); - printf("%s: ----- End of dongle BUZZZ Trace -----\n\n", __FUNCTION__); + + printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__); + MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL; } done: if (page_p) MFREE(bus->dhd->osh, page_p, 4096); - if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(buzzz_t)); + if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t)); if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); return BCME_OK; @@ -2504,57 +3103,12 @@ pcie2_mdiosetblock(dhd_bus_t *bus, uint blk) } -static int -pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val, - bool slave_bypass) -{ - uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl; - uint32 reg32; - - pcie2_mdiosetblock(bus, physmedia); - - /* enable mdio access to SERDES */ - mdio_ctrl = MDIOCTL2_DIVISOR_VAL; - mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF); - - if (slave_bypass) - mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS; - - if (!write) - mdio_ctrl |= MDIOCTL2_READ; - - si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl); - - if (write) { - reg32 = PCIE2_MDIO_WR_DATA; - si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, - 
*val | MDIODATA2_DONE); - } - else - reg32 = PCIE2_MDIO_RD_DATA; - - /* retry till the transaction is complete */ - while (i < pcie_serdes_spinwait) { - uint done_val = si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0); - if (!(done_val & MDIODATA2_DONE)) { - if (!write) { - *val = si_corereg(bus->sih, bus->sih->buscoreidx, - PCIE2_MDIO_RD_DATA, 0, 0); - *val = *val & MDIODATA2_MASK; - } - return 0; - } - OSL_DELAY(1000); - i++; - } - return -1; -} - int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) { dhd_bus_t *bus = dhdp->bus; int bcmerror = 0; + unsigned long flags; #ifdef CONFIG_ARCH_MSM int retry = POWERUP_MAX_RETRY; #endif /* CONFIG_ARCH_MSM */ @@ -2565,8 +3119,15 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) if (flag == TRUE) { /* Turn off WLAN */ /* Removing Power */ DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__)); + bus->dhd->up = FALSE; + if (bus->dhd->busstate != DHD_BUS_DOWN) { + dhdpcie_advertise_bus_cleanup(bus->dhd); + if (bus->intr) { + dhdpcie_bus_intr_disable(bus); + dhdpcie_free_irq(bus); + } #ifdef BCMPCIE_OOB_HOST_WAKE /* Clean up any pending host wake IRQ */ dhd_bus_oob_intr_set(bus->dhd, FALSE); @@ -2574,11 +3135,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) #endif /* BCMPCIE_OOB_HOST_WAKE */ dhd_os_wd_timer(dhdp, 0); dhd_bus_stop(bus, TRUE); - if (bus->intr) { - dhdpcie_bus_intr_disable(bus); - dhdpcie_free_irq(bus); - } - dhd_prot_clear(dhdp); + dhd_prot_reset(dhdp); dhd_clear(dhdp); dhd_bus_release_dongle(bus); dhdpcie_bus_free_resource(bus); @@ -2596,10 +3153,11 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) goto done; } #endif /* CONFIG_ARCH_MSM */ + DHD_GENERAL_LOCK(bus->dhd, flags); bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); } else { if (bus->intr) { - dhdpcie_bus_intr_disable(bus); dhdpcie_free_irq(bus); } #ifdef BCMPCIE_OOB_HOST_WAKE @@ -2607,7 +3165,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) dhd_bus_oob_intr_set(bus->dhd, FALSE); dhd_bus_oob_intr_unregister(bus->dhd); #endif 
/* BCMPCIE_OOB_HOST_WAKE */ - dhd_prot_clear(dhdp); + dhd_prot_reset(dhdp); dhd_clear(dhdp); dhd_bus_release_dongle(bus); dhdpcie_bus_free_resource(bus); @@ -2625,7 +3183,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) __FUNCTION__, bcmerror)); goto done; } -#endif /* CONFIG_ARCH_MSM */ +#endif /* CONFIG_ARCH_MSM */ } bus->dhd->dongle_reset = TRUE; @@ -2642,9 +3200,9 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n", __FUNCTION__)); break; - } - else + } else { OSL_SLEEP(10); + } } if (bcmerror && !retry) { @@ -2653,6 +3211,8 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) goto done; } #endif /* CONFIG_ARCH_MSM */ + bus->is_linkdown = 0; + bus->pci_d3hot_done = 0; bcmerror = dhdpcie_bus_enable_device(bus); if (bcmerror) { DHD_ERROR(("%s: host configuration restore failed: %d\n", @@ -2698,13 +3258,62 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) } } } + done: - if (bcmerror) + if (bcmerror) { + DHD_GENERAL_LOCK(bus->dhd, flags); bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } return bcmerror; } +static int +pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val, + bool slave_bypass) +{ + uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl; + uint32 reg32; + + pcie2_mdiosetblock(bus, physmedia); + + /* enable mdio access to SERDES */ + mdio_ctrl = MDIOCTL2_DIVISOR_VAL; + mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF); + + if (slave_bypass) + mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS; + + if (!write) + mdio_ctrl |= MDIOCTL2_READ; + + si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl); + + if (write) { + reg32 = PCIE2_MDIO_WR_DATA; + si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, + *val | MDIODATA2_DONE); + } else + reg32 = PCIE2_MDIO_RD_DATA; + + /* retry till the transaction is complete */ + while (i < pcie_serdes_spinwait) { + uint done_val = si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0); + if (!(done_val & 
MDIODATA2_DONE)) { + if (!write) { + *val = si_corereg(bus->sih, bus->sih->buscoreidx, + PCIE2_MDIO_RD_DATA, 0, 0); + *val = *val & MDIODATA2_MASK; + } + return 0; + } + OSL_DELAY(1000); + i++; + } + return -1; +} + static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, void *params, int plen, void *arg, int len, int val_size) @@ -2758,58 +3367,62 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons int_val); int_val = si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0); - bcopy(&int_val, arg, val_size); + bcopy(&int_val, arg, sizeof(int_val)); break; + case IOV_SVAL(IOV_PCIECOREREG): + si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2); + break; case IOV_GVAL(IOV_BAR0_SECWIN_REG): { - uint32 cur_base, base; - uchar *bar0; - volatile uint32 *offset; - /* set the bar0 secondary window to this */ - /* write the register value */ - cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint)); - base = int_val & 0xFFFFF000; - dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), base); - bar0 = (uchar *)bus->regs; - offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF)); - int_val = *offset; - bcopy(&int_val, arg, val_size); - dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base); - } + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + + if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + break; + } + bcopy(&int_val, arg, sizeof(int32)); break; + } + case IOV_SVAL(IOV_BAR0_SECWIN_REG): { - uint32 cur_base, base; - uchar *bar0; - volatile uint32 *offset; - /* set the bar0 secondary window to this */ - /* write the register value */ - cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, 
sizeof(uint)); - base = int_val & 0xFFFFF000; - dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), base); - bar0 = (uchar *)bus->regs; - offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF)); - *offset = int_val2; - bcopy(&int_val2, arg, val_size); - dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base); - } - break; + sdreg_t sdreg; + uint32 addr, size; - case IOV_SVAL(IOV_PCIECOREREG): - si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2); + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + } break; + } + case IOV_GVAL(IOV_SBREG): { sdreg_t sdreg; - uint32 addr, coreidx; + uint32 addr, size; bcopy(params, &sdreg, sizeof(sdreg)); - addr = sdreg.offset; - coreidx = (addr & 0xF000) >> 12; + addr = sdreg.offset | SI_ENUM_BASE; + size = sdreg.func; - int_val = si_corereg(bus->sih, coreidx, (addr & 0xFFF), 0, 0); + if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + break; + } bcopy(&int_val, arg, sizeof(int32)); break; } @@ -2817,15 +3430,16 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons case IOV_SVAL(IOV_SBREG): { sdreg_t sdreg; - uint32 addr, coreidx; + uint32 addr, size; bcopy(params, &sdreg, sizeof(sdreg)); - addr = sdreg.offset; - coreidx = (addr & 0xF000) >> 12; - - si_corereg(bus->sih, coreidx, (addr & 0xFFF), ~0, sdreg.value); - + addr = sdreg.offset | SI_ENUM_BASE; + size = sdreg.func; + if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + } break; } @@ -2837,15 +3451,16 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons bcmerror = 
BCME_ERROR; break; } + if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) { bcopy(&val, arg, sizeof(int32)); - } - else { + } else { DHD_ERROR(("%s: pcie2_mdioop failed.\n", __FUNCTION__)); bcmerror = BCME_ERROR; } break; } + case IOV_SVAL(IOV_PCIESERDESREG): if (!PCIE_GEN2(bus->sih)) { DHD_ERROR(("%s: supported only in pcie gen2\n", __FUNCTION__)); @@ -2859,7 +3474,7 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons break; case IOV_GVAL(IOV_PCIECOREREG): int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0); - bcopy(&int_val, arg, val_size); + bcopy(&int_val, arg, sizeof(int_val)); break; case IOV_SVAL(IOV_PCIECFGREG): @@ -2868,7 +3483,7 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons case IOV_GVAL(IOV_PCIECFGREG): int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4); - bcopy(&int_val, arg, val_size); + bcopy(&int_val, arg, sizeof(int_val)); break; case IOV_SVAL(IOV_PCIE_LPBK): @@ -2920,7 +3535,8 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons (set ? "write" : "read"), size, address, dsize)); /* check if CR4 */ - if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { /* if address is 0, store the reset instruction to be written in 0 */ if (set && address == bus->dongle_ram_base) { bus->resetinstr = *(((uint32*)params) + 2); @@ -2979,6 +3595,7 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons } #ifdef BCM_BUZZZ + /* Dump dongle side buzzz trace to console */ case IOV_GVAL(IOV_BUZZZ_DUMP): bcmerror = dhd_buzzz_dump_dngl(bus); break; @@ -3047,7 +3664,7 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons d2h_support = DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0; h2d_support = DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 
1 : 0; int_val = d2h_support | (h2d_support << 1); - bcopy(&int_val, arg, val_size); + bcopy(&int_val, arg, sizeof(int_val)); break; } case IOV_SVAL(IOV_DMA_RINGINDICES): @@ -3067,12 +3684,20 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons } break; + case IOV_GVAL(IOV_METADATA_DBG): + int_val = dhd_prot_metadata_dbg_get(bus->dhd); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_METADATA_DBG): + dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0)); + break; + case IOV_GVAL(IOV_RX_METADATALEN): int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE); bcopy(&int_val, arg, val_size); break; - case IOV_SVAL(IOV_RX_METADATALEN): + case IOV_SVAL(IOV_RX_METADATALEN): if (int_val > 64) { bcmerror = BCME_BUFTOOLONG; break; @@ -3117,6 +3742,10 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE); break; + case IOV_SVAL(IOV_DEVRESET): + dhd_bus_devreset(bus->dhd, (uint8)bool_val); + break; + case IOV_GVAL(IOV_FLOW_PRIO_MAP): int_val = bus->dhd->flow_prio_map_type; bcopy(&int_val, arg, val_size); @@ -3127,6 +3756,21 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons bcopy(&int_val, arg, val_size); break; +#ifdef DHD_PCIE_RUNTIMEPM + case IOV_GVAL(IOV_IDLETIME): + int_val = bus->idletime; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLETIME): + if (int_val < 0) { + bcmerror = BCME_BADARG; + } else { + bus->idletime = int_val; + } + break; +#endif /* DHD_PCIE_RUNTIMEPM */ + case IOV_GVAL(IOV_TXBOUND): int_val = (int32)dhd_txbound; bcopy(&int_val, arg, val_size); @@ -3145,6 +3789,17 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons dhd_rxbound = (uint)int_val; break; + case IOV_SVAL(IOV_HANGREPORT): + bus->dhd->hang_report = bool_val; + DHD_ERROR(("%s: Set hang_report as %d\n", + __FUNCTION__, bus->dhd->hang_report)); + break; + + case IOV_GVAL(IOV_HANGREPORT): + 
int_val = (int32)bus->dhd->hang_report; + bcopy(&int_val, arg, val_size); + break; + default: bcmerror = BCME_UNSUPPORTED; break; @@ -3152,9 +3807,9 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons exit: return bcmerror; -} +} /* dhdpcie_bus_doiovar */ -/* Transfers bytes from host to dongle using pio mode */ +/** Transfers bytes from host to dongle using pio mode */ static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len) { @@ -3174,23 +3829,14 @@ dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len) return 0; } -void -dhd_bus_set_suspend_resume(dhd_pub_t *dhdp, bool state) -{ - struct dhd_bus *bus = dhdp->bus; - if (bus) { - dhdpcie_bus_suspend(bus, state); - } -} - int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) { - int timeleft; - bool pending; + unsigned long flags; int rc = 0; + printf("%s: state=%d\n", __FUNCTION__, state); if (bus->dhd == NULL) { DHD_ERROR(("%s: bus not inited\n", __FUNCTION__)); return BCME_ERROR; @@ -3199,74 +3845,209 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__)); return BCME_ERROR; } + DHD_GENERAL_LOCK(bus->dhd, flags); if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) { DHD_ERROR(("%s: not in a readystate to LPBK is not inited\n", __FUNCTION__)); + DHD_GENERAL_UNLOCK(bus->dhd, flags); return BCME_ERROR; } - if (bus->dhd->dongle_reset) + DHD_GENERAL_UNLOCK(bus->dhd, flags); + if (bus->dhd->dongle_reset) { + DHD_ERROR(("Dongle is in reset state.\n")); return -EIO; + } - if (bus->suspended == state) /* Set to same state */ + if (bus->suspended == state) { /* Set to same state */ + DHD_ERROR(("Bus is already in SUSPEND state.\n")); return BCME_OK; + } if (state) { + int idle_retry = 0; + int active; + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down, state=%d\n", + __FUNCTION__, state)); + return BCME_ERROR; + } + + /* Suspend */ + DHD_ERROR(("%s: Entering suspend state\n", 
__FUNCTION__)); bus->wait_for_d3_ack = 0; bus->suspended = TRUE; + + + DHD_GENERAL_LOCK(bus->dhd, flags); + /* stop all interface network queue. */ + dhd_bus_stop_queue(bus); bus->dhd->busstate = DHD_BUS_SUSPEND; + if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX) { + DHD_ERROR(("Tx Request is not ended\n")); + bus->dhd->busstate = DHD_BUS_DATA; + /* resume all interface network queue. */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + bus->suspended = FALSE; + return -EBUSY; + } + + bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SUSPEND; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); - dhd_os_set_ioctl_resp_timeout(DEFAULT_IOCTL_RESP_TIMEOUT); + dhd_os_set_ioctl_resp_timeout(D3_ACK_RESP_TIMEOUT); dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM); - timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->wait_for_d3_ack, &pending); + timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack); dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + + { + uint32 d2h_mb_data = 0; + uint32 zero = 0; + + /* If wait_for_d3_ack was not updated because D2H MB was not received */ + if (bus->wait_for_d3_ack == 0) { + /* Read the Mb data to see if the Dongle has actually sent D3 ACK */ + dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); + + if (d2h_mb_data & D2H_DEV_D3_ACK) { + DHD_ERROR(("*** D3 WAR for missing interrupt ***\r\n")); + /* Clear the MB Data */ + dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), + D2H_MB_DATA, 0); + + /* Consider that D3 ACK is received */ + bus->wait_for_d3_ack = 1; + bus->d3_ack_war_cnt++; + + } /* d2h_mb_data & D2H_DEV_D3_ACK */ + } /* bus->wait_for_d3_ack was 0 */ + } + + /* To allow threads that got pre-empted to complete. 
+ */ + while ((active = dhd_os_check_wakelock_all(bus->dhd)) && + (idle_retry < MAX_WKLK_IDLE_CHECK)) { + msleep(1); + idle_retry++; + } + if (bus->wait_for_d3_ack) { + DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__)); /* Got D3 Ack. Suspend the bus */ - if (dhd_os_check_wakelock_all(bus->dhd)) { - DHD_ERROR(("%s: Suspend failed because of wakelock\n", __FUNCTION__)); - bus->dev->current_state = PCI_D3hot; - pci_set_master(bus->dev); - rc = pci_set_power_state(bus->dev, PCI_D0); - if (rc) { - DHD_ERROR(("%s: pci_set_power_state failed:" - " current_state[%d], ret[%d]\n", - __FUNCTION__, bus->dev->current_state, rc)); - } + if (active) { + DHD_ERROR(("%s():Suspend failed because of wakelock restoring " + "Dongle to D0\n", __FUNCTION__)); + + /* + * Dongle still thinks that it has to be in D3 state + * until gets a D0 Inform, but we are backing off from suspend. + * Ensure that Dongle is brought back to D0. + * + * Bringing back Dongle from D3 Ack state to D0 state + * is a 2 step process. Dongle would want to know that D0 Inform + * would be sent as a MB interrupt + * to bring it out of D3 Ack state to D0 state. + * So we have to send both this message. + */ + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhdpcie_send_mb_data(bus, + (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM)); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + bus->suspended = FALSE; + DHD_GENERAL_LOCK(bus->dhd, flags); bus->dhd->busstate = DHD_BUS_DATA; + /* resume all interface network queue. 
*/ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(bus->dhd, flags); rc = BCME_ERROR; } else { + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE)); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); dhdpcie_bus_intr_disable(bus); rc = dhdpcie_pci_suspend_resume(bus, state); + dhd_bus_set_device_wake(bus, FALSE); } + bus->dhd->d3ackcnt_timeout = 0; +#if defined(BCMPCIE_OOB_HOST_WAKE) + dhdpcie_oob_intr_set(bus, TRUE); +#endif /* BCMPCIE_OOB_HOST_WAKE */ } else if (timeleft == 0) { - DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__)); - bus->dev->current_state = PCI_D3hot; - pci_set_master(bus->dev); - rc = pci_set_power_state(bus->dev, PCI_D0); - if (rc) { - DHD_ERROR(("%s: pci_set_power_state failed:" - " current_state[%d], ret[%d]\n", - __FUNCTION__, bus->dev->current_state, rc)); + bus->dhd->d3ackcnt_timeout++; + DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n", + __FUNCTION__, bus->dhd->d3ackcnt_timeout)); + dhd_prot_debug_info_print(bus->dhd); +#ifdef DHD_FW_COREDUMP + if (bus->dhd->memdump_enabled) { + /* write core dump to file */ + bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT; + dhdpcie_mem_dump(bus); } +#endif /* DHD_FW_COREDUMP */ bus->suspended = FALSE; + DHD_GENERAL_LOCK(bus->dhd, flags); bus->dhd->busstate = DHD_BUS_DATA; + /* resume all interface network queue. 
*/ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + if (bus->dhd->d3ackcnt_timeout >= MAX_CNTL_D3ACK_TIMEOUT) { + DHD_ERROR(("%s: Event HANG send up " + "due to PCIe linkdown\n", __FUNCTION__)); +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT); + } rc = -ETIMEDOUT; + } + bus->wait_for_d3_ack = 1; + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SUSPEND; + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); } else { /* Resume */ -#ifdef BCMPCIE_OOB_HOST_WAKE +#if defined(BCMPCIE_OOB_HOST_WAKE) DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd); #endif /* BCMPCIE_OOB_HOST_WAKE */ + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_RESUME; + DHD_GENERAL_UNLOCK(bus->dhd, flags); rc = dhdpcie_pci_suspend_resume(bus, state); + if (bus->dhd->busstate == DHD_BUS_SUSPEND) { + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM)); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + dhd_bus_set_device_wake(bus, TRUE); + } bus->suspended = FALSE; + DHD_GENERAL_LOCK(bus->dhd, flags); bus->dhd->busstate = DHD_BUS_DATA; + bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_RESUME; +#ifdef DHD_PCIE_RUNTIMEPM + if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) { + bus->bus_wake = 1; + OSL_SMP_WMB(); + wake_up_interruptible(&bus->rpm_queue); + } +#endif /* DHD_PCIE_RUNTIMEPM */ + /* resume all interface network queue. 
*/ + dhd_bus_start_queue(bus); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); dhdpcie_bus_intr_enable(bus); } return rc; } -/* Transfers bytes from host to dongle and to host again using DMA */ +/** Transfers bytes from host to dongle and to host again using DMA */ static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay) { @@ -3298,8 +4079,10 @@ dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) int bcmerror = 0; uint32 *cr4_regs; - if (!bus->sih) + if (!bus->sih) { + DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__)); return BCME_ERROR; + } /* To enter download state, disable ARM and reset SOCRAM. * To exit download state, simply reset ARM (default is RAM boot). */ @@ -3310,13 +4093,25 @@ dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0); if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && - !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); bcmerror = BCME_ERROR; goto fail; } - if (cr4_regs == NULL) { /* no CR4 present on chip */ + if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + /* Halt ARM & remove reset */ + si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); + if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + si_core_reset(bus->sih, 0, 0); + /* reset last 4 bytes of RAM address. 
to be used for shared area */ + dhdpcie_init_shared_addr(bus); + } else if (cr4_regs == NULL) { /* no CR4 present on chip */ si_core_disable(bus->sih, 0); if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { @@ -3327,7 +4122,6 @@ dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) si_core_reset(bus->sih, 0, 0); - /* Clear the top bit of memory */ if (bus->ramsize) { uint32 zeros = 0; @@ -3348,7 +4142,7 @@ dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) */ /* Halt ARM & remove reset */ si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); - if (bus->sih->chip == BCM43602_CHIP_ID) { + if (BCM43602_CHIP(bus->sih->chip)) { W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5); W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0); W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7); @@ -3358,7 +4152,23 @@ dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) dhdpcie_init_shared_addr(bus); } } else { - if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + /* write vars */ + if ((bcmerror = dhdpcie_bus_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + /* switch back to arm core again */ + if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + /* write address 0 with reset instruction */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + /* now remove reset and halt and continue to run CA7 */ + } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); bcmerror = BCME_ERROR; @@ -3371,11 +4181,9 @@ dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) goto fail; } - /* Enable remap before ARM reset but after vars. 
* No backplane access in remap mode */ - if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); @@ -3391,7 +4199,7 @@ dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) goto fail; } } else { - if (bus->sih->chip == BCM43602_CHIP_ID) { + if (BCM43602_CHIP(bus->sih->chip)) { /* Firmware crashes on SOCSRAM access when core is in reset */ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { DHD_ERROR(("%s: Failed to find SOCRAM core!\n", @@ -3409,7 +4217,6 @@ dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) goto fail; } - /* switch back to arm core again */ if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__)); @@ -3417,10 +4224,26 @@ dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) goto fail; } - /* write address 0 with reset instruction */ - bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0, - (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); - + /* write address 0 with reset instruction */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + + if (bcmerror == BCME_OK) { + uint32 tmp; + + bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0, + (uint8 *)&tmp, sizeof(tmp)); + + if (bcmerror == BCME_OK && tmp != bus->resetinstr) { + DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n", + __FUNCTION__, bus->resetinstr)); + DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n", + __FUNCTION__, tmp)); + bcmerror = BCME_ERROR; + goto fail; + } + } + /* now remove reset and halt and continue to run CR4 */ } @@ -3437,7 +4260,7 @@ dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) si_setcore(bus->sih, PCIE2_CORE_ID, 0); return bcmerror; -} +} /* dhdpcie_bus_download_state */ static int dhdpcie_bus_write_vars(dhd_bus_t *bus) @@ -3466,12 +4289,14 @@ dhdpcie_bus_write_vars(dhd_bus_t *bus) bzero(vbuffer, varsize); bcopy(bus->vars, vbuffer, bus->varsz); /* Write the 
vars list */ + DHD_ERROR(("%s: tcm: %p varaddr: 0x%x varsize: %d\n", + __FUNCTION__, bus->tcm, varaddr, varsize)); bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize); /* Implement read back and verify later */ #ifdef DHD_DEBUG /* Verify NVRAM bytes */ - DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize)); + DHD_ERROR(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize)); nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize); if (!nvram_ularray) return BCME_NOMEM; @@ -3504,9 +4329,9 @@ dhdpcie_bus_write_vars(dhd_bus_t *bus) phys_size += bus->dongle_ram_base; /* adjust to the user specified RAM */ - DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__, + DHD_ERROR(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__, phys_size, bus->ramsize)); - DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__, + DHD_ERROR(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__, varaddr, varsize)); varsize = ((phys_size - 4) - varaddr); @@ -3527,11 +4352,13 @@ dhdpcie_bus_write_vars(dhd_bus_t *bus) DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew)); /* Write the length token to the last word */ + DHD_INFO_HW4(("%s: tcm: %p phys_size: 0x%x varsizew: %x\n", + __FUNCTION__, bus->tcm, phys_size, varsizew)); bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4), (uint8*)&varsizew, 4); return bcmerror; -} +} /* dhdpcie_bus_write_vars */ int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len) @@ -3563,6 +4390,8 @@ dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len) /* Copy the passed variables, which should include the terminating double-null */ bcopy(arg, bus->vars, bus->varsz); + + err: return bcmerror; } @@ -3636,44 +4465,127 @@ dhdpcie_pme_active(osl_t *osh, bool enable) OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr); } -#endif /* BCMPCIE_OOB_HOST_WAKE */ -/* Add bus dump output 
to a buffer */ +bool +dhdpcie_pme_cap(osl_t *osh) +{ + uint8 cap_ptr; + uint32 pme_cap; + + cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID); + + if (!cap_ptr) { + DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__)); + return FALSE; + } + + pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32)); + + DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap)); + + return ((pme_cap & PME_CAP_PM_STATES) != 0); +} +#endif /* !BCMPCIE_OOB_HOST_WAKE */ + +void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) +{ + uint32 intstatus = 0; + uint32 intmask = 0; + uint32 mbintstatus = 0; + uint32 d2h_mb_data = 0; + + intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); + mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0); + dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); + + bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n", + intstatus, intmask, mbintstatus); + bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n", + d2h_mb_data, dhd->bus->def_intmask); +} + +/** Add bus dump output to a buffer */ void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) { uint16 flowid; + int ix = 0; flow_ring_node_t *flow_ring_node; + flow_info_t *flow_info; + char eabuf[ETHER_ADDR_STR_LEN]; + + if (dhdp->busstate != DHD_BUS_DATA) + return; dhd_prot_print_info(dhdp, strbuf); + dhd_dump_intr_registers(dhdp, strbuf); + bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n", + dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr); + bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr)); + bcm_bprintf(strbuf, + "%s %4s %2s %4s %17s %4s %4s %10s %4s %4s ", + "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", + "Overflows", "RD", "WR"); + bcm_bprintf(strbuf, "%5s 
%6s %5s \n", "Acked", "tossed", "noack"); + for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) { flow_ring_node = DHD_FLOW_RING(dhdp, flowid); if (flow_ring_node->active) { - bcm_bprintf(strbuf, "Flow:%d IF %d Prio %d Qlen %d ", - flow_ring_node->flowid, flow_ring_node->flow_info.ifindex, - flow_ring_node->flow_info.tid, flow_ring_node->queue.len); - dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf); + flow_info = &flow_ring_node->flow_info; + bcm_bprintf(strbuf, + "%3d. %4d %2d %4d %17s %4d %4d %10u ", ix++, + flow_ring_node->flowid, flow_info->ifindex, flow_info->tid, + bcm_ether_ntoa((struct ether_addr *)&flow_info->da, eabuf), + DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue), + DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)), + DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue)); + dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf, + "%4d %4d "); + bcm_bprintf(strbuf, + "%5s %6s %5s\n", "NA", "NA", "NA"); } } + bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt); + bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt); + bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt); + bcm_bprintf(strbuf, "D3 Ack WAR cnt %d\n", dhdp->bus->d3_ack_war_cnt); } +/** + * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their + * flow queue to their flow ring. 
+ */ static void dhd_update_txflowrings(dhd_pub_t *dhd) { + unsigned long flags; dll_t *item, *next; flow_ring_node_t *flow_ring_node; struct dhd_bus *bus = dhd->bus; + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); for (item = dll_head_p(&bus->const_flowring); - !dll_end(&bus->const_flowring, item); item = next) { - next = dll_next_p(item); + (!dhd_is_device_removed(dhd) && !dll_end(&bus->const_flowring, item)); + item = next) { + if (dhd->hang_was_sent) { + break; + } + next = dll_next_p(item); flow_ring_node = dhd_constlist_to_flowring(item); + + /* Ensure that flow_ring_node in the list is Not Null */ + ASSERT(flow_ring_node != NULL); + + /* Ensure that the flowring node has valid contents */ + ASSERT(flow_ring_node->prot_info != NULL); + dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info); } + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); } - -/* Mailbox ringbell Function */ +/** Mailbox ringbell Function */ static void dhd_bus_gen_devmb_intr(struct dhd_bus *bus) { @@ -3683,18 +4595,88 @@ dhd_bus_gen_devmb_intr(struct dhd_bus *bus) return; } if (bus->db1_for_mb) { - /* this is a pcie core register, not the config regsiter */ + /* this is a pcie core register, not the config register */ DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__)); si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678); - } - else { + } else { DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n", __FUNCTION__)); dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0)); dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0)); } } -/* doorbell ring Function */ +static void +dhd_bus_set_device_wake(struct dhd_bus *bus, bool val) +{ + if (bus->device_wake_state != val) + { + DHD_INFO(("Set Device_Wake to %d\n", val)); +#ifdef PCIE_OOB + if (bus->oob_enabled) + { + if (val) + { + gpio_port = gpio_port | (1 << DEVICE_WAKE); + 
gpio_write_port_non_block(gpio_handle_val, gpio_port); + } else { + gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE)); + gpio_write_port_non_block(gpio_handle_val, gpio_port); + } + } +#endif /* PCIE_OOB */ + bus->device_wake_state = val; + } +} + +#ifdef PCIE_OOB +void +dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val) +{ + DHD_INFO(("Set Device_Wake to %d\n", val)); + if (val) + { + gpio_port = gpio_port | (1 << BIT_BT_REG_ON); + gpio_write_port(gpio_handle_val, gpio_port); + } else { + gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON)); + gpio_write_port(gpio_handle_val, gpio_port); + } +} + +int +dhd_oob_get_bt_reg_on(struct dhd_bus *bus) +{ + int ret; + uint8 val; + ret = gpio_read_port(gpio_handle_val, &val); + + if (ret < 0) { + DHD_ERROR(("gpio_read_port returns %d\n", ret)); + return ret; + } + + if (val & (1 << BIT_BT_REG_ON)) + { + ret = 1; + } else { + ret = 0; + } + + return ret; +} + +static void +dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus) +{ + if (dhd_doorbell_timeout) + dhd_timeout_start(&bus->doorbell_timer, + (dhd_doorbell_timeout * 1000) / dhd_watchdog_ms); + else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND)) + dhd_bus_set_device_wake(bus, FALSE); +} +#endif /* PCIE_OOB */ + +/** mailbox doorbell ring function */ void dhd_bus_ringbell(struct dhd_bus *bus, uint32 value) { @@ -3708,9 +4690,13 @@ dhd_bus_ringbell(struct dhd_bus *bus, uint32 value) } } -static void -dhd_bus_ringbell_fast(struct dhd_bus *bus, uint32 value) +void +dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value) { +#ifdef PCIE_OOB + dhd_bus_set_device_wake(bus, TRUE); + dhd_bus_doorbell_timeout_reset(bus); +#endif W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value); } @@ -3738,7 +4724,7 @@ dhd_bus_get_mbintr_fn(struct dhd_bus *bus) PCIH2D_MailBox); if (bus->pcie_mb_intr_addr) { bus->pcie_mb_intr_osh = si_osh(bus->sih); - return dhd_bus_ringbell_fast; + return dhdpcie_bus_ringbell_fast; } } return dhd_bus_ringbell; @@ -3747,47 +4733,42 @@ 
dhd_bus_get_mbintr_fn(struct dhd_bus *bus) bool BCMFASTPATH dhd_bus_dpc(struct dhd_bus *bus) { - uint32 intstatus = 0; - uint32 newstatus = 0; bool resched = FALSE; /* Flag indicating resched wanted */ + unsigned long flags; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_GENERAL_LOCK(bus->dhd, flags); + /* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS + * to avoid IOCTL Resumed On timeout when ioctl is waiting for response + * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS + * and if we return from here, then IOCTL response will never be handled + */ if (bus->dhd->busstate == DHD_BUS_DOWN) { DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__)); bus->intstatus = 0; + DHD_GENERAL_UNLOCK(bus->dhd, flags); return 0; } + bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC; + DHD_GENERAL_UNLOCK(bus->dhd, flags); - intstatus = bus->intstatus; - - if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) || - (bus->sih->buscorerev == 2)) { - newstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); - dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, newstatus); - /* Merge new bits with previous */ - intstatus |= newstatus; - bus->intstatus = 0; - if (intstatus & I_MB) { - resched = dhdpcie_bus_process_mailbox_intr(bus, intstatus); - } - } else { - /* this is a PCIE core register..not a config register... 
*/ - newstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); - intstatus |= (newstatus & bus->def_intmask); - si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, newstatus, newstatus); - if (intstatus & bus->def_intmask) { - resched = dhdpcie_bus_process_mailbox_intr(bus, intstatus); - intstatus &= ~bus->def_intmask; - } - } - + resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus); if (!resched) { - // terence 20150420: no need to enable interrupt if busstate is down - if (bus->dhd->busstate) { + bus->intstatus = 0; + if (!bus->pci_d3hot_done) { dhdpcie_bus_intr_enable(bus); + } else { + DHD_ERROR(("%s: dhdpcie_bus_intr_enable skip in pci D3hot state \n", + __FUNCTION__)); } } + + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC; + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + return resched; } @@ -3798,24 +4779,45 @@ dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data) { uint32 cur_h2d_mb_data = 0; - dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0); + DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data)); + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0); if (cur_h2d_mb_data != 0) { uint32 i = 0; DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n", __FUNCTION__, cur_h2d_mb_data)); while ((i++ < 100) && cur_h2d_mb_data) { OSL_DELAY(10); - dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0); + dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0); + } + if (i >= 100) { + DHD_ERROR(("%s : waited 1ms for the dngl " + "to ack the previous mb transaction\n", __FUNCTION__)); + DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n", + __FUNCTION__, cur_h2d_mb_data)); } - if (i >= 100) - DHD_ERROR(("%s: waited 1ms for the dngl to ack the previous mb transaction\n", __FUNCTION__)); } 
- dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), HTOD_MB_DATA, 0); + dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0); dhd_bus_gen_devmb_intr(bus); - if (h2d_mb_data == H2D_HOST_D3_INFORM) + if (h2d_mb_data == H2D_HOST_D3_INFORM) { DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__)); + bus->d3_inform_cnt++; + } + if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) { + DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__)); + bus->d0_inform_in_use_cnt++; + } + if (h2d_mb_data == H2D_HOST_D0_INFORM) { + DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__)); + bus->d0_inform_cnt++; + } } static void @@ -3823,13 +4825,23 @@ dhdpcie_handle_mb_data(dhd_bus_t *bus) { uint32 d2h_mb_data = 0; uint32 zero = 0; - dhd_bus_cmn_readshared(bus, &d2h_mb_data, DTOH_MB_DATA, 0); - if (!d2h_mb_data) + dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); + if (!d2h_mb_data) { + DHD_INFO_HW4(("%s: Invalid D2H_MB_DATA: 0x%08x\n", + __FUNCTION__, d2h_mb_data)); return; + } - dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), DTOH_MB_DATA, 0); + dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0); - DHD_INFO(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data)); + DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%08x\n", __FUNCTION__, d2h_mb_data)); + if (d2h_mb_data & D2H_DEV_FWHALT) { + DHD_ERROR(("FW trap has happened\n")); + dhdpcie_checkdied(bus, NULL, 0); + /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */ + bus->dhd->busstate = DHD_BUS_DOWN; + return; + } if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) { /* what should we do */ DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__)); @@ -3842,19 +4854,18 @@ dhdpcie_handle_mb_data(dhd_bus_t *bus) } if (d2h_mb_data & D2H_DEV_D3_ACK) { /* what should we do */ - DHD_INFO_HW4(("%s D2H_MB_DATA: Received D3 ACK\n", __FUNCTION__)); + DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__)); if (!bus->wait_for_d3_ack) { 
bus->wait_for_d3_ack = 1; - dhd_os_ioctl_resp_wake(bus->dhd); + dhd_os_d3ack_wake(bus->dhd); } } - if (d2h_mb_data & D2H_DEV_FWHALT) { - DHD_INFO(("%s: FW trap has happened\n", __FUNCTION__)); -#ifdef DHD_DEBUG - dhdpcie_checkdied(bus, NULL, 0); -#endif - bus->dhd->busstate = DHD_BUS_DOWN; - } +} + +/* Inform Dongle to print HW Registers for Livelock Debug */ +void dhdpcie_bus_dongle_print_hwregs(struct dhd_bus *bus) +{ + dhdpcie_send_mb_data(bus, H2D_FW_TRAP); } static bool @@ -3870,8 +4881,7 @@ dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus) } else if (intstatus & I_BIT0) { /* do nothing for Now */ } - } - else { + } else { if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1)) dhdpcie_handle_mb_data(bus); @@ -3883,23 +4893,23 @@ dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus) resched = dhdpci_bus_read_frames(bus); } } + exit: return resched; } -/* Decode dongle to host message stream */ static bool dhdpci_bus_read_frames(dhd_bus_t *bus) { bool more = FALSE; /* There may be frames in both ctrl buf and data buf; check ctrl buf first */ - DHD_PERIM_LOCK(bus->dhd); /* Take the perimeter lock */ + DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); dhd_prot_process_ctrlbuf(bus->dhd); /* Unlock to give chance for resp to be handled */ - DHD_PERIM_UNLOCK(bus->dhd); /* Release the perimeter lock */ + DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); - DHD_PERIM_LOCK(bus->dhd); /* Take the perimeter lock */ + DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); /* update the flow ring cpls */ dhd_update_txflowrings(bus->dhd); @@ -3912,26 +4922,85 @@ dhdpci_bus_read_frames(dhd_bus_t *bus) * processing RX frames without RX bound */ more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound); - DHD_PERIM_UNLOCK(bus->dhd); /* Release the perimeter lock */ + + /* don't talk to the dongle if fw is about to be reloaded */ + if (bus->dhd->hang_was_sent) { + more = FALSE; + } + 
DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); return more; } +bool +dhdpcie_tcm_valid(dhd_bus_t *bus) +{ + uint32 addr = 0; + int rv; + uint32 shaddr = 0; + pciedev_shared_t sh; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + + /* Read last word in memory to determine address of pciedev_shared structure */ + addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr)); + + if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) || + (addr > shaddr)) { + DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n", + __FUNCTION__, addr)); + return FALSE; + } + + /* Read hndrte_shared structure */ + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh, + sizeof(pciedev_shared_t))) < 0) { + DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv)); + return FALSE; + } + + /* Compare any field in pciedev_shared_t */ + if (sh.console_addr != bus->pcie_sh->console_addr) { + DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n")); + return FALSE; + } + + return TRUE; +} + +static bool +dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version) +{ + DHD_INFO(("firmware api revision %d, host api revision %d\n", + firmware_api_version, host_api_version)); + if (firmware_api_version <= host_api_version) + return TRUE; + if ((firmware_api_version == 6) && (host_api_version == 5)) + return TRUE; + if ((firmware_api_version == 5) && (host_api_version == 6)) + return TRUE; + return FALSE; +} + static int dhdpcie_readshared(dhd_bus_t *bus) { uint32 addr = 0; - int rv, w_init, r_init; + int rv, dma_indx_wr_buf, dma_indx_rd_buf; uint32 shaddr = 0; pciedev_shared_t *sh = bus->pcie_sh; dhd_timeout_t tmo; shaddr = bus->dongle_ram_base + bus->ramsize - 4; + + DHD_INFO_HW4(("%s: ram_base: 0x%x ramsize 0x%x tcm: %p shaddr: 0x%x nvram_csm: 0x%x\n", + __FUNCTION__, bus->dongle_ram_base, bus->ramsize, + bus->tcm, shaddr, bus->nvram_csm)); /* start a timer for 5 seconds */ 
dhd_timeout_start(&tmo, MAX_READ_TIMEOUT); while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) { - /* Read last word in memory to determine address of sdpcm_shared structure */ + /* Read last word in memory to determine address of pciedev_shared structure */ addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr)); } @@ -3943,15 +5012,14 @@ dhdpcie_readshared(dhd_bus_t *bus) return BCME_ERROR; } else { bus->shared_addr = (ulong)addr; - DHD_ERROR(("%s: PCIe shared addr read took %u usec " - "before dongle is ready\n", __FUNCTION__, tmo.elapsed)); + DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec " + "before dongle is ready\n", __FUNCTION__, addr, tmo.elapsed)); } /* Read hndrte_shared structure */ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(pciedev_shared_t))) < 0) { - DHD_ERROR(("%s: Failed to read PCIe shared struct," - "size read %d < %d\n", __FUNCTION__, rv, (int)sizeof(pciedev_shared_t))); + DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__, rv)); return rv; } @@ -3965,9 +5033,9 @@ dhdpcie_readshared(dhd_bus_t *bus) sh->msgtrace_addr = ltoh32(sh->msgtrace_addr); sh->dma_rxoffset = ltoh32(sh->dma_rxoffset); sh->rings_info_ptr = ltoh32(sh->rings_info_ptr); - /* load bus console address */ #ifdef DHD_DEBUG + /* load bus console address */ bus->console_addr = sh->console_addr; #endif @@ -3977,28 +5045,25 @@ dhdpcie_readshared(dhd_bus_t *bus) DHD_ERROR(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset)); - if ((sh->flags & PCIE_SHARED_VERSION_MASK) > PCIE_SHARED_VERSION) { + if (!(dhdpcie_check_firmware_compatible(sh->flags & PCIE_SHARED_VERSION_MASK, + PCIE_SHARED_VERSION))) + { DHD_ERROR(("%s: pcie_shared version %d in dhd " "is older than pciedev_shared version %d in dongle\n", __FUNCTION__, PCIE_SHARED_VERSION, sh->flags & PCIE_SHARED_VERSION_MASK)); return BCME_ERROR; } - if ((sh->flags & PCIE_SHARED_VERSION_MASK) >= 4) { - if (sh->flags & 
PCIE_SHARED_TXPUSH_SPRT) { -#ifdef DHDTCPACK_SUPPRESS - /* Do not use tcpack suppress as packets don't stay in queue */ - dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF); -#endif - bus->txmode_push = TRUE; - } else - bus->txmode_push = FALSE; - } - DHD_ERROR(("%s: bus->txmode_push is set to %d\n", __FUNCTION__, bus->txmode_push)); + + bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ? + sizeof(uint16) : sizeof(uint32); + DHD_ERROR(("%s: Dongle advertizes %d size indices\n", + __FUNCTION__, bus->rw_index_sz)); /* Does the FW support DMA'ing r/w indices */ if (sh->flags & PCIE_SHARED_DMA_INDEX) { + DHD_ERROR(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n", __FUNCTION__, (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0), @@ -4037,33 +5102,29 @@ dhdpcie_readshared(dhd_bus_t *bus) * The max_sub_queues is read from FW initialized ring_info */ if (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) { - w_init = dhd_prot_init_index_dma_block(bus->dhd, - HOST_TO_DNGL_DMA_WRITEINDX_BUFFER, - bus->max_sub_queues); - r_init = dhd_prot_init_index_dma_block(bus->dhd, - DNGL_TO_HOST_DMA_READINDX_BUFFER, - BCMPCIE_D2H_COMMON_MSGRINGS); - - if ((w_init != BCME_OK) || (r_init != BCME_OK)) { + dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + H2D_DMA_INDX_WR_BUF, bus->max_sub_queues); + dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + D2H_DMA_INDX_RD_BUF, BCMPCIE_D2H_COMMON_MSGRINGS); + + if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices" - "Host will use w/r indices in TCM\n", - __FUNCTION__)); + "Host will use w/r indices in TCM\n", + __FUNCTION__)); bus->dhd->dma_h2d_ring_upd_support = FALSE; } } if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support)) { - w_init = dhd_prot_init_index_dma_block(bus->dhd, - DNGL_TO_HOST_DMA_WRITEINDX_BUFFER, - BCMPCIE_D2H_COMMON_MSGRINGS); - r_init = 
dhd_prot_init_index_dma_block(bus->dhd, - HOST_TO_DNGL_DMA_READINDX_BUFFER, - bus->max_sub_queues); - - if ((w_init != BCME_OK) || (r_init != BCME_OK)) { + dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + D2H_DMA_INDX_WR_BUF, BCMPCIE_D2H_COMMON_MSGRINGS); + dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + H2D_DMA_INDX_RD_BUF, bus->max_sub_queues); + + if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices" - "Host will use w/r indices in TCM\n", - __FUNCTION__)); + "Host will use w/r indices in TCM\n", + __FUNCTION__)); bus->dhd->dma_d2h_ring_upd_support = FALSE; } } @@ -4074,19 +5135,24 @@ dhdpcie_readshared(dhd_bus_t *bus) bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t)); DHD_INFO(("%s: ring_info\n", __FUNCTION__)); - DHD_ERROR(("%s: max H2D queues %d\n", __FUNCTION__, ltoh16(ring_info.max_sub_queues))); + DHD_ERROR(("%s: max H2D queues %d\n", + __FUNCTION__, ltoh16(ring_info.max_sub_queues))); - DHD_INFO(("%s: mail box address\n", __FUNCTION__)); - DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n", __FUNCTION__, bus->h2d_mb_data_ptr_addr)); - DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n", __FUNCTION__, bus->d2h_mb_data_ptr_addr)); + DHD_INFO(("mail box address\n")); + DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n", + __FUNCTION__, bus->h2d_mb_data_ptr_addr)); + DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n", + __FUNCTION__, bus->d2h_mb_data_ptr_addr)); } bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK; - DHD_INFO(("%s: d2h_sync_mode 0x%08x\n", __FUNCTION__, bus->dhd->d2h_sync_mode)); + DHD_INFO(("%s: d2h_sync_mode 0x%08x\n", + __FUNCTION__, bus->dhd->d2h_sync_mode)); return BCME_OK; -} -/* Read ring mem and ring state ptr info from shared are in TCM */ +} /* dhdpcie_readshared */ + +/** Read ring mem and ring state ptr info from shared memory area in device memory */ static void 
dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info) { @@ -4102,7 +5168,6 @@ dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info) D2H_MSGRING_CONTROL_COMPLETE 2 D2H_MSGRING_TX_COMPLETE 3 D2H_MSGRING_RX_COMPLETE 4 - TX_FLOW_RING 5 */ { @@ -4117,13 +5182,6 @@ dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info) DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__, i, bus->ring_sh[i].ring_mem_addr)); } - - /* Tx flow Ring */ - if (bus->txmode_push) { - bus->ring_sh[i].ring_mem_addr = tcm_memloc; - DHD_INFO(("%s: TX ring ring id %d ring mem addr 0x%04x \n", __FUNCTION__, - i, bus->ring_sh[i].ring_mem_addr)); - } } /* Ring state mem ptr info */ @@ -4132,59 +5190,56 @@ dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info) d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr); h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr); h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr); + /* Store h2d common ring write/read pointers */ for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) { bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; /* update mem block */ - h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32); - h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32); + h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz; + h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz; DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i, bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); } + /* Store d2h common ring write/read pointers */ for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) { bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr; bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr; /* update mem block */ - d2h_w_idx_ptr = d2h_w_idx_ptr + sizeof(uint32); - d2h_r_idx_ptr = d2h_r_idx_ptr + sizeof(uint32); + d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz; + d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz; DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", 
__FUNCTION__, i, bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); } /* Store txflow ring write/read pointers */ - if (bus->txmode_push) { + for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS); + i++, j++) + { bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; - DHD_INFO(("%s: txflow : idx %d write %x read %x \n", __FUNCTION__, i, - bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); - } else { - for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS); - i++, j++) - { - bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; - bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; - - /* update mem block */ - h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32); - h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32); + /* update mem block */ + h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz; + h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz; - DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n", - __FUNCTION__, i, - bus->ring_sh[i].ring_state_w, - bus->ring_sh[i].ring_state_r)); - } + DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n", + __FUNCTION__, i, + bus->ring_sh[i].ring_state_w, + bus->ring_sh[i].ring_state_r)); } } -} +} /* dhd_fillup_ring_sharedptr_info */ -/* Initialize bus module: prepare for communication w/dongle */ +/** + * Initialize bus module: prepare for communication with the dongle. Called after downloading + * firmware into the dongle. + */ int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) { dhd_bus_t *bus = dhdp->bus; @@ -4207,7 +5262,6 @@ int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) return ret; } - /* Make sure we're talking to the core. 
*/ bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0); ASSERT(bus->reg != NULL); @@ -4215,15 +5269,25 @@ int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) /* Set bus state according to enable result */ dhdp->busstate = DHD_BUS_DATA; + if (!dhd_download_fw_on_driverload) + dhd_dpc_enable(bus->dhd); + /* Enable the interrupt after device is up */ dhdpcie_bus_intr_enable(bus); /* bcmsdh_intr_unmask(bus->sdh); */ - return ret; +#ifdef DHD_PCIE_RUNTIMEPM + bus->idlecount = 0; + bus->idletime = (int32)MAX_IDLE_COUNT; + init_waitqueue_head(&bus->rpm_queue); + mutex_init(&bus->pm_lock); +#endif /* DHD_PCIE_RUNTIMEPM */ -} + bus->d3_ack_war_cnt = 0; + return ret; +} static void dhdpcie_init_shared_addr(dhd_bus_t *bus) @@ -4231,6 +5295,10 @@ dhdpcie_init_shared_addr(dhd_bus_t *bus) uint32 addr = 0; uint32 val = 0; addr = bus->dongle_ram_base + bus->ramsize - 4; +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + DHD_INFO_HW4(("%s: tcm: %p, addr: 0x%x val: 0x%x\n", __FUNCTION__, bus->tcm, addr, val)); dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val)); } @@ -4245,7 +5313,8 @@ dhdpcie_chipmatch(uint16 vendor, uint16 device) } if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) || - (device == BCM4350_D11AC5G_ID) || BCM4350_CHIP(device)) + (device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) || + (device == BCM43569_CHIP_ID)) return 0; if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) || @@ -4257,7 +5326,7 @@ dhdpcie_chipmatch(uint16 vendor, uint16 device) return 0; if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) || - (device == BCM4345_D11AC5G_ID) || (device == BCM4345_CHIP_ID)) + (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) return 0; if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) || @@ -4273,36 +5342,47 @@ dhdpcie_chipmatch(uint16 vendor, uint16 device) return 0; if ((device == 
BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) || - (device == BCM4358_D11AC5G_ID) || (device == BCM4358_CHIP_ID)) + (device == BCM4358_D11AC5G_ID)) return 0; if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) || (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) return 0; + if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) || (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) return 0; + if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) || - (device == BCM4359_D11AC5G_ID) || (device == BCM4359_CHIP_ID)) + (device == BCM4359_D11AC5G_ID)) return 0; - - DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device)); - return (-ENODEV); -} + if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) || + (device == BCM43596_D11AC5G_ID)) + return 0; -/* + if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) || + (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) + return 0; -Name: dhdpcie_cc_nvmshadow + if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) || + (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID)) + return 0; -Description: -A shadow of OTP/SPROM exists in ChipCommon Region -betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF). -Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size -can also be read from ChipCommon Registers. -*/ + DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device)); + return (-ENODEV); +} /* dhdpcie_chipmatch */ +/** + * Name: dhdpcie_cc_nvmshadow + * + * Description: + * A shadow of OTP/SPROM exists in ChipCommon Region + * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF). + * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size + * can also be read from ChipCommon Registers. 
+ */ static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b) { @@ -4318,11 +5398,12 @@ dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b) uint chipc_corerev; chipcregs_t *chipcregs; - /* Save the current core */ cur_coreid = si_coreid(bus->sih); /* Switch to ChipC */ chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0); + ASSERT(chipcregs != NULL); + chipc_corerev = si_corerev(bus->sih); /* Check ChipcommonCore Rev */ @@ -4332,8 +5413,7 @@ dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b) } /* Check ChipID */ - if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && - ((uint16)bus->sih->chip != BCM4345_CHIP_ID)) { + if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip)) { DHD_ERROR(("%s: cc_nvmdump cmd. supported for 4350/4345 only\n", __FUNCTION__)); return BCME_UNSUPPORTED; @@ -4391,8 +5471,7 @@ dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b) /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */ /* dump_size in 16bit words */ dump_size = sprom_size > 8 ? 
(8 * 1024) / 16 : sprom_size / 16; - } - else { + } else { DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n", __FUNCTION__)); return BCME_NOTFOUND; @@ -4425,15 +5504,9 @@ dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b) si_setcore(bus->sih, cur_coreid, 0); return BCME_OK; -} - - -uint8 BCMFASTPATH -dhd_bus_is_txmode_push(dhd_bus_t *bus) -{ - return bus->txmode_push; -} +} /* dhdpcie_cc_nvmshadow */ +/** Flow rings are dynamically created and destroyed */ void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node) { void *pkt; @@ -4457,25 +5530,29 @@ void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node) while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { PKTFREE(bus->dhd->osh, pkt, TRUE); } - ASSERT(flow_queue_empty(queue)); + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); flow_ring_node->status = FLOW_RING_STATUS_CLOSED; flow_ring_node->active = FALSE; - dll_delete(&flow_ring_node->list); DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - /* Call Flow ring clean up */ - dhd_prot_clean_flow_ring(bus->dhd, flow_ring_node->prot_info); - dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex, - flow_ring_node->flowid); + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + dll_delete(&flow_ring_node->list); + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + /* Release the flowring object back into the pool */ + dhd_prot_flowrings_pool_release(bus->dhd, + flow_ring_node->flowid, flow_ring_node->prot_info); + /* Free the flowid back to the flowid allocator */ + dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex, + flow_ring_node->flowid); } -/* +/** * Allocate a Flow ring buffer, - * Init Ring buffer, - * Send Msg to device about flow ring creation + * Init Ring buffer, send Msg to device about flow ring creation */ int dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg) @@ -4491,6 +5568,7 @@ dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg) return BCME_OK; } +/** Handle response from 
dongle on a 'flow ring create' request */ void dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status) { @@ -4514,7 +5592,28 @@ dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status) flow_ring_node->status = FLOW_RING_STATUS_OPEN; DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - dhd_bus_schedule_queue(bus, flowid, FALSE); + /* Now add the Flow ring node into the active list + * Note that this code to add the newly created node to the active + * list was living in dhd_flowid_lookup. But note that after + * adding the node to the active list the contents of node is being + * filled in dhd_prot_flow_ring_create. + * If there is a D2H interrupt after the node gets added to the + * active list and before the node gets populated with values + * from the Bottom half dhd_update_txflowrings would be called. + * which will then try to walk through the active flow ring list, + * pickup the nodes and operate on them. Now note that since + * the function dhd_prot_flow_ring_create is not finished yet + * the contents of flow_ring_node can still be NULL leading to + * crashes. Hence the flow_ring_node should be added to the + * active list only after its truely created, which is after + * receiving the create response message from the Host. 
+ */ + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + dll_prepend(&bus->const_flowring, &flow_ring_node->list); + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */ return; } @@ -4532,9 +5631,10 @@ dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg) flow_ring_node = (flow_ring_node_t *)arg; DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); - if (flow_ring_node->status & FLOW_RING_STATUS_DELETE_PENDING) { + if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) { DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - DHD_ERROR(("%s :Delete Pending\n", __FUNCTION__)); + DHD_ERROR(("%s :Delete Pending Flow %d\n", + __FUNCTION__, flow_ring_node->flowid)); return BCME_ERROR; } flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING; @@ -4551,7 +5651,7 @@ dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg) while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { PKTFREE(bus->dhd->osh, pkt, TRUE); } - ASSERT(flow_queue_empty(queue)); + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); @@ -4583,6 +5683,7 @@ dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status) } +/** This function is not called. Obsolete ? 
*/ int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg) { void *pkt; @@ -4603,11 +5704,12 @@ int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg) */ dhd_tcpack_info_tbl_clean(bus->dhd); #endif /* DHDTCPACK_SUPPRESS */ + /* Flush all pending packets in the queue, if any */ while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { PKTFREE(bus->dhd->osh, pkt, TRUE); } - ASSERT(flow_queue_empty(queue)); + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); @@ -4637,15 +5739,24 @@ dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status) } uint32 -dhd_bus_max_h2d_queues(struct dhd_bus *bus, uint8 *txpush) +dhd_bus_max_h2d_queues(struct dhd_bus *bus) { - if (bus->txmode_push) - *txpush = 1; - else - *txpush = 0; return bus->max_sub_queues; } +/* To be symmetric with SDIO */ +void +dhd_bus_pktq_flush(dhd_pub_t *dhdp) +{ + return; +} + +void +dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val) +{ + dhdp->bus->is_linkdown = val; +} + int dhdpcie_bus_clock_start(struct dhd_bus *bus) { @@ -4698,7 +5809,7 @@ int dhd_bus_release_dongle(struct dhd_bus *bus) { bool dongle_isolation; - osl_t *osh; + osl_t *osh; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); @@ -4716,17 +5827,20 @@ dhd_bus_release_dongle(struct dhd_bus *bus) } #ifdef BCMPCIE_OOB_HOST_WAKE -int dhd_bus_oob_intr_register(dhd_pub_t *dhdp) +int +dhd_bus_oob_intr_register(dhd_pub_t *dhdp) { return dhdpcie_oob_intr_register(dhdp->bus); } -void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp) +void +dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp) { dhdpcie_oob_intr_unregister(dhdp->bus); } -void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable) +void +dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable) { dhdpcie_oob_intr_set(dhdp->bus, enable); } diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_pcie.h b/drivers/amlogic/wifi/bcmdhd/dhd_pcie.h new file mode 100644 index 0000000000000..dc65947a3ae5a --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_pcie.h @@ 
-0,0 +1,329 @@ +/* + * Linux DHD Bus Module for PCIE + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_pcie.h 607608 2015-12-21 13:14:19Z $ + */ + + +#ifndef dhd_pcie_h +#define dhd_pcie_h + +#include +#include +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM +#ifdef CONFIG_PCI_MSM +#include +#else +#include +#endif /* CONFIG_PCI_MSM */ +#endif /* CONFIG_ARCH_MSM */ +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +#ifdef CONFIG_SOC_EXYNOS8890 +#include +extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg); +extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg); +#endif /* CONFIG_SOC_EXYNOS8890 */ +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + +#ifdef DHD_PCIE_RUNTIMEPM +#include +#include + +#define DEFAULT_DHD_RUNTIME_MS 100 +#ifndef CUSTOM_DHD_RUNTIME_MS +#define CUSTOM_DHD_RUNTIME_MS DEFAULT_DHD_RUNTIME_MS +#endif /* CUSTOM_DHD_RUNTIME_MS */ + + +#ifndef MAX_IDLE_COUNT +#define MAX_IDLE_COUNT 16 +#endif /* MAX_IDLE_COUNT */ + +#ifndef MAX_RESUME_WAIT +#define MAX_RESUME_WAIT 100 +#endif /* MAX_RESUME_WAIT */ +#endif /* DHD_PCIE_RUNTIMEPM */ + +/* defines */ + +#define PCMSGBUF_HDRLEN 0 +#define DONGLE_REG_MAP_SIZE (32 * 1024) +#if defined(DHD_PCIE_BAR1_WIN_BASE_FIX) +#define DONGLE_TCM_MAP_SIZE DHD_PCIE_BAR1_WIN_BASE_FIX +#else +#define DONGLE_TCM_MAP_SIZE (4096 * 1024) +#endif /* defined(DHD_PCIE_BAR1_WIN_BASE_FIX) */ +#define DONGLE_MIN_MEMSIZE (128 *1024) +#ifdef DHD_DEBUG +#define DHD_PCIE_SUCCESS 0 +#define DHD_PCIE_FAILURE 1 +#endif /* DHD_DEBUG */ +#define REMAP_ENAB(bus) ((bus)->remap) +#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize))) + +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM +#define struct_pcie_notify struct msm_pcie_notify +#define struct_pcie_register_event struct msm_pcie_register_event +#endif /* CONFIG_ARCH_MSM */ +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +#ifdef CONFIG_SOC_EXYNOS8890 +#define struct_pcie_notify struct exynos_pcie_notify +#define struct_pcie_register_event 
struct exynos_pcie_register_event +#endif /* CONFIG_SOC_EXYNOS8890 */ +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + +/* + * Router with 4366 can have 128 stations and 16 BSS, + * hence (128 stations x 4 access categories for ucast) + 16 bc/mc flowrings + */ +#define MAX_DHD_TX_FLOWS 320 + +/* user defined data structures */ +/* Device console log buffer state */ +#define CONSOLE_LINE_MAX 192 +#define CONSOLE_BUFFER_MAX (8 * 1024) + +#ifndef MAX_CNTL_D3ACK_TIMEOUT +#define MAX_CNTL_D3ACK_TIMEOUT 2 +#endif /* MAX_CNTL_D3ACK_TIMEOUT */ + +#ifdef DHD_DEBUG + +typedef struct dhd_console { + uint count; /* Poll interval msec counter */ + uint log_addr; /* Log struct address (fixed) */ + hnd_log_t log; /* Log struct (host copy) */ + uint bufsize; /* Size of log buffer */ + uint8 *buf; /* Log buffer (host copy) */ + uint last; /* Last buffer read index */ +} dhd_console_t; +#endif /* DHD_DEBUG */ +typedef struct ring_sh_info { + uint32 ring_mem_addr; + uint32 ring_state_w; + uint32 ring_state_r; +} ring_sh_info_t; + +typedef struct dhd_bus { + dhd_pub_t *dhd; + struct pci_dev *dev; /* pci device handle */ + dll_t const_flowring; /* constructed list of tx flowring queues */ + + si_t *sih; /* Handle for SI calls */ + char *vars; /* Variables (from CIS and/or other) */ + uint varsz; /* Size of variables buffer */ + uint32 sbaddr; /* Current SB window pointer (-1, invalid) */ + sbpcieregs_t *reg; /* Registers for PCIE core */ + + uint armrev; /* CPU core revision */ + uint ramrev; /* SOCRAM core revision */ + uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 srmemsize; /* Size of SRMEM */ + + uint32 bus; /* gSPI or SDIO bus */ + uint32 intstatus; /* Intstatus bits (events) pending */ + bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ + bool fcstate; /* State of dongle flow-control */ + + uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */ + char 
*fw_path; /* module_param: path to firmware image */ + char *nv_path; /* module_param: path to nvram vars file */ +#ifdef CACHE_FW_IMAGES + int processed_nvram_params_len; /* Modified len of NVRAM info */ +#endif + + + struct pktq txq; /* Queue length used for flow-control */ + + bool intr; /* Use interrupts */ + bool ipend; /* Device interrupt is pending */ + bool intdis; /* Interrupts disabled by isr */ + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ + +#ifdef DHD_DEBUG + dhd_console_t console; /* Console output polling support */ + uint console_addr; /* Console address from shared struct */ +#endif /* DHD_DEBUG */ + + bool alp_only; /* Don't use HT clock (ALP only) */ + + bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram + * Available with socram rev 16 + * Remap region not DMA-able + */ + uint32 resetinstr; + uint32 dongle_ram_base; + + ulong shared_addr; + pciedev_shared_t *pcie_sh; + bool bus_flowctrl; + uint32 dma_rxoffset; + volatile char *regs; /* pci device memory va */ + volatile char *tcm; /* pci device memory va */ +#if defined(DHD_PCIE_BAR1_WIN_BASE_FIX) + uint32 tcm_size; + uint32 bar1_win_base; + uint32 bar1_win_mask; +#endif /* defined(DHD_PCIE_BAR1_WIN_BASE_FIX) */ + osl_t *osh; + uint32 nvram_csm; /* Nvram checksum */ + uint16 pollrate; + uint16 polltick; + + uint32 *pcie_mb_intr_addr; + void *pcie_mb_intr_osh; + bool sleep_allowed; + + /* version 3 shared struct related info start */ + ring_sh_info_t ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS]; + uint8 h2d_ring_count; + uint8 d2h_ring_count; + uint32 ringmem_ptr; + uint32 ring_state_ptr; + + uint32 d2h_dma_scratch_buffer_mem_addr; + + uint32 h2d_mb_data_ptr_addr; + uint32 d2h_mb_data_ptr_addr; + /* version 3 shared struct related info end */ + + uint32 def_intmask; + bool ltrsleep_on_unload; + uint wait_for_d3_ack; + uint32 max_sub_queues; + uint32 rw_index_sz; + bool db1_for_mb; + bool suspended; + + 
dhd_timeout_t doorbell_timer; + bool device_wake_state; + bool irq_registered; +#ifdef PCIE_OOB + bool oob_enabled; +#endif /* PCIE_OOB */ +#ifdef SUPPORT_LINKDOWN_RECOVERY +#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \ + defined(CONFIG_SOC_EXYNOS8890)) +#ifdef CONFIG_ARCH_MSM + uint8 no_cfg_restore; +#endif /* CONFIG_ARCH_MSM */ + struct_pcie_register_event pcie_event; +#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && CONFIG_SOC_EXYNOS8890) */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +#ifdef DHD_PCIE_RUNTIMEPM + int32 idlecount; /* Activity timeout counter */ + int32 idletime; /* Control for activity timeout */ + int32 bus_wake; /* For wake up the bus */ + bool runtime_resume_done; /* For check runtime suspend end */ + struct mutex pm_lock; /* Synchronize for system PM & runtime PM */ + wait_queue_head_t rpm_queue; /* wait-queue for bus wake up */ +#endif /* DHD_PCIE_RUNTIMEPM */ + uint32 d3_inform_cnt; + uint32 d0_inform_cnt; + uint32 d0_inform_in_use_cnt; + uint8 force_suspend; + uint32 d3_ack_war_cnt; + uint8 is_linkdown; + uint32 pci_d3hot_done; +} dhd_bus_t; + +/* function declarations */ + +extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size); +extern int dhdpcie_bus_register(void); +extern void dhdpcie_bus_unregister(void); +extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device); + +#if defined(DHD_PCIE_BAR1_WIN_BASE_FIX) +extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh, + volatile char *regs, volatile char *tcm, uint32 tcm_size, void *pci_dev); +#else +extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh, + volatile char *regs, volatile char *tcm, void *pci_dev); +#endif /* defined(DHD_PCIE_BAR1_WIN_BASE_FIX) */ +extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size); +extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data); +extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus); +extern void 
dhdpcie_bus_intr_disable(struct dhd_bus *bus); +extern void dhdpcie_bus_release(struct dhd_bus *bus); +extern int32 dhdpcie_bus_isr(struct dhd_bus *bus); +extern void dhdpcie_free_irq(dhd_bus_t *bus); +extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value); +extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state); +extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state); +extern bool dhdpcie_tcm_valid(dhd_bus_t *bus); +extern void dhdpcie_bus_dongle_print_hwregs(struct dhd_bus *bus); +#ifndef BCMPCIE_OOB_HOST_WAKE +extern void dhdpcie_pme_active(osl_t *osh, bool enable); +#endif /* !BCMPCIE_OOB_HOST_WAKE */ +extern bool dhdpcie_pme_cap(osl_t *osh); +extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus); +extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus); +extern int dhdpcie_disable_device(dhd_bus_t *bus); +extern int dhdpcie_enable_device(dhd_bus_t *bus); +extern int dhdpcie_alloc_resource(dhd_bus_t *bus); +extern void dhdpcie_free_resource(dhd_bus_t *bus); +extern int dhdpcie_bus_request_irq(struct dhd_bus *bus); +#ifdef BCMPCIE_OOB_HOST_WAKE +extern int dhdpcie_oob_intr_register(dhd_bus_t *bus); +extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus); +extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef PCIE_OOB +extern void dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val); +extern int dhd_oob_get_bt_reg_on(struct dhd_bus *bus); +#endif /* PCIE_OOB */ + +#ifdef USE_EXYNOS_PCIE_RC_PMPATCH +#if defined(CONFIG_MACH_UNIVERSAL5433) +#define SAMSUNG_PCIE_DEVICE_ID 0xa5e3 +#define SAMSUNG_PCIE_CH_NUM +#elif defined(CONFIG_MACH_UNIVERSAL7420) +#define SAMSUNG_PCIE_DEVICE_ID 0xa575 +#define SAMSUNG_PCIE_CH_NUM 1 +#elif defined(CONFIG_SOC_EXYNOS8890) +#define SAMSUNG_PCIE_DEVICE_ID 0xa544 +#define SAMSUNG_PCIE_CH_NUM 0 +#else +#error "Not supported platform" +#endif +#ifdef CONFIG_MACH_UNIVERSAL5433 +extern int exynos_pcie_pm_suspend(void); +extern int 
exynos_pcie_pm_resume(void); +#else +extern int exynos_pcie_pm_suspend(int ch_num); +extern int exynos_pcie_pm_resume(int ch_num); +#endif /* CONFIG_MACH_UNIVERSAL5433 */ +#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ + +extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus); +#endif /* dhd_pcie_h */ diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c b/drivers/amlogic/wifi/bcmdhd/dhd_pcie_linux.c similarity index 62% rename from drivers/net/wireless/bcmdhd/dhd_pcie_linux.c rename to drivers/amlogic/wifi/bcmdhd/dhd_pcie_linux.c index e6a96fedafe58..c1c054c32393b 100644 --- a/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_pcie_linux.c @@ -1,9 +1,30 @@ /* * Linux DHD Bus Module for PCIE * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd_pcie_linux.c 506043 2014-10-02 12:29:45Z $ + * + * <> + * + * $Id: dhd_pcie_linux.c 610267 2016-01-06 16:03:53Z $ */ @@ -31,14 +52,14 @@ #include #include #ifdef CONFIG_ARCH_MSM -#ifdef CONFIG_ARCH_MSM8994 +#ifdef CONFIG_PCI_MSM #include #else #include -#endif +#endif /* CONFIG_PCI_MSM */ #endif /* CONFIG_ARCH_MSM */ -#define PCI_CFG_RETRY 10 +#define PCI_CFG_RETRY 10 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */ #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */ @@ -76,6 +97,7 @@ typedef struct dhdpcie_info uint16 last_intrstatus; /* to cache intrstatus */ int irq; char pciname[32]; + struct pci_saved_state* default_state; struct pci_saved_state* state; #ifdef BCMPCIE_OOB_HOST_WAKE void *os_cxt; /* Pointer to per-OS private data */ @@ -118,13 +140,22 @@ static irqreturn_t dhdpcie_isr(int irq, void *arg); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) DEFINE_MUTEX(_dhd_sdio_mutex_lock_); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ -#endif +#endif -static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state); static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state); -static int dhdpcie_pci_resume(struct pci_dev *dev); +static int dhdpcie_resume_host_dev(dhd_bus_t *bus); +static int dhdpcie_suspend_host_dev(dhd_bus_t *bus); static int dhdpcie_resume_dev(struct pci_dev *dev); static int dhdpcie_suspend_dev(struct pci_dev *dev); +#ifdef DHD_PCIE_RUNTIMEPM +static int dhdpcie_pm_suspend(struct device *dev); +static int dhdpcie_pm_prepare(struct device *dev); +static int dhdpcie_pm_resume(struct device *dev); +static void dhdpcie_pm_complete(struct device *dev); +#else +static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state); +static int dhdpcie_pci_resume(struct pci_dev *dev); +#endif /* DHD_PCIE_RUNTIMEPM */ static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = { { vendor: 0x14e4, device: PCI_ANY_ID, @@ -138,6 +169,16 @@ static struct pci_device_id 
dhdpcie_pci_devid[] __devinitdata = { }; MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid); +/* Power Management Hooks */ +#ifdef DHD_PCIE_RUNTIMEPM +static const struct dev_pm_ops dhd_pcie_pm_ops = { + .prepare = dhdpcie_pm_prepare, + .suspend = dhdpcie_pm_suspend, + .resume = dhdpcie_pm_resume, + .complete = dhdpcie_pm_complete, +}; +#endif /* DHD_PCIE_RUNTIMEPM */ + static struct pci_driver dhdpcie_driver = { node: {}, name: "pcieh", @@ -147,45 +188,57 @@ static struct pci_driver dhdpcie_driver = { #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) save_state: NULL, #endif +#ifdef DHD_PCIE_RUNTIMEPM + .driver.pm = &dhd_pcie_pm_ops, +#else suspend: dhdpcie_pci_suspend, resume: dhdpcie_pci_resume, +#endif /* DHD_PCIE_RUNTIMEPM */ }; int dhdpcie_init_succeeded = FALSE; -static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state) +#ifdef DHD_PCIE_RUNTIMEPM +static int dhdpcie_pm_suspend(struct device *dev) { - int ret = 0; + struct pci_dev *pdev = to_pci_dev(dev); + return dhdpcie_set_suspend_resume(pdev, TRUE); +} + +static int dhdpcie_pm_prepare(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); dhdpcie_info_t *pch = pci_get_drvdata(pdev); dhd_bus_t *bus = NULL; if (pch) { bus = pch->bus; + DHD_DISABLE_RUNTIME_PM(bus->dhd); } - /* When firmware is not loaded do the PCI bus */ - /* suspend/resume only */ - if (bus && (bus->dhd->busstate == DHD_BUS_DOWN) && -#ifdef CONFIG_MACH_UNIVERSAL5433 - /* RB:34285 check_rev() : return 1 - new rev., 0 - old rev. 
*/ - (!check_rev() || (check_rev() && !bus->dhd->dongle_reset))) -#else - !bus->dhd->dongle_reset) -#endif /* CONFIG_MACH_UNIVERSAL5433 */ - { - ret = dhdpcie_pci_suspend_resume(bus, state); - return ret; - } + return 0; +} - if (bus && ((bus->dhd->busstate == DHD_BUS_SUSPEND)|| - (bus->dhd->busstate == DHD_BUS_DATA)) && - (bus->suspended != state)) { +static int dhdpcie_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + return dhdpcie_set_suspend_resume(pdev, FALSE); +} - ret = dhdpcie_bus_suspend(bus, state); +static void dhdpcie_pm_complete(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + + if (pch) { + bus = pch->bus; + DHD_ENABLE_RUNTIME_PM(bus->dhd); } - return ret; -} + return; +} +#else static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state) { BCM_REFERENCE(state); @@ -197,55 +250,185 @@ static int dhdpcie_pci_resume(struct pci_dev *pdev) return dhdpcie_set_suspend_resume(pdev, FALSE); } +#endif /* DHD_PCIE_RUNTIMEPM */ + +static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state) +{ + int ret = 0; + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + + if (pch) { + bus = pch->bus; + } + +#ifdef DHD_PCIE_RUNTIMEPM + if (bus && !bus->dhd->dongle_reset) { + /* if wakelock is held during suspend, return failed */ + if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) { + return -EBUSY; + } + + mutex_lock(&bus->pm_lock); + } +#endif /* DHD_PCIE_RUNTIMEPM */ + + /* When firmware is not loaded do the PCI bus */ + /* suspend/resume only */ + if (bus && (bus->dhd->busstate == DHD_BUS_DOWN) && + !bus->dhd->dongle_reset) { + ret = dhdpcie_pci_suspend_resume(bus, state); +#ifdef DHD_PCIE_RUNTIMEPM + mutex_unlock(&bus->pm_lock); +#endif /* DHD_PCIE_RUNTIMEPM */ + return ret; + } + + if (bus && ((bus->dhd->busstate == DHD_BUS_SUSPEND)|| + (bus->dhd->busstate == DHD_BUS_DATA)) && + (bus->suspended != 
state)) { + ret = dhdpcie_bus_suspend(bus, state); + } + +#ifdef DHD_PCIE_RUNTIMEPM + if (bus && !bus->dhd->dongle_reset) { + mutex_unlock(&bus->pm_lock); + } +#endif /* DHD_PCIE_RUNTIMEPM */ + return ret; +} + static int dhdpcie_suspend_dev(struct pci_dev *dev) { int ret; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + dhdpcie_info_t *pch = pci_get_drvdata(dev); + dhd_bus_t *bus = pch->bus; + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); + return BCME_ERROR; + } +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + bus->pci_d3hot_done = 1; +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ pci_save_state(dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + pch->state = pci_store_saved_state(dev); +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ pci_enable_wake(dev, PCI_D0, TRUE); - pci_disable_device(dev); + if (pci_is_enabled(dev)) { + pci_disable_device(dev); + } ret = pci_set_power_state(dev, PCI_D3hot); if (ret) { DHD_ERROR(("%s: pci_set_power_state error %d\n", __FUNCTION__, ret)); } + disable_irq(dev->irq); return ret; } static int dhdpcie_resume_dev(struct pci_dev *dev) { int err = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + dhdpcie_info_t *pch = pci_get_drvdata(dev); + dhd_bus_t *bus = pch->bus; + pci_load_and_free_saved_state(dev, &pch->state); +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + bus->pci_d3hot_done = 0; +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ pci_restore_state(dev); err = pci_enable_device(dev); if (err) { printf("%s:pci_enable_device error %d \n", __FUNCTION__, err); - return err; + goto out; } pci_set_master(dev); err = pci_set_power_state(dev, 
PCI_D0); if (err) { printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err); - return err; + goto out; } + +out: + enable_irq(dev->irq); return err; } -int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state) +static int dhdpcie_resume_host_dev(dhd_bus_t *bus) +{ + int bcmerror = 0; +#ifdef USE_EXYNOS_PCIE_RC_PMPATCH + bcmerror = exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM); +#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ +#ifdef CONFIG_ARCH_MSM + bcmerror = dhdpcie_start_host_pcieclock(bus); +#endif /* CONFIG_ARCH_MSM */ + if (bcmerror < 0) { + DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n", + __FUNCTION__, bcmerror)); + bus->is_linkdown = 1; +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ + } + + return bcmerror; +} + +static int dhdpcie_suspend_host_dev(dhd_bus_t *bus) +{ + int bcmerror = 0; +#ifdef USE_EXYNOS_PCIE_RC_PMPATCH + struct pci_dev *rc_pci_dev; + rc_pci_dev = pci_get_device(0x144d, SAMSUNG_PCIE_DEVICE_ID, NULL); + if (rc_pci_dev) { + pci_save_state(rc_pci_dev); + } + exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM); +#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ +#ifdef CONFIG_ARCH_MSM + bcmerror = dhdpcie_stop_host_pcieclock(bus); +#endif /* CONFIG_ARCH_MSM */ + return bcmerror; +} + +int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state) { int rc; + struct pci_dev *dev = bus->dev; if (state) { + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return BCME_ERROR; + } #ifndef BCMPCIE_OOB_HOST_WAKE dhdpcie_pme_active(bus->osh, state); -#endif /* BCMPCIE_OOB_HOST_WAKE */ +#endif /* !BCMPCIE_OOB_HOST_WAKE */ rc = dhdpcie_suspend_dev(dev); + if (!rc) { + dhdpcie_suspend_host_dev(bus); + } } else { + dhdpcie_resume_host_dev(bus); rc = dhdpcie_resume_dev(dev); -#ifndef BCMPCIE_OOB_HOST_WAKE +#ifndef BCMPCIE_OOB_HOST_WAKE dhdpcie_pme_active(bus->osh, state); -#endif /* BCMPCIE_OOB_HOST_WAKE */ +#endif /* !BCMPCIE_OOB_HOST_WAKE */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + if 
(bus->is_linkdown) { + bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL; + dhd_os_send_hang_message(bus->dhd); + } +#endif } return rc; } @@ -310,6 +493,9 @@ dhdpcie_bus_unregister(void) int __devinit dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { +#ifdef BUS_POWER_RESTORE + wifi_adapter_info_t *adapter = NULL; +#endif if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) { DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__)); @@ -329,6 +515,14 @@ dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) device_disable_async_suspend(&pdev->dev); #endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */ +#ifdef BUS_POWER_RESTORE + adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number, + PCI_SLOT(pdev->devfn)); + + if (adapter != NULL) + adapter->pci_dev = pdev; +#endif + DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__)); return 0; } @@ -337,12 +531,12 @@ int dhdpcie_detach(dhdpcie_info_t *pch) { if (pch) { - osl_t *osh = pch->osh; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) - if (!dhd_download_fw_on_driverload) - pci_load_and_free_saved_state(pch->dev, &pch->state); -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ - MFREE(osh, pch, sizeof(dhdpcie_info_t)); + if (!dhd_download_fw_on_driverload) { + pci_load_and_free_saved_state(pch->dev, &pch->default_state); + } +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + MFREE(pch->osh, pch, sizeof(dhdpcie_info_t)); } return 0; } @@ -354,9 +548,6 @@ dhdpcie_pci_remove(struct pci_dev *pdev) osl_t *osh = NULL; dhdpcie_info_t *pch = NULL; dhd_bus_t *bus = NULL; -#ifdef PCIE_TX_DEFERRAL - struct sk_buff *skb; -#endif DHD_TRACE(("%s Enter\n", __FUNCTION__)); @@ -370,30 +561,24 @@ dhdpcie_pci_remove(struct pci_dev *pdev) } mutex_lock(&_dhd_sdio_mutex_lock_); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ -#endif +#endif pch = pci_get_drvdata(pdev); bus = pch->bus; osh = pch->osh; -#ifdef PCIE_TX_DEFERRAL - if 
(bus->tx_wq) - destroy_workqueue(bus->tx_wq); - skb = skb_dequeue(&bus->orphan_list); - while (skb) { - PKTCFREE(osh, skb, TRUE); - skb = skb_dequeue(&bus->orphan_list); - } -#endif - #ifdef SUPPORT_LINKDOWN_RECOVERY + if (bus) { #ifdef CONFIG_ARCH_MSM - if (bus) msm_pcie_deregister_event(&bus->pcie_event); #endif /* CONFIG_ARCH_MSM */ +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +#ifdef CONFIG_SOC_EXYNOS8890 + exynos_pcie_deregister_event(&bus->pcie_event); +#endif /* CONFIG_SOC_EXYNOS8890 */ +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ + } #endif /* SUPPORT_LINKDOWN_RECOVERY */ - - dhdpcie_bus_remove_prep(bus); dhdpcie_bus_release(bus); pci_disable_device(pdev); #ifdef BCMPCIE_OOB_HOST_WAKE @@ -425,15 +610,28 @@ dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info) { dhd_bus_t *bus = dhdpcie_info->bus; struct pci_dev *pdev = dhdpcie_info->bus->dev; + int err = 0; - snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname), - "dhdpcie:%s", pci_name(pdev)); - if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED, - dhdpcie_info->pciname, bus) < 0) { + if (!bus->irq_registered) { + snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname), + "dhdpcie:%s", pci_name(pdev)); +#ifdef DHD_USE_MSI + pci_enable_msi(pdev); +#endif /* DHD_USE_MSI */ + err = request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED, + dhdpcie_info->pciname, bus); + if (err) { DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__)); +#ifdef DHD_USE_MSI + pci_disable_msi(pdev); +#endif /* DHD_USE_MSI */ return -1; + } else { + bus->irq_registered = TRUE; + } + } else { + DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__)); } - bus->irq_registered = TRUE; DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname)); @@ -491,9 +689,8 @@ int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info) } dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE); - dhdpcie_info->tcm_size = - (bar1_size < DONGLE_TCM_MAP_SIZE) ? 
bar1_size : DONGLE_TCM_MAP_SIZE; - dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size); + dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE); + dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE; if (!dhdpcie_info->regs || !dhdpcie_info->tcm) { DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__)); @@ -506,9 +703,9 @@ int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info) * in case of built in driver */ pci_save_state(pdev); - dhdpcie_info->state = pci_store_saved_state(pdev); + dhdpcie_info->default_state = pci_store_saved_state(pdev); - if (dhdpcie_info->state == NULL) { + if (dhdpcie_info->default_state == NULL) { DHD_ERROR(("%s pci_store_saved_state returns NULL\n", __FUNCTION__)); REG_UNMAP(dhdpcie_info->regs); @@ -519,9 +716,9 @@ int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info) } #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ - DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n", + DHD_ERROR(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n", __FUNCTION__, dhdpcie_info->regs, bar0_addr)); - DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n", + DHD_ERROR(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n", __FUNCTION__, dhdpcie_info->tcm, bar1_addr)); return 0; /* SUCCESS */ @@ -555,8 +752,9 @@ int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info) } #ifdef SUPPORT_LINKDOWN_RECOVERY -#ifdef CONFIG_ARCH_MSM -void dhdpcie_linkdown_cb(struct msm_pcie_notify *noti) +#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \ + defined(CONFIG_SOC_EXYNOS8890)) +void dhdpcie_linkdown_cb(struct_pcie_notify *noti) { struct pci_dev *pdev = (struct pci_dev *)noti->user; dhdpcie_info_t *pch = NULL; @@ -571,81 +769,21 @@ void dhdpcie_linkdown_cb(struct msm_pcie_notify *noti) DHD_ERROR(("%s: Event HANG send up " "due to PCIe linkdown\n", __FUNCTION__)); - bus->islinkdown = TRUE; +#ifdef CONFIG_ARCH_MSM + 
bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ + bus->is_linkdown = 1; DHD_OS_WAKE_LOCK(dhd); - dhd_os_check_hang(dhd, 0, -ETIMEDOUT); + dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN; + dhd_os_send_hang_message(dhd); } } } } } -#endif /* CONFIG_ARCH_MSM */ -#endif /* SUPPORT_LINKDOWN_RECOVERY */ - -#ifdef PCIE_TX_DEFERRAL -static void dhd_pcie_create_flow_worker(struct work_struct *worker) -{ - dhd_bus_t *bus; - struct sk_buff *skb; - uint16 ifidx, flowid; - flow_queue_t *queue; - flow_ring_node_t *flow_ring_node; - unsigned long flags; - - bus = container_of(worker, dhd_bus_t, create_flow_work); - skb = skb_dequeue(&bus->orphan_list); - while (skb) { - ifidx = DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(skb)); - if (BCME_OK != dhd_flowid_update(bus->dhd, ifidx, - bus->dhd->flow_prio_map[(PKTPRIO(skb))], skb)) { - PKTCFREE(bus->dhd->osh, skb, TRUE); - skb = skb_dequeue(&bus->orphan_list); - continue; - } - flowid = DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(skb)); - flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); - queue = &flow_ring_node->queue; - DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); - if ((flowid >= bus->dhd->num_flow_rings) || - (!flow_ring_node->active) || - (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) { - DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - DHD_ERROR(("%s: Dropping pkt flowid %d, status %d active %d\n", - __FUNCTION__, flowid, flow_ring_node->status, - flow_ring_node->active)); - PKTCFREE(bus->dhd->osh, skb, TRUE); - skb = skb_dequeue(&bus->orphan_list); - continue; - } - if (BCME_OK != dhd_flow_queue_enqueue(bus->dhd, queue, skb)) { - DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - PKTCFREE(bus->dhd->osh, skb, TRUE); - skb = skb_dequeue(&bus->orphan_list); - continue; - } - DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - - if (flow_ring_node->status == FLOW_RING_STATUS_OPEN) - dhd_bus_schedule_queue(bus, flowid, FALSE); - - skb = skb_dequeue(&bus->orphan_list); - } -} - -static void 
dhd_pcie_delete_flow_worker(struct work_struct *worker) -{ - dhd_bus_t *bus; - uint16 flowid; - - bus = container_of(worker, dhd_bus_t, delete_flow_work); - for_each_set_bit(flowid, bus->delete_flow_map, bus->dhd->num_flow_rings) { - clear_bit(flowid, bus->delete_flow_map); - dhd_bus_flow_ring_delete_response(bus, flowid, BCME_OK); - } -} - -#endif /* PCIE_TX_DEFERRAL */ +#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && CONFIG_SOC_EXYNOS8890) */ +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ #if defined(MULTIPLE_SUPPLICANT) extern void wl_android_post_init(void); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe @@ -672,7 +810,7 @@ int dhdpcie_init(struct pci_dev *pdev) } mutex_lock(&_dhd_sdio_mutex_lock_); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ -#endif +#endif do { /* osl attach */ @@ -690,6 +828,10 @@ int dhdpcie_init(struct pci_dev *pdev) DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__)); osl_static_mem_init(osh, adapter); + /* Set ACP coherence flag */ + if (OSL_ACP_WAR_ENAB() || OSL_ARCH_IS_COHERENT()) + osl_flag_set(osh, OSL_ACP_COHERENCE); + /* allocate linux spcific pcie structure here */ if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) { DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); @@ -729,16 +871,22 @@ int dhdpcie_init(struct pci_dev *pdev) } /* Bus initialization */ - bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, - dhdpcie_info->tcm, dhdpcie_info->tcm_size); +#if defined(DHD_PCIE_BAR1_WIN_BASE_FIX) + bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm, dhdpcie_info->tcm_size, pdev); +#else + bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm, pdev); +#endif /* defined(DHD_PCIE_BAR1_WIN_BASE_FIX) */ if (!bus) { DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__)); break; } dhdpcie_info->bus = bus; - dhdpcie_info->bus->dev = pdev; - + bus->is_linkdown = 0; + bus->pci_d3hot_done = 0; +#ifdef 
DONGLE_ENABLE_ISOLATION + bus->dhd->dongle_isolation = TRUE; +#endif /* DONGLE_ENABLE_ISOLATION */ #ifdef SUPPORT_LINKDOWN_RECOVERY #ifdef CONFIG_ARCH_MSM bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN; @@ -747,8 +895,17 @@ int dhdpcie_init(struct pci_dev *pdev) bus->pcie_event.callback = dhdpcie_linkdown_cb; bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY; msm_pcie_register_event(&bus->pcie_event); - bus->islinkdown = FALSE; + bus->no_cfg_restore = 0; #endif /* CONFIG_ARCH_MSM */ +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +#ifdef CONFIG_SOC_EXYNOS8890 + bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN; + bus->pcie_event.user = pdev; + bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK; + bus->pcie_event.callback = dhdpcie_linkdown_cb; + exynos_pcie_register_event(&bus->pcie_event); +#endif /* CONFIG_SOC_EXYNOS8890 */ +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ #endif /* SUPPORT_LINKDOWN_RECOVERY */ if (bus->intr) { @@ -766,28 +923,31 @@ int dhdpcie_init(struct pci_dev *pdev) "due to polling mode\n", __FUNCTION__)); } -#if 0 // terence 20150325: fix for WPA/WPA2 4-way handshake fail in hostapd - if (dhd_download_fw_on_driverload) { - if (dhd_bus_start(bus->dhd)) { - DHD_ERROR(("%s: dhd_bud_start() failed\n", __FUNCTION__)); - break; - } +#if defined(BCM_REQUEST_FW) + if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) { + DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__)); } -#endif + bus->nv_path = NULL; + bus->fw_path = NULL; +#endif /* BCM_REQUEST_FW */ /* set private data for pci_dev */ pci_set_drvdata(pdev, dhdpcie_info); -#ifdef PCIE_TX_DEFERRAL - bus->tx_wq = create_singlethread_workqueue("bcmdhd_tx"); - if (bus->tx_wq == NULL) { - DHD_ERROR(("%s workqueue creation failed\n", __FUNCTION__)); - break; + if (dhd_download_fw_on_driverload) { + if (dhd_bus_start(bus->dhd)) { + DHD_ERROR(("%s: dhd_bud_start() failed\n", __FUNCTION__)); + if (!allow_delay_fwdl) + break; + } + } else { + /* Set ramdom MAC address during boot time */ + 
get_random_bytes(&bus->dhd->mac.octet[3], 3); + /* Adding BRCM OUI */ + bus->dhd->mac.octet[0] = 0; + bus->dhd->mac.octet[1] = 0x90; + bus->dhd->mac.octet[2] = 0x4C; } - INIT_WORK(&bus->create_flow_work, dhd_pcie_create_flow_worker); - INIT_WORK(&bus->delete_flow_work, dhd_pcie_delete_flow_worker); - skb_queue_head_init(&bus->orphan_list); -#endif /* PCIE_TX_DEFERRAL */ /* Attach to the OS network interface */ DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__)); @@ -804,7 +964,7 @@ int dhdpcie_init(struct pci_dev *pdev) mutex_unlock(&_dhd_sdio_mutex_lock_); DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__)); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ -#endif +#endif DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__)); return 0; /* return SUCCESS */ @@ -816,8 +976,9 @@ int dhdpcie_init(struct pci_dev *pdev) dhdpcie_bus_release(bus); #ifdef BCMPCIE_OOB_HOST_WAKE - if (dhdpcie_osinfo) + if (dhdpcie_osinfo) { MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t)); + } #endif /* BCMPCIE_OOB_HOST_WAKE */ if (dhdpcie_info) @@ -832,7 +993,7 @@ int dhdpcie_init(struct pci_dev *pdev) mutex_unlock(&_dhd_sdio_mutex_lock_); DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__)); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ -#endif +#endif DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__)); @@ -846,10 +1007,19 @@ dhdpcie_free_irq(dhd_bus_t *bus) struct pci_dev *pdev = NULL; DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__)); - if (bus && bus->irq_registered) { + if (!bus) { + return; + } + + if (bus->irq_registered) { pdev = bus->dev; free_irq(pdev->irq, bus); bus->irq_registered = FALSE; +#ifdef DHD_USE_MSI + pci_disable_msi(pdev); +#endif /* DHD_USE_MSI */ + } else { + DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__)); } DHD_TRACE(("%s: Exit\n", __FUNCTION__)); return; @@ -895,24 +1065,24 @@ dhdpcie_start_host_pcieclock(dhd_bus_t *bus) #endif /* CONFIG_ARCH_MSM */ DHD_TRACE(("%s Enter:\n", __FUNCTION__)); - 
if (bus == NULL) + if (bus == NULL) { return BCME_ERROR; + } - if (bus->dev == NULL) + if (bus->dev == NULL) { return BCME_ERROR; + } #ifdef CONFIG_ARCH_MSM #ifdef SUPPORT_LINKDOWN_RECOVERY - if (bus->islinkdown) { + if (bus->no_cfg_restore) { options = MSM_PCIE_CONFIG_NO_CFG_RESTORE; } ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number, bus->dev, NULL, options); - if (bus->islinkdown && !ret) { + if (bus->no_cfg_restore && !ret) { msm_pcie_recover_config(bus->dev); - if (bus->dhd) - DHD_OS_WAKE_UNLOCK(bus->dhd); - bus->islinkdown = FALSE; + bus->no_cfg_restore = 0; } #else ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number, @@ -933,29 +1103,32 @@ int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus) { int ret = 0; - #ifdef CONFIG_ARCH_MSM #ifdef SUPPORT_LINKDOWN_RECOVERY int options = 0; #endif /* SUPPORT_LINKDOWN_RECOVERY */ #endif /* CONFIG_ARCH_MSM */ + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); - if (bus == NULL) + if (bus == NULL) { return BCME_ERROR; + } - if (bus->dev == NULL) + if (bus->dev == NULL) { return BCME_ERROR; + } #ifdef CONFIG_ARCH_MSM #ifdef SUPPORT_LINKDOWN_RECOVERY - if (bus->islinkdown) + if (bus->no_cfg_restore) { options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN; + } - ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number, + ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number, bus->dev, NULL, options); #else - ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number, + ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number, bus->dev, NULL, 0); #endif /* SUPPORT_LINKDOWN_RECOVERY */ if (ret) { @@ -971,11 +1144,15 @@ dhdpcie_stop_host_pcieclock(dhd_bus_t *bus) int dhdpcie_disable_device(dhd_bus_t *bus) { - if (bus == NULL) + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); + + if (bus == NULL) { return BCME_ERROR; + } - if (bus->dev == NULL) + if (bus->dev == NULL) { return BCME_ERROR; + } pci_disable_device(bus->dev); @@ -986,50 +1163,45 @@ int 
dhdpcie_enable_device(dhd_bus_t *bus) { int ret = BCME_ERROR; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) dhdpcie_info_t *pch; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ DHD_TRACE(("%s Enter:\n", __FUNCTION__)); - if (bus == NULL) + if (bus == NULL) { return BCME_ERROR; + } - if (bus->dev == NULL) + if (bus->dev == NULL) { return BCME_ERROR; + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) pch = pci_get_drvdata(bus->dev); - if (pch == NULL) + if (pch == NULL) { return BCME_ERROR; + } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && !defined(CONFIG_SOC_EXYNOS8890) /* Updated with pci_load_and_free_saved_state to compatible * with kernel 3.14 or higher */ - if (pci_load_and_free_saved_state(bus->dev, &pch->state)) - pci_disable_device(bus->dev); - else -#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \ - (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))) - if (pci_load_saved_state(bus->dev, pch->state)) - pci_disable_device(bus->dev); - else -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) and - * (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \ - * (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) - */ - { - pci_restore_state(bus->dev); - ret = pci_enable_device(bus->dev); - if (!ret) - pci_set_master(bus->dev); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) - } -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) and - * (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \ - * (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) - */ + pci_load_and_free_saved_state(bus->dev, &pch->default_state); + pch->default_state = pci_store_saved_state(bus->dev); +#else + pci_load_saved_state(bus->dev, pch->default_state); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && !CONFIG_SOC_EXYNOS8890 */ + + pci_restore_state(bus->dev); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */ - if (ret) + ret = pci_enable_device(bus->dev); + if 
(ret) { pci_disable_device(bus->dev); + } else { + pci_set_master(bus->dev); + } return ret; } @@ -1078,9 +1250,8 @@ dhdpcie_alloc_resource(dhd_bus_t *bus) } bus->regs = dhdpcie_info->regs; - dhdpcie_info->tcm_size = - (bar1_size < DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE; - dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size); + dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE); + dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE; if (!dhdpcie_info->tcm) { DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__)); REG_UNMAP(dhdpcie_info->regs); @@ -1089,11 +1260,13 @@ dhdpcie_alloc_resource(dhd_bus_t *bus) } bus->tcm = dhdpcie_info->tcm; +#if defined(DHD_PCIE_BAR1_WIN_BASE_FIX) bus->tcm_size = dhdpcie_info->tcm_size; +#endif /* defined(DHD_PCIE_BAR1_WIN_BASE_FIX) */ - DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n", + DHD_ERROR(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n", __FUNCTION__, dhdpcie_info->regs, bar0_addr)); - DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n", + DHD_ERROR(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n", __FUNCTION__, dhdpcie_info->tcm, bar1_addr)); return 0; @@ -1198,10 +1371,11 @@ void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable) spin_lock_irqsave(&dhdpcie_osinfo->oob_irq_spinlock, flags); if ((dhdpcie_osinfo->oob_irq_enabled != enable) && (dhdpcie_osinfo->oob_irq_num > 0)) { - if (enable) + if (enable) { enable_irq(dhdpcie_osinfo->oob_irq_num); - else + } else { disable_irq_nosync(dhdpcie_osinfo->oob_irq_num); + } dhdpcie_osinfo->oob_irq_enabled = enable; } spin_unlock_irqrestore(&dhdpcie_osinfo->oob_irq_spinlock, flags); @@ -1212,6 +1386,10 @@ static irqreturn_t wlan_oob_irq(int irq, void *data) dhd_bus_t *bus; DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__)); bus = (dhd_bus_t *)data; + dhdpcie_oob_intr_set(bus, FALSE); +#ifdef DHD_PCIE_RUNTIMEPM + 
dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq); +#endif /* DHD_PCIE_RUNTIMPM */ if (bus->dhd->up && bus->suspended) { DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT); } @@ -1248,9 +1426,9 @@ int dhdpcie_oob_intr_register(dhd_bus_t *bus) } if (dhdpcie_osinfo->oob_irq_num > 0) { - DHD_INFO_HW4(("%s OOB irq=%d flags=%X \n", __FUNCTION__, + printf("%s OOB irq=%d flags=0x%X\n", __FUNCTION__, (int)dhdpcie_osinfo->oob_irq_num, - (int)dhdpcie_osinfo->oob_irq_flags)); + (int)dhdpcie_osinfo->oob_irq_flags); err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq, dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake", bus); @@ -1259,9 +1437,17 @@ int dhdpcie_oob_intr_register(dhd_bus_t *bus) __FUNCTION__, err)); return err; } +#if defined(DISABLE_WOWLAN) + printf("%s: disable_irq_wake\n", __FUNCTION__); + dhdpcie_osinfo->oob_irq_wake_enabled = FALSE; +#else + printf("%s: enable_irq_wake\n", __FUNCTION__); err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num); - if (!err) + if (!err) { dhdpcie_osinfo->oob_irq_wake_enabled = TRUE; + } else + printf("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err); +#endif dhdpcie_osinfo->oob_irq_enabled = TRUE; } @@ -1301,8 +1487,9 @@ void dhdpcie_oob_intr_unregister(dhd_bus_t *bus) if (dhdpcie_osinfo->oob_irq_num > 0) { if (dhdpcie_osinfo->oob_irq_wake_enabled) { err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num); - if (!err) + if (!err) { dhdpcie_osinfo->oob_irq_wake_enabled = FALSE; + } } if (dhdpcie_osinfo->oob_irq_enabled) { disable_irq(dhdpcie_osinfo->oob_irq_num); @@ -1313,3 +1500,155 @@ void dhdpcie_oob_intr_unregister(dhd_bus_t *bus) dhdpcie_osinfo->oob_irq_registered = FALSE; } #endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef DHD_PCIE_RUNTIMEPM +bool dhd_runtimepm_state(dhd_pub_t *dhd) +{ + dhd_bus_t *bus; + unsigned long flags; + bus = dhd->bus; + + DHD_GENERAL_LOCK(dhd, flags); + if (bus->suspended == TRUE) { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_INFO(("Bus is already suspended system PM: %d\n", 
bus->suspended)); + return FALSE; + } + + bus->idlecount++; + + DHD_TRACE(("%s : Enter \n", __FUNCTION__)); + if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) { + bus->idlecount = 0; + if (dhd->dhd_bus_busy_state == 0 && dhd->busstate != DHD_BUS_DOWN && + dhd->busstate != DHD_BUS_DOWN_IN_PROGRESS) { + bus->bus_wake = 0; + dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS; + bus->runtime_resume_done = FALSE; + /* stop all interface network queue. */ + dhd_bus_stop_queue(bus); + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s: DHD Idle state!! - idletime :%d, wdtick :%d \n", + __FUNCTION__, bus->idletime, dhd_runtimepm_ms)); + /* RPM suspend is failed, return FALSE then re-trying */ + if (dhdpcie_set_suspend_resume(bus->dev, TRUE)) { + DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__)); + DHD_GENERAL_LOCK(dhd, flags); + dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS; + bus->runtime_resume_done = TRUE; + /* It can make stuck NET TX Queue without below */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(dhd, flags); + smp_wmb(); + wake_up_interruptible(&bus->rpm_queue); + return FALSE; + } + + DHD_GENERAL_LOCK(dhd, flags); + dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS; + dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_DONE; + /* For making sure NET TX Queue active */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(dhd, flags); + + wait_event_interruptible(bus->rpm_queue, bus->bus_wake); + + DHD_GENERAL_LOCK(dhd, flags); + dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_DONE; + dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS; + DHD_GENERAL_UNLOCK(dhd, flags); + + dhdpcie_set_suspend_resume(bus->dev, FALSE); + + DHD_GENERAL_LOCK(dhd, flags); + dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS; + /* Inform the wake up context that Resume is over */ + bus->runtime_resume_done = TRUE; + /* For making sure NET TX Queue active */ + dhd_bus_start_queue(bus); + 
DHD_GENERAL_UNLOCK(dhd, flags); + + smp_wmb(); + wake_up_interruptible(&bus->rpm_queue); + DHD_ERROR(("%s : runtime resume ended\n", __FUNCTION__)); + return TRUE; + } else { + DHD_GENERAL_UNLOCK(dhd, flags); + /* Since one of the contexts are busy (TX, IOVAR or RX) + * we should not suspend + */ + DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n", + __FUNCTION__, dhd->dhd_bus_busy_state)); + return FALSE; + } + } + + DHD_GENERAL_UNLOCK(dhd, flags); + return FALSE; +} /* dhd_runtimepm_state */ + +/* + * dhd_runtime_bus_wake + * TRUE - related with runtime pm context + * FALSE - It isn't invloved in runtime pm context + */ +bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr) +{ + unsigned long flags; + bus->idlecount = 0; + DHD_TRACE(("%s : enter\n", __FUNCTION__)); + if (bus->dhd->up == FALSE) { + DHD_INFO(("%s : dhd is not up\n", __FUNCTION__)); + return FALSE; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_ALL) { + /* Wake up RPM state thread if it is suspend in progress or suspended */ + if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS || + bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) { + bus->bus_wake = 1; + + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + DHD_ERROR(("Runtime Resume is called in %pf\n", func_addr)); + smp_wmb(); + wake_up_interruptible(&bus->rpm_queue); + /* No need to wake up the RPM state thread */ + } else if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) { + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } + + /* If wait is TRUE, function with wait = TRUE will be wait in here */ + if (wait) { + wait_event_interruptible(bus->rpm_queue, bus->runtime_resume_done); + } else { + DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__)); + } + /* If it is called from RPM context, it returns TRUE */ + return TRUE; + } + + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return FALSE; +} + +bool 
dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void* func_addr) +{ + dhd_bus_t *bus = dhdp->bus; + return dhd_runtime_bus_wake(bus, wait, func_addr); +} + +void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + bus->idletime = 0; +} + +bool dhdpcie_is_resume_done(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + return bus->runtime_resume_done; +} +#endif /* DHD_PCIE_RUNTIMEPM */ diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_pno.c b/drivers/amlogic/wifi/bcmdhd/dhd_pno.c new file mode 100644 index 0000000000000..a0eddd2e892e1 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_pno.c @@ -0,0 +1,3898 @@ +/* + * Broadcom Dongle Host Driver (DHD) + * Prefered Network Offload and Wi-Fi Location Service(WLS) code. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_pno.c 606280 2015-12-15 05:28:25Z $ + */ + +#if defined(GSCAN_SUPPORT) && !defined(PNO_SUPPORT) +#error "GSCAN needs PNO to be enabled!" +#endif + +#ifdef PNO_SUPPORT +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#ifdef GSCAN_SUPPORT +#include +#endif /* GSCAN_SUPPORT */ + +#ifdef __BIG_ENDIAN +#include +#define htod32(i) (bcmswap32(i)) +#define htod16(i) (bcmswap16(i)) +#define dtoh32(i) (bcmswap32(i)) +#define dtoh16(i) (bcmswap16(i)) +#define htodchanspec(i) htod16(i) +#define dtohchanspec(i) dtoh16(i) +#else +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) +#endif /* IL_BIGENDINA */ + +#define NULL_CHECK(p, s, err) \ + do { \ + if (!(p)) { \ + printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ + err = BCME_ERROR; \ + return err; \ + } \ + } while (0) +#define PNO_GET_PNOSTATE(dhd) ((dhd_pno_status_info_t *)dhd->pno_state) +#define PNO_BESTNET_LEN 1024 +#define PNO_ON 1 +#define PNO_OFF 0 +#define CHANNEL_2G_MAX 14 +#define CHANNEL_5G_MAX 165 +#define MAX_NODE_CNT 5 +#define WLS_SUPPORTED(pno_state) (pno_state->wls_supported == TRUE) +#define TIME_DIFF(timestamp1, timestamp2) (abs((uint32)(timestamp1/1000) \ + - (uint32)(timestamp2/1000))) +#define TIME_DIFF_MS(timestamp1, timestamp2) (abs((uint32)(timestamp1) \ + - (uint32)(timestamp2))) +#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \ + (ts).tv_nsec / NSEC_PER_USEC) + +#define ENTRY_OVERHEAD strlen("bssid=\nssid=\nfreq=\nlevel=\nage=\ndist=\ndistSd=\n====") +#define TIME_MIN_DIFF 5 +static wlc_ssid_ext_t * dhd_pno_get_legacy_pno_ssid(dhd_pub_t *dhd, + dhd_pno_status_info_t *pno_state); +#ifdef GSCAN_SUPPORT +static wl_pfn_gscan_channel_bucket_t * +dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state, 
+uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw); +#endif /* GSCAN_SUPPORT */ + +static inline bool +is_dfs(uint16 channel) +{ + if (channel >= 52 && channel <= 64) /* class 2 */ + return TRUE; + else if (channel >= 100 && channel <= 140) /* class 4 */ + return TRUE; + else + return FALSE; +} +int +dhd_pno_clean(dhd_pub_t *dhd) +{ + int pfn = 0; + int err; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + /* Disable PNO */ + err = dhd_iovar(dhd, 0, "pfn", (char *)&pfn, sizeof(pfn), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn(error : %d)\n", + __FUNCTION__, err)); + goto exit; + } + _pno_state->pno_status = DHD_PNO_DISABLED; + err = dhd_iovar(dhd, 0, "pfnclear", NULL, 0, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfnclear(error : %d)\n", + __FUNCTION__, err)); + } +exit: + return err; +} + +bool +dhd_is_pno_supported(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("NULL POINTER : %s\n", + __FUNCTION__)); + return FALSE; + } + _pno_state = PNO_GET_PNOSTATE(dhd); + return WLS_SUPPORTED(_pno_state); +} + +int +dhd_pno_set_mac_oui(dhd_pub_t *dhd, uint8 *oui) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("NULL POINTER : %s\n", __FUNCTION__)); + return BCME_ERROR; + } + _pno_state = PNO_GET_PNOSTATE(dhd); + if (ETHER_ISMULTI(oui)) { + DHD_ERROR(("Expected unicast OUI\n")); + err = BCME_ERROR; + } else { + memcpy(_pno_state->pno_oui, oui, DOT11_OUI_LEN); + DHD_PNO(("PNO mac oui to be used - %02x:%02x:%02x\n", _pno_state->pno_oui[0], + _pno_state->pno_oui[1], _pno_state->pno_oui[2])); + } + + return err; +} + +#ifdef GSCAN_SUPPORT +static uint64 +convert_fw_rel_time_to_systime(uint32 fw_ts_ms) +{ + struct timespec ts; + + 
get_monotonic_boottime(&ts); + return ((uint64)(TIMESPEC_TO_US(ts)) - (uint64)(fw_ts_ms * 1000)); +} + +static int +_dhd_pno_gscan_cfg(dhd_pub_t *dhd, wl_pfn_gscan_cfg_t *pfncfg_gscan_param, int size) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + err = dhd_iovar(dhd, 0, "pfn_gscan_cfg", (char *)pfncfg_gscan_param, size, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfncfg_gscan_param\n", __FUNCTION__)); + goto exit; + } +exit: + return err; +} + +static bool +is_batch_retrieval_complete(struct dhd_pno_gscan_params *gscan_params) +{ + smp_rmb(); + return (gscan_params->get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE); +} +#endif /* GSCAN_SUPPORT */ + +static int +dhd_pno_set_mac_addr(dhd_pub_t *dhd, struct ether_addr *macaddr) +{ + int err; + wl_pfn_macaddr_cfg_t cfg; + + cfg.version = WL_PFN_MACADDR_CFG_VER; + if (ETHER_ISNULLADDR(macaddr)) { + cfg.flags = 0; + } else { + cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK); + } + memcpy(&cfg.macaddr, macaddr, ETHER_ADDR_LEN); + + err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&cfg, sizeof(cfg), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_macaddr\n", __FUNCTION__)); + } + + return err; +} + +static int +_dhd_pno_suspend(dhd_pub_t *dhd) +{ + int err; + int suspend = 1; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to suspend pfn(error :%d)\n", __FUNCTION__, err)); + goto exit; + + } + _pno_state->pno_status = DHD_PNO_SUSPEND; +exit: + return err; +} +static int +_dhd_pno_enable(dhd_pub_t *dhd, int enable) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + 
NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (enable & 0xfffe) { + DHD_ERROR(("%s invalid value\n", __FUNCTION__)); + err = BCME_BADARG; + goto exit; + } + if (!dhd_support_sta_mode(dhd)) { + DHD_ERROR(("PNO is not allowed for non-STA mode")); + err = BCME_BADOPTION; + goto exit; + } + if (enable) { + if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) && + dhd_is_associated(dhd, 0, NULL)) { + DHD_ERROR(("%s Legacy PNO mode cannot be enabled " + "in assoc mode , ignore it\n", __FUNCTION__)); + err = BCME_BADOPTION; + goto exit; + } + } + /* Enable/Disable PNO */ + err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_set - %d\n", __FUNCTION__, err)); + goto exit; + } + _pno_state->pno_status = (enable)? + DHD_PNO_ENABLED : DHD_PNO_DISABLED; + if (!enable) + _pno_state->pno_mode = DHD_PNO_NONE_MODE; + + DHD_PNO(("%s set pno as %s\n", + __FUNCTION__, enable ? 
"Enable" : "Disable")); +exit: + return err; +} + +static int +_dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t mode) +{ + int err = BCME_OK; + wl_pfn_param_t pfn_param; + dhd_pno_params_t *_params; + dhd_pno_status_info_t *_pno_state; + bool combined_scan = FALSE; + struct ether_addr macaddr; + DHD_PNO(("%s enter\n", __FUNCTION__)); + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + memset(&pfn_param, 0, sizeof(pfn_param)); + + /* set pfn parameters */ + pfn_param.version = htod32(PFN_VERSION); + pfn_param.flags = ((PFN_LIST_ORDER << SORT_CRITERIA_BIT) | + (ENABLE << IMMEDIATE_SCAN_BIT) | (ENABLE << REPORT_SEPERATELY_BIT)); + if (mode == DHD_PNO_LEGACY_MODE) { + /* check and set extra pno params */ + if ((pno_params->params_legacy.pno_repeat != 0) || + (pno_params->params_legacy.pno_freq_expo_max != 0)) { + pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT); + pfn_param.repeat = (uchar) (pno_params->params_legacy.pno_repeat); + pfn_param.exp = (uchar) (pno_params->params_legacy.pno_freq_expo_max); + } + /* set up pno scan fr */ + if (pno_params->params_legacy.scan_fr != 0) + pfn_param.scan_freq = htod32(pno_params->params_legacy.scan_fr); + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + DHD_PNO(("will enable combined scan with BATCHIG SCAN MODE\n")); + mode |= DHD_PNO_BATCH_MODE; + combined_scan = TRUE; + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + DHD_PNO(("will enable combined scan with HOTLIST SCAN MODE\n")); + mode |= DHD_PNO_HOTLIST_MODE; + combined_scan = TRUE; + } +#ifdef GSCAN_SUPPORT + else if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + DHD_PNO(("will enable combined scan with GSCAN SCAN MODE\n")); + mode |= DHD_PNO_GSCAN_MODE; + } +#endif /* GSCAN_SUPPORT */ + } + if (mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + /* Scan frequency of 30 sec */ + pfn_param.scan_freq = htod32(30); + /* slow adapt 
scan is off by default */ + pfn_param.slow_freq = htod32(0); + /* RSSI margin of 30 dBm */ + pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM); + /* Network timeout 60 sec */ + pfn_param.lost_network_timeout = htod32(60); + /* best n = 2 by default */ + pfn_param.bestn = DEFAULT_BESTN; + /* mscan m=0 by default, so not record best networks by default */ + pfn_param.mscan = DEFAULT_MSCAN; + /* default repeat = 10 */ + pfn_param.repeat = DEFAULT_REPEAT; + /* by default, maximum scan interval = 2^2 + * scan_freq when adaptive scan is turned on + */ + pfn_param.exp = DEFAULT_EXP; + if (mode == DHD_PNO_BATCH_MODE) { + /* In case of BATCH SCAN */ + if (pno_params->params_batch.bestn) + pfn_param.bestn = pno_params->params_batch.bestn; + if (pno_params->params_batch.scan_fr) + pfn_param.scan_freq = htod32(pno_params->params_batch.scan_fr); + if (pno_params->params_batch.mscan) + pfn_param.mscan = pno_params->params_batch.mscan; + /* enable broadcast scan */ + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + } else if (mode == DHD_PNO_HOTLIST_MODE) { + /* In case of HOTLIST SCAN */ + if (pno_params->params_hotlist.scan_fr) + pfn_param.scan_freq = htod32(pno_params->params_hotlist.scan_fr); + pfn_param.bestn = 0; + pfn_param.repeat = 0; + /* enable broadcast scan */ + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + } + if (combined_scan) { + /* Disable Adaptive Scan */ + pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT)); + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + pfn_param.repeat = 0; + pfn_param.exp = 0; + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + /* In case of Legacy PNO + BATCH SCAN */ + _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + if (_params->params_batch.bestn) + pfn_param.bestn = _params->params_batch.bestn; + if (_params->params_batch.scan_fr) + pfn_param.scan_freq = htod32(_params->params_batch.scan_fr); + if (_params->params_batch.mscan) + pfn_param.mscan = _params->params_batch.mscan; + } else if 
(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + /* In case of Legacy PNO + HOTLIST SCAN */ + _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + if (_params->params_hotlist.scan_fr) + pfn_param.scan_freq = htod32(_params->params_hotlist.scan_fr); + pfn_param.bestn = 0; + pfn_param.repeat = 0; + } + } + } +#ifdef GSCAN_SUPPORT + if (mode & DHD_PNO_GSCAN_MODE) { + uint32 lost_network_timeout; + + pfn_param.scan_freq = htod32(pno_params->params_gscan.scan_fr); + if (pno_params->params_gscan.mscan) { + pfn_param.bestn = pno_params->params_gscan.bestn; + pfn_param.mscan = pno_params->params_gscan.mscan; + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + } + /* RSSI margin of 30 dBm */ + pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM); + /* ADAPTIVE turned off */ + pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT)); + pfn_param.repeat = 0; + pfn_param.exp = 0; + pfn_param.slow_freq = 0; + + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + dhd_pno_params_t *_params; + + _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + + pfn_param.scan_freq = htod32(MIN(pno_params->params_gscan.scan_fr, + _params->params_legacy.scan_fr)); + } + + lost_network_timeout = (pno_params->params_gscan.max_ch_bucket_freq * + pfn_param.scan_freq * + pno_params->params_gscan.lost_ap_window); + if (lost_network_timeout) { + pfn_param.lost_network_timeout = htod32(MIN(lost_network_timeout, + GSCAN_MIN_BSSID_TIMEOUT)); + } else { + pfn_param.lost_network_timeout = htod32(GSCAN_MIN_BSSID_TIMEOUT); + } + } else +#endif /* GSCAN_SUPPORT */ + { + if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) || + pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) { + DHD_ERROR(("%s pno freq(%d sec) is not valid \n", + __FUNCTION__, PNO_SCAN_MIN_FW_SEC)); + err = BCME_BADARG; + goto exit; + } + } + + memset(&macaddr, 0, ETHER_ADDR_LEN); + memcpy(&macaddr, _pno_state->pno_oui, DOT11_OUI_LEN); + + 
DHD_PNO(("Setting mac oui to FW - %02x:%02x:%02x\n", _pno_state->pno_oui[0], + _pno_state->pno_oui[1], _pno_state->pno_oui[2])); + err = dhd_pno_set_mac_addr(dhd, &macaddr); + if (err < 0) { + DHD_ERROR(("%s : failed to set pno mac address, error - %d\n", __FUNCTION__, err)); + goto exit; + } + + +#ifdef GSCAN_SUPPORT + if (mode == DHD_PNO_BATCH_MODE || + ((mode & DHD_PNO_GSCAN_MODE) && pno_params->params_gscan.mscan)) +#else + if (mode == DHD_PNO_BATCH_MODE) +#endif /* GSCAN_SUPPORT */ + { + int _tmp = pfn_param.bestn; + /* set bestn to calculate the max mscan which firmware supports */ + err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set pfnmem\n", __FUNCTION__)); + goto exit; + } + /* get max mscan which the firmware supports */ + err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 0); + if (err < 0) { + DHD_ERROR(("%s : failed to get pfnmem\n", __FUNCTION__)); + goto exit; + } + DHD_PNO((" returned mscan : %d, set bestn : %d\n", _tmp, pfn_param.bestn)); + pfn_param.mscan = MIN(pfn_param.mscan, _tmp); + } + err = dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_set %d\n", __FUNCTION__, err)); + goto exit; + } + /* need to return mscan if this is for batch scan instead of err */ + err = (mode == DHD_PNO_BATCH_MODE)? 
pfn_param.mscan : err; +exit: + return err; +} + +static int +_dhd_pno_add_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssids_list, int nssid) +{ + int err = BCME_OK; + int i = 0; + wl_pfn_t pfn_element; + NULL_CHECK(dhd, "dhd is NULL", err); + if (nssid) { + NULL_CHECK(ssids_list, "ssid list is NULL", err); + } + memset(&pfn_element, 0, sizeof(pfn_element)); + { + int j; + for (j = 0; j < nssid; j++) { + DHD_PNO(("%d: scan for %s size = %d hidden = %d\n", j, + ssids_list[j].SSID, ssids_list[j].SSID_len, ssids_list[j].hidden)); + } + } + /* Check for broadcast ssid */ + for (i = 0; i < nssid; i++) { + if (!ssids_list[i].SSID_len) { + DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", i)); + err = BCME_ERROR; + goto exit; + } + } + /* set all pfn ssid */ + for (i = 0; i < nssid; i++) { + pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE); + pfn_element.auth = (DOT11_OPEN_SYSTEM); + pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY); + pfn_element.wsec = htod32(0); + pfn_element.infra = htod32(1); + if (ssids_list[i].hidden) { + pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT); + } else { + pfn_element.flags = 0; + } + memcpy((char *)pfn_element.ssid.SSID, ssids_list[i].SSID, + ssids_list[i].SSID_len); + pfn_element.ssid.SSID_len = ssids_list[i].SSID_len; + err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_element, + sizeof(pfn_element), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__)); + goto exit; + } + } +exit: + return err; +} + +/* qsort compare function */ +static int +_dhd_pno_cmpfunc(const void *a, const void *b) +{ + return (*(uint16*)a - *(uint16*)b); +} + +static int +_dhd_pno_chan_merge(uint16 *d_chan_list, int *nchan, + uint16 *chan_list1, int nchan1, uint16 *chan_list2, int nchan2) +{ + int err = BCME_OK; + int i = 0, j = 0, k = 0; + uint16 tmp; + NULL_CHECK(d_chan_list, "d_chan_list is NULL", err); + NULL_CHECK(nchan, "nchan is NULL", err); + NULL_CHECK(chan_list1, "chan_list1 is NULL", err); + 
NULL_CHECK(chan_list2, "chan_list2 is NULL", err); + /* chan_list1 and chan_list2 should be sorted at first */ + while (i < nchan1 && j < nchan2) { + tmp = chan_list1[i] < chan_list2[j]? + chan_list1[i++] : chan_list2[j++]; + for (; i < nchan1 && chan_list1[i] == tmp; i++); + for (; j < nchan2 && chan_list2[j] == tmp; j++); + d_chan_list[k++] = tmp; + } + + while (i < nchan1) { + tmp = chan_list1[i++]; + for (; i < nchan1 && chan_list1[i] == tmp; i++); + d_chan_list[k++] = tmp; + } + + while (j < nchan2) { + tmp = chan_list2[j++]; + for (; j < nchan2 && chan_list2[j] == tmp; j++); + d_chan_list[k++] = tmp; + + } + *nchan = k; + return err; +} + +static int +_dhd_pno_get_channels(dhd_pub_t *dhd, uint16 *d_chan_list, + int *nchan, uint8 band, bool skip_dfs) +{ + int err = BCME_OK; + int i, j; + uint32 chan_buf[WL_NUMCHANNELS + 1]; + wl_uint32_list_t *list; + NULL_CHECK(dhd, "dhd is NULL", err); + if (*nchan) { + NULL_CHECK(d_chan_list, "d_chan_list is NULL", err); + } + list = (wl_uint32_list_t *) (void *)chan_buf; + list->count = htod32(WL_NUMCHANNELS); + err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, chan_buf, sizeof(chan_buf), FALSE, 0); + if (err < 0) { + DHD_ERROR(("failed to get channel list (err: %d)\n", err)); + goto exit; + } + for (i = 0, j = 0; i < dtoh32(list->count) && i < *nchan; i++) { + if (band == WLC_BAND_2G) { + if (dtoh32(list->element[i]) > CHANNEL_2G_MAX) + continue; + } else if (band == WLC_BAND_5G) { + if (dtoh32(list->element[i]) <= CHANNEL_2G_MAX) + continue; + if (skip_dfs && is_dfs(dtoh32(list->element[i]))) + continue; + + + } else if (band == WLC_BAND_AUTO) { + if (skip_dfs || !is_dfs(dtoh32(list->element[i]))) + continue; + } else { /* All channels */ + if (skip_dfs && is_dfs(dtoh32(list->element[i]))) + continue; + } + if (dtoh32(list->element[i]) <= CHANNEL_5G_MAX) { + d_chan_list[j++] = (uint16) dtoh32(list->element[i]); + } else { + err = BCME_BADCHAN; + goto exit; + } + } + *nchan = j; +exit: + return err; +} + +static int 
+_dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batch, + char *buf, int nbufsize) +{ + int err = BCME_OK; + int bytes_written = 0, nreadsize = 0; + int t_delta = 0; + int nleftsize = nbufsize; + uint8 cnt = 0; + char *bp = buf; + char eabuf[ETHER_ADDR_STR_LEN]; +#ifdef PNO_DEBUG + char *_base_bp; + char msg[150]; +#endif + dhd_pno_bestnet_entry_t *iter, *next; + dhd_pno_scan_results_t *siter, *snext; + dhd_pno_best_header_t *phead, *pprev; + NULL_CHECK(params_batch, "params_batch is NULL", err); + if (nbufsize > 0) + NULL_CHECK(buf, "buf is NULL", err); + /* initialize the buffer */ + memset(buf, 0, nbufsize); + DHD_PNO(("%s enter \n", __FUNCTION__)); + /* # of scans */ + if (!params_batch->get_batch.batch_started) { + bp += nreadsize = sprintf(bp, "scancount=%d\n", + params_batch->get_batch.expired_tot_scan_cnt); + nleftsize -= nreadsize; + params_batch->get_batch.batch_started = TRUE; + } + DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt)); + /* preestimate scan count until which scan result this report is going to end */ + list_for_each_entry_safe(siter, snext, + &params_batch->get_batch.expired_scan_results_list, list) { + phead = siter->bestnetheader; + while (phead != NULL) { + /* if left_size is less than bestheader total size , stop this */ + if (nleftsize <= + (phead->tot_size + phead->tot_cnt * ENTRY_OVERHEAD)) + goto exit; + /* increase scan count */ + cnt++; + /* # best of each scan */ + DHD_PNO(("\n<loop : %d, apcount %d>\n", cnt - 1, phead->tot_cnt)); + /* attribute of the scan */ + if (phead->reason & PNO_STATUS_ABORT_MASK) { + bp += nreadsize = sprintf(bp, "trunc\n"); + nleftsize -= nreadsize; + } + list_for_each_entry_safe(iter, next, + &phead->entry_list, list) { + t_delta = jiffies_to_msecs(jiffies - iter->recorded_time); +#ifdef PNO_DEBUG + _base_bp = bp; + memset(msg, 0, sizeof(msg)); +#endif + /* BSSID info */ + bp += nreadsize = sprintf(bp, "bssid=%s\n", + bcm_ether_ntoa((const struct ether_addr 
*)&iter->BSSID, eabuf)); + nleftsize -= nreadsize; + /* SSID */ + bp += nreadsize = sprintf(bp, "ssid=%s\n", iter->SSID); + nleftsize -= nreadsize; + /* channel */ + bp += nreadsize = sprintf(bp, "freq=%d\n", + wf_channel2mhz(iter->channel, + iter->channel <= CH_MAX_2G_CHANNEL? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + nleftsize -= nreadsize; + /* RSSI */ + bp += nreadsize = sprintf(bp, "level=%d\n", iter->RSSI); + nleftsize -= nreadsize; + /* add the time consumed in Driver to the timestamp of firmware */ + iter->timestamp += t_delta; + bp += nreadsize = sprintf(bp, "age=%d\n", iter->timestamp); + nleftsize -= nreadsize; + /* RTT0 */ + bp += nreadsize = sprintf(bp, "dist=%d\n", + (iter->rtt0 == 0)? -1 : iter->rtt0); + nleftsize -= nreadsize; + /* RTT1 */ + bp += nreadsize = sprintf(bp, "distSd=%d\n", + (iter->rtt0 == 0)? -1 : iter->rtt1); + nleftsize -= nreadsize; + bp += nreadsize = sprintf(bp, "%s", AP_END_MARKER); + nleftsize -= nreadsize; + list_del(&iter->list); + MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE); +#ifdef PNO_DEBUG + memcpy(msg, _base_bp, bp - _base_bp); + DHD_PNO(("Entry : \n%s", msg)); +#endif + } + bp += nreadsize = sprintf(bp, "%s", SCAN_END_MARKER); + DHD_PNO(("%s", SCAN_END_MARKER)); + nleftsize -= nreadsize; + pprev = phead; + /* reset the header */ + siter->bestnetheader = phead = phead->next; + MFREE(dhd->osh, pprev, BEST_HEADER_SIZE); + + siter->cnt_header--; + } + if (phead == NULL) { + /* we store all entry in this scan , so it is ok to delete */ + list_del(&siter->list); + MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE); + } + } +exit: + if (cnt < params_batch->get_batch.expired_tot_scan_cnt) { + DHD_ERROR(("Buffer size is small to save all batch entry," + " cnt : %d (remained_scan_cnt): %d\n", + cnt, params_batch->get_batch.expired_tot_scan_cnt - cnt)); + } + params_batch->get_batch.expired_tot_scan_cnt -= cnt; + /* set FALSE only if the link list is empty after returning the data */ + if 
(list_empty(&params_batch->get_batch.expired_scan_results_list)) { + params_batch->get_batch.batch_started = FALSE; + bp += sprintf(bp, "%s", RESULTS_END_MARKER); + DHD_PNO(("%s", RESULTS_END_MARKER)); + DHD_PNO(("%s : Getting the batching data is complete\n", __FUNCTION__)); + } + /* return used memory in buffer */ + bytes_written = (int32)(bp - buf); + return bytes_written; +} +static int +_dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool only_last) +{ + int err = BCME_OK; + int removed_scan_cnt = 0; + dhd_pno_scan_results_t *siter, *snext; + dhd_pno_best_header_t *phead, *pprev; + dhd_pno_bestnet_entry_t *iter, *next; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(head, "head is NULL", err); + NULL_CHECK(head->next, "head->next is NULL", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + list_for_each_entry_safe(siter, snext, + head, list) { + if (only_last) { + /* in case that we need to delete only last one */ + if (!list_is_last(&siter->list, head)) { + /* skip if the one is not last */ + continue; + } + } + /* delete all data belong if the one is last */ + phead = siter->bestnetheader; + while (phead != NULL) { + removed_scan_cnt++; + list_for_each_entry_safe(iter, next, + &phead->entry_list, list) { + list_del(&iter->list); + MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE); + } + pprev = phead; + phead = phead->next; + MFREE(dhd->osh, pprev, BEST_HEADER_SIZE); + } + if (phead == NULL) { + /* it is ok to delete top node */ + list_del(&siter->list); + MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE); + } + } + return removed_scan_cnt; +} + +static int +_dhd_pno_cfg(dhd_pub_t *dhd, uint16 *channel_list, int nchan) +{ + int err = BCME_OK; + int i = 0; + wl_pfn_cfg_t pfncfg_param; + NULL_CHECK(dhd, "dhd is NULL", err); + if (nchan) { + NULL_CHECK(channel_list, "nchan is NULL", err); + } + DHD_PNO(("%s enter : nchan : %d\n", __FUNCTION__, nchan)); + memset(&pfncfg_param, 0, sizeof(wl_pfn_cfg_t)); + /* Setup default values */ + 
pfncfg_param.reporttype = htod32(WL_PFN_REPORT_ALLNET); + pfncfg_param.channel_num = htod32(0); + + for (i = 0; i < nchan && nchan < WL_NUMCHANNELS; i++) + pfncfg_param.channel_list[i] = channel_list[i]; + + pfncfg_param.channel_num = htod32(nchan); + err = dhd_iovar(dhd, 0, "pfn_cfg", (char *)&pfncfg_param, sizeof(pfncfg_param), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__)); + goto exit; + } +exit: + return err; +} +static int +_dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mode_t mode) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL\n", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + mutex_lock(&_pno_state->pno_mutex); + switch (mode) { + case DHD_PNO_LEGACY_MODE: { + struct dhd_pno_ssid *iter, *next; + if (params->params_legacy.nssid > 0) { + list_for_each_entry_safe(iter, next, + &params->params_legacy.ssid_list, list) { + list_del(&iter->list); + kfree(iter); + } + } + params->params_legacy.nssid = 0; + params->params_legacy.scan_fr = 0; + params->params_legacy.pno_freq_expo_max = 0; + params->params_legacy.pno_repeat = 0; + params->params_legacy.nchan = 0; + memset(params->params_legacy.chan_list, 0, + sizeof(params->params_legacy.chan_list)); + break; + } + case DHD_PNO_BATCH_MODE: { + params->params_batch.scan_fr = 0; + params->params_batch.mscan = 0; + params->params_batch.nchan = 0; + params->params_batch.rtt = 0; + params->params_batch.bestn = 0; + params->params_batch.nchan = 0; + params->params_batch.band = WLC_BAND_AUTO; + memset(params->params_batch.chan_list, 0, + sizeof(params->params_batch.chan_list)); + params->params_batch.get_batch.batch_started = FALSE; + params->params_batch.get_batch.buf = NULL; + params->params_batch.get_batch.bufsize = 0; + params->params_batch.get_batch.reason = 0; + _dhd_pno_clear_all_batch_results(dhd, + 
&params->params_batch.get_batch.scan_results_list, FALSE); + _dhd_pno_clear_all_batch_results(dhd, + &params->params_batch.get_batch.expired_scan_results_list, FALSE); + params->params_batch.get_batch.tot_scan_cnt = 0; + params->params_batch.get_batch.expired_tot_scan_cnt = 0; + params->params_batch.get_batch.top_node_cnt = 0; + INIT_LIST_HEAD(&params->params_batch.get_batch.scan_results_list); + INIT_LIST_HEAD(&params->params_batch.get_batch.expired_scan_results_list); + break; + } + case DHD_PNO_HOTLIST_MODE: { + struct dhd_pno_bssid *iter, *next; + if (params->params_hotlist.nbssid > 0) { + list_for_each_entry_safe(iter, next, + &params->params_hotlist.bssid_list, list) { + list_del(&iter->list); + kfree(iter); + } + } + params->params_hotlist.scan_fr = 0; + params->params_hotlist.nbssid = 0; + params->params_hotlist.nchan = 0; + params->params_batch.band = WLC_BAND_AUTO; + memset(params->params_hotlist.chan_list, 0, + sizeof(params->params_hotlist.chan_list)); + break; + } + default: + DHD_ERROR(("%s : unknown mode : %d\n", __FUNCTION__, mode)); + break; + } + mutex_unlock(&_pno_state->pno_mutex); + return err; +} +static int +_dhd_pno_add_bssid(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, int nbssid) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + if (nbssid) { + NULL_CHECK(p_pfn_bssid, "bssid list is NULL", err); + } + err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)p_pfn_bssid, + sizeof(wl_pfn_bssid_t) * nbssid, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__)); + goto exit; + } +exit: + return err; +} + +#ifdef GSCAN_SUPPORT +static int +_dhd_pno_add_significant_bssid(dhd_pub_t *dhd, + wl_pfn_significant_bssid_t *p_pfn_significant_bssid, int nbssid) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + + if (!nbssid) { + err = BCME_ERROR; + goto exit; + } + + NULL_CHECK(p_pfn_significant_bssid, "bssid list is NULL", err); + + err = dhd_iovar(dhd, 0, "pfn_add_swc_bssid", (char *)p_pfn_significant_bssid, + 
sizeof(wl_pfn_significant_bssid_t) * nbssid, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_significant_bssid %d\n", __FUNCTION__, err)); + goto exit; + } +exit: + return err; +} +#endif /* GSCAN_SUPPORT */ + +int +dhd_pno_stop_for_ssid(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 mode = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + wl_pfn_bssid_t *p_pfn_bssid = NULL; + NULL_CHECK(dhd, "dev is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + if (!(_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)) { + DHD_ERROR(("%s : LEGACY PNO MODE is not enabled\n", __FUNCTION__)); + goto exit; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + struct dhd_pno_gscan_params *gscan_params; + + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = &_params->params_gscan; + + if (gscan_params->mscan) + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* save current pno_mode before calling dhd_pno_clean */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + /* restore previous pno_mode */ + _pno_state->pno_mode = mode; + /* Restart gscan */ + err = dhd_pno_initiate_gscan_request(dhd, 1, 0); + goto exit; + } +#endif /* GSCAN_SUPPORT */ + /* restart Batch mode if the batch mode is on */ + if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* save current pno_mode before calling dhd_pno_clean */ + mode = _pno_state->pno_mode; + dhd_pno_clean(dhd); + /* restore previous pno_mode */ + _pno_state->pno_mode = mode; + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params = 
&(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + /* restart BATCH SCAN */ + err = dhd_pno_set_for_batch(dhd, &_params->params_batch); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + /* restart HOTLIST SCAN */ + struct dhd_pno_bssid *iter, *next; + _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) * + _params->params_hotlist.nbssid, GFP_KERNEL); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, _params->params_hotlist.nbssid)); + err = BCME_ERROR; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + /* convert dhd_pno_bssid to wl_pfn_bssid */ + list_for_each_entry_safe(iter, next, + &_params->params_hotlist.bssid_list, list) { + memcpy(&p_pfn_bssid->macaddr, + &iter->macaddr, ETHER_ADDR_LEN); + p_pfn_bssid->flags = iter->flags; + p_pfn_bssid++; + } + err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + kfree(p_pfn_bssid); + return err; +} + +int +dhd_pno_enable(dhd_pub_t *dhd, int enable) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + return (_dhd_pno_enable(dhd, enable)); +} + +static wlc_ssid_ext_t * +dhd_pno_get_legacy_pno_ssid(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state) +{ + int err = BCME_OK; + int i; + struct dhd_pno_ssid *iter, *next; + dhd_pno_params_t *_params1 = 
&pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + wlc_ssid_ext_t *p_ssid_list; + + p_ssid_list = kzalloc(sizeof(wlc_ssid_ext_t) * + _params1->params_legacy.nssid, GFP_KERNEL); + if (p_ssid_list == NULL) { + DHD_ERROR(("%s : failed to allocate wlc_ssid_ext_t array (count: %d)", + __FUNCTION__, _params1->params_legacy.nssid)); + err = BCME_ERROR; + pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + goto exit; + } + i = 0; + /* convert dhd_pno_ssid to wlc_ssid_ext_t */ + list_for_each_entry_safe(iter, next, &_params1->params_legacy.ssid_list, list) { + p_ssid_list[i].SSID_len = iter->SSID_len; + p_ssid_list[i].hidden = iter->hidden; + memcpy(p_ssid_list[i].SSID, iter->SSID, p_ssid_list[i].SSID_len); + i++; + } +exit: + return p_ssid_list; +} + +static int +dhd_pno_add_to_ssid_list(dhd_pno_params_t *params, wlc_ssid_ext_t *ssid_list, + int nssid) +{ + int ret = 0; + int i; + struct dhd_pno_ssid *_pno_ssid; + + for (i = 0; i < nssid; i++) { + if (ssid_list[i].SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s : Invalid SSID length %d\n", + __FUNCTION__, ssid_list[i].SSID_len)); + ret = BCME_ERROR; + goto exit; + } + _pno_ssid = kzalloc(sizeof(struct dhd_pno_ssid), GFP_KERNEL); + if (_pno_ssid == NULL) { + DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n", + __FUNCTION__)); + ret = BCME_ERROR; + goto exit; + } + _pno_ssid->SSID_len = ssid_list[i].SSID_len; + _pno_ssid->hidden = ssid_list[i].hidden; + memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len); + list_add_tail(&_pno_ssid->list, &params->params_legacy.ssid_list); + } + +exit: + return ret; +} + +int +dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan) +{ + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + dhd_pno_status_info_t *_pno_state; + uint16 _chan_list[WL_NUMCHANNELS]; + int32 tot_nchan = 0; + int err = BCME_OK; + int i; + int mode = 0; + NULL_CHECK(dhd, "dhd is NULL", 
err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit_no_clear; + } + DHD_PNO(("%s enter : scan_fr :%d, pno_repeat :%d," + "pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__, + scan_fr, pno_repeat, pno_freq_expo_max, nchan)); + + _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + /* If GSCAN is also ON will handle this down below */ +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE && + !(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) +#else + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) +#endif /* GSCAN_SUPPORT */ + { + DHD_ERROR(("%s : Legacy PNO mode was already started, " + "will disable previous one to start new one\n", __FUNCTION__)); + err = dhd_pno_stop_for_ssid(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n", + __FUNCTION__, err)); + goto exit_no_clear; + } + } + _pno_state->pno_mode |= DHD_PNO_LEGACY_MODE; + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + if (err < 0) { + DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n", + __FUNCTION__, err)); + goto exit_no_clear; + } + memset(_chan_list, 0, sizeof(_chan_list)); + tot_nchan = MIN(nchan, WL_NUMCHANNELS); + if (tot_nchan > 0 && channel_list) { + for (i = 0; i < tot_nchan; i++) + _params->params_legacy.chan_list[i] = _chan_list[i] = channel_list[i]; + } +#ifdef GSCAN_SUPPORT + else { + tot_nchan = WL_NUMCHANNELS; + err = _dhd_pno_get_channels(dhd, _chan_list, &tot_nchan, + (WLC_BAND_2G | WLC_BAND_5G), TRUE); + if (err < 0) { + tot_nchan = 0; + DHD_PNO(("Could not get channel list for PNO SSID\n")); + } else { + for (i = 0; i < tot_nchan; i++) + _params->params_legacy.chan_list[i] = _chan_list[i]; + } + } +#endif /* GSCAN_SUPPORT */ + + if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + DHD_PNO(("BATCH SCAN is on progress in firmware\n")); + /* retrieve the 
batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = _dhd_pno_enable(dhd, PNO_OFF); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit_no_clear; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + /* use superset of channel list between two mode */ + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + if (_params2->params_batch.nchan > 0 && tot_nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_batch.chan_list[0], + _params2->params_batch.nchan, + &channel_list[0], tot_nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and batch\n", + __FUNCTION__)); + goto exit_no_clear; + } + } else { + DHD_PNO(("superset channel will use" + " all channels in firmware\n")); + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + if (_params2->params_hotlist.nchan > 0 && tot_nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_hotlist.chan_list[0], + _params2->params_hotlist.nchan, + &channel_list[0], tot_nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and hotlist\n", + __FUNCTION__)); + goto exit_no_clear; + } + } + } + } + _params->params_legacy.scan_fr = scan_fr; + _params->params_legacy.pno_repeat = pno_repeat; + _params->params_legacy.pno_freq_expo_max = pno_freq_expo_max; + _params->params_legacy.nchan = tot_nchan; + _params->params_legacy.nssid = nssid; + INIT_LIST_HEAD(&_params->params_legacy.ssid_list); +#ifdef GSCAN_SUPPORT + /* dhd_pno_initiate_gscan_request will handle simultaneous Legacy PNO and GSCAN */ + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + if 
(dhd_pno_add_to_ssid_list(_params, ssid_list, nssid) < 0) { + err = BCME_ERROR; + goto exit; + } + DHD_PNO(("GSCAN mode is ON! Will restart GSCAN+Legacy PNO\n")); + err = dhd_pno_initiate_gscan_request(dhd, 1, 0); + goto exit; + } +#endif /* GSCAN_SUPPORT */ + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_LEGACY_MODE)) < 0) { + DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err)); + goto exit; + } + if ((err = _dhd_pno_add_ssid(dhd, ssid_list, nssid)) < 0) { + DHD_ERROR(("failed to add ssid list(err %d), %d in firmware\n", err, nssid)); + goto exit; + } + if (dhd_pno_add_to_ssid_list(_params, ssid_list, nssid) < 0) { + err = BCME_ERROR; + goto exit; + } + if (tot_nchan > 0) { + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + } + if (_pno_state->pno_status == DHD_PNO_DISABLED) { + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) + DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); + } +exit: + if (err < 0) { + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + } +exit_no_clear: + /* clear mode in case of error */ + if (err < 0) { + int ret = dhd_pno_clean(dhd); + + if (ret < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, ret)); + } else { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + } + } + return err; +} + +int +dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params) +{ + int err = BCME_OK; + uint16 _chan_list[WL_NUMCHANNELS]; + int rem_nchan = 0, tot_nchan = 0; + int mode = 0, mscan = 0; + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + dhd_pno_status_info_t *_pno_state; + wlc_ssid_ext_t *p_ssid_list = NULL; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(batch_params, "batch_params is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", 
__FUNCTION__)); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + _pno_state->pno_mode |= DHD_PNO_BATCH_MODE; + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n", + __FUNCTION__)); + goto exit; + } + } else { + /* batch mode is already started */ + return -EBUSY; + } + _params->params_batch.scan_fr = batch_params->scan_fr; + _params->params_batch.bestn = batch_params->bestn; + _params->params_batch.mscan = (batch_params->mscan)? + batch_params->mscan : DEFAULT_BATCH_MSCAN; + _params->params_batch.nchan = batch_params->nchan; + memcpy(_params->params_batch.chan_list, batch_params->chan_list, + sizeof(_params->params_batch.chan_list)); + + memset(_chan_list, 0, sizeof(_chan_list)); + + rem_nchan = ARRAYSIZE(batch_params->chan_list) - batch_params->nchan; + if (batch_params->band == WLC_BAND_2G || batch_params->band == WLC_BAND_5G) { + /* get a valid channel list based on band B or A */ + err = _dhd_pno_get_channels(dhd, + &_params->params_batch.chan_list[batch_params->nchan], + &rem_nchan, batch_params->band, FALSE); + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", + __FUNCTION__, batch_params->band)); + goto exit; + } + /* now we need to update nchan because rem_chan has valid channel count */ + _params->params_batch.nchan += rem_nchan; + /* need to sort channel list */ + sort(_params->params_batch.chan_list, _params->params_batch.nchan, + sizeof(_params->params_batch.chan_list[0]), _dhd_pno_cmpfunc, NULL); + } +#ifdef PNO_DEBUG +{ + DHD_PNO(("Channel list : ")); + for (i = 0; i < _params->params_batch.nchan; i++) { + DHD_PNO(("%d ", 
_params->params_batch.chan_list[i])); + } + DHD_PNO(("\n")); +} +#endif + if (_params->params_batch.nchan) { + /* copy the channel list into local array */ + memcpy(_chan_list, _params->params_batch.chan_list, sizeof(_chan_list)); + tot_nchan = _params->params_batch.nchan; + } + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + DHD_PNO(("PNO SSID is on progress in firmware\n")); + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = _dhd_pno_enable(dhd, PNO_OFF); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + /* Use the superset for channelist between two mode */ + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + if (_params2->params_legacy.nchan > 0 && _params->params_batch.nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_legacy.chan_list[0], + _params2->params_legacy.nchan, + &_params->params_batch.chan_list[0], _params->params_batch.nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and batch\n", + __FUNCTION__)); + goto exit; + } + } else { + DHD_PNO(("superset channel will use all channels in firmware\n")); + } + p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + if (!p_ssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + if ((err = _dhd_pno_add_ssid(dhd, p_ssid_list, + _params2->params_legacy.nssid)) < 0) { + DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err)); + goto exit; + } + } + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_BATCH_MODE)) < 0) { + DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } else { + /* we need to return mscan */ + mscan = err; + } + if (tot_nchan > 0) { + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call 
pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + } + if (_pno_state->pno_status == DHD_PNO_DISABLED) { + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) + DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); + } +exit: + /* clear mode in case of error */ + if (err < 0) + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + else { + /* return #max scan firmware can do */ + err = mscan; + } + if (p_ssid_list) + kfree(p_ssid_list); + return err; +} + + +#ifdef GSCAN_SUPPORT +static void +dhd_pno_reset_cfg_gscan(dhd_pno_params_t *_params, + dhd_pno_status_info_t *_pno_state, uint8 flags) +{ + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (flags & GSCAN_FLUSH_SCAN_CFG) { + _params->params_gscan.bestn = 0; + _params->params_gscan.mscan = 0; + _params->params_gscan.buffer_threshold = GSCAN_BATCH_NO_THR_SET; + _params->params_gscan.scan_fr = 0; + _params->params_gscan.send_all_results_flag = 0; + memset(_params->params_gscan.channel_bucket, 0, + _params->params_gscan.nchannel_buckets * + sizeof(struct dhd_pno_gscan_channel_bucket)); + _params->params_gscan.nchannel_buckets = 0; + DHD_PNO(("Flush Scan config\n")); + } + if (flags & GSCAN_FLUSH_HOTLIST_CFG) { + struct dhd_pno_bssid *iter, *next; + if (_params->params_gscan.nbssid_hotlist > 0) { + list_for_each_entry_safe(iter, next, + &_params->params_gscan.hotlist_bssid_list, list) { + list_del(&iter->list); + kfree(iter); + } + } + _params->params_gscan.nbssid_hotlist = 0; + DHD_PNO(("Flush Hotlist Config\n")); + } + if (flags & GSCAN_FLUSH_SIGNIFICANT_CFG) { + dhd_pno_significant_bssid_t *iter, *next; + + if (_params->params_gscan.nbssid_significant_change > 0) { + list_for_each_entry_safe(iter, next, + &_params->params_gscan.significant_bssid_list, list) { + list_del(&iter->list); + kfree(iter); + } + } + _params->params_gscan.nbssid_significant_change = 0; + DHD_PNO(("Flush Significant Change Config\n")); + } + + return; +} + +void +dhd_pno_lock_batch_results(dhd_pub_t *dhd) +{ + 
dhd_pno_status_info_t *_pno_state; + _pno_state = PNO_GET_PNOSTATE(dhd); + mutex_lock(&_pno_state->pno_mutex); + return; +} + +void +dhd_pno_unlock_batch_results(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + _pno_state = PNO_GET_PNOSTATE(dhd); + mutex_unlock(&_pno_state->pno_mutex); + return; +} + +void +dhd_wait_batch_results_complete(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + /* Has the workqueue finished its job already?? */ + if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_IN_PROGRESS) { + DHD_PNO(("%s: Waiting to complete retrieval..\n", __FUNCTION__)); + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(&_params->params_gscan), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } else { /* GSCAN_BATCH_RETRIEVAL_COMPLETE */ + gscan_results_cache_t *iter; + uint16 num_results = 0; + int err; + + mutex_lock(&_pno_state->pno_mutex); + iter = _params->params_gscan.gscan_batch_cache; + while (iter) { + num_results += iter->tot_count - iter->tot_consumed; + iter = iter->next; + } + mutex_unlock(&_pno_state->pno_mutex); + + /* All results consumed/No results cached?? 
+ * Get fresh results from FW + */ + if (!num_results) { + DHD_PNO(("%s: No results cached, getting from FW..\n", __FUNCTION__)); + err = dhd_retreive_batch_scan_results(dhd); + if (err == BCME_OK) { + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(&_params->params_gscan), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } + } + } + DHD_PNO(("%s: Wait complete\n", __FUNCTION__)); + + return; +} + +static void * +dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len) +{ + gscan_results_cache_t *iter, *results; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + uint16 num_scan_ids = 0, num_results = 0; + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + iter = results = _params->params_gscan.gscan_batch_cache; + while (iter) { + num_results += iter->tot_count - iter->tot_consumed; + num_scan_ids++; + iter = iter->next; + } + + *len = ((num_results << 16) | (num_scan_ids)); + return results; +} + +void * +dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *info, uint32 *len) +{ + void *ret = NULL; + dhd_pno_gscan_capabilities_t *ptr; + + if (!len) { + DHD_ERROR(("%s: len is NULL\n", __FUNCTION__)); + return ret; + } + + switch (type) { + case DHD_PNO_GET_CAPABILITIES: + ptr = (dhd_pno_gscan_capabilities_t *) + kmalloc(sizeof(dhd_pno_gscan_capabilities_t), GFP_KERNEL); + if (!ptr) + break; + /* Hardcoding these values for now, need to get + * these values from FW, will change in a later check-in + */ + ptr->max_scan_cache_size = 12; + ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS; + ptr->max_ap_cache_per_scan = 16; + ptr->max_rssi_sample_size = PFN_SWC_RSSI_WINDOW_MAX; + ptr->max_scan_reporting_threshold = 100; + ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS; + ptr->max_significant_wifi_change_aps = PFN_SWC_MAX_NUM_APS; + ret = (void *)ptr; + *len = sizeof(dhd_pno_gscan_capabilities_t); + break; + + case DHD_PNO_GET_BATCH_RESULTS: + 
ret = dhd_get_gscan_batch_results(dhd, len); + break; + case DHD_PNO_GET_CHANNEL_LIST: + if (info) { + uint16 ch_list[WL_NUMCHANNELS]; + uint32 *ptr, mem_needed, i; + int32 err, nchan = WL_NUMCHANNELS; + uint32 *gscan_band = (uint32 *) info; + uint8 band = 0; + + /* No band specified?, nothing to do */ + if ((*gscan_band & GSCAN_BAND_MASK) == 0) { + DHD_PNO(("No band specified\n")); + *len = 0; + break; + } + + /* HAL and DHD use different bits for 2.4G and + * 5G in bitmap. Hence translating it here... + */ + if (*gscan_band & GSCAN_BG_BAND_MASK) { + band |= WLC_BAND_2G; + } + if (*gscan_band & GSCAN_A_BAND_MASK) { + band |= WLC_BAND_5G; + } + + err = _dhd_pno_get_channels(dhd, ch_list, &nchan, + (band & GSCAN_ABG_BAND_MASK), + !(*gscan_band & GSCAN_DFS_MASK)); + + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list\n", + __FUNCTION__)); + *len = 0; + } else { + mem_needed = sizeof(uint32) * nchan; + ptr = (uint32 *) kmalloc(mem_needed, GFP_KERNEL); + if (!ptr) { + DHD_ERROR(("%s: Unable to malloc %d bytes\n", + __FUNCTION__, mem_needed)); + break; + } + for (i = 0; i < nchan; i++) { + ptr[i] = wf_channel2mhz(ch_list[i], + (ch_list[i] <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + } + ret = ptr; + *len = mem_needed; + } + } else { + *len = 0; + DHD_ERROR(("%s: info buffer is NULL\n", __FUNCTION__)); + } + break; + + default: + DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); + break; + } + + return ret; + +} + +int +dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *buf, uint8 flush) +{ + int err = BCME_OK; + dhd_pno_params_t *_params; + int i; + dhd_pno_status_info_t *_pno_state; + + NULL_CHECK(dhd, "dhd is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + mutex_lock(&_pno_state->pno_mutex); + + switch (type) { + case DHD_PNO_BATCH_SCAN_CFG_ID: + { + gscan_batch_params_t *ptr = (gscan_batch_params_t *)buf; + _params->params_gscan.bestn = ptr->bestn; + _params->params_gscan.mscan = ptr->mscan; + _params->params_gscan.buffer_threshold = ptr->buffer_threshold; + break; + } + case DHD_PNO_GEOFENCE_SCAN_CFG_ID: + { + gscan_hotlist_scan_params_t *ptr = (gscan_hotlist_scan_params_t *)buf; + struct dhd_pno_bssid *_pno_bssid; + struct bssid_t *bssid_ptr; + int8 flags; + + if (flush) { + dhd_pno_reset_cfg_gscan(_params, _pno_state, + GSCAN_FLUSH_HOTLIST_CFG); + } + + if (!ptr->nbssid) { + break; + } + if (!_params->params_gscan.nbssid_hotlist) { + INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list); + } + if ((_params->params_gscan.nbssid_hotlist + + ptr->nbssid) > PFN_SWC_MAX_NUM_APS) { + DHD_ERROR(("Excessive number of hotlist APs programmed %d\n", + (_params->params_gscan.nbssid_hotlist + + ptr->nbssid))); + err = BCME_RANGE; + goto exit; + } + + for (i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) { + _pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL); + + if (!_pno_bssid) { + DHD_ERROR(("_pno_bssid is NULL, cannot kalloc %zd bytes", + sizeof(struct dhd_pno_bssid))); + err = BCME_NOMEM; + goto exit; + } + 
memcpy(&_pno_bssid->macaddr, &bssid_ptr->macaddr, ETHER_ADDR_LEN); + + flags = (int8) bssid_ptr->rssi_reporting_threshold; + _pno_bssid->flags = flags << WL_PFN_RSSI_SHIFT; + list_add_tail(&_pno_bssid->list, + &_params->params_gscan.hotlist_bssid_list); + } + + _params->params_gscan.nbssid_hotlist += ptr->nbssid; + _params->params_gscan.lost_ap_window = ptr->lost_ap_window; + break; + } + case DHD_PNO_SIGNIFICANT_SCAN_CFG_ID: + { + gscan_swc_params_t *ptr = (gscan_swc_params_t *)buf; + dhd_pno_significant_bssid_t *_pno_significant_change_bssid; + wl_pfn_significant_bssid_t *significant_bssid_ptr; + + if (flush) { + dhd_pno_reset_cfg_gscan(_params, _pno_state, + GSCAN_FLUSH_SIGNIFICANT_CFG); + } + + if (!ptr->nbssid) { + break; + } + if (!_params->params_gscan.nbssid_significant_change) { + INIT_LIST_HEAD(&_params->params_gscan.significant_bssid_list); + } + if ((_params->params_gscan.nbssid_significant_change + + ptr->nbssid) > PFN_SWC_MAX_NUM_APS) { + DHD_ERROR(("Excessive number of SWC APs programmed %d\n", + (_params->params_gscan.nbssid_significant_change + + ptr->nbssid))); + err = BCME_RANGE; + goto exit; + } + + for (i = 0, significant_bssid_ptr = ptr->bssid_elem_list; + i < ptr->nbssid; i++, significant_bssid_ptr++) { + _pno_significant_change_bssid = + kzalloc(sizeof(dhd_pno_significant_bssid_t), + GFP_KERNEL); + + if (!_pno_significant_change_bssid) { + DHD_ERROR(("SWC bssidptr is NULL, cannot kalloc %zd bytes", + sizeof(dhd_pno_significant_bssid_t))); + err = BCME_NOMEM; + goto exit; + } + memcpy(&_pno_significant_change_bssid->BSSID, + &significant_bssid_ptr->macaddr, ETHER_ADDR_LEN); + _pno_significant_change_bssid->rssi_low_threshold = + significant_bssid_ptr->rssi_low_threshold; + _pno_significant_change_bssid->rssi_high_threshold = + significant_bssid_ptr->rssi_high_threshold; + list_add_tail(&_pno_significant_change_bssid->list, + &_params->params_gscan.significant_bssid_list); + } + + _params->params_gscan.swc_nbssid_threshold = 
ptr->swc_threshold; + _params->params_gscan.swc_rssi_window_size = ptr->rssi_window; + _params->params_gscan.lost_ap_window = ptr->lost_ap_window; + _params->params_gscan.nbssid_significant_change += ptr->nbssid; + break; + } + case DHD_PNO_SCAN_CFG_ID: + { + int i, k, valid = 0; + uint16 band, min; + gscan_scan_params_t *ptr = (gscan_scan_params_t *)buf; + struct dhd_pno_gscan_channel_bucket *ch_bucket; + + if (ptr->nchannel_buckets <= GSCAN_MAX_CH_BUCKETS) { + _params->params_gscan.nchannel_buckets = ptr->nchannel_buckets; + + memcpy(_params->params_gscan.channel_bucket, ptr->channel_bucket, + _params->params_gscan.nchannel_buckets * + sizeof(struct dhd_pno_gscan_channel_bucket)); + min = ptr->channel_bucket[0].bucket_freq_multiple; + ch_bucket = _params->params_gscan.channel_bucket; + + for (i = 0; i < ptr->nchannel_buckets; i++) { + band = ch_bucket[i].band; + for (k = 0; k < ptr->channel_bucket[i].num_channels; k++) { + ch_bucket[i].chan_list[k] = + wf_mhz2channel(ptr->channel_bucket[i].chan_list[k], + 0); + } + ch_bucket[i].band = 0; + /* HAL and DHD use different bits for 2.4G and + * 5G in bitmap. Hence translating it here... 
+ */ + if (band & GSCAN_BG_BAND_MASK) + ch_bucket[i].band |= WLC_BAND_2G; + + if (band & GSCAN_A_BAND_MASK) + ch_bucket[i].band |= WLC_BAND_5G; + + if (band & GSCAN_DFS_MASK) + ch_bucket[i].band |= GSCAN_DFS_MASK; + if (ptr->scan_fr == + ptr->channel_bucket[i].bucket_freq_multiple) { + valid = 1; + } + if (ptr->channel_bucket[i].bucket_freq_multiple < min) + min = ptr->channel_bucket[i].bucket_freq_multiple; + + DHD_PNO(("band %d report_flag %d\n", ch_bucket[i].band, + ch_bucket[i].report_flag)); + } + if (!valid) + ptr->scan_fr = min; + + for (i = 0; i < ptr->nchannel_buckets; i++) { + ch_bucket[i].bucket_freq_multiple = + ch_bucket[i].bucket_freq_multiple/ptr->scan_fr; + } + _params->params_gscan.scan_fr = ptr->scan_fr; + + DHD_PNO(("num_buckets %d scan_fr %d\n", ptr->nchannel_buckets, + _params->params_gscan.scan_fr)); + } else { + err = BCME_BADARG; + } + break; + } + default: + err = BCME_BADARG; + DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); + break; + } +exit: + mutex_unlock(&_pno_state->pno_mutex); + return err; + +} + + +static bool +validate_gscan_params(struct dhd_pno_gscan_params *gscan_params) +{ + unsigned int i, k; + + if (!gscan_params->scan_fr || !gscan_params->nchannel_buckets) { + DHD_ERROR(("%s : Scan freq - %d or number of channel buckets - %d is empty\n", + __FUNCTION__, gscan_params->scan_fr, gscan_params->nchannel_buckets)); + return false; + } + + for (i = 0; i < gscan_params->nchannel_buckets; i++) { + if (!gscan_params->channel_bucket[i].band) { + for (k = 0; k < gscan_params->channel_bucket[i].num_channels; k++) { + if (gscan_params->channel_bucket[i].chan_list[k] > CHANNEL_5G_MAX) { + DHD_ERROR(("%s : Unknown channel %d\n", __FUNCTION__, + gscan_params->channel_bucket[i].chan_list[k])); + return false; + } + } + } + } + + return true; +} + +static int +dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params) +{ + int err = BCME_OK; + int mode, i = 0, k; + uint16 _chan_list[WL_NUMCHANNELS]; 
+ int tot_nchan = 0; + int num_buckets_to_fw, tot_num_buckets, gscan_param_size; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + wl_pfn_gscan_channel_bucket_t *ch_bucket = NULL; + wl_pfn_gscan_cfg_t *pfn_gscan_cfg_t = NULL; + wl_pfn_significant_bssid_t *p_pfn_significant_bssid = NULL; + wl_pfn_bssid_t *p_pfn_bssid = NULL; + wlc_ssid_ext_t *pssid_list = NULL; + dhd_pno_params_t *params_legacy; + dhd_pno_params_t *_params; + + params_legacy = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(gscan_params, "gscan_params is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!validate_gscan_params(gscan_params)) { + DHD_ERROR(("%s : Cannot start gscan - bad params\n", __FUNCTION__)); + err = BCME_BADARG; + goto exit; + } + /* Create channel list based on channel buckets */ + if (!(ch_bucket = dhd_pno_gscan_create_channel_list(dhd, _pno_state, + _chan_list, &tot_num_buckets, &num_buckets_to_fw))) { + goto exit; + } + + if (_pno_state->pno_mode & (DHD_PNO_GSCAN_MODE | DHD_PNO_LEGACY_MODE)) { + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + } + + _pno_state->pno_mode |= DHD_PNO_GSCAN_MODE; + + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + pssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + + if (!pssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + + if ((err = _dhd_pno_add_ssid(dhd, 
pssid_list, + params_legacy->params_legacy.nssid)) < 0) { + DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err)); + goto exit; + } + } + + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_GSCAN_MODE)) < 0) { + DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err)); + goto exit; + } + + gscan_param_size = sizeof(wl_pfn_gscan_cfg_t) + + (num_buckets_to_fw - 1) * sizeof(wl_pfn_gscan_channel_bucket_t); + pfn_gscan_cfg_t = (wl_pfn_gscan_cfg_t *) MALLOC(dhd->osh, gscan_param_size); + + if (!pfn_gscan_cfg_t) { + DHD_ERROR(("%s: failed to malloc memory of size %d\n", + __FUNCTION__, gscan_param_size)); + err = BCME_NOMEM; + goto exit; + } + + + if (gscan_params->mscan) { + pfn_gscan_cfg_t->buffer_threshold = gscan_params->buffer_threshold; + } else { + pfn_gscan_cfg_t->buffer_threshold = GSCAN_BATCH_NO_THR_SET; + } + if (gscan_params->nbssid_significant_change) { + pfn_gscan_cfg_t->swc_nbssid_threshold = gscan_params->swc_nbssid_threshold; + pfn_gscan_cfg_t->swc_rssi_window_size = gscan_params->swc_rssi_window_size; + pfn_gscan_cfg_t->lost_ap_window = gscan_params->lost_ap_window; + } else { + pfn_gscan_cfg_t->swc_nbssid_threshold = 0; + pfn_gscan_cfg_t->swc_rssi_window_size = 0; + pfn_gscan_cfg_t->lost_ap_window = 0; + } + + pfn_gscan_cfg_t->flags = + (gscan_params->send_all_results_flag & GSCAN_SEND_ALL_RESULTS_MASK); + pfn_gscan_cfg_t->count_of_channel_buckets = num_buckets_to_fw; + + + for (i = 0, k = 0; i < tot_num_buckets; i++) { + if (ch_bucket[i].bucket_end_index != CHANNEL_BUCKET_EMPTY_INDEX) { + pfn_gscan_cfg_t->channel_bucket[k].bucket_end_index = + ch_bucket[i].bucket_end_index; + pfn_gscan_cfg_t->channel_bucket[k].bucket_freq_multiple = + ch_bucket[i].bucket_freq_multiple; + pfn_gscan_cfg_t->channel_bucket[k].report_flag = + ch_bucket[i].report_flag; + k++; + } + } + + tot_nchan = pfn_gscan_cfg_t->channel_bucket[num_buckets_to_fw - 1].bucket_end_index + 1; + DHD_PNO(("Total channel num %d total ch_buckets %d ch_buckets_to_fw %d \n", 
tot_nchan, + tot_num_buckets, num_buckets_to_fw)); + + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + + if ((err = _dhd_pno_gscan_cfg(dhd, pfn_gscan_cfg_t, gscan_param_size)) < 0) { + DHD_ERROR(("%s : failed to set call pno_gscan_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + if (gscan_params->nbssid_significant_change) { + dhd_pno_significant_bssid_t *iter, *next; + + + p_pfn_significant_bssid = kzalloc(sizeof(wl_pfn_significant_bssid_t) * + gscan_params->nbssid_significant_change, GFP_KERNEL); + if (p_pfn_significant_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate memory %zd\n", + __FUNCTION__, + sizeof(wl_pfn_significant_bssid_t) * + gscan_params->nbssid_significant_change)); + err = BCME_NOMEM; + goto exit; + } + i = 0; + /* convert dhd_pno_significant_bssid_t to wl_pfn_significant_bssid_t */ + list_for_each_entry_safe(iter, next, &gscan_params->significant_bssid_list, list) { + p_pfn_significant_bssid[i].rssi_low_threshold = iter->rssi_low_threshold; + p_pfn_significant_bssid[i].rssi_high_threshold = iter->rssi_high_threshold; + memcpy(&p_pfn_significant_bssid[i].macaddr, &iter->BSSID, ETHER_ADDR_LEN); + i++; + } + DHD_PNO(("nbssid_significant_change %d \n", + gscan_params->nbssid_significant_change)); + err = _dhd_pno_add_significant_bssid(dhd, p_pfn_significant_bssid, + gscan_params->nbssid_significant_change); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_add_significant_bssid(err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + } + + if (gscan_params->nbssid_hotlist) { + struct dhd_pno_bssid *iter, *next; + wl_pfn_bssid_t *ptr; + p_pfn_bssid = (wl_pfn_bssid_t *)kzalloc(sizeof(wl_pfn_bssid_t) * + gscan_params->nbssid_hotlist, GFP_KERNEL); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, 
_params->params_hotlist.nbssid)); + err = BCME_NOMEM; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + ptr = p_pfn_bssid; + /* convert dhd_pno_bssid to wl_pfn_bssid */ + DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist)); + list_for_each_entry_safe(iter, next, + &gscan_params->hotlist_bssid_list, list) { + memcpy(&ptr->macaddr, + &iter->macaddr, ETHER_ADDR_LEN); + ptr->flags = iter->flags; + ptr++; + } + + err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + } + + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) { + DHD_ERROR(("%s : failed to enable PNO err %d\n", __FUNCTION__, err)); + } + +exit: + /* clear mode in case of error */ + if (err < 0) { + int ret = dhd_pno_clean(dhd); + + if (ret < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, ret)); + } else { + _pno_state->pno_mode &= ~DHD_PNO_GSCAN_MODE; + } + } + kfree(pssid_list); + kfree(p_pfn_significant_bssid); + kfree(p_pfn_bssid); + if (pfn_gscan_cfg_t) { + MFREE(dhd->osh, pfn_gscan_cfg_t, gscan_param_size); + } + if (ch_bucket) { + MFREE(dhd->osh, ch_bucket, + (tot_num_buckets * sizeof(wl_pfn_gscan_channel_bucket_t))); + } + return err; + +} + + +static void +dhd_pno_merge_gscan_pno_channels(dhd_pno_status_info_t *pno_state, + uint16 *chan_list, + uint8 *ch_scratch_pad, + wl_pfn_gscan_channel_bucket_t *ch_bucket, + uint32 *num_buckets_to_fw, + int num_channels) +{ + uint16 chan_buf[WL_NUMCHANNELS]; + int i, j = 0, ch_bucket_idx = 0; + dhd_pno_params_t *_params = &pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + dhd_pno_params_t *_params1 = &pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + uint16 *legacy_chan_list = _params1->params_legacy.chan_list; + bool is_legacy_scan_freq_higher; + uint8 report_flag = CH_BUCKET_REPORT_REGULAR; + + if (!_params1->params_legacy.scan_fr) + 
_params1->params_legacy.scan_fr = PNO_SCAN_MIN_FW_SEC; + + is_legacy_scan_freq_higher = + _params->params_gscan.scan_fr < _params1->params_legacy.scan_fr; + + /* Calculate new Legacy scan multiple of base scan_freq + * The legacy PNO channel bucket is added at the end of the + * channel bucket list. + */ + if (is_legacy_scan_freq_higher) { + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple = + _params1->params_legacy.scan_fr/_params->params_gscan.scan_fr; + + } else { + uint16 max = 0; + + /* Calculate new multiple of base scan_freq for gscan buckets */ + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple = 1; + for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { + ch_bucket[i].bucket_freq_multiple *= _params->params_gscan.scan_fr; + ch_bucket[i].bucket_freq_multiple /= _params1->params_legacy.scan_fr; + if (max < ch_bucket[i].bucket_freq_multiple) + max = ch_bucket[i].bucket_freq_multiple; + } + _params->params_gscan.max_ch_bucket_freq = max; + } + + /* Off to remove duplicates!! + * Find channels that are already being serviced by gscan before legacy bucket + * These have to be removed from legacy bucket. + * !!Assuming chan_list channels are validated list of channels!! + * ch_scratch_pad is 1 at gscan bucket locations see dhd_pno_gscan_create_channel_list() + */ + for (i = 0; i < _params1->params_legacy.nchan; i++) + ch_scratch_pad[legacy_chan_list[i]] += 2; + + ch_bucket_idx = 0; + memcpy(chan_buf, chan_list, num_channels * sizeof(uint16)); + + /* Finally create channel list and bucket + * At this point ch_scratch_pad can have 4 values: + * 0 - Channel not present in either Gscan or Legacy PNO bucket + * 1 - Channel present only in Gscan bucket + * 2 - Channel present only in Legacy PNO bucket + * 3 - Channel present in both Gscan and Legacy PNO buckets + * Thus Gscan buckets can have values 1 or 3 and Legacy 2 or 3 + * For channel buckets with scan_freq < legacy accept all + * channels i.e. 
ch_scratch_pad = 1 and 3 + * else accept only ch_scratch_pad = 1 and mark rejects as + * ch_scratch_pad = 4 so that they go in legacy + */ + for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { + if (ch_bucket[i].bucket_freq_multiple <= + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple) { + for (; ch_bucket_idx <= ch_bucket[i].bucket_end_index; ch_bucket_idx++, j++) + chan_list[j] = chan_buf[ch_bucket_idx]; + + ch_bucket[i].bucket_end_index = j - 1; + } else { + num_channels = 0; + for (; ch_bucket_idx <= ch_bucket[i].bucket_end_index; ch_bucket_idx++) { + if (ch_scratch_pad[chan_buf[ch_bucket_idx]] == 1) { + chan_list[j] = chan_buf[ch_bucket_idx]; + j++; + num_channels++; + } else { + ch_scratch_pad[chan_buf[ch_bucket_idx]] = 4; + /* If Gscan channel is merged off to legacy bucket and + * if the gscan channel bucket has a report flag > 0 + * use the same for legacy + */ + if (report_flag < ch_bucket[i].report_flag) + report_flag = ch_bucket[i].report_flag; + } + } + + if (num_channels) { + ch_bucket[i].bucket_end_index = j - 1; + } else { + ch_bucket[i].bucket_end_index = CHANNEL_BUCKET_EMPTY_INDEX; + *num_buckets_to_fw = *num_buckets_to_fw - 1; + } + } + + } + + num_channels = 0; + ch_bucket[_params->params_gscan.nchannel_buckets].report_flag = report_flag; + /* Now add channels to the legacy scan bucket + * ch_scratch_pad = 0 to 4 at this point, for legacy -> 2,3,4. 2 means exclusively + * Legacy so add to bucket. 
4 means it is a reject of gscan bucket and must + * be added to Legacy bucket,reject 3 + */ + for (i = 0; i < _params1->params_legacy.nchan; i++) { + if (ch_scratch_pad[legacy_chan_list[i]] != 3) { + chan_list[j] = legacy_chan_list[i]; + j++; + num_channels++; + } + } + if (num_channels) { + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_end_index = j - 1; + } + else { + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_end_index = + CHANNEL_BUCKET_EMPTY_INDEX; + *num_buckets_to_fw = *num_buckets_to_fw - 1; + } + + return; +} +static wl_pfn_gscan_channel_bucket_t * +dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, + dhd_pno_status_info_t *_pno_state, + uint16 *chan_list, + uint32 *num_buckets, + uint32 *num_buckets_to_fw) +{ + int i, num_channels, err, nchan = WL_NUMCHANNELS; + uint16 *ptr = chan_list, max; + uint8 *ch_scratch_pad; + wl_pfn_gscan_channel_bucket_t *ch_bucket; + dhd_pno_params_t *_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + bool is_pno_legacy_running = _pno_state->pno_mode & DHD_PNO_LEGACY_MODE; + dhd_pno_gscan_channel_bucket_t *gscan_buckets = _params->params_gscan.channel_bucket; + + if (is_pno_legacy_running) + *num_buckets = _params->params_gscan.nchannel_buckets + 1; + else + *num_buckets = _params->params_gscan.nchannel_buckets; + + + *num_buckets_to_fw = *num_buckets; + + + ch_bucket = (wl_pfn_gscan_channel_bucket_t *) MALLOC(dhd->osh, + ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); + + if (!ch_bucket) { + DHD_ERROR(("%s: failed to malloc memory of size %zd\n", + __FUNCTION__, (*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); + *num_buckets_to_fw = *num_buckets = 0; + return NULL; + } + + max = gscan_buckets[0].bucket_freq_multiple; + num_channels = 0; + for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { + if (!gscan_buckets[i].band) { + num_channels += gscan_buckets[i].num_channels; + memcpy(ptr, gscan_buckets[i].chan_list, + gscan_buckets[i].num_channels * sizeof(uint16)); 
+ ptr = ptr + gscan_buckets[i].num_channels; + } else { + /* get a valid channel list based on band B or A */ + err = _dhd_pno_get_channels(dhd, ptr, + &nchan, (gscan_buckets[i].band & GSCAN_ABG_BAND_MASK), + !(gscan_buckets[i].band & GSCAN_DFS_MASK)); + + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", + __FUNCTION__, gscan_buckets[i].band)); + MFREE(dhd->osh, ch_bucket, + ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); + *num_buckets_to_fw = *num_buckets = 0; + return NULL; + } + + num_channels += nchan; + ptr = ptr + nchan; + } + + ch_bucket[i].bucket_end_index = num_channels - 1; + ch_bucket[i].bucket_freq_multiple = gscan_buckets[i].bucket_freq_multiple; + ch_bucket[i].report_flag = gscan_buckets[i].report_flag; + if (max < gscan_buckets[i].bucket_freq_multiple) + max = gscan_buckets[i].bucket_freq_multiple; + nchan = WL_NUMCHANNELS - num_channels; + DHD_PNO(("end_idx %d freq_mult - %d\n", + ch_bucket[i].bucket_end_index, ch_bucket[i].bucket_freq_multiple)); + } + + ch_scratch_pad = (uint8 *) kzalloc(CHANNEL_5G_MAX, GFP_KERNEL); + if (!ch_scratch_pad) { + DHD_ERROR(("%s: failed to malloc memory of size %d\n", + __FUNCTION__, CHANNEL_5G_MAX)); + MFREE(dhd->osh, ch_bucket, + ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); + *num_buckets_to_fw = *num_buckets = 0; + return NULL; + } + + /* Need to look for duplicates in gscan buckets if the framework programmed + * the gscan buckets badly, for now return error if there are duplicates. 
+ * Plus as an added bonus, we get all channels in Gscan bucket + * set to 1 for dhd_pno_merge_gscan_pno_channels() + */ + for (i = 0; i < num_channels; i++) { + if (!ch_scratch_pad[chan_list[i]]) { + ch_scratch_pad[chan_list[i]] = 1; + } else { + DHD_ERROR(("%s: Duplicate channel - %d programmed in channel bucket\n", + __FUNCTION__, chan_list[i])); + MFREE(dhd->osh, ch_bucket, ((*num_buckets) * + sizeof(wl_pfn_gscan_channel_bucket_t))); + *num_buckets_to_fw = *num_buckets = 0; + kfree(ch_scratch_pad); + return NULL; + } + } + _params->params_gscan.max_ch_bucket_freq = max; + /* Legacy PNO maybe running, which means we need to create a legacy PNO bucket + * Plus need to remove duplicates as the legacy PNO chan_list may have common channels + * If channel is to be scanned more frequently as per gscan requirements + * remove from legacy PNO ch_bucket. Similarly, if legacy wants a channel scanned + * more often, it is removed from the Gscan channel bucket. + * In the end both are satisfied. 
+ */ + if (is_pno_legacy_running) + dhd_pno_merge_gscan_pno_channels(_pno_state, chan_list, + ch_scratch_pad, ch_bucket, num_buckets_to_fw, num_channels); + + kfree(ch_scratch_pad); + return ch_bucket; +} + +static int +dhd_pno_stop_for_gscan(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int mode; + wlc_ssid_ext_t *pssid_list = NULL; + dhd_pno_status_info_t *_pno_state; + + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { + DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__)); + goto exit; + } + mutex_lock(&_pno_state->pno_mutex); + mode = _pno_state->pno_mode & ~DHD_PNO_GSCAN_MODE; + err = dhd_pno_clean(dhd); + if (err < 0) { + + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + mutex_unlock(&_pno_state->pno_mutex); + return err; + } + _pno_state->pno_mode = mode; + mutex_unlock(&_pno_state->pno_mutex); + + /* Reprogram Legacy PNO if it was running */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + struct dhd_pno_legacy_params *params_legacy; + uint16 chan_list[WL_NUMCHANNELS]; + + params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + pssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + if (!pssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + + DHD_PNO(("Restarting Legacy PNO SSID scan...\n")); + memcpy(chan_list, params_legacy->chan_list, + (params_legacy->nchan * sizeof(uint16))); + err = dhd_pno_set_for_ssid(dhd, pssid_list, params_legacy->nssid, + params_legacy->scan_fr, params_legacy->pno_repeat, + params_legacy->pno_freq_expo_max, chan_list, + 
params_legacy->nchan); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + + } + +exit: + kfree(pssid_list); + return err; +} + +int +dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush) +{ + int err = BCME_OK; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_gscan_params *gscan_params; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + DHD_PNO(("%s enter - run %d flush %d\n", __FUNCTION__, run, flush)); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = ¶ms->params_gscan; + + if (run) { + err = dhd_pno_set_for_gscan(dhd, gscan_params); + } else { + if (flush) { + mutex_lock(&_pno_state->pno_mutex); + dhd_pno_reset_cfg_gscan(params, _pno_state, GSCAN_FLUSH_ALL_CFG); + mutex_unlock(&_pno_state->pno_mutex); + } + /* Need to stop all gscan */ + err = dhd_pno_stop_for_gscan(dhd); + } + + return err; +} + +int +dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag) +{ + int err = BCME_OK; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_gscan_params *gscan_params; + uint8 old_flag; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = ¶ms->params_gscan; + + mutex_lock(&_pno_state->pno_mutex); + + old_flag = gscan_params->send_all_results_flag; + gscan_params->send_all_results_flag = (uint8) real_time_flag; + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + if (old_flag != 
gscan_params->send_all_results_flag) { + wl_pfn_gscan_cfg_t gscan_cfg; + gscan_cfg.flags = (gscan_params->send_all_results_flag & + GSCAN_SEND_ALL_RESULTS_MASK); + gscan_cfg.flags |= GSCAN_CFG_FLAGS_ONLY_MASK; + + if ((err = _dhd_pno_gscan_cfg(dhd, &gscan_cfg, + sizeof(wl_pfn_gscan_cfg_t))) < 0) { + DHD_ERROR(("%s : pno_gscan_cfg failed (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit_mutex_unlock; + } + } else { + DHD_PNO(("No change in flag - %d\n", old_flag)); + } + } else { + DHD_PNO(("Gscan not started\n")); + } +exit_mutex_unlock: + mutex_unlock(&_pno_state->pno_mutex); +exit: + return err; +} + +int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd) +{ + int ret = 0; + dhd_pno_params_t *params; + struct dhd_pno_gscan_params *gscan_params; + dhd_pno_status_info_t *_pno_state; + gscan_results_cache_t *iter, *tmp; + + _pno_state = PNO_GET_PNOSTATE(dhd); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = ¶ms->params_gscan; + iter = gscan_params->gscan_batch_cache; + + while (iter) { + if (iter->tot_consumed == iter->tot_count) { + tmp = iter->next; + kfree(iter); + iter = tmp; + } else + break; +} + gscan_params->gscan_batch_cache = iter; + ret = (iter == NULL); + return ret; +} + +static int +_dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 timestamp = 0, ts = 0, i, j, timediff; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + wl_pfn_lnet_info_t *plnetinfo; + struct dhd_pno_gscan_params *gscan_params; + wl_pfn_lscanresults_t *plbestnet = NULL; + gscan_results_cache_t *iter, *tail; + wifi_gscan_result_t *result; + uint8 *nAPs_per_scan = NULL; + uint8 num_scans_in_cur_iter; + uint16 count, scan_id = 0; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + _pno_state = PNO_GET_PNOSTATE(dhd); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!WLS_SUPPORTED(_pno_state)) { 
+ DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + gscan_params = &params->params_gscan; + nAPs_per_scan = (uint8 *) MALLOC(dhd->osh, gscan_params->mscan); + + if (!nAPs_per_scan) { + DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", __FUNCTION__, + gscan_params->mscan)); + err = BCME_NOMEM; + goto exit; + } + + plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); + /* Fix: plbestnet was dereferenced below (plbestnet->status) without an + * allocation check; the exit path already handles a NULL plbestnet safely. + */ + if (!plbestnet) { + DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", __FUNCTION__, + PNO_BESTNET_LEN)); + err = BCME_NOMEM; + goto exit; + } + + mutex_lock(&_pno_state->pno_mutex); + iter = gscan_params->gscan_batch_cache; + /* If a cache has not been consumed , just delete it */ + while (iter) { + iter->tot_consumed = iter->tot_count; + iter = iter->next; + } + dhd_gscan_batch_cache_cleanup(dhd); + + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { + DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__)); + goto exit_mutex_unlock; + } + + timediff = gscan_params->scan_fr * 1000; + timediff = timediff >> 1; + + /* Ok, now lets start getting results from the FW */ + plbestnet->status = PFN_INCOMPLETE; + tail = gscan_params->gscan_batch_cache; + while (plbestnet->status != PFN_COMPLETE) { + memset(plbestnet, 0, PNO_BESTNET_LEN); + err = dhd_iovar(dhd, 0, "pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0); + if (err < 0) { + DHD_ERROR(("%s : Cannot get all the batch results, err :%d\n", + __FUNCTION__, err)); + goto exit_mutex_unlock; + } + DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version, + plbestnet->status, plbestnet->count)); + if (plbestnet->version != PFN_SCANRESULT_VERSION) { + err = BCME_VERSION; + DHD_ERROR(("bestnet version(%d) is mismatch with Driver version(%d)\n", + plbestnet->version, PFN_SCANRESULT_VERSION)); + goto exit_mutex_unlock; + } + + num_scans_in_cur_iter = 0; + timestamp = plbestnet->netinfo[0].timestamp; + /* find out how many scans' results did we get in this batch of FW results */ + for (i = 0, count = 0; i < plbestnet->count; i++, count++) { + plnetinfo = &plbestnet->netinfo[i]; + /* Unlikely to happen, 
but just in case the results from + * FW doesnt make sense..... Assume its part of one single scan + */ + if (num_scans_in_cur_iter > gscan_params->mscan) { + num_scans_in_cur_iter = 0; + count = plbestnet->count; + break; + } + if (TIME_DIFF_MS(timestamp, plnetinfo->timestamp) > timediff) { + nAPs_per_scan[num_scans_in_cur_iter] = count; + count = 0; + num_scans_in_cur_iter++; + } + timestamp = plnetinfo->timestamp; + } + nAPs_per_scan[num_scans_in_cur_iter] = count; + num_scans_in_cur_iter++; + + DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter)); + plnetinfo = &plbestnet->netinfo[0]; + + for (i = 0; i < num_scans_in_cur_iter; i++) { + iter = (gscan_results_cache_t *) + kzalloc(((nAPs_per_scan[i] - 1) * sizeof(wifi_gscan_result_t)) + + sizeof(gscan_results_cache_t), GFP_KERNEL); + if (!iter) { + DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", + __FUNCTION__, gscan_params->mscan)); + err = BCME_NOMEM; + goto exit_mutex_unlock; + } + /* Need this check because the new set of results from FW + * maybe a continuation of previous sets' scan results + */ + if (TIME_DIFF_MS(ts, plnetinfo->timestamp) > timediff) { + iter->scan_id = ++scan_id; + } else { + iter->scan_id = scan_id; + } + DHD_PNO(("scan_id %d tot_count %d\n", scan_id, nAPs_per_scan[i])); + iter->tot_count = nAPs_per_scan[i]; + iter->tot_consumed = 0; + + if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) { + DHD_PNO(("This scan is aborted\n")); + iter->flag = (ENABLE << PNO_STATUS_ABORT); + } else if (gscan_params->reason) { + iter->flag = (ENABLE << gscan_params->reason); + } + + if (!tail) { + gscan_params->gscan_batch_cache = iter; + } else { + tail->next = iter; + } + tail = iter; + iter->next = NULL; + for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo++) { + result = &iter->results[j]; + + result->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel, + (plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + result->rssi = (int32) plnetinfo->RSSI; + /* Info not available & not expected */ + result->beacon_period = 0; + result->capability = 0; + result->ie_length = 0; + result->rtt = (uint64) plnetinfo->rtt0; + result->rtt_sd = (uint64) plnetinfo->rtt1; + result->ts = convert_fw_rel_time_to_systime(plnetinfo->timestamp); + ts = plnetinfo->timestamp; + if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length %d\n", + __FUNCTION__, plnetinfo->pfnsubnet.SSID_len)); + plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + memcpy(result->ssid, plnetinfo->pfnsubnet.SSID, + plnetinfo->pfnsubnet.SSID_len); + result->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0'; + memcpy(&result->macaddr, &plnetinfo->pfnsubnet.BSSID, + ETHER_ADDR_LEN); + + DHD_PNO(("\tSSID : ")); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n", + result->macaddr.octet[0], + result->macaddr.octet[1], + result->macaddr.octet[2], + result->macaddr.octet[3], + result->macaddr.octet[4], + result->macaddr.octet[5])); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo->pfnsubnet.channel, + plnetinfo->RSSI, plnetinfo->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", + plnetinfo->rtt0, plnetinfo->rtt1)); + + } + } + } +exit_mutex_unlock: + mutex_unlock(&_pno_state->pno_mutex); +exit: + params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_COMPLETE; + smp_wmb(); + wake_up_interruptible(&_pno_state->batch_get_wait); + if (nAPs_per_scan) { + MFREE(dhd->osh, nAPs_per_scan, gscan_params->mscan); + } + if (plbestnet) { + MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN); + } + DHD_PNO(("Batch retrieval done!\n")); + return err; +} +#endif /* GSCAN_SUPPORT */ + +static int +_dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) +{ + int err = BCME_OK; + int i, j; + uint32 timestamp = 0; + dhd_pno_params_t *_params = NULL; + dhd_pno_status_info_t *_pno_state = NULL; + 
wl_pfn_lscanresults_t *plbestnet = NULL; + wl_pfn_lnet_info_t *plnetinfo; + dhd_pno_bestnet_entry_t *pbestnet_entry; + dhd_pno_best_header_t *pbestnetheader = NULL; + dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext; + bool allocate_header = FALSE; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit_no_unlock; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit_no_unlock; + } +#ifdef GSCAN_SUPPORT + if (!(_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_GSCAN_MODE))) +#else + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) +#endif /* GSCAN_SUPPORT */ + { + DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__)); + goto exit_no_unlock; + } + mutex_lock(&_pno_state->pno_mutex); + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + if (buf && bufsize) { + if (!list_empty(&_params->params_batch.get_batch.expired_scan_results_list)) { + /* need to check whether we have cashed data or not */ + DHD_PNO(("%s: have cashed batching data in Driver\n", + __FUNCTION__)); + /* convert to results format */ + goto convert_format; + } else { + /* this is a first try to get batching results */ + if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { + /* move the scan_results_list to expired_scan_results_lists */ + list_for_each_entry_safe(siter, snext, + &_params->params_batch.get_batch.scan_results_list, list) { + list_move_tail(&siter->list, + &_params->params_batch.get_batch.expired_scan_results_list); + } + _params->params_batch.get_batch.top_node_cnt = 0; + _params->params_batch.get_batch.expired_tot_scan_cnt = + _params->params_batch.get_batch.tot_scan_cnt; + _params->params_batch.get_batch.tot_scan_cnt = 0; + goto 
convert_format; + } + } + } + /* create dhd_pno_scan_results_t whenever we got event WLC_E_PFN_BEST_BATCHING */ + pscan_results = (dhd_pno_scan_results_t *)MALLOC(dhd->osh, SCAN_RESULTS_SIZE); + if (pscan_results == NULL) { + err = BCME_NOMEM; + DHD_ERROR(("failed to allocate dhd_pno_scan_results_t\n")); + goto exit; + } + pscan_results->bestnetheader = NULL; + pscan_results->cnt_header = 0; + /* add the element into list unless total node cnt is less than MAX_NODE_ CNT */ + if (_params->params_batch.get_batch.top_node_cnt < MAX_NODE_CNT) { + list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list); + _params->params_batch.get_batch.top_node_cnt++; + } else { + int _removed_scan_cnt; + /* remove oldest one and add new one */ + DHD_PNO(("%s : Remove oldest node and add new one\n", __FUNCTION__)); + _removed_scan_cnt = _dhd_pno_clear_all_batch_results(dhd, + &_params->params_batch.get_batch.scan_results_list, TRUE); + _params->params_batch.get_batch.tot_scan_cnt -= _removed_scan_cnt; + list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list); + + } + plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); + /* Fix: NULL_CHECK() returns from the function directly, which would leave + * pno_mutex held here; fail through the normal exit path instead so the + * mutex is released and batch state is reset. + */ + if (plbestnet == NULL) { + err = BCME_NOMEM; + DHD_ERROR(("failed to allocate buffer for bestnet\n")); + goto exit; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + memset(plbestnet, 0, PNO_BESTNET_LEN); + while (plbestnet->status != PFN_COMPLETE) { + memset(plbestnet, 0, PNO_BESTNET_LEN); + err = dhd_iovar(dhd, 0, "pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0); + if (err < 0) { + if (err == BCME_EPERM) { + DHD_ERROR(("we cannot get the batching data " + "during scanning in firmware, try again\n,")); + msleep(500); + continue; + } else { + DHD_ERROR(("%s : failed to execute pfnlbest (err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + } + DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version, + plbestnet->status, plbestnet->count)); + if (plbestnet->version != PFN_SCANRESULT_VERSION) { + err = BCME_VERSION; + DHD_ERROR(("bestnet 
version(%d) is mismatch with Driver version(%d)\n", + plbestnet->version, PFN_SCANRESULT_VERSION)); + goto exit; + } + plnetinfo = plbestnet->netinfo; + for (i = 0; i < plbestnet->count; i++) { + pbestnet_entry = (dhd_pno_bestnet_entry_t *) + MALLOC(dhd->osh, BESTNET_ENTRY_SIZE); + if (pbestnet_entry == NULL) { + err = BCME_NOMEM; + DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n")); + goto exit; + } + memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE); + pbestnet_entry->recorded_time = jiffies; /* record the current time */ + /* create header for the first entry */ + allocate_header = (i == 0)? TRUE : FALSE; + /* check whether the new generation is started or not */ + if (timestamp && (TIME_DIFF(timestamp, plnetinfo->timestamp) + > TIME_MIN_DIFF)) + allocate_header = TRUE; + timestamp = plnetinfo->timestamp; + if (allocate_header) { + pbestnetheader = (dhd_pno_best_header_t *) + MALLOC(dhd->osh, BEST_HEADER_SIZE); + if (pbestnetheader == NULL) { + err = BCME_NOMEM; + if (pbestnet_entry) + MFREE(dhd->osh, pbestnet_entry, + BESTNET_ENTRY_SIZE); + DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n")); + goto exit; + } + /* increase total cnt of bestnet header */ + pscan_results->cnt_header++; + /* Fix: zero the header BEFORE recording the reason; the original + * memset came after the assignment and wiped pbestnetheader->reason. + */ + memset(pbestnetheader, 0, BEST_HEADER_SIZE); + /* need to record the reason to call dhd_pno_get_for_bach */ + if (reason) + pbestnetheader->reason = (ENABLE << reason); + /* initialize the head of linked list */ + INIT_LIST_HEAD(&(pbestnetheader->entry_list)); + /* link the pbestnet heaer into existed list */ + if (pscan_results->bestnetheader == NULL) + /* In case of header */ + pscan_results->bestnetheader = pbestnetheader; + else { + dhd_pno_best_header_t *head = pscan_results->bestnetheader; + pscan_results->bestnetheader = pbestnetheader; + pbestnetheader->next = head; + } + } + /* fills the best network info */ + pbestnet_entry->channel = plnetinfo->pfnsubnet.channel; + pbestnet_entry->RSSI = plnetinfo->RSSI; + if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) { + /* 
if RSSI is positive value, we assume that + * this scan is aborted by other scan + */ + DHD_PNO(("This scan is aborted\n")); + pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT); + } + pbestnet_entry->rtt0 = plnetinfo->rtt0; + pbestnet_entry->rtt1 = plnetinfo->rtt1; + pbestnet_entry->timestamp = plnetinfo->timestamp; + if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length %d: trimming it to max\n", + __FUNCTION__, plnetinfo->pfnsubnet.SSID_len)); + plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len; + memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.SSID, + pbestnet_entry->SSID_len); + memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN); + /* add the element into list */ + list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list); + /* increase best entry count */ + pbestnetheader->tot_cnt++; + pbestnetheader->tot_size += BESTNET_ENTRY_SIZE; + DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1)); + DHD_PNO(("\tSSID : ")); + for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++) + DHD_PNO(("%c", plnetinfo->pfnsubnet.SSID[j])); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n", + plnetinfo->pfnsubnet.BSSID.octet[0], + plnetinfo->pfnsubnet.BSSID.octet[1], + plnetinfo->pfnsubnet.BSSID.octet[2], + plnetinfo->pfnsubnet.BSSID.octet[3], + plnetinfo->pfnsubnet.BSSID.octet[4], + plnetinfo->pfnsubnet.BSSID.octet[5])); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo->pfnsubnet.channel, + plnetinfo->RSSI, plnetinfo->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo->rtt0, plnetinfo->rtt1)); + plnetinfo++; + } + } + if (pscan_results->cnt_header == 0) { + /* In case that we didn't get any data from the firmware + * Remove the current scan_result list from get_bach.scan_results_list. 
+ */ + DHD_PNO(("NO BATCH DATA from Firmware, Delete current SCAN RESULT LIST\n")); + list_del(&pscan_results->list); + MFREE(dhd->osh, pscan_results, SCAN_RESULTS_SIZE); + _params->params_batch.get_batch.top_node_cnt--; + } else { + /* increase total scan count using current scan count. + * Fix: the original read pscan_results->cnt_header unconditionally, + * i.e. after the MFREE above (use-after-free); the freed branch is + * only taken when cnt_header == 0, so accumulating only in this + * else-branch is behavior-preserving. + */ + _params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header; + } + + if (buf && bufsize) { + /* This is a first try to get batching results */ + if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { + /* move the scan_results_list to expired_scan_results_lists */ + list_for_each_entry_safe(siter, snext, + &_params->params_batch.get_batch.scan_results_list, list) { + list_move_tail(&siter->list, + &_params->params_batch.get_batch.expired_scan_results_list); + } + /* reset global values after moving to expired list */ + _params->params_batch.get_batch.top_node_cnt = 0; + _params->params_batch.get_batch.expired_tot_scan_cnt = + _params->params_batch.get_batch.tot_scan_cnt; + _params->params_batch.get_batch.tot_scan_cnt = 0; + } +convert_format: + err = _dhd_pno_convert_format(dhd, &_params->params_batch, buf, bufsize); + if (err < 0) { + DHD_ERROR(("failed to convert the data into upper layer format\n")); + goto exit; + } + } +exit: + if (plbestnet) + MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN); + if (_params) { + _params->params_batch.get_batch.buf = NULL; + _params->params_batch.get_batch.bufsize = 0; + _params->params_batch.get_batch.bytes_written = err; + } + mutex_unlock(&_pno_state->pno_mutex); +exit_no_unlock: + if (waitqueue_active(&_pno_state->get_batch_done.wait)) + complete(&_pno_state->get_batch_done); + return err; +} + +static void +_dhd_pno_get_batch_handler(struct work_struct *work) +{ + dhd_pno_status_info_t *_pno_state; + dhd_pub_t *dhd; + struct dhd_pno_batch_params *params_batch; + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = container_of(work, struct dhd_pno_status_info, work); + dhd = _pno_state->dhd; + if (dhd == NULL) { + 
DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); + return; + } + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + _dhd_pno_get_gscan_batch_from_fw(dhd); + return; + } else +#endif /* GSCAN_SUPPORT */ + { + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + + _dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf, + params_batch->get_batch.bufsize, params_batch->get_batch.reason); + } + +} + +int +dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) +{ + int err = BCME_OK; + char *pbuf = buf; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_batch_params *params_batch; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + struct dhd_pno_gscan_params *gscan_params; + gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan; + gscan_params->reason = reason; + err = dhd_retreive_batch_scan_results(dhd); + if (err == BCME_OK) { + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(gscan_params), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } + } else +#endif + { + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__)); + memset(pbuf, 0, bufsize); + pbuf += sprintf(pbuf, "scancount=%d\n", 0); + sprintf(pbuf, "%s", RESULTS_END_MARKER); + err = strlen(buf); + goto exit; + } + params_batch->get_batch.buf = buf; + params_batch->get_batch.bufsize = bufsize; + 
params_batch->get_batch.reason = reason; + params_batch->get_batch.bytes_written = 0; + schedule_work(&_pno_state->work); + wait_for_completion(&_pno_state->get_batch_done); + } + +#ifdef GSCAN_SUPPORT + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) +#endif + err = params_batch->get_batch.bytes_written; +exit: + return err; +} + +int +dhd_pno_stop_for_batch(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int mode = 0; + int i = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + wl_pfn_bssid_t *p_pfn_bssid = NULL; + wlc_ssid_ext_t *p_ssid_list = NULL; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + DHD_PNO(("Gscan is ongoing, nothing to stop here\n")); + return err; + } +#endif + + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + DHD_ERROR(("%s : PNO BATCH MODE is not enabled\n", __FUNCTION__)); + goto exit; + } + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_HOTLIST_MODE)) { + mode = _pno_state->pno_mode; + dhd_pno_clean(dhd); + _pno_state->pno_mode = mode; + /* restart Legacy PNO if the Legacy PNO is on */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + struct dhd_pno_legacy_params *_params_legacy; + _params_legacy = + &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + if (!p_ssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid, + 
_params_legacy->scan_fr, _params_legacy->pno_repeat, + _params_legacy->pno_freq_expo_max, _params_legacy->chan_list, + _params_legacy->nchan); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + struct dhd_pno_bssid *iter, *next; + _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) * + _params->params_hotlist.nbssid, GFP_KERNEL); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, _params->params_hotlist.nbssid)); + err = BCME_ERROR; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + i = 0; + /* convert dhd_pno_bssid to wl_pfn_bssid */ + list_for_each_entry_safe(iter, next, + &_params->params_hotlist.bssid_list, list) { + memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN); + p_pfn_bssid[i].flags = iter->flags; + i++; + } + err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + kfree(p_ssid_list); + kfree(p_pfn_bssid); + return err; +} + +int +dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params) +{ + int err = BCME_OK; + int i; + uint16 _chan_list[WL_NUMCHANNELS]; + int rem_nchan = 0; + int tot_nchan = 0; + int mode = 0; + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + 
struct dhd_pno_bssid *_pno_bssid; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(hotlist_params, "hotlist_params is NULL", err); + NULL_CHECK(p_pfn_bssid, "p_pfn_bssid is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + _params = &_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]; + if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) { + _pno_state->pno_mode |= DHD_PNO_HOTLIST_MODE; + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_HOTLIST_MODE); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n", + __FUNCTION__)); + goto exit; + } + } + _params->params_batch.nchan = hotlist_params->nchan; + _params->params_batch.scan_fr = hotlist_params->scan_fr; + if (hotlist_params->nchan) + memcpy(_params->params_hotlist.chan_list, hotlist_params->chan_list, + sizeof(_params->params_hotlist.chan_list)); + memset(_chan_list, 0, sizeof(_chan_list)); + + rem_nchan = ARRAYSIZE(hotlist_params->chan_list) - hotlist_params->nchan; + if (hotlist_params->band == WLC_BAND_2G || hotlist_params->band == WLC_BAND_5G) { + /* get a valid channel list based on band B or A */ + err = _dhd_pno_get_channels(dhd, + &_params->params_hotlist.chan_list[hotlist_params->nchan], + &rem_nchan, hotlist_params->band, FALSE); + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", + __FUNCTION__, hotlist_params->band)); + goto exit; + } + /* now we need to update nchan because rem_chan has valid channel count */ + _params->params_hotlist.nchan += rem_nchan; + /* need to sort channel list */ + sort(_params->params_hotlist.chan_list, _params->params_hotlist.nchan, + 
sizeof(_params->params_hotlist.chan_list[0]), _dhd_pno_cmpfunc, NULL); + } +#ifdef PNO_DEBUG +{ + int i; + DHD_PNO(("Channel list : ")); + for (i = 0; i < _params->params_batch.nchan; i++) { + DHD_PNO(("%d ", _params->params_batch.chan_list[i])); + } + DHD_PNO(("\n")); +} +#endif + if (_params->params_hotlist.nchan) { + /* copy the channel list into local array */ + memcpy(_chan_list, _params->params_hotlist.chan_list, + sizeof(_chan_list)); + tot_nchan = _params->params_hotlist.nchan; + } + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + DHD_PNO(("PNO SSID is on progress in firmware\n")); + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = _dhd_pno_enable(dhd, PNO_OFF); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + /* Use the superset for channelist between two mode */ + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + if (_params2->params_legacy.nchan > 0 && + _params->params_hotlist.nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_legacy.chan_list[0], + _params2->params_legacy.nchan, + &_params->params_hotlist.chan_list[0], + _params->params_hotlist.nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + "between legacy and hotlist\n", + __FUNCTION__)); + goto exit; + } + } + + } + + INIT_LIST_HEAD(&(_params->params_hotlist.bssid_list)); + + err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, hotlist_params->nbssid); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_HOTLIST_MODE)) < 0) { + DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + if (tot_nchan > 0) { + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call 
pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + } + for (i = 0; i < hotlist_params->nbssid; i++) { + _pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL); + NULL_CHECK(_pno_bssid, "_pfn_bssid is NULL", err); + memcpy(&_pno_bssid->macaddr, &p_pfn_bssid[i].macaddr, ETHER_ADDR_LEN); + _pno_bssid->flags = p_pfn_bssid[i].flags; + list_add_tail(&_pno_bssid->list, &_params->params_hotlist.bssid_list); + } + _params->params_hotlist.nbssid = hotlist_params->nbssid; + if (_pno_state->pno_status == DHD_PNO_DISABLED) { + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) + DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); + } +exit: + /* clear mode in case of error */ + if (err < 0) + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + return err; +} + +int +dhd_pno_stop_for_hotlist(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 mode = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + wlc_ssid_ext_t *p_ssid_list = NULL; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) { + DHD_ERROR(("%s : Hotlist MODE is not enabled\n", + __FUNCTION__)); + goto exit; + } + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + + if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_BATCH_MODE)) { + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* save current pno_mode before calling dhd_pno_clean */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + /* restore previos pno mode */ + _pno_state->pno_mode = mode; + if (_pno_state->pno_mode & 
DHD_PNO_LEGACY_MODE) { + /* restart Legacy PNO Scan */ + struct dhd_pno_legacy_params *_params_legacy; + _params_legacy = + &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + if (!p_ssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid, + _params_legacy->scan_fr, _params_legacy->pno_repeat, + _params_legacy->pno_freq_expo_max, _params_legacy->chan_list, + _params_legacy->nchan); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + /* restart Batching Scan */ + _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + /* restart BATCH SCAN */ + err = dhd_pno_set_for_batch(dhd, &_params->params_batch); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + kfree(p_ssid_list); + return err; +} + +#ifdef GSCAN_SUPPORT +int +dhd_retreive_batch_scan_results(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + struct dhd_pno_batch_params *params_batch; + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE) { + DHD_PNO(("Retreive batch results\n")); + params_batch->get_batch.buf = NULL; + params_batch->get_batch.bufsize = 0; + 
params_batch->get_batch.reason = PNO_STATUS_EVENT; + _params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_IN_PROGRESS; + schedule_work(&_pno_state->work); + } else { + DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING retrieval" + "already in progress, will skip\n", __FUNCTION__)); + err = BCME_ERROR; + } + + return err; +} + +/* Handle Significant WiFi Change (SWC) event from FW + * Send event to HAL when all results arrive from FW + */ +void * +dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes) +{ + void *ptr = NULL; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + struct dhd_pno_swc_evt_param *params; + wl_pfn_swc_results_t *results = (wl_pfn_swc_results_t *)event_data; + wl_pfn_significant_net_t *change_array; + int i; + + + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + params = &(gscan_params->param_significant); + + if (!results->total_count) { + *send_evt_bytes = 0; + return ptr; + } + + if (!params->results_rxed_so_far) { + if (!params->change_array) { + params->change_array = (wl_pfn_significant_net_t *) + kmalloc(sizeof(wl_pfn_significant_net_t) * results->total_count, + GFP_KERNEL); + + if (!params->change_array) { + DHD_ERROR(("%s Cannot Malloc %zd bytes!!\n", __FUNCTION__, + sizeof(wl_pfn_significant_net_t) * results->total_count)); + *send_evt_bytes = 0; + return ptr; + } + } else { + DHD_ERROR(("RX'ed WLC_E_PFN_SWC evt from FW, previous evt not complete!!")); + *send_evt_bytes = 0; + return ptr; + } + + } + + DHD_PNO(("%s: pkt_count %d total_count %d\n", __FUNCTION__, + results->pkt_count, results->total_count)); + + for (i = 0; i < results->pkt_count; i++) { + DHD_PNO(("\t %02x:%02x:%02x:%02x:%02x:%02x\n", + results->list[i].BSSID.octet[0], + results->list[i].BSSID.octet[1], + results->list[i].BSSID.octet[2], + results->list[i].BSSID.octet[3], + results->list[i].BSSID.octet[4], + results->list[i].BSSID.octet[5])); + } + + 
change_array = ¶ms->change_array[params->results_rxed_so_far]; + memcpy(change_array, results->list, sizeof(wl_pfn_significant_net_t) * results->pkt_count); + params->results_rxed_so_far += results->pkt_count; + + if (params->results_rxed_so_far == results->total_count) { + params->results_rxed_so_far = 0; + *send_evt_bytes = sizeof(wl_pfn_significant_net_t) * results->total_count; + /* Pack up change buffer to send up and reset + * results_rxed_so_far, after its done. + */ + ptr = (void *) params->change_array; + /* expecting the callee to free this mem chunk */ + params->change_array = NULL; + } + else { + *send_evt_bytes = 0; + } + + return ptr; +} + +void +dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type) +{ + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + gscan_results_cache_t *iter, *tmp; + + if (!_pno_state) { + return; + } + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (type == HOTLIST_FOUND) { + iter = gscan_params->gscan_hotlist_found; + gscan_params->gscan_hotlist_found = NULL; + } else { + iter = gscan_params->gscan_hotlist_lost; + gscan_params->gscan_hotlist_lost = NULL; + } + + while (iter) { + tmp = iter->next; + kfree(iter); + iter = tmp; + } + + return; +} + +void * +dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, int *size) +{ + wl_bss_info_t *bi = NULL; + wl_gscan_result_t *gscan_result; + wifi_gscan_result_t *result = NULL; + u32 bi_length = 0; + uint8 channel; + uint32 mem_needed; + + struct timespec ts; + + *size = 0; + + gscan_result = (wl_gscan_result_t *)data; + + if (!gscan_result) { + DHD_ERROR(("Invalid gscan result (NULL pointer)\n")); + goto exit; + } + if (!gscan_result->bss_info) { + DHD_ERROR(("Invalid gscan bss info (NULL pointer)\n")); + goto exit; + } + bi = &gscan_result->bss_info[0].info; + bi_length = dtoh32(bi->length); + if (bi_length != (dtoh32(gscan_result->buflen) - + 
WL_GSCAN_RESULTS_FIXED_SIZE - WL_GSCAN_INFO_FIXED_FIELD_SIZE)) { + DHD_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length)); + goto exit; + } + if (bi->SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", bi->SSID_len)); + bi->SSID_len = DOT11_MAX_SSID_LEN; + } + + mem_needed = OFFSETOF(wifi_gscan_result_t, ie_data) + bi->ie_length; + result = kmalloc(mem_needed, GFP_KERNEL); + + if (!result) { + DHD_ERROR(("%s Cannot malloc scan result buffer %d bytes\n", + __FUNCTION__, mem_needed)); + goto exit; + } + + memcpy(result->ssid, bi->SSID, bi->SSID_len); + result->ssid[bi->SSID_len] = '\0'; + channel = wf_chspec_ctlchan(bi->chanspec); + result->channel = wf_channel2mhz(channel, + (channel <= CH_MAX_2G_CHANNEL? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + result->rssi = (int32) bi->RSSI; + result->rtt = 0; + result->rtt_sd = 0; + get_monotonic_boottime(&ts); + result->ts = (uint64) TIMESPEC_TO_US(ts); + result->beacon_period = dtoh16(bi->beacon_period); + result->capability = dtoh16(bi->capability); + result->ie_length = dtoh32(bi->ie_length); + memcpy(&result->macaddr, &bi->BSSID, ETHER_ADDR_LEN); + memcpy(result->ie_data, ((uint8 *)bi + bi->ie_offset), bi->ie_length); + *size = mem_needed; +exit: + return result; +} + +void * +dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, + int *send_evt_bytes, hotlist_type_t type) +{ + void *ptr = NULL; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + wl_pfn_scanresults_t *results = (wl_pfn_scanresults_t *)event_data; + wifi_gscan_result_t *hotlist_found_array; + wl_pfn_net_info_t *plnetinfo; + gscan_results_cache_t *gscan_hotlist_cache; + int malloc_size = 0, i, total = 0; + + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (!results->count) { + *send_evt_bytes = 0; + return ptr; + } + + malloc_size = sizeof(gscan_results_cache_t) + + ((results->count - 1) * 
sizeof(wifi_gscan_result_t)); + gscan_hotlist_cache = (gscan_results_cache_t *) kmalloc(malloc_size, GFP_KERNEL); + + if (!gscan_hotlist_cache) { + DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size)); + *send_evt_bytes = 0; + return ptr; + } + + if (type == HOTLIST_FOUND) { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found; + gscan_params->gscan_hotlist_found = gscan_hotlist_cache; + DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, results->count)); + } else { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost; + gscan_params->gscan_hotlist_lost = gscan_hotlist_cache; + DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, results->count)); + } + + gscan_hotlist_cache->tot_count = results->count; + gscan_hotlist_cache->tot_consumed = 0; + plnetinfo = results->netinfo; + + for (i = 0; i < results->count; i++, plnetinfo++) { + hotlist_found_array = &gscan_hotlist_cache->results[i]; + hotlist_found_array->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel, + (plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + hotlist_found_array->rssi = (int32) plnetinfo->RSSI; + /* Info not available & not expected */ + hotlist_found_array->beacon_period = 0; + hotlist_found_array->capability = 0; + hotlist_found_array->ie_length = 0; + + hotlist_found_array->ts = convert_fw_rel_time_to_systime(plnetinfo->timestamp); + if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", + plnetinfo->pfnsubnet.SSID_len)); + plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + memcpy(hotlist_found_array->ssid, plnetinfo->pfnsubnet.SSID, + plnetinfo->pfnsubnet.SSID_len); + hotlist_found_array->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0'; + + memcpy(&hotlist_found_array->macaddr, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN); + DHD_PNO(("\t%s %02x:%02x:%02x:%02x:%02x:%02x rssi %d\n", hotlist_found_array->ssid, + hotlist_found_array->macaddr.octet[0], + hotlist_found_array->macaddr.octet[1], + hotlist_found_array->macaddr.octet[2], + hotlist_found_array->macaddr.octet[3], + hotlist_found_array->macaddr.octet[4], + hotlist_found_array->macaddr.octet[5], + hotlist_found_array->rssi)); + } + + + if (results->status == PFN_COMPLETE) { + ptr = (void *) gscan_hotlist_cache; + while (gscan_hotlist_cache) { + total += gscan_hotlist_cache->tot_count; + gscan_hotlist_cache = gscan_hotlist_cache->next; + } + *send_evt_bytes = total * sizeof(wifi_gscan_result_t); + } + + return ptr; +} +#endif /* GSCAN_SUPPORT */ +int +dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) +{ + int err = BCME_OK; + uint status, event_type, flags, datalen; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + event_type = 
ntoh32(event->event_type); + flags = ntoh16(event->flags); + status = ntoh32(event->status); + datalen = ntoh32(event->datalen); + DHD_PNO(("%s enter : event_type :%d\n", __FUNCTION__, event_type)); + switch (event_type) { + case WLC_E_PFN_BSSID_NET_FOUND: + case WLC_E_PFN_BSSID_NET_LOST: + /* TODO : need to implement event logic using generic netlink */ + break; + case WLC_E_PFN_BEST_BATCHING: +#ifndef GSCAN_SUPPORT + { + struct dhd_pno_batch_params *params_batch; + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + if (!waitqueue_active(&_pno_state->get_batch_done.wait)) { + DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING\n", __FUNCTION__)); + params_batch->get_batch.buf = NULL; + params_batch->get_batch.bufsize = 0; + params_batch->get_batch.reason = PNO_STATUS_EVENT; + schedule_work(&_pno_state->work); + } else + DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING" + "will skip this event\n", __FUNCTION__)); + break; + } +#else + break; +#endif /* !GSCAN_SUPPORT */ + default: + DHD_ERROR(("unknown event : %d\n", event_type)); + } +exit: + return err; +} + +int dhd_pno_init(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + UNUSED_PARAMETER(_dhd_pno_suspend); + if (dhd->pno_state) + goto exit; + dhd->pno_state = MALLOC(dhd->osh, sizeof(dhd_pno_status_info_t)); + NULL_CHECK(dhd->pno_state, "failed to create dhd_pno_state", err); + memset(dhd->pno_state, 0, sizeof(dhd_pno_status_info_t)); + /* need to check whether current firmware support batching and hotlist scan */ + _pno_state = PNO_GET_PNOSTATE(dhd); + _pno_state->wls_supported = TRUE; + _pno_state->dhd = dhd; + mutex_init(&_pno_state->pno_mutex); + INIT_WORK(&_pno_state->work, _dhd_pno_get_batch_handler); + init_completion(&_pno_state->get_batch_done); +#ifdef GSCAN_SUPPORT + init_waitqueue_head(&_pno_state->batch_get_wait); +#endif /* GSCAN_SUPPORT */ + err = dhd_iovar(dhd, 0, "pfnlbest", 
NULL, 0, 0); + if (err == BCME_UNSUPPORTED) { + _pno_state->wls_supported = FALSE; + DHD_INFO(("Current firmware doesn't support" + " Android Location Service\n")); + } else { + DHD_ERROR(("%s: Support Android Location Service\n", + __FUNCTION__)); + } +exit: + return err; +} + +int dhd_pno_deinit(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + NULL_CHECK(dhd, "dhd is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + NULL_CHECK(_pno_state, "pno_state is NULL", err); + /* may need to free legacy ssid_list */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + _params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + } + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + mutex_lock(&_pno_state->pno_mutex); + dhd_pno_reset_cfg_gscan(_params, _pno_state, GSCAN_FLUSH_ALL_CFG); + mutex_unlock(&_pno_state->pno_mutex); + } +#endif /* GSCAN_SUPPORT */ + + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + /* clear resource if the BATCH MODE is on */ + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + } + cancel_work_sync(&_pno_state->work); + MFREE(dhd->osh, _pno_state, sizeof(dhd_pno_status_info_t)); + dhd->pno_state = NULL; + return err; +} +#endif /* PNO_SUPPORT */ diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_pno.h b/drivers/amlogic/wifi/bcmdhd/dhd_pno.h new file mode 100644 index 0000000000000..990ec6ce4ad13 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_pno.h @@ -0,0 +1,498 @@ +/* + * Header file of Broadcom Dongle Host Driver (DHD) + * Prefered Network Offload code and Wi-Fi Location Service(WLS) code. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_pno.h 591285 2015-10-07 11:56:29Z $ + */ + +#ifndef __DHD_PNO_H__ +#define __DHD_PNO_H__ + +#if defined(PNO_SUPPORT) +#define PNO_TLV_PREFIX 'S' +#define PNO_TLV_VERSION '1' +#define PNO_TLV_SUBTYPE_LEGACY_PNO '2' +#define PNO_TLV_RESERVED '0' +#define PNO_BATCHING_SET "SET" +#define PNO_BATCHING_GET "GET" +#define PNO_BATCHING_STOP "STOP" +#define PNO_PARAMS_DELIMETER " " +#define PNO_PARAM_CHANNEL_DELIMETER "," +#define PNO_PARAM_VALUE_DELLIMETER '=' +#define PNO_PARAM_SCANFREQ "SCANFREQ" +#define PNO_PARAM_BESTN "BESTN" +#define PNO_PARAM_MSCAN "MSCAN" +#define PNO_PARAM_CHANNEL "CHANNEL" +#define PNO_PARAM_RTT "RTT" + +#define PNO_TLV_TYPE_SSID_IE 'S' +#define PNO_TLV_TYPE_TIME 'T' +#define PNO_TLV_FREQ_REPEAT 'R' +#define PNO_TLV_FREQ_EXPO_MAX 'M' + +#define MAXNUM_SSID_PER_ADD 16 +#define MAXNUM_PNO_PARAMS 2 +#define PNO_TLV_COMMON_LENGTH 1 +#define DEFAULT_BATCH_MSCAN 16 + +#define RESULTS_END_MARKER "----\n" +#define SCAN_END_MARKER "####\n" +#define AP_END_MARKER "====\n" +#define PNO_RSSI_MARGIN_DBM 30 + +#ifdef GSCAN_SUPPORT + +#define GSCAN_MAX_CH_BUCKETS 8 +#define GSCAN_BG_BAND_MASK (1 << 0) +#define GSCAN_A_BAND_MASK (1 << 1) +#define GSCAN_DFS_MASK (1 << 2) +#define GSCAN_ABG_BAND_MASK (GSCAN_A_BAND_MASK | GSCAN_BG_BAND_MASK) +#define GSCAN_BAND_MASK (GSCAN_ABG_BAND_MASK | GSCAN_DFS_MASK) + +#define GSCAN_FLUSH_HOTLIST_CFG (1 << 0) +#define GSCAN_FLUSH_SIGNIFICANT_CFG (1 << 1) +#define GSCAN_FLUSH_SCAN_CFG (1 << 2) +#define GSCAN_FLUSH_ALL_CFG (GSCAN_FLUSH_SCAN_CFG | \ + GSCAN_FLUSH_SIGNIFICANT_CFG | \ + GSCAN_FLUSH_HOTLIST_CFG) +/* Do not change GSCAN_BATCH_RETRIEVAL_COMPLETE */ +#define GSCAN_BATCH_RETRIEVAL_COMPLETE 0 +#define GSCAN_BATCH_RETRIEVAL_IN_PROGRESS 1 +#define GSCAN_BATCH_NO_THR_SET 101 +#define GSCAN_LOST_AP_WINDOW_DEFAULT 4 +#define GSCAN_MIN_BSSID_TIMEOUT 90 +#define GSCAN_BATCH_GET_MAX_WAIT 500 +#define CHANNEL_BUCKET_EMPTY_INDEX 0xFFFF +#define GSCAN_RETRY_THRESHOLD 3 +#endif /* GSCAN_SUPPORT */ + 
+enum scan_status { + /* SCAN ABORT by other scan */ + PNO_STATUS_ABORT, + /* RTT is presence or not */ + PNO_STATUS_RTT_PRESENCE, + /* Disable PNO by Driver */ + PNO_STATUS_DISABLE, + /* NORMAL BATCHING GET */ + PNO_STATUS_NORMAL, + /* WLC_E_PFN_BEST_BATCHING */ + PNO_STATUS_EVENT, + PNO_STATUS_MAX +}; +#define PNO_STATUS_ABORT_MASK 0x0001 +#define PNO_STATUS_RTT_MASK 0x0002 +#define PNO_STATUS_DISABLE_MASK 0x0004 +#define PNO_STATUS_OOM_MASK 0x0010 + +enum index_mode { + INDEX_OF_LEGACY_PARAMS, + INDEX_OF_BATCH_PARAMS, + INDEX_OF_HOTLIST_PARAMS, + /* GSCAN includes hotlist scan and they do not run + * independent of each other + */ +#ifdef GSCAN_SUPPORT + INDEX_OF_GSCAN_PARAMS = INDEX_OF_HOTLIST_PARAMS, +#endif /* GSCAN_SUPPORT */ + INDEX_MODE_MAX +}; +enum dhd_pno_status { + DHD_PNO_DISABLED, + DHD_PNO_ENABLED, + DHD_PNO_SUSPEND +}; +typedef struct cmd_tlv { + char prefix; + char version; + char subtype; + char reserved; +} cmd_tlv_t; +#ifdef GSCAN_SUPPORT +typedef enum { + WIFI_BAND_UNSPECIFIED, + WIFI_BAND_BG = 1, /* 2.4 GHz */ + WIFI_BAND_A = 2, /* 5 GHz without DFS */ + WIFI_BAND_A_DFS = 4, /* 5 GHz DFS only */ + WIFI_BAND_A_WITH_DFS = 6, /* 5 GHz with DFS */ + WIFI_BAND_ABG = 3, /* 2.4 GHz + 5 GHz; no DFS */ + WIFI_BAND_ABG_WITH_DFS = 7, /* 2.4 GHz + 5 GHz with DFS */ +} gscan_wifi_band_t; + +typedef enum { + HOTLIST_LOST, + HOTLIST_FOUND +} hotlist_type_t; + +typedef enum dhd_pno_gscan_cmd_cfg { + DHD_PNO_BATCH_SCAN_CFG_ID, + DHD_PNO_GEOFENCE_SCAN_CFG_ID, + DHD_PNO_SIGNIFICANT_SCAN_CFG_ID, + DHD_PNO_SCAN_CFG_ID, + DHD_PNO_GET_CAPABILITIES, + DHD_PNO_GET_BATCH_RESULTS, + DHD_PNO_GET_CHANNEL_LIST +} dhd_pno_gscan_cmd_cfg_t; + +typedef enum dhd_pno_mode { + /* Wi-Fi Legacy PNO Mode */ + DHD_PNO_NONE_MODE = 0, + DHD_PNO_LEGACY_MODE = (1 << (0)), + /* Wi-Fi Android BATCH SCAN Mode */ + DHD_PNO_BATCH_MODE = (1 << (1)), + /* Wi-Fi Android Hotlist SCAN Mode */ + DHD_PNO_HOTLIST_MODE = (1 << (2)), + /* Wi-Fi Google Android SCAN Mode */ + DHD_PNO_GSCAN_MODE = (1 << 
(3)) +} dhd_pno_mode_t; +#else +typedef enum dhd_pno_mode { + /* Wi-Fi Legacy PNO Mode */ + DHD_PNO_NONE_MODE = 0, + DHD_PNO_LEGACY_MODE = (1 << (0)), + /* Wi-Fi Android BATCH SCAN Mode */ + DHD_PNO_BATCH_MODE = (1 << (1)), + /* Wi-Fi Android Hotlist SCAN Mode */ + DHD_PNO_HOTLIST_MODE = (1 << (2)) +} dhd_pno_mode_t; +#endif /* GSCAN_SUPPORT */ +struct dhd_pno_ssid { + bool hidden; + uint32 SSID_len; + uchar SSID[DOT11_MAX_SSID_LEN]; + struct list_head list; +}; +struct dhd_pno_bssid { + struct ether_addr macaddr; + /* Bit4: suppress_lost, Bit3: suppress_found */ + uint16 flags; + struct list_head list; +}; +typedef struct dhd_pno_bestnet_entry { + struct ether_addr BSSID; + uint8 SSID_len; + uint8 SSID[DOT11_MAX_SSID_LEN]; + int8 RSSI; + uint8 channel; + uint32 timestamp; + uint16 rtt0; /* distance_cm based on RTT */ + uint16 rtt1; /* distance_cm based on sample standard deviation */ + unsigned long recorded_time; + struct list_head list; +} dhd_pno_bestnet_entry_t; +#define BESTNET_ENTRY_SIZE (sizeof(dhd_pno_bestnet_entry_t)) + +typedef struct dhd_pno_bestnet_header { + struct dhd_pno_bestnet_header *next; + uint8 reason; + uint32 tot_cnt; + uint32 tot_size; + struct list_head entry_list; +} dhd_pno_best_header_t; +#define BEST_HEADER_SIZE (sizeof(dhd_pno_best_header_t)) + +typedef struct dhd_pno_scan_results { + dhd_pno_best_header_t *bestnetheader; + uint8 cnt_header; + struct list_head list; +} dhd_pno_scan_results_t; +#define SCAN_RESULTS_SIZE (sizeof(dhd_pno_scan_results_t)) + +struct dhd_pno_get_batch_info { + /* info related to get batch */ + char *buf; + bool batch_started; + uint32 tot_scan_cnt; + uint32 expired_tot_scan_cnt; + uint32 top_node_cnt; + uint32 bufsize; + uint32 bytes_written; + int reason; + struct list_head scan_results_list; + struct list_head expired_scan_results_list; +}; +struct dhd_pno_legacy_params { + uint16 scan_fr; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + int pno_repeat; + int pno_freq_expo_max; + int nssid; + struct 
list_head ssid_list; +}; +struct dhd_pno_batch_params { + int32 scan_fr; + uint8 bestn; + uint8 mscan; + uint8 band; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + uint16 rtt; + struct dhd_pno_get_batch_info get_batch; +}; +struct dhd_pno_hotlist_params { + uint8 band; + int32 scan_fr; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + uint16 nbssid; + struct list_head bssid_list; +}; +#ifdef GSCAN_SUPPORT +typedef struct dhd_pno_gscan_channel_bucket { + uint16 bucket_freq_multiple; + /* band = 1 All bg band channels, + * band = 2 All a band channels, + * band = 0 chan_list channels + */ + uint16 band; + uint8 report_flag; + uint8 num_channels; + uint16 chan_list[GSCAN_MAX_CH_BUCKETS]; +} dhd_pno_gscan_channel_bucket_t; + +typedef struct dhd_pno_swc_evt_param { + uint16 results_rxed_so_far; + wl_pfn_significant_net_t *change_array; +} dhd_pno_swc_evt_param_t; + +typedef struct wifi_gscan_result { + uint64 ts; /* Time of discovery */ + char ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated */ + struct ether_addr macaddr; /* BSSID */ + uint32 channel; /* channel frequency in MHz */ + int32 rssi; /* in db */ + uint64 rtt; /* in nanoseconds */ + uint64 rtt_sd; /* standard deviation in rtt */ + uint16 beacon_period; /* units are Kusec */ + uint16 capability; /* Capability information */ + uint32 ie_length; /* byte length of Information Elements */ + char ie_data[1]; /* IE data to follow */ +} wifi_gscan_result_t; + +typedef struct gscan_results_cache { + struct gscan_results_cache *next; + uint8 scan_id; + uint8 flag; + uint8 tot_count; + uint8 tot_consumed; + wifi_gscan_result_t results[1]; +} gscan_results_cache_t; + +typedef struct dhd_pno_gscan_capabilities { + int max_scan_cache_size; + int max_scan_buckets; + int max_ap_cache_per_scan; + int max_rssi_sample_size; + int max_scan_reporting_threshold; + int max_hotlist_aps; + int max_significant_wifi_change_aps; +} dhd_pno_gscan_capabilities_t; + +struct dhd_pno_gscan_params { + int32 scan_fr; + uint8 bestn; + 
uint8 mscan; + uint8 buffer_threshold; + uint8 swc_nbssid_threshold; + uint8 swc_rssi_window_size; + uint8 lost_ap_window; + uint8 nchannel_buckets; + uint8 reason; + uint8 get_batch_flag; + uint8 send_all_results_flag; + uint16 max_ch_bucket_freq; + gscan_results_cache_t *gscan_batch_cache; + gscan_results_cache_t *gscan_hotlist_found; + gscan_results_cache_t *gscan_hotlist_lost; + uint16 nbssid_significant_change; + uint16 nbssid_hotlist; + struct dhd_pno_swc_evt_param param_significant; + struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS]; + struct list_head hotlist_bssid_list; + struct list_head significant_bssid_list; +}; + +typedef struct gscan_scan_params { + int32 scan_fr; + uint16 nchannel_buckets; + struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS]; +} gscan_scan_params_t; + +typedef struct gscan_batch_params { + uint8 bestn; + uint8 mscan; + uint8 buffer_threshold; +} gscan_batch_params_t; + +struct bssid_t { + struct ether_addr macaddr; + int16 rssi_reporting_threshold; /* 0 -> no reporting threshold */ +}; + +typedef struct gscan_hotlist_scan_params { + uint16 lost_ap_window; /* number of scans to declare LOST */ + uint16 nbssid; /* number of bssids */ + struct bssid_t bssid[1]; /* n bssids to follow */ +} gscan_hotlist_scan_params_t; + +/* SWC (Significant WiFi Change) params */ +typedef struct gscan_swc_params { + /* Rssi averaging window size */ + uint8 rssi_window; + /* Number of scans that the AP has to be absent before + * being declared LOST + */ + uint8 lost_ap_window; + /* if x Aps have a significant change generate an event. 
*/ + uint8 swc_threshold; + uint8 nbssid; + wl_pfn_significant_bssid_t bssid_elem_list[1]; +} gscan_swc_params_t; + +typedef struct dhd_pno_significant_bssid { + struct ether_addr BSSID; + int8 rssi_low_threshold; + int8 rssi_high_threshold; + struct list_head list; +} dhd_pno_significant_bssid_t; +#endif /* GSCAN_SUPPORT */ +typedef union dhd_pno_params { + struct dhd_pno_legacy_params params_legacy; + struct dhd_pno_batch_params params_batch; + struct dhd_pno_hotlist_params params_hotlist; +#ifdef GSCAN_SUPPORT + struct dhd_pno_gscan_params params_gscan; +#endif /* GSCAN_SUPPORT */ +} dhd_pno_params_t; +typedef struct dhd_pno_status_info { + uint8 pno_oui[DOT11_OUI_LEN]; + dhd_pub_t *dhd; + struct work_struct work; + struct mutex pno_mutex; +#ifdef GSCAN_SUPPORT + wait_queue_head_t batch_get_wait; +#endif /* GSCAN_SUPPORT */ + struct completion get_batch_done; + bool wls_supported; /* wifi location service supported or not */ + enum dhd_pno_status pno_status; + enum dhd_pno_mode pno_mode; + dhd_pno_params_t pno_params_arr[INDEX_MODE_MAX]; + struct list_head head_list; +} dhd_pno_status_info_t; + +/* wrapper functions */ +extern int +dhd_dev_pno_enable(struct net_device *dev, int enable); + +extern int +dhd_dev_pno_stop_for_ssid(struct net_device *dev); + +extern int +dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan); + +extern int +dhd_dev_pno_set_for_batch(struct net_device *dev, + struct dhd_pno_batch_params *batch_params); + +extern int +dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize); + +extern int +dhd_dev_pno_stop_for_batch(struct net_device *dev); + +extern int +dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params); +extern int dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui); +#ifdef GSCAN_SUPPORT +extern int 
+dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *buf, uint8 flush); +extern void * +dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, void *info, + uint32 *len); +void dhd_dev_pno_lock_access_batch_results(struct net_device *dev); +void dhd_dev_pno_unlock_access_batch_results(struct net_device *dev); +extern int dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush); +extern int dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time); +extern void * dhd_dev_swc_scan_event(struct net_device *dev, const void *data, + int *send_evt_bytes); +int dhd_retreive_batch_scan_results(dhd_pub_t *dhd); +extern void * dhd_dev_hotlist_scan_event(struct net_device *dev, + const void *data, int *send_evt_bytes, hotlist_type_t type); +void * dhd_dev_process_full_gscan_result(struct net_device *dev, + const void *data, int *send_evt_bytes); +extern int dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev); +extern void dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type); +extern void dhd_dev_wait_batch_results_complete(struct net_device *dev); +#endif /* GSCAN_SUPPORT */ +/* dhd pno fuctions */ +extern int dhd_pno_stop_for_ssid(dhd_pub_t *dhd); +extern int dhd_pno_enable(dhd_pub_t *dhd, int enable); +extern int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan); + +extern int dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params); + +extern int dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason); + + +extern int dhd_pno_stop_for_batch(dhd_pub_t *dhd); + +extern int dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params); + +extern int dhd_pno_stop_for_hotlist(dhd_pub_t *dhd); + +extern int dhd_pno_event_handler(dhd_pub_t *dhd, 
wl_event_msg_t *event, void *event_data); +extern int dhd_pno_init(dhd_pub_t *dhd); +extern int dhd_pno_deinit(dhd_pub_t *dhd); +extern bool dhd_is_pno_supported(dhd_pub_t *dhd); +extern int dhd_pno_set_mac_oui(dhd_pub_t *dhd, uint8 *oui); +#ifdef GSCAN_SUPPORT +extern int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *buf, uint8 flush); +extern void * dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *info, + uint32 *len); +extern void dhd_pno_lock_batch_results(dhd_pub_t *dhd); +extern void dhd_pno_unlock_batch_results(dhd_pub_t *dhd); +extern int dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush); +extern int dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag); +extern int dhd_pno_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *buf); +extern int dhd_dev_retrieve_batch_scan(struct net_device *dev); +extern void *dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes); +extern void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, + int *send_evt_bytes, hotlist_type_t type); +extern void *dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *event_data, + int *send_evt_bytes); +extern int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd); +extern void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type); +extern void dhd_wait_batch_results_complete(dhd_pub_t *dhd); +#endif /* GSCAN_SUPPORT */ +#endif + +#endif /* __DHD_PNO_H__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_proto.h b/drivers/amlogic/wifi/bcmdhd/dhd_proto.h similarity index 65% rename from drivers/net/wireless/bcmdhd/dhd_proto.h rename to drivers/amlogic/wifi/bcmdhd/dhd_proto.h index f0c7dd970efbc..6dcb56328140e 100644 --- a/drivers/net/wireless/bcmdhd/dhd_proto.h +++ b/drivers/amlogic/wifi/bcmdhd/dhd_proto.h @@ -4,9 +4,30 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd_proto.h 499674 2014-08-29 21:56:23Z $ + * + * <> + * + * $Id: dhd_proto.h 604483 2015-12-07 14:47:36Z $ */ #ifndef _dhd_proto_h_ @@ -28,6 +49,17 @@ #define MFG_IOCTL_RESP_TIMEOUT 20000 /* In milli second default value for MFG FW */ #endif /* MFG_IOCTL_RESP_TIMEOUT */ +#define DEFAULT_D3_ACK_RESP_TIMEOUT 4000 +#ifndef D3_ACK_RESP_TIMEOUT +#define D3_ACK_RESP_TIMEOUT DEFAULT_D3_ACK_RESP_TIMEOUT +#endif /* D3_ACK_RESP_TIMEOUT */ + +#define DEFAULT_DHD_BUS_BUSY_TIMEOUT (IOCTL_RESP_TIMEOUT + 1000) +#ifndef DHD_BUS_BUSY_TIMEOUT +#define DHD_BUS_BUSY_TIMEOUT DEFAULT_DHD_BUS_BUSY_TIMEOUT +#endif /* DEFAULT_DHD_BUS_BUSY_TIMEOUT */ + +#define IOCTL_DISABLE_TIMEOUT 0 /* * Exported from the dhd protocol module (dhd_cdc, dhd_rndis) */ @@ -36,7 +68,8 @@ extern int dhd_prot_attach(dhd_pub_t *dhdp); /* Initilizes the index block for dma'ing indices */ -extern int dhd_prot_init_index_dma_block(dhd_pub_t *dhdp, uint8 type, uint32 length); +extern int dhd_prot_dma_indx_init(dhd_pub_t *dhdp, uint32 rw_index_sz, + uint8 type, uint32 length); /* Unlink, frees allocated protocol memory (including dhd_prot) */ extern void dhd_prot_detach(dhd_pub_t *dhdp); @@ -95,24 +128,33 @@ extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset); extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx); extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay); +extern void dhd_dma_buf_init(dhd_pub_t *dhd, void *dma_buf, + void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma); +extern void dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, + uint16 flowid, void *msgbuf_ring); extern int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); -extern void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info); extern int dhd_post_tx_ring_item(dhd_pub_t *dhd, void *PKTBUF, uint8 ifindex); extern int dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); extern int 
dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); extern int dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b); +extern uint32 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val); +extern uint32 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd); extern uint32 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx); extern uint32 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx); extern void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, - struct bcmstrbuf *strbuf); + struct bcmstrbuf *strbuf, const char * fmt); extern void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf); extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *msgring_info); extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id, bool in_lock); extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val); -extern void dhd_prot_clear(dhd_pub_t *dhd); - +extern void dhd_prot_reset(dhd_pub_t *dhd); +#ifdef DHD_LB +extern void dhd_lb_tx_compl_handler(unsigned long data); +extern void dhd_lb_rx_compl_handler(unsigned long data); +extern void dhd_lb_rx_process_handler(unsigned long data); +#endif /* DHD_LB */ +void dhd_prot_collect_memdump(dhd_pub_t *dhd); #endif /* BCMPCIE */ - /******************************** * For version-string expansion * */ diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_rtt.c b/drivers/amlogic/wifi/bcmdhd/dhd_rtt.c new file mode 100644 index 0000000000000..cc0ebb2ecd2d7 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_rtt.c @@ -0,0 +1,731 @@ +/* + * Broadcom Dongle Host Driver (DHD), RTT + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such 
license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_rtt.c 606280 2015-12-15 05:28:25Z $ + */ +#ifdef RTT_SUPPORT +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state) +static DEFINE_SPINLOCK(noti_list_lock); +#define NULL_CHECK(p, s, err) \ + do { \ + if (!(p)) { \ + printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ + err = BCME_ERROR; \ + return err; \ + } \ + } while (0) + +#define RTT_TWO_SIDED(capability) \ + do { \ + if ((capability & RTT_CAP_ONE_WAY) == (uint8) (RTT_CAP_ONE_WAY)) \ + return FALSE; \ + else \ + return TRUE; \ + } while (0) +#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \ + (ts).tv_nsec / NSEC_PER_USEC) +struct rtt_noti_callback { + struct list_head list; + void *ctx; + dhd_rtt_compl_noti_fn noti_fn; +}; + +typedef struct rtt_status_info { + dhd_pub_t *dhd; + int8 status; /* current status for the current entry */ + int8 cur_idx; /* current entry to do RTT */ + int32 capability; /* rtt capability */ + struct mutex rtt_mutex; + rtt_config_params_t rtt_config; + struct work_struct work; + struct list_head noti_fn_list; + struct 
list_head rtt_results_cache; /* store results for RTT */ +} rtt_status_info_t; + +static int dhd_rtt_start(dhd_pub_t *dhd); + +chanspec_t +dhd_rtt_convert_to_chspec(wifi_channel_info_t channel) +{ + int bw; + /* set witdh to 20MHZ for 2.4G HZ */ + if (channel.center_freq >= 2400 && channel.center_freq <= 2500) { + channel.width = WIFI_CHAN_WIDTH_20; + } + switch (channel.width) { + case WIFI_CHAN_WIDTH_20: + bw = WL_CHANSPEC_BW_20; + break; + case WIFI_CHAN_WIDTH_40: + bw = WL_CHANSPEC_BW_40; + break; + case WIFI_CHAN_WIDTH_80: + bw = WL_CHANSPEC_BW_80; + break; + case WIFI_CHAN_WIDTH_160: + bw = WL_CHANSPEC_BW_160; + break; + default: + DHD_ERROR(("doesn't support this bandwith : %d", channel.width)); + bw = -1; + break; + } + return wf_channel2chspec(wf_mhz2channel(channel.center_freq, 0), bw); +} + +int +dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params) +{ + int err = BCME_OK; + int idx; + rtt_status_info_t *rtt_status; + NULL_CHECK(params, "params is NULL", err); + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + if (rtt_status->capability == RTT_CAP_NONE) { + DHD_ERROR(("doesn't support RTT \n")); + return BCME_ERROR; + } + if (rtt_status->status == RTT_STARTED) { + DHD_ERROR(("rtt is already started\n")); + return BCME_BUSY; + } + DHD_RTT(("%s enter\n", __FUNCTION__)); + bcopy(params, &rtt_status->rtt_config, sizeof(rtt_config_params_t)); + rtt_status->status = RTT_STARTED; + /* start to measure RTT from 1th device */ + /* find next target to trigger RTT */ + for (idx = rtt_status->cur_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { + /* skip the disabled device */ + if (rtt_status->rtt_config.target_info[idx].disable) { + continue; + } else { + /* set the idx to cur_idx */ + rtt_status->cur_idx = idx; + break; + } + } + if (idx < rtt_status->rtt_config.rtt_target_cnt) { + DHD_RTT(("rtt_status->cur_idx : %d\n", rtt_status->cur_idx)); + 
schedule_work(&rtt_status->work); + } + return err; +} + +int +dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt) +{ + int err = BCME_OK; + int i = 0, j = 0; + rtt_status_info_t *rtt_status; + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + if (rtt_status->status == RTT_STOPPED) { + DHD_ERROR(("rtt is not started\n")); + return BCME_OK; + } + DHD_RTT(("%s enter\n", __FUNCTION__)); + mutex_lock(&rtt_status->rtt_mutex); + for (i = 0; i < mac_cnt; i++) { + for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) { + if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr, + ETHER_ADDR_LEN)) { + rtt_status->rtt_config.target_info[j].disable = TRUE; + } + } + } + mutex_unlock(&rtt_status->rtt_mutex); + return err; +} + +static int +dhd_rtt_start(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int mpc = 0; + int nss, mcs, bw; + uint32 rspec = 0; + int8 eabuf[ETHER_ADDR_STR_LEN]; + int8 chanbuf[CHANSPEC_STR_LEN]; + bool set_mpc = FALSE; + wl_proxd_iovar_t proxd_iovar; + wl_proxd_params_iovar_t proxd_params; + wl_proxd_params_iovar_t proxd_tune; + wl_proxd_params_tof_method_t *tof_params = &proxd_params.u.tof_params; + rtt_status_info_t *rtt_status; + rtt_target_info_t *rtt_target; + NULL_CHECK(dhd, "dhd is NULL", err); + + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + /* turn off mpc in case of non-associted */ + if (!dhd_is_associated(dhd, 0, NULL)) { + err = dhd_iovar(dhd, 0, "mpc", (char *)&mpc, sizeof(mpc), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set proxd_tune\n", __FUNCTION__)); + goto exit; + } + set_mpc = TRUE; + } + + if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) { + err = BCME_RANGE; + goto exit; + } + DHD_RTT(("%s enter\n", __FUNCTION__)); + bzero(&proxd_tune, sizeof(proxd_tune)); + bzero(&proxd_params, sizeof(proxd_params)); + mutex_lock(&rtt_status->rtt_mutex); + /* Get a target 
information */ + rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; + mutex_unlock(&rtt_status->rtt_mutex); + /* set role */ + proxd_iovar.method = PROXD_TOF_METHOD; + proxd_iovar.mode = WL_PROXD_MODE_INITIATOR; + + /* make sure that proxd is stop */ + /* dhd_iovar(dhd, 0, "proxd_stop", (char *)NULL, 0, 1); */ + + err = dhd_iovar(dhd, 0, "proxd", (char *)&proxd_iovar, sizeof(proxd_iovar), 1); + if (err < 0 && err != BCME_BUSY) { + DHD_ERROR(("%s : failed to set proxd %d\n", __FUNCTION__, err)); + goto exit; + } + if (err == BCME_BUSY) { + DHD_RTT(("BCME_BUSY occurred\n")); + } + /* mac address */ + bcopy(&rtt_target->addr, &tof_params->tgt_mac, ETHER_ADDR_LEN); + /* frame count */ + if (rtt_target->ftm_cnt > RTT_MAX_FRAME_CNT) { + rtt_target->ftm_cnt = RTT_MAX_FRAME_CNT; + } + + if (rtt_target->ftm_cnt) { + tof_params->ftm_cnt = htol16(rtt_target->ftm_cnt); + } else { + tof_params->ftm_cnt = htol16(DEFAULT_FTM_CNT); + } + + if (rtt_target->retry_cnt > RTT_MAX_RETRY_CNT) { + rtt_target->retry_cnt = RTT_MAX_RETRY_CNT; + } + + /* retry count */ + if (rtt_target->retry_cnt) { + tof_params->retry_cnt = htol16(rtt_target->retry_cnt); + } else { + tof_params->retry_cnt = htol16(DEFAULT_RETRY_CNT); + } + + /* chanspec */ + tof_params->chanspec = htol16(rtt_target->chanspec); + /* set parameter */ + DHD_RTT(("Target addr(Idx %d) %s, Channel : %s for RTT (ftm_cnt %d, rety_cnt : %d)\n", + rtt_status->cur_idx, + bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr, eabuf), + wf_chspec_ntoa(rtt_target->chanspec, chanbuf), rtt_target->ftm_cnt, + rtt_target->retry_cnt)); + + if (rtt_target->type == RTT_ONE_WAY) { + proxd_tune.u.tof_tune.flags = htol32(WL_PROXD_FLAG_ONEWAY); + /* report RTT results for initiator */ + proxd_tune.u.tof_tune.flags |= htol32(WL_PROXD_FLAG_INITIATOR_RPTRTT); + proxd_tune.u.tof_tune.vhtack = 0; + tof_params->tx_rate = htol16(WL_RATE_6M); + tof_params->vht_rate = htol16((WL_RATE_6M >> 16)); + } else { /* RTT TWO WAY */ + /* 
initiator will send the rtt result to the target */ + proxd_tune.u.tof_tune.flags = htol32(WL_PROXD_FLAG_INITIATOR_REPORT); + tof_params->timeout = 10; /* 10ms for timeout */ + rspec = WL_RSPEC_ENCODE_VHT; /* 11ac VHT */ + nss = 1; /* default Nss = 1 */ + mcs = 0; /* default MCS 0 */ + rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs; + bw = 0; + switch (CHSPEC_BW(rtt_target->chanspec)) { + case WL_CHANSPEC_BW_20: + bw = WL_RSPEC_BW_20MHZ; + break; + case WL_CHANSPEC_BW_40: + bw = WL_RSPEC_BW_40MHZ; + break; + case WL_CHANSPEC_BW_80: + bw = WL_RSPEC_BW_80MHZ; + break; + case WL_CHANSPEC_BW_160: + bw = WL_RSPEC_BW_160MHZ; + break; + default: + DHD_ERROR(("CHSPEC_BW not supported : %d", + CHSPEC_BW(rtt_target->chanspec))); + goto exit; + } + rspec |= bw; + tof_params->tx_rate = htol16(rspec & 0xffff); + tof_params->vht_rate = htol16(rspec >> 16); + } + + /* Set Method to TOF */ + proxd_tune.method = PROXD_TOF_METHOD; + err = dhd_iovar(dhd, 0, "proxd_tune", (char *)&proxd_tune, sizeof(proxd_tune), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set proxd_tune %d\n", __FUNCTION__, err)); + goto exit; + } + + /* Set Method to TOF */ + proxd_params.method = PROXD_TOF_METHOD; + err = dhd_iovar(dhd, 0, "proxd_params", (char *)&proxd_params, sizeof(proxd_params), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set proxd_params %d\n", __FUNCTION__, err)); + goto exit; + } + err = dhd_iovar(dhd, 0, "proxd_find", (char *)NULL, 0, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set proxd_find %d\n", __FUNCTION__, err)); + goto exit; + } +exit: + if (err < 0) { + rtt_status->status = RTT_STOPPED; + if (set_mpc) { + /* enable mpc again in case of error */ + mpc = 1; + err = dhd_iovar(dhd, 0, "mpc", (char *)&mpc, sizeof(mpc), 1); + } + } + return err; +} + +int +dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn) +{ + int err = BCME_OK; + struct rtt_noti_callback *cb = NULL, *iter; + rtt_status_info_t *rtt_status; + NULL_CHECK(dhd, "dhd is 
NULL", err); + NULL_CHECK(noti_fn, "noti_fn is NULL", err); + + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + spin_lock_bh(¬i_list_lock); + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + if (iter->noti_fn == noti_fn) { + goto exit; + } + } + cb = kmalloc(sizeof(struct rtt_noti_callback), GFP_ATOMIC); + if (!cb) { + err = -ENOMEM; + goto exit; + } + cb->noti_fn = noti_fn; + cb->ctx = ctx; + list_add(&cb->list, &rtt_status->noti_fn_list); +exit: + spin_unlock_bh(¬i_list_lock); + return err; +} + +int +dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn) +{ + int err = BCME_OK; + struct rtt_noti_callback *cb = NULL, *iter; + rtt_status_info_t *rtt_status; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(noti_fn, "noti_fn is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + spin_lock_bh(¬i_list_lock); + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + if (iter->noti_fn == noti_fn) { + cb = iter; + list_del(&cb->list); + break; + } + } + spin_unlock_bh(¬i_list_lock); + if (cb) { + kfree(cb); + } + return err; +} + +static int +dhd_rtt_convert_to_host(rtt_result_t *rtt_results, const wl_proxd_event_data_t* evp) +{ + int err = BCME_OK; + int i; + char eabuf[ETHER_ADDR_STR_LEN]; + char diststr[40]; + struct timespec ts; + NULL_CHECK(rtt_results, "rtt_results is NULL", err); + NULL_CHECK(evp, "evp is NULL", err); + DHD_RTT(("%s enter\n", __FUNCTION__)); + rtt_results->distance = ntoh32(evp->distance); + rtt_results->sdrtt = ntoh32(evp->sdrtt); + rtt_results->ftm_cnt = ntoh16(evp->ftm_cnt); + rtt_results->avg_rssi = ntoh16(evp->avg_rssi); + rtt_results->validfrmcnt = ntoh16(evp->validfrmcnt); + rtt_results->meanrtt = ntoh32(evp->meanrtt); + rtt_results->modertt = ntoh32(evp->modertt); + rtt_results->medianrtt = ntoh32(evp->medianrtt); + rtt_results->err_code = evp->err_code; + rtt_results->tx_rate.preamble = 
(evp->OFDM_frame_type == TOF_FRAME_RATE_VHT)? 3 : 0; + rtt_results->tx_rate.nss = 0; /* 1 x 1 */ + rtt_results->tx_rate.bw = + (evp->bandwidth == TOF_BW_80MHZ)? 2 : (evp->bandwidth == TOF_BW_40MHZ)? 1 : 0; + rtt_results->TOF_type = evp->TOF_type; + if (evp->TOF_type == TOF_TYPE_ONE_WAY) { + /* convert to 100kbps unit */ + rtt_results->tx_rate.bitrate = WL_RATE_6M * 5; + rtt_results->tx_rate.rateMcsIdx = WL_RATE_6M; + } else { + rtt_results->tx_rate.bitrate = WL_RATE_6M * 5; + rtt_results->tx_rate.rateMcsIdx = 0; /* MCS 0 */ + } + memset(diststr, 0, sizeof(diststr)); + if (rtt_results->distance == 0xffffffff || rtt_results->distance == 0) { + sprintf(diststr, "distance=-1m\n"); + } else { + sprintf(diststr, "distance=%d.%d m\n", + rtt_results->distance >> 4, ((rtt_results->distance & 0xf) * 125) >> 1); + } + + if (ntoh32(evp->mode) == WL_PROXD_MODE_INITIATOR) { + DHD_RTT(("Target:(%s) %s;\n", bcm_ether_ntoa((&evp->peer_mac), eabuf), diststr)); + DHD_RTT(("RTT : mean %d mode %d median %d\n", rtt_results->meanrtt, + rtt_results->modertt, rtt_results->medianrtt)); + } else { + DHD_RTT(("Initiator:(%s) %s; ", bcm_ether_ntoa((&evp->peer_mac), eabuf), diststr)); + } + if (rtt_results->sdrtt > 0) { + DHD_RTT(("sigma:%d.%d\n", rtt_results->sdrtt/10, rtt_results->sdrtt % 10)); + } else { + DHD_RTT(("sigma:0\n")); + } + + DHD_RTT(("rssi:%d validfrmcnt %d, err_code : %d\n", rtt_results->avg_rssi, + rtt_results->validfrmcnt, evp->err_code)); + + switch (evp->err_code) { + case TOF_REASON_OK: + rtt_results->err_code = RTT_REASON_SUCCESS; + break; + case TOF_REASON_TIMEOUT: + rtt_results->err_code = RTT_REASON_TIMEOUT; + break; + case TOF_REASON_NOACK: + rtt_results->err_code = RTT_REASON_NO_RSP; + break; + case TOF_REASON_ABORT: + rtt_results->err_code = RTT_REASON_ABORT; + break; + default: + rtt_results->err_code = RTT_REASON_FAILURE; + break; + } + rtt_results->peer_mac = evp->peer_mac; + /* get the time elapsed from boot time */ + get_monotonic_boottime(&ts); + 
rtt_results->ts = (uint64) TIMESPEC_TO_US(ts); + + for (i = 0; i < rtt_results->ftm_cnt; i++) { + rtt_results->ftm_buff[i].value = ltoh32(evp->ftm_buff[i].value); + rtt_results->ftm_buff[i].rssi = ltoh32(evp->ftm_buff[i].rssi); + } + return err; +} + +int +dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) +{ + int err = BCME_OK; + int len = 0; + int idx; + uint status, event_type, flags, reason, ftm_cnt; + rtt_status_info_t *rtt_status; + wl_proxd_event_data_t* evp; + struct rtt_noti_callback *iter; + rtt_result_t *rtt_result, *entry, *next; + gfp_t kflags; + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + event_type = ntoh32_ua((void *)&event->event_type); + flags = ntoh16_ua((void *)&event->flags); + status = ntoh32_ua((void *)&event->status); + reason = ntoh32_ua((void *)&event->reason); + + if (event_type != WLC_E_PROXD) { + goto exit; + } + kflags = in_softirq()? GFP_ATOMIC : GFP_KERNEL; + evp = (wl_proxd_event_data_t*)event_data; + DHD_RTT(("%s enter : mode: %s, reason :%d \n", __FUNCTION__, + (ntoh16(evp->mode) == WL_PROXD_MODE_INITIATOR)? 
+ "initiator":"target", reason)); + switch (reason) { + case WLC_E_PROXD_STOP: + DHD_RTT(("WLC_E_PROXD_STOP\n")); + break; + case WLC_E_PROXD_ERROR: + case WLC_E_PROXD_COMPLETED: + if (reason == WLC_E_PROXD_ERROR) { + DHD_RTT(("WLC_E_PROXD_ERROR\n")); + } else { + DHD_RTT(("WLC_E_PROXD_COMPLETED\n")); + } + + if (!in_atomic()) { + mutex_lock(&rtt_status->rtt_mutex); + } + ftm_cnt = ntoh16(evp->ftm_cnt); + + if (ftm_cnt > 0) { + len = OFFSETOF(rtt_result_t, ftm_buff); + } else { + len = sizeof(rtt_result_t); + } + /* check whether the results is already reported or not */ + list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) { + if (!memcmp(&entry->peer_mac, &evp->peer_mac, ETHER_ADDR_LEN)) { + if (!in_atomic()) { + mutex_unlock(&rtt_status->rtt_mutex); + } + goto exit; + } + } + rtt_result = kzalloc(len + sizeof(ftm_sample_t) * ftm_cnt, kflags); + if (!rtt_result) { + if (!in_atomic()) { + mutex_unlock(&rtt_status->rtt_mutex); + } + err = -ENOMEM; + goto exit; + } + /* point to target_info in status struct and increase pointer */ + rtt_result->target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; + /* find next target to trigger RTT */ + for (idx = (rtt_status->cur_idx + 1); + idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { + /* skip the disabled device */ + if (rtt_status->rtt_config.target_info[idx].disable) { + continue; + } else { + /* set the idx to cur_idx */ + rtt_status->cur_idx = idx; + break; + } + } + /* convert the event results to host format */ + dhd_rtt_convert_to_host(rtt_result, evp); + list_add_tail(&rtt_result->list, &rtt_status->rtt_results_cache); + if (idx < rtt_status->rtt_config.rtt_target_cnt) { + /* restart to measure RTT from next device */ + schedule_work(&rtt_status->work); + } else { + DHD_RTT(("RTT_STOPPED\n")); + rtt_status->status = RTT_STOPPED; + /* to turn on mpc mode */ + schedule_work(&rtt_status->work); + /* notify the completed information to others */ + list_for_each_entry(iter, 
&rtt_status->noti_fn_list, list) { + iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache); + } + /* remove the rtt results in cache */ + list_for_each_entry_safe(rtt_result, next, + &rtt_status->rtt_results_cache, list) { + list_del(&rtt_result->list); + kfree(rtt_result); + } + /* reinit the HEAD */ + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + /* clear information for rtt_config */ + bzero(&rtt_status->rtt_config, sizeof(rtt_status->rtt_config)); + rtt_status->cur_idx = 0; + } + if (!in_atomic()) { + mutex_unlock(&rtt_status->rtt_mutex); + } + + break; + case WLC_E_PROXD_GONE: + DHD_RTT(("WLC_E_PROXD_GONE\n")); + break; + case WLC_E_PROXD_START: + /* event for targets / accesspoints */ + DHD_RTT(("WLC_E_PROXD_START\n")); + break; + case WLC_E_PROXD_COLLECT_START: + DHD_RTT(("WLC_E_PROXD_COLLECT_START\n")); + break; + case WLC_E_PROXD_COLLECT_STOP: + DHD_RTT(("WLC_E_PROXD_COLLECT_STOP\n")); + break; + case WLC_E_PROXD_COLLECT_COMPLETED: + DHD_RTT(("WLC_E_PROXD_COLLECT_COMPLETED\n")); + break; + case WLC_E_PROXD_COLLECT_ERROR: + DHD_RTT(("WLC_E_PROXD_COLLECT_ERROR; ")); + break; + default: + DHD_ERROR(("WLC_E_PROXD: supported EVENT reason code:%d\n", reason)); + break; + } + +exit: + return err; +} + +static void +dhd_rtt_work(struct work_struct *work) +{ + rtt_status_info_t *rtt_status; + dhd_pub_t *dhd; + rtt_status = container_of(work, rtt_status_info_t, work); + if (rtt_status == NULL) { + DHD_ERROR(("%s : rtt_status is NULL\n", __FUNCTION__)); + return; + } + dhd = rtt_status->dhd; + if (dhd == NULL) { + DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); + return; + } + (void) dhd_rtt_start(dhd); +} + +int +dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa) +{ + rtt_status_info_t *rtt_status; + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + NULL_CHECK(capa, "capa is NULL", err); + bzero(capa, sizeof(rtt_capabilities_t)); + + if 
(rtt_status->capability & RTT_CAP_ONE_WAY) { + capa->rtt_one_sided_supported = 1; + } + if (rtt_status->capability & RTT_CAP_11V_WAY) { + capa->rtt_11v_supported = 1; + } + if (rtt_status->capability & RTT_CAP_11MC_WAY) { + capa->rtt_ftm_supported = 1; + } + if (rtt_status->capability & RTT_CAP_VS_WAY) { + capa->rtt_vs_supported = 1; + } + + return err; +} + +int +dhd_rtt_init(dhd_pub_t *dhd) +{ + int err = BCME_OK; + rtt_status_info_t *rtt_status; + NULL_CHECK(dhd, "dhd is NULL", err); + if (dhd->rtt_state) { + goto exit; + } + dhd->rtt_state = MALLOC(dhd->osh, sizeof(rtt_status_info_t)); + if (dhd->rtt_state == NULL) { + DHD_ERROR(("failed to create rtt_state\n")); + goto exit; + } + bzero(dhd->rtt_state, sizeof(rtt_status_info_t)); + rtt_status = GET_RTTSTATE(dhd); + rtt_status->dhd = dhd; + err = dhd_iovar(dhd, 0, "proxd_params", NULL, 0, 1); + if (err != BCME_UNSUPPORTED) { + rtt_status->capability |= RTT_CAP_ONE_WAY; + rtt_status->capability |= RTT_CAP_VS_WAY; + DHD_ERROR(("%s: Support RTT Service\n", __FUNCTION__)); + } + mutex_init(&rtt_status->rtt_mutex); + INIT_LIST_HEAD(&rtt_status->noti_fn_list); + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + INIT_WORK(&rtt_status->work, dhd_rtt_work); +exit: + return err; +} + +int +dhd_rtt_deinit(dhd_pub_t *dhd) +{ + int err = BCME_OK; + rtt_status_info_t *rtt_status; + rtt_result_t *rtt_result, *next; + struct rtt_noti_callback *iter, *iter2; + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + rtt_status->status = RTT_STOPPED; + /* clear evt callback list */ + if (!list_empty(&rtt_status->noti_fn_list)) { + list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) { + list_del(&iter->list); + kfree(iter); + } + } + /* remove the rtt results */ + if (!list_empty(&rtt_status->rtt_results_cache)) { + list_for_each_entry_safe(rtt_result, next, &rtt_status->rtt_results_cache, list) { + list_del(&rtt_result->list); + 
kfree(rtt_result); + } + } + MFREE(dhd->osh, dhd->rtt_state, sizeof(rtt_status_info_t)); + dhd->rtt_state = NULL; + return err; +} +#endif /* RTT_SUPPORT */ diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_rtt.h b/drivers/amlogic/wifi/bcmdhd/dhd_rtt.h new file mode 100644 index 0000000000000..2fbb9c973cd33 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_rtt.h @@ -0,0 +1,234 @@ +/* + * Broadcom Dongle Host Driver (DHD), RTT + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_rtt.h 558438 2015-05-22 06:05:11Z $ + */ +#ifndef __DHD_RTT_H__ +#define __DHD_RTT_H__ + +#include "dngl_stats.h" + +#define RTT_MAX_TARGET_CNT 10 +#define RTT_MAX_FRAME_CNT 25 +#define RTT_MAX_RETRY_CNT 10 +#define DEFAULT_FTM_CNT 6 +#define DEFAULT_RETRY_CNT 6 + + +/* DSSS, CCK and 802.11n rates in [500kbps] units */ +#define WL_MAXRATE 108 /* in 500kbps units */ +#define WL_RATE_1M 2 /* in 500kbps units */ +#define WL_RATE_2M 4 /* in 500kbps units */ +#define WL_RATE_5M5 11 /* in 500kbps units */ +#define WL_RATE_11M 22 /* in 500kbps units */ +#define WL_RATE_6M 12 /* in 500kbps units */ +#define WL_RATE_9M 18 /* in 500kbps units */ +#define WL_RATE_12M 24 /* in 500kbps units */ +#define WL_RATE_18M 36 /* in 500kbps units */ +#define WL_RATE_24M 48 /* in 500kbps units */ +#define WL_RATE_36M 72 /* in 500kbps units */ +#define WL_RATE_48M 96 /* in 500kbps units */ +#define WL_RATE_54M 108 /* in 500kbps units */ + + +enum rtt_role { + RTT_INITIATOR = 0, + RTT_TARGET = 1 +}; +enum rtt_status { + RTT_STOPPED = 0, + RTT_STARTED = 1 +}; +typedef int64_t wifi_timestamp; /* In microseconds (us) */ +typedef int64_t wifi_timespan; +typedef int wifi_rssi; + +typedef enum { + RTT_INVALID, + RTT_ONE_WAY, + RTT_TWO_WAY, + RTT_AUTO +} rtt_type_t; + +typedef enum { + RTT_PEER_STA, + RTT_PEER_AP, + RTT_PEER_P2P, + RTT_PEER_NAN, + RTT_PEER_INVALID +} rtt_peer_type_t; + +typedef enum rtt_reason { + RTT_REASON_SUCCESS, + RTT_REASON_FAILURE, + RTT_REASON_NO_RSP, + RTT_REASON_REJECTED, + RTT_REASON_NOT_SCHEDULED_YET, + RTT_REASON_TIMEOUT, + RTT_REASON_AP_ON_DIFF_CH, + RTT_REASON_AP_NO_CAP, + RTT_REASON_ABORT +} rtt_reason_t; + +typedef enum rtt_capability { + RTT_CAP_NONE = 0, + RTT_CAP_ONE_WAY = (1 << (0)), + RTT_CAP_11V_WAY = (1 << (1)), /* IEEE802.11v */ + RTT_CAP_11MC_WAY = (1 << (2)), /* IEEE802.11mc */ + RTT_CAP_VS_WAY = (1 << (3)) /* BRCM vendor specific */ +} rtt_capability_t; + +typedef struct wifi_channel_info { + wifi_channel_width_t width; + wifi_channel 
center_freq; /* primary 20 MHz channel */ + wifi_channel center_freq0; /* center freq (MHz) first segment */ + wifi_channel center_freq1; /* center freq (MHz) second segment valid for 80 + 80 */ +} wifi_channel_info_t; + +typedef struct wifi_rate { + uint32 preamble :3; /* 0: OFDM, 1: CCK, 2 : HT, 3: VHT, 4..7 reserved */ + uint32 nss :2; /* 0 : 1x1, 1: 2x2, 3: 3x3, 4: 4x4 */ + uint32 bw :3; /* 0: 20Mhz, 1: 40Mhz, 2: 80Mhz, 3: 160Mhz */ + /* OFDM/CCK rate code would be as per IEEE std in the unit of 0.5 mb + * HT/VHT it would be mcs index + */ + uint32 rateMcsIdx :8; + uint32 reserved :16; /* reserved */ + uint32 bitrate; /* unit of 100 Kbps */ +} wifi_rate_t; + +typedef struct rtt_target_info { + struct ether_addr addr; + rtt_type_t type; /* rtt_type */ + rtt_peer_type_t peer; /* peer type */ + wifi_channel_info_t channel; /* channel information */ + chanspec_t chanspec; /* chanspec for channel */ + int8 continuous; /* 0 = single shot or 1 = continous raging */ + bool disable; /* disable for RTT measurement */ + uint32 interval; /* interval of RTT measurement (unit ms) when continuous = true */ + uint32 measure_cnt; /* total number of RTT measurement when continuous */ + uint32 ftm_cnt; /* num of packets in each RTT measurement */ + uint32 retry_cnt; /* num of retries if sampling fails */ +} rtt_target_info_t; + +typedef struct rtt_result { + struct list_head list; + uint16 ver; /* version */ + rtt_target_info_t *target_info; /* target info */ + uint16 mode; /* mode: target/initiator */ + uint16 method; /* method: rssi/TOF/AOA */ + uint8 err_code; /* error classification */ + uint8 TOF_type; /* one way or two way TOF */ + wifi_rate_t tx_rate; /* tx rate */ + struct ether_addr peer_mac; /* (e.g for tgt:initiator's */ + int32 distance; /* dst to tgt, units (meter * 16) */ + uint32 meanrtt; /* mean delta */ + uint32 modertt; /* Mode delta */ + uint32 medianrtt; /* median RTT */ + uint32 sdrtt; /* Standard deviation of RTT */ + int16 avg_rssi; /* avg rssi across the 
ftm frames */ + int16 validfrmcnt; /* Firmware's valid frame counts */ + wifi_timestamp ts; /* the time elapsed from boot time when driver get this result */ + uint16 ftm_cnt; /* num of rtd measurments/length in the ftm buffer */ + ftm_sample_t ftm_buff[1]; /* 1 ... ftm_cnt */ +} rtt_result_t; + +typedef struct rtt_report { + struct ether_addr addr; + uint num_measurement; /* measurement number in case of continous raging */ + rtt_reason_t status; /* raging status */ + rtt_type_t type; /* rtt type */ + rtt_peer_type_t peer; /* peer type */ + wifi_channel_info_t channel; /* channel information */ + wifi_rssi rssi; /* avg rssi accroos the ftm frames */ + wifi_rssi rssi_spread; /* rssi spread in 0.5 db steps e.g. 5 implies 2.5 spread */ + wifi_rate_t tx_rate; /* tx rate */ + wifi_timespan rtt; /* round trip time in nanoseconds */ + wifi_timespan rtt_sd; /* rtt standard deviation in nanoseconds */ + wifi_timespan rtt_spread; /* difference between max and min rtt times recorded */ + int32 distance; /* distance in cm (optional) */ + int32 distance_sd; /* standard deviation in cm (optional) */ + int32 distance_spread; /* difference between max and min distance recorded (optional) */ + wifi_timestamp ts; /* time of the measurement (in microseconds since boot) */ +} rtt_report_t; + +/* RTT Capabilities */ +typedef struct rtt_capabilities { + uint8 rtt_one_sided_supported; /* if 1-sided rtt data collection is supported */ + uint8 rtt_11v_supported; /* if 11v rtt data collection is supported */ + uint8 rtt_ftm_supported; /* if ftm rtt data collection is supported */ + uint8 rtt_vs_supported; /* if vendor specific data collection supported */ +} rtt_capabilities_t; + +typedef struct rtt_config_params { + int8 rtt_target_cnt; + rtt_target_info_t target_info[RTT_MAX_TARGET_CNT]; +} rtt_config_params_t; + +typedef void (*dhd_rtt_compl_noti_fn)(void *ctx, void *rtt_data); +/* Linux wrapper to call common dhd_rtt_set_cfg */ +int +dhd_dev_rtt_set_cfg(struct net_device *dev, void 
*buf); + +int +dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt); + +int +dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, + dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa); + +/* export to upper layer */ +chanspec_t +dhd_rtt_convert_to_chspec(wifi_channel_info_t channel); + +int +dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params); + +int +dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt); + + +int +dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data); + +int +dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa); + +int +dhd_rtt_init(dhd_pub_t *dhd); + +int +dhd_rtt_deinit(dhd_pub_t *dhd); +#endif /* __DHD_RTT_H__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/amlogic/wifi/bcmdhd/dhd_sdio.c similarity index 92% rename from drivers/net/wireless/bcmdhd/dhd_sdio.c rename to drivers/amlogic/wifi/bcmdhd/dhd_sdio.c index 02e1f6d3392a8..a5059f9bd21f8 100644 --- a/drivers/net/wireless/bcmdhd/dhd_sdio.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_sdio.c @@ -1,9 +1,30 @@ /* * DHD Bus Module for SDIO * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software 
give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_sdio.c 506046 2014-10-02 12:40:12Z $ + * + * <> + * + * $Id: dhd_sdio.c 593728 2015-10-19 09:20:32Z $ */ #include @@ -23,10 +44,8 @@ #include #include #include -#if defined(DHD_DEBUG) #include #include -#endif /* defined(DHD_DEBUG) */ #include #include @@ -83,7 +102,6 @@ extern bool bcmsdh_fatal_error(void *sdh); #define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */ #define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ -#define MAX_NVRAMBUF_SIZE (16 * 1024) /* max nvram buf size */ #define MAX_DATA_BUF (64 * 1024) /* Must be large enough to hold biggest possible glom */ #ifndef DHD_FIRSTREAD @@ -156,9 +174,8 @@ DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) DEFINE_MUTEX(_dhd_sdio_mutex_lock_); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ -#endif +#endif -#ifdef DHD_DEBUG /* Device console log buffer state */ #define CONSOLE_LINE_MAX 192 #define CONSOLE_BUFFER_MAX 2024 @@ -170,7 +187,6 @@ typedef struct dhd_console { uint8 *buf; /* Log buffer (host copy) */ uint last; /* Last buffer read index */ } dhd_console_t; -#endif /* DHD_DEBUG */ #define REMAP_ENAB(bus) ((bus)->remap) #define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize))) @@ -223,7 +239,6 @@ 
typedef struct dhd_bus { uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */ char *fw_path; /* module_param: path to firmware image */ char *nv_path; /* module_param: path to nvram vars file */ - const char *nvram_params; /* user specified nvram params. */ uint blocksize; /* Block size of SDIO transfers */ uint roundup; /* Max roundup limit */ @@ -280,7 +295,9 @@ typedef struct dhd_bus { int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */ bool use_rxchain; /* If dhd should use PKT chains */ bool sleeping; /* Is SDIO bus sleeping? */ +#if defined(SUPPORT_P2P_GO_PS) wait_queue_head_t bus_sleep; +#endif /* LINUX && SUPPORT_P2P_GO_PS */ uint rxflow_mode; /* Rx flow control mode */ bool rxflow; /* Is rx flow control on */ uint prev_rxlim_hit; /* Is prev rx limit exceeded (per dpc schedule) */ @@ -384,6 +401,10 @@ typedef struct dhd_bus { #define DHD_NOPMU(dhd) (FALSE) +#if defined(BCMSDIOH_STD) +#define BLK_64_MAXTXGLOM 20 +#endif /* BCMSDIOH_STD */ + #ifdef DHD_DEBUG static int qcount[NUMPRIO]; static int tx_packets[NUMPRIO]; @@ -395,6 +416,7 @@ const uint dhd_deferred_tx = 1; extern uint dhd_watchdog_ms; extern void dhd_os_wd_timer(void *bus, uint wdtick); +int dhd_enableOOB(dhd_pub_t *dhd, bool sleep); /* Tx/Rx bounds */ uint dhd_txbound; @@ -567,11 +589,14 @@ static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq); static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count); #endif -#ifdef DHD_DEBUG static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size); +#ifdef DHD_DEBUG static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror); #endif /* DHD_DEBUG */ +#if defined(DHD_FW_COREDUMP) +static int dhdsdio_mem_dump(dhd_bus_t *bus); +#endif /* DHD_FW_COREDUMP */ static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap); static int dhdsdio_download_state(dhd_bus_t *bus, bool enter); @@ -749,13 +774,17 @@ dhdsdio_sr_cap(dhd_bus_t *bus) (bus->sih->chip == BCM4354_CHIP_ID) || (bus->sih->chip == BCM4356_CHIP_ID) || 
(bus->sih->chip == BCM4358_CHIP_ID) || + (bus->sih->chip == BCM43569_CHIP_ID) || (bus->sih->chip == BCM4371_CHIP_ID) || (BCM4349_CHIP(bus->sih->chip)) || - (bus->sih->chip == BCM4350_CHIP_ID)) { + (bus->sih->chip == BCM4350_CHIP_ID) || + (bus->sih->chip == BCM43012_CHIP_ID)) { core_capext = TRUE; } else { - core_capext = bcmsdh_reg_read(bus->sdh, CORE_CAPEXT_ADDR, 4); - core_capext = (core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK); + core_capext = bcmsdh_reg_read(bus->sdh, + si_get_pmu_reg_addr(bus->sih, OFFSETOF(chipcregs_t, core_cap_ext)), + 4); + core_capext = (core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK); } if (!(core_capext)) return FALSE; @@ -771,6 +800,7 @@ dhdsdio_sr_cap(dhd_bus_t *bus) (bus->sih->chip == BCM4354_CHIP_ID) || (bus->sih->chip == BCM4356_CHIP_ID) || (bus->sih->chip == BCM4358_CHIP_ID) || + (bus->sih->chip == BCM43569_CHIP_ID) || (bus->sih->chip == BCM4371_CHIP_ID) || (bus->sih->chip == BCM4350_CHIP_ID)) { uint32 enabval = 0; @@ -785,6 +815,7 @@ dhdsdio_sr_cap(dhd_bus_t *bus) (bus->sih->chip == BCM4354_CHIP_ID) || (bus->sih->chip == BCM4356_CHIP_ID) || (bus->sih->chip == BCM4358_CHIP_ID) || + (bus->sih->chip == BCM43569_CHIP_ID) || (bus->sih->chip == BCM4371_CHIP_ID)) enabval &= CC_CHIPCTRL3_SR_ENG_ENABLE; @@ -792,7 +823,8 @@ dhdsdio_sr_cap(dhd_bus_t *bus) cap = TRUE; } else { data = bcmsdh_reg_read(bus->sdh, - SI_ENUM_BASE + OFFSETOF(chipcregs_t, retention_ctl), 4); + si_get_pmu_reg_addr(bus->sih, OFFSETOF(chipcregs_t, retention_ctl)), + 4); if ((data & (RCTL_MACPHY_DISABLE_MASK | RCTL_LOGIC_DISABLE_MASK)) == 0) cap = TRUE; } @@ -803,9 +835,7 @@ dhdsdio_sr_cap(dhd_bus_t *bus) static int dhdsdio_srwar_init(dhd_bus_t *bus) { -#if !defined(NDISVER) || (NDISVER < 0x0630) bcmsdh_gpio_init(bus->sdh); -#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */ #ifdef USE_OOB_GPIO1 dhdsdio_oobwakeup_init(bus); @@ -824,11 +854,19 @@ dhdsdio_sr_init(dhd_bus_t *bus) if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) dhdsdio_srwar_init(bus); - 
val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); - val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT; - bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, - 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err); - val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); + if (bus->sih->chip == BCM43012_CHIP_ID) { + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); + val |= 1 << SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, + 1 << SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT, &err); + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); + } else { + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); + val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, + 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err); + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); + } #ifdef USE_CMD14 /* Add CMD14 Support */ @@ -838,9 +876,13 @@ dhdsdio_sr_init(dhd_bus_t *bus) dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC); - bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err); - + if (bus->sih->chip == BCM43012_CHIP_ID) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_HT_AVAIL_REQ, &err); + } else { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err); + } bus->_slpauto = dhd_slpauto ? 
TRUE : FALSE; bus->_srenab = TRUE; @@ -880,30 +922,42 @@ dhdsdio_clk_kso_init(dhd_bus_t *bus) #define KSO_WAIT_US 50 #define KSO_WAIT_MS 1 #define KSO_SLEEP_RETRY_COUNT 20 +#define KSO_WAKE_RETRY_COUNT 100 #define ERROR_BCME_NODEVICE_MAX 1 -#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US) +#define DEFAULT_MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US) +#ifndef CUSTOM_MAX_KSO_ATTEMPTS +#define CUSTOM_MAX_KSO_ATTEMPTS DEFAULT_MAX_KSO_ATTEMPTS +#endif + static int dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on) { uint8 wr_val = 0, rd_val, cmp_val, bmask; int err = 0; int try_cnt = 0; - - if (!bus->dhd->conf->kso_enable) - return 0; - + return 0; KSO_DBG(("%s> op:%s\n", __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"))); wr_val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); + + /* In case of 43012 chip, the chip could go down immediately after KSO bit is cleared. + * So the further reads of KSO register could fail. Thereby just bailing out immediately + * after clearing KSO bit, to avoid polling of KSO bit. + */ + if ((!on) && (bus->sih->chip == BCM43012_CHIP_ID)) { + return err; + } + if (on) { cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK | SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK; bmask = cmp_val; OSL_SLEEP(3); + } else { /* Put device to sleep, turn off KSO */ cmp_val = 0; @@ -923,17 +977,18 @@ dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on) OSL_DELAY(KSO_WAIT_US); bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); - } while (try_cnt++ < MAX_KSO_ATTEMPTS); + } while (try_cnt++ < CUSTOM_MAX_KSO_ATTEMPTS); if (try_cnt > 2) KSO_DBG(("%s> op:%s, try_cnt:%d, rd_val:%x, ERR:%x \n", __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err)); - if (try_cnt > MAX_KSO_ATTEMPTS) { + if (try_cnt > CUSTOM_MAX_KSO_ATTEMPTS) { DHD_ERROR(("%s> op:%s, ERROR: try_cnt:%d, rd_val:%x, ERR:%x \n", __FUNCTION__, (on ? 
"KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err)); } + return err; } @@ -1040,12 +1095,12 @@ dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on) #ifdef USE_CMD14 err = bcmsdh_sleep(bus->sdh, TRUE); #else + + err = dhdsdio_clk_kso_enab(bus, FALSE); if (OOB_WAKEUP_ENAB(bus)) { -#if !defined(NDISVER) || (NDISVER < 0x0630) err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE); /* GPIO_1 is off */ -#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */ } #endif /* USE_CMD14 */ } else { @@ -1055,7 +1110,6 @@ dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on) DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__)); dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); } -#if !defined(NDISVER) || (NDISVER < 0x0630) if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) { SPINWAIT_SLEEP(sdioh_spinwait_sleep, @@ -1066,7 +1120,6 @@ dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on) DHD_ERROR(("ERROR: GPIO_DEV_SRSTATE still low!\n")); } } -#endif #ifdef USE_CMD14 err = bcmsdh_sleep(bus->sdh, FALSE); if (SLPAUTO_ENAB(bus) && (err != 0)) { @@ -1097,9 +1150,7 @@ dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on) #else if (OOB_WAKEUP_ENAB(bus)) { -#if !defined(NDISVER) || (NDISVER < 0x0630) err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE); /* GPIO_1 is on */ -#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */ } do { err = dhdsdio_clk_kso_enab(bus, TRUE); @@ -1111,6 +1162,8 @@ dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on) DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry)); err = 0; /* continue anyway */ } + + #endif /* !USE_CMD14 */ if (err == 0) { @@ -1196,6 +1249,7 @@ dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok) #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) else if (ht_avail_error == HT_AVAIL_ERROR_MAX) { + bus->dhd->hang_reason = HANG_REASON_HT_AVAIL_ERROR; dhd_os_send_hang_message(bus->dhd); } #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ @@ -1483,7 +1537,12 @@ dhdsdio_bussleep(dhd_bus_t *bus, bool sleep) /* Going to sleep: set the alarm and 
turn off the lights... */ if (sleep) { /* Don't sleep if something is pending */ +#ifdef DHD_USE_IDLECOUNT + if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq) || bus->readframes || + bus->ctrl_frame_stat) +#else if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq)) +#endif /* DHD_USE_IDLECOUNT */ return BCME_BUSY; @@ -1520,7 +1579,9 @@ dhdsdio_bussleep(dhd_bus_t *bus, bool sleep) /* Change state */ bus->sleeping = TRUE; +#if defined(SUPPORT_P2P_GO_PS) wake_up(&bus->bus_sleep); +#endif /* LINUX && SUPPORT_P2P_GO_PS */ } else { /* Waking up: bus power up is ok, set local state */ @@ -1563,6 +1624,35 @@ dhdsdio_bussleep(dhd_bus_t *bus, bool sleep) return err; } +int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size) +{ + int func_blk_size = function_num; + int bcmerr = 0; + int result; + + bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", &func_blk_size, + sizeof(int), &result, sizeof(int), IOV_GET); + + if (bcmerr != BCME_OK) { + DHD_ERROR(("%s: Get F%d Block size error\n", __FUNCTION__, function_num)); + return BCME_ERROR; + } + + if (result != block_size) { + DHD_ERROR(("%s: F%d Block size set from %d to %d\n", + __FUNCTION__, function_num, result, block_size)); + func_blk_size = function_num << 16 | block_size; + bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", NULL, + 0, &func_blk_size, sizeof(int32), IOV_SET); + if (bcmerr != BCME_OK) { + DHD_ERROR(("%s: Set F2 Block size error\n", __FUNCTION__)); + return BCME_ERROR; + } + } + + return BCME_OK; +} + #if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN) void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable) @@ -1592,7 +1682,7 @@ dhd_enable_oob_intr(struct dhd_bus *bus, bool enable) dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); #endif /* !defined(HW_OOB) */ } -#endif +#endif int dhd_bus_txdata(struct dhd_bus *bus, void *pkt) @@ -1624,6 +1714,9 @@ dhd_bus_txdata(struct dhd_bus *bus, void *pkt) prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK)); + /* move from dhdsdio_sendfromq(), try to orphan skb 
early */ + PKTORPHAN(pkt, bus->dhd->conf->tsq); + /* Check for existing queue, current flow-control, pending event, or pending clock */ if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched || (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) || @@ -2588,16 +2681,11 @@ dhdsdio_sendfromq_swtxglom(dhd_bus_t *bus, uint maxframes) datalen = 0; #ifdef PKT_STATICS - if (txglom_cnt < 2) - tx_statics.glom_1_count++; - else if (txglom_cnt < 3) - tx_statics.glom_3_count++; - else if (txglom_cnt < 8) - tx_statics.glom_3_8_count++; - else - tx_statics.glom_8_count++; - if (txglom_cnt > tx_statics.glom_max) - tx_statics.glom_max = txglom_cnt; + if (txglom_cnt) { + tx_statics.glom_cnt[txglom_cnt-1]++; + if (txglom_cnt > tx_statics.glom_max) + tx_statics.glom_max = txglom_cnt; + } #endif for (i = 0; i < txglom_cnt; i++) { uint datalen_tmp = 0; @@ -2827,6 +2915,9 @@ dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes) osh = dhd->osh; tx_prec_map = ~bus->flowcontrol; +#ifdef DHD_LOSSLESS_ROAMING + tx_prec_map &= dhd->dequeue_prec_map; +#endif for (cnt = 0; (cnt < maxframes) && DATAOK(bus);) { int i; int num_pkt = 1; @@ -2835,19 +2926,26 @@ dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes) dhd_os_sdlock_txq(bus->dhd); if (bus->txglom_enable) { - num_pkt = MIN((uint32)DATABUFCNT(bus), (uint32)bus->txglomsize); + uint32 glomlimit = (uint32)bus->txglomsize; +#if defined(BCMSDIOH_STD) + if (bus->blocksize == 64) { + glomlimit = MIN((uint32)bus->txglomsize, BLK_64_MAXTXGLOM); + } +#endif /* BCMSDIOH_STD */ + num_pkt = MIN((uint32)DATABUFCNT(bus), glomlimit); num_pkt = MIN(num_pkt, ARRAYSIZE(pkts)); } num_pkt = MIN(num_pkt, pktq_mlen(&bus->txq, tx_prec_map)); for (i = 0; i < num_pkt; i++) { - pkts[i] = pktq_mdeq(&bus->txq, ~bus->flowcontrol, &prec_out); + pkts[i] = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out); if (!pkts[i]) { DHD_ERROR(("%s: pktq_mlen non-zero when no pkt\n", __FUNCTION__)); ASSERT(0); break; } - PKTORPHAN(pkts[i]); + /* move orphan to 
dhd_bus_txdata() */ + /* PKTORPHAN(pkts[i], bus->dhd->conf->tsq); */ datalen += PKTLEN(osh, pkts[i]); } dhd_os_sdunlock_txq(bus->dhd); @@ -2863,16 +2961,11 @@ dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes) } cnt += i; #ifdef PKT_STATICS - if (num_pkt < 2) - tx_statics.glom_1_count++; - else if (num_pkt < 3) - tx_statics.glom_3_count++; - else if (num_pkt < 8) - tx_statics.glom_3_8_count++; - else - tx_statics.glom_8_count++; - if (num_pkt > tx_statics.glom_max) - tx_statics.glom_max = num_pkt; + if (num_pkt) { + tx_statics.glom_cnt[num_pkt-1]++; + if (num_pkt > tx_statics.glom_max) + tx_statics.glom_max = num_pkt; + } #endif /* In poll mode, need to check for other events */ @@ -3048,6 +3141,14 @@ dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen) } else { bus->dhd->txcnt_timeout++; if (!bus->dhd->hang_was_sent) { +#ifdef CUSTOMER_HW4_DEBUG + uint32 status, retry = 0; + R_SDREG(status, &bus->regs->intstatus, retry); + DHD_TRACE_HW4(("%s: txcnt_timeout, INT status=0x%08X\n", + __FUNCTION__, status)); + DHD_TRACE_HW4(("%s : tx_max : %d, tx_seq : %d, clkstate : %d \n", + __FUNCTION__, bus->tx_max, bus->tx_seq, bus->clkstate)); +#endif /* CUSTOMER_HW4_DEBUG */ DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n", __FUNCTION__, bus->dhd->txcnt_timeout)); } @@ -3114,7 +3215,6 @@ dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) { int timeleft; uint rxlen = 0; - bool pending; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); @@ -3122,7 +3222,7 @@ dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) return -EIO; /* Wait until control frame is available */ - timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending); + timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen); dhd_os_sdlock(bus->dhd); rxlen = bus->rxlen; @@ -3142,23 +3242,14 @@ dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) #else DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__)); #endif /* DHD_DEBUG */ -#ifdef DHD_DEBUG - dhd_os_sdlock(bus->dhd); - 
dhdsdio_checkdied(bus, NULL, 0); - dhd_os_sdunlock(bus->dhd); -#endif /* DHD_DEBUG */ - } else if (pending == TRUE) { - /* signal pending */ - DHD_ERROR(("%s: signal pending\n", __FUNCTION__)); - return -EINTR; - + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); } else { DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__)); -#ifdef DHD_DEBUG dhd_os_sdlock(bus->dhd); dhdsdio_checkdied(bus, NULL, 0); dhd_os_sdunlock(bus->dhd); -#endif /* DHD_DEBUG */ } if (timeleft == 0) { if (rxlen == 0) @@ -3547,7 +3638,6 @@ dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint s return bcmerror; } -#ifdef DHD_DEBUG static int dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh) { @@ -3555,6 +3645,17 @@ dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh) int rv, i; uint32 shaddr = 0; + if (bus->sih == NULL) { + if (bus->dhd && bus->dhd->dongle_reset) { + DHD_ERROR(("%s: Dongle is in reset state\n", __FUNCTION__)); + return BCME_NOTREADY; + } else { + ASSERT(bus->dhd); + ASSERT(bus->sih); + DHD_ERROR(("%s: The address of sih is invalid\n", __FUNCTION__)); + return BCME_ERROR; + } + } if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID && !dhdsdio_sr_cap(bus)) bus->srmemsize = 0; @@ -3614,6 +3715,7 @@ dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh) #define CONSOLE_LINE_MAX 192 +#ifdef DHD_DEBUG static int dhdsdio_readconsole(dhd_bus_t *bus) { @@ -3689,6 +3791,7 @@ dhdsdio_readconsole(dhd_bus_t *bus) return BCME_OK; } +#endif /* DHD_DEBUG */ static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size) @@ -3858,6 +3961,12 @@ dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size) DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf)); } +#if defined(DHD_FW_COREDUMP) + if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) { + /* Mem dump to a file on device */ + dhdsdio_mem_dump(bus); + } +#endif /* #if defined(DHD_FW_COREDUMP) */ done: if (mbuffer) @@ -3869,8 +3978,68 @@ dhdsdio_checkdied(dhd_bus_t *bus, char 
*data, uint size) return bcmerror; } -#endif /* #ifdef DHD_DEBUG */ +#if defined(DHD_FW_COREDUMP) +static int +dhdsdio_mem_dump(dhd_bus_t *bus) +{ + int ret = 0; + int size; /* Full mem size */ + int start = bus->dongle_ram_base; /* Start address */ + int read_size = 0; /* Read size of each iteration */ + uint8 *buf = NULL, *databuf = NULL; + + /* Get full mem size */ + size = bus->ramsize; + buf = MALLOC(bus->dhd->osh, size); + if (!buf) { + printf("%s: Out of memory (%d bytes)\n", __FUNCTION__, size); + return -1; + } + + /* Read mem content */ + printf("Dump dongle memory"); + databuf = buf; + while (size) + { + read_size = MIN(MEMBLOCK, size); + if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size))) + { + printf("%s: Error membytes %d\n", __FUNCTION__, ret); + if (buf) { + MFREE(bus->dhd->osh, buf, size); + } + return -1; + } + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } + printf("Done\n"); + + dhd_save_fwdump(bus->dhd, buf, bus->ramsize); + /* free buf before return !!! 
*/ + if (write_to_file(bus->dhd, buf, bus->ramsize)) + { + printf("%s: Error writing to files\n", __FUNCTION__); + return -1; + } + + /* buf free handled in write_to_file, not here */ + return 0; +} +#endif /* DHD_FW_COREDUMP */ + +int +dhd_socram_dump(dhd_bus_t * bus) +{ +#if defined(DHD_FW_COREDUMP) + return (dhdsdio_mem_dump(bus)); +#else + return -1; +#endif +} int dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len) @@ -3979,7 +4148,7 @@ dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror) return (int_val & uart_enab); } -#endif +#endif static int dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, @@ -4453,7 +4622,7 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const ch bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, ((uint8)mesbusyctrl | 0x80), NULL); break; -#endif +#endif case IOV_GVAL(IOV_DONGLEISOLATION): @@ -4799,6 +4968,23 @@ dhdsdio_download_state(dhd_bus_t *bus, bool enter) bcmerror = dhdsdio_membytes(bus, TRUE, 0, (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + if (bcmerror == BCME_OK) { + uint32 tmp; + + /* verify write */ + bcmerror = dhdsdio_membytes(bus, FALSE, 0, + (uint8 *)&tmp, sizeof(tmp)); + + if (bcmerror == BCME_OK && tmp != bus->resetinstr) { + DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n", + __FUNCTION__, bus->resetinstr)); + DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n", + __FUNCTION__, tmp)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + } + /* now remove reset and halt and continue to run CR4 */ } @@ -4983,9 +5169,7 @@ dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) /* Turn off the bus (F2), free any pending packets */ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); -#if !defined(NDISVER) || (NDISVER < 0x0630) bcmsdh_intr_disable(bus->sdh); -#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); /* Clear any 
pending interrupts now that F2 is disabled */ @@ -5077,6 +5261,7 @@ dhd_txglom_enable(dhd_pub_t *dhdp, bool enable) bus->txglom_enable = FALSE; printf("%s: enable %d\n", __FUNCTION__, bus->txglom_enable); dhd_conf_set_txglom_params(bus->dhd, bus->txglom_enable); + bcmsdh_set_mode(bus->sdh, bus->dhd->conf->txglom_mode); } int @@ -5115,8 +5300,13 @@ dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) /* Force clocks on backplane to be sure F2 interrupt propagates */ saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); if (!err) { - bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, - (saveclk | SBSDIO_FORCE_HT), &err); + if (bus->sih->chip == BCM43012_CHIP_ID) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_HT_AVAIL_REQ), &err); + } else { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } } if (err) { DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err)); @@ -5167,6 +5357,13 @@ dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) /* Set bus state according to enable result */ dhdp->busstate = DHD_BUS_DATA; + /* Need to set fn2 block size to match fn1 block size. + * Requests to fn2 go thru fn1. * + * faltwig has this code contitioned with #if !BCMSPI_ANDROID. + * It would be cleaner to use the ->sdh->block_sz[fno] instead of + * 64, but this layer has no access to sdh types. 
+ */ + /* bcmsdh_intr_unmask(bus->sdh); */ bus->intdis = FALSE; @@ -5803,6 +6000,15 @@ dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq) dhd_os_sdunlock(bus->dhd); dhd_rx_frame(bus->dhd, idx, list_head[idx], cnt, 0); dhd_os_sdlock(bus->dhd); +#if defined(SDIO_ISR_THREAD) + /* terence 20150615: fix for below error due to bussleep in watchdog after dhd_os_sdunlock here, + * so call BUS_WAKE to wake up bus again + * dhd_bcmsdh_recv_buf: Device asleep + * dhdsdio_readframes: RXHEADER FAILED: -40 + * dhdsdio_rxfail: abort command, terminate frame, send NAK + */ + BUS_WAKE(bus); +#endif } } } @@ -6541,14 +6747,12 @@ dhdsdio_hostmail(dhd_bus_t *bus) bus->flowcontrol = fcbits; } -#ifdef DHD_DEBUG /* At least print a message if FW halted */ if (hmb_data & HMB_DATA_FWHALT) { DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED : set BUS DOWN\n")); dhdsdio_checkdied(bus, NULL, 0); bus->dhd->busstate = DHD_BUS_DOWN; } -#endif /* DHD_DEBUG */ /* Shouldn't be any others */ if (hmb_data & ~(HMB_DATA_DEVREADY | @@ -6576,6 +6780,9 @@ dhdsdio_dpc(dhd_bus_t *bus) uint framecnt = 0; /* Temporary counter of tx/rx frames */ bool rxdone = TRUE; /* Flag for no more read data */ bool resched = FALSE; /* Flag indicating resched wanted */ +#ifdef DEBUG_DPC_THREAD_WATCHDOG + bool is_resched_by_readframe = FALSE; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ DHD_TRACE(("%s: Enter\n", __FUNCTION__)); dhd_os_sdlock(bus->dhd); @@ -6743,16 +6950,14 @@ dhdsdio_dpc(dhd_bus_t *bus) * or clock availability. (Allows tx loop to check ipend if desired.) * (Unless register access seems hosed, as we may not be able to ACK...) 
*/ - if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) { + if (!bus->dhd->conf->oob_enabled_later && bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) { DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n", __FUNCTION__, rxdone, framecnt)); bus->intdis = FALSE; #if defined(OOB_INTR_ONLY) bcmsdh_oob_intr_set(bus->sdh, TRUE); #endif /* defined(OOB_INTR_ONLY) */ -#if !defined(NDISVER) || (NDISVER < 0x0630) bcmsdh_intr_enable(sdh); -#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */ } #if defined(OOB_INTR_ONLY) && !defined(HW_OOB) @@ -6781,7 +6986,10 @@ dhdsdio_dpc(dhd_bus_t *bus) /* Send queued frames (limit 1 if rx may still be pending) */ else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) { - framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax); + if (bus->dhd->conf->dhd_txminmax < 0) + framecnt = rxdone ? txlimit : MIN(txlimit, DATABUFCNT(bus)); + else + framecnt = rxdone ? txlimit : MIN(txlimit, bus->dhd->conf->dhd_txminmax); #if defined(SWTXGLOM) if (bus->dhd->conf->swtxglom) framecnt = dhdsdio_sendfromq_swtxglom(bus, framecnt); @@ -6795,7 +7003,7 @@ dhdsdio_dpc(dhd_bus_t *bus) if (bus->dhd->conf->txctl_tmo_fix) { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop()) - schedule_timeout(1); + schedule_timeout(1); set_current_state(TASK_RUNNING); } resched = TRUE; @@ -6833,12 +7041,42 @@ dhdsdio_dpc(dhd_bus_t *bus) exit: - if (!resched && dhd_dpcpoll) { - if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0) - resched = TRUE; + if (!resched) { + /* Re-enable interrupts to detect new device events (mailbox, rx frame) + * or clock availability. (Allows tx loop to check ipend if desired.) + * (Unless register access seems hosed, as we may not be able to ACK...) 
+ */ + if (bus->dhd->conf->oob_enabled_later && bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) { + DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n", + __FUNCTION__, rxdone, framecnt)); + bus->intdis = FALSE; +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif /* defined(OOB_INTR_ONLY) */ + bcmsdh_intr_enable(sdh); + } + if (dhd_dpcpoll) { + if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0) { + resched = TRUE; +#ifdef DEBUG_DPC_THREAD_WATCHDOG + is_resched_by_readframe = TRUE; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + } + } } dhd_os_sdunlock(bus->dhd); +#ifdef DEBUG_DPC_THREAD_WATCHDOG + if (bus->dhd->dhd_bug_on) { + DHD_INFO(("%s: resched = %d ctrl_frame_stat = %d intstatus 0x%08x" + " ipend = %d pktq_mlen = %d is_resched_by_readframe = %d \n", + __FUNCTION__, resched, bus->ctrl_frame_stat, + bus->intstatus, bus->ipend, + pktq_mlen(&bus->txq, ~bus->flowcontrol), is_resched_by_readframe)); + + bus->dhd->dhd_bug_on = FALSE; + } +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ return resched; } @@ -6896,9 +7134,7 @@ dhdsdio_isr(void *arg) DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n")); } -#if !defined(NDISVER) || (NDISVER < 0x0630) bcmsdh_intr_disable(sdh); -#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */ bus->intdis = TRUE; #if defined(SDIO_ISR_THREAD) @@ -6913,11 +7149,8 @@ dhdsdio_isr(void *arg) } DHD_OS_WAKE_UNLOCK(bus->dhd); #else - -#if !defined(NDISVER) || (NDISVER < 0x0630) bus->dpc_sched = TRUE; dhd_sched_dpc(bus->dhd); -#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */ #endif /* defined(SDIO_ISR_THREAD) */ @@ -6926,22 +7159,40 @@ dhdsdio_isr(void *arg) #ifdef PKT_STATICS void dhdsdio_txpktstatics(void) { - uint total, f1, f2, f3, f4; - printf("Randy: TYPE EVENT: %d pkts (size=%d) transfered\n", tx_statics.event_count, tx_statics.event_size); - printf("Randy: TYPE CTRL: %d pkts (size=%d) transfered\n", tx_statics.ctrl_count, tx_statics.ctrl_size); - printf("Randy: TYPE DATA: %d pkts (size=%d) 
transfered\n", tx_statics.data_count, tx_statics.data_size); - if(tx_statics.glom_1_count || tx_statics.glom_3_count || tx_statics.glom_3_8_count || tx_statics.glom_8_count) { - total = tx_statics.glom_1_count + tx_statics.glom_3_count + tx_statics.glom_3_8_count + tx_statics.glom_8_count; - f1 = (tx_statics.glom_1_count*100) / total; - f2 = (tx_statics.glom_3_count*100) / total; - f3 = (tx_statics.glom_3_8_count*100) / total; - f4 = (tx_statics.glom_8_count*100) / total; - printf("Randy: glomsize==1: %d(%d), tglomsize==2: %d(%d), pkts 3<=glomsize<8: %d(%d), pkts glomszie>=8: %d(%d)\n", - tx_statics.glom_1_count, f1, tx_statics.glom_3_count, f2, tx_statics.glom_3_8_count, f3, tx_statics.glom_8_count, f4); - printf("Randy: data/glom=%d, glom_max=%d\n", tx_statics.data_count/total, tx_statics.glom_max); - } - printf("Randy: TYPE RX GLOM: %d pkts (size=%d) transfered\n", tx_statics.glom_count, tx_statics.glom_size); - printf("Randy: TYPE TEST: %d pkts (size=%d) transfered\n\n\n", tx_statics.test_count, tx_statics.test_size); + uint i, total = 0; + + printf("%s: TYPE EVENT: %d pkts (size=%d) transfered\n", + __FUNCTION__, tx_statics.event_count, tx_statics.event_size); + printf("%s: TYPE CTRL: %d pkts (size=%d) transfered\n", + __FUNCTION__, tx_statics.ctrl_count, tx_statics.ctrl_size); + printf("%s: TYPE DATA: %d pkts (size=%d) transfered\n", + __FUNCTION__, tx_statics.data_count, tx_statics.data_size); + printf("%s: Glom size distribution:\n", __FUNCTION__); + for (i=0;idhd); /* Poll period: check device if appropriate. 
*/ - if (!SLPAUTO_ENAB(bus) && (bus->poll && (++bus->polltick >= bus->pollrate))) { + // terence 20160615: remove !SLPAUTO_ENAB(bus) to fix not able to polling if sr supported + if (1 && (bus->poll && (++bus->polltick >= bus->pollrate))) { uint32 intstatus = 0; /* Reset poll tick */ @@ -7561,9 +7813,7 @@ dhdsdio_chipmatch(uint16 chipid) return TRUE; if (chipid == BCM43349_CHIP_ID) return TRUE; - if (chipid == BCM4345_CHIP_ID) - return TRUE; - if (chipid == BCM43454_CHIP_ID) + if (chipid == BCM4345_CHIP_ID || chipid == BCM43454_CHIP_ID) return TRUE; if (chipid == BCM4350_CHIP_ID) return TRUE; @@ -7573,12 +7823,16 @@ dhdsdio_chipmatch(uint16 chipid) return TRUE; if (chipid == BCM4358_CHIP_ID) return TRUE; + if (chipid == BCM43569_CHIP_ID) + return TRUE; if (chipid == BCM4371_CHIP_ID) return TRUE; if (chipid == BCM43430_CHIP_ID) return TRUE; if (BCM4349_CHIP(chipid)) return TRUE; + if (chipid == BCM43012_CHIP_ID) + return TRUE; return FALSE; } @@ -7606,7 +7860,7 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, } mutex_lock(&_dhd_sdio_mutex_lock_); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ -#endif +#endif /* Init global variables at run-time, not as part of the declaration. * This is required to support init/de-init of the driver. 
Initialization @@ -7620,11 +7874,10 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, sd1idle = TRUE; dhd_readahead = TRUE; retrydata = FALSE; -#if !defined(PLATFORM_MPS) + +#ifdef DISABLE_FLOW_CONTROL dhd_doflow = FALSE; -#else - dhd_doflow = TRUE; -#endif /* OEM_ANDROID */ +#endif /* DISABLE_FLOW_CONTROL */ dhd_dongle_ramsize = 0; dhd_txminmax = DHD_TXMINMAX; @@ -7704,6 +7957,10 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1; bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */ +#if defined(SUPPORT_P2P_GO_PS) + init_waitqueue_head(&bus->bus_sleep); +#endif /* LINUX && SUPPORT_P2P_GO_PS */ + /* attempt to attach to the dongle */ if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) { DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__)); @@ -7753,6 +8010,14 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, goto fail; } } + else { + /* Set random MAC address during boot time */ + get_random_bytes(&bus->dhd->mac.octet[3], 3); + /* Adding BRCM OUI */ + bus->dhd->mac.octet[0] = 0; + bus->dhd->mac.octet[1] = 0x90; + bus->dhd->mac.octet[2] = 0x4C; + } #endif #ifdef GET_OTP_MAC_ENABLE @@ -7768,6 +8033,14 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, goto fail; } +#ifdef BCMHOST_XTAL_PU_TIME_MOD + bcmsdh_reg_write(bus->sdh, 0x18000620, 2, 11); +#ifdef BCM4330_CHIP + bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x0000F801); +#else + bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x00F80001); +#endif /* BCM4330_CHIP */ +#endif /* BCMHOST_XTAL_PU_TIME_MOD */ #if defined(MULTIPLE_SUPPLICANT) wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe @@ -7775,9 +8048,7 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, mutex_unlock(&_dhd_sdio_mutex_lock_); DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__)); #endif /* (LINUX_VERSION_CODE 
>= KERNEL_VERSION(2, 6, 27)) */ -#endif - - init_waitqueue_head(&bus->bus_sleep); +#endif return bus; @@ -7790,289 +8061,11 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, mutex_unlock(&_dhd_sdio_mutex_lock_); DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__)); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ -#endif +#endif return NULL; } -#ifdef REGON_BP_HANG_FIX -static int dhd_sdio_backplane_reset(struct dhd_bus *bus) -{ - uint32 temp = 0; - DHD_ERROR(("Resetting the backplane to avoid failure in firmware download..\n")); - - temp = bcmsdh_reg_read(bus->sdh, 0x180021e0, 4); - DHD_INFO(("SDIO Clk Control Reg = %x\n", temp)); - - /* Force HT req from PMU */ - bcmsdh_reg_write(bus->sdh, 0x18000644, 4, 0x6000005); - - /* Increase the clock stretch duration. */ - bcmsdh_reg_write(bus->sdh, 0x18000630, 4, 0xC8FFC8); - - /* Setting ALP clock request in SDIOD clock control status register */ - bcmsdh_reg_write(bus->sdh, 0x180021e0, 4, 0x41); - - /* Allowing clock from SR engine to SR memory */ - bcmsdh_reg_write(bus->sdh, 0x18004400, 4, 0xf92f1); - /* Disabling SR Engine before SR binary download. 
*/ - bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3); - bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x0); - - /* Enabling clock from backplane to SR memory */ - bcmsdh_reg_write(bus->sdh, 0x18004400, 4, 0xf9af1); - - /* Initializing SR memory address register in SOCRAM */ - bcmsdh_reg_write(bus->sdh, 0x18004408, 4, 0x0); - - /* Downloading the SR binary */ - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0xc0002000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 
0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x80008000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x1051f080); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x80008000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x1050f080); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x80008000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x1050f080); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x80008000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x1050f080); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000004); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000604); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00001604); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00001404); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a08c80); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010001); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x14a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00011404); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00002000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x04a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00002000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0xf8000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00002000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x04a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00002000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0xf8000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00011604); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010604); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 
0x30a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010004); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x14a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000004); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010001); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x14a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010004); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x14a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000008); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x04a00000); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000008); - bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0xfc000000); - /* SR Binary Download complete */ - - /* Allowing clock from SR engine to SR memory */ - bcmsdh_reg_write(bus->sdh, 0x18004400, 4, 0xf92f1); - - /* Turning ON SR Engine to initiate backplane reset Repeated ?? 
Maharana */ - bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3); - bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x0); - bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3); - bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x2); - bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3); - bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x3); - bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3); - bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x37); - bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3); - temp = bcmsdh_reg_read(bus->sdh, 0x18000654, 4); - DHD_INFO(("0x18000654 = %x\n", temp)); - bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x800037); - OSL_DELAY(100000); - /* Rolling back the original values for clock stretch and PMU timers */ - bcmsdh_reg_write(bus->sdh, 0x18000644, 4, 0x0); - bcmsdh_reg_write(bus->sdh, 0x18000630, 4, 0xC800C8); - /* Removing ALP clock request in SDIOD clock control status register */ - bcmsdh_reg_write(bus->sdh, 0x180021e0, 4, 0x40); - OSL_DELAY(10000); - return TRUE; -} - -static int dhdsdio_sdio_hang_war(struct dhd_bus *bus) -{ - uint32 temp = 0, temp2 = 0, counter = 0, BT_pwr_up = 0, BT_ready = 0; - /* Removing reset of D11 Core */ - bcmsdh_reg_write(bus->sdh, 0x18101408, 4, 0x3); - bcmsdh_reg_write(bus->sdh, 0x18101800, 4, 0x0); - bcmsdh_reg_write(bus->sdh, 0x18101408, 4, 0x1); - /* Reading CLB XTAL BT cntrl register */ - bcmsdh_reg_write(bus->sdh, 0x180013D8, 2, 0xD1); - bcmsdh_reg_write(bus->sdh, 0x180013DA, 2, 0x12); - bcmsdh_reg_write(bus->sdh, 0x180013D8, 2, 0x2D0); - /* Read if BT is powered up */ - temp = bcmsdh_reg_read(bus->sdh, 0x180013DA, 2); - /* Read BT_ready from WLAN wireless register */ - temp2 = bcmsdh_reg_read(bus->sdh, 0x1800002C, 4); - /* - Check if the BT is powered up and ready. The duration between BT being powered up - and BT becoming ready is the problematic window for WLAN. If we move ahead at this - time then we may encounter a corrupted backplane later. So we wait for BT to be ready - and then proceed after checking the health of the backplane. 
If the backplane shows - indications of failure then we have to do a full reset of the backplane using SR engine - and then proceed. - */ - (temp & 0xF0) ? (BT_pwr_up = 1):(BT_pwr_up = 0); - (temp2 & (1<<17)) ? (BT_ready = 1):(BT_ready = 0); - DHD_ERROR(("WARNING: Checking if BT is ready BT_pwr_up = %x" - "BT_ready = %x \n", BT_pwr_up, BT_ready)); - while (BT_pwr_up && !BT_ready) - { - OSL_DELAY(1000); - bcmsdh_reg_write(bus->sdh, 0x180013D8, 2, 0x2D0); - temp = bcmsdh_reg_read(bus->sdh, 0x180013DA, 2); - temp2 = bcmsdh_reg_read(bus->sdh, 0x1800002C, 4); - (temp & 0xF0) ? (BT_pwr_up = 1):(BT_pwr_up = 0); - (temp2 & (1<<17)) ? (BT_ready = 1):(BT_ready = 0); - counter++; - if (counter == 5000) - { - DHD_ERROR(("WARNING: Going ahead after 5 secs with" - "risk of failure because BT ready is not yet set\n")); - break; - } - } - DHD_ERROR(("\nWARNING: WL Proceeding BT_pwr_up = %x BT_ready = %x" - "\n", BT_pwr_up, BT_ready)); - counter = 0; - OSL_DELAY(10000); - /* - Get the information of who accessed the crucial backplane entities - by reading read and write access registers - */ - DHD_TRACE(("%d: Read Value @ 0x18104808 = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18104808, 4))); - DHD_TRACE(("%d: Read Value @ 0x1810480C = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810480C, 4))); - DHD_TRACE(("%d: Read Value @ 0x18106808 = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18106808, 4))); - DHD_TRACE(("%d: Read Value @ 0x1810680C = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810680C, 4))); - DHD_TRACE(("%d: Read Value @ 0x18107808 = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18107808, 4))); - DHD_TRACE(("%d: Read Value @ 0x1810780C = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810780C, 4))); - DHD_TRACE(("%d: Read Value @ 0x18108808 = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18108808, 4))); - DHD_TRACE(("%d: Read Value @ 0x1810880C = %x." 
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810880C, 4))); - DHD_TRACE(("%d: Read Value @ 0x18109808 = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18109808, 4))); - DHD_TRACE(("%d: Read Value @ 0x1810980C = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810980C, 4))); - DHD_TRACE(("%d: Read Value @ 0x1810C808 = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810c808, 4))); - DHD_TRACE(("%d: Read Value @ 0x1810C80C = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810c80C, 4))); - counter = 0; - while ((bcmsdh_reg_read(bus->sdh, 0x18104808, 4) == 5) || - (bcmsdh_reg_read(bus->sdh, 0x1810480C, 4) == 5) || - (bcmsdh_reg_read(bus->sdh, 0x18106808, 4) == 5) || - (bcmsdh_reg_read(bus->sdh, 0x1810680C, 4) == 5) || - (bcmsdh_reg_read(bus->sdh, 0x1810780C, 4) == 5) || - (bcmsdh_reg_read(bus->sdh, 0x1810780C, 4) == 5) || - (bcmsdh_reg_read(bus->sdh, 0x1810880C, 4) == 5) || - (bcmsdh_reg_read(bus->sdh, 0x1810880C, 4) == 5) || - (bcmsdh_reg_read(bus->sdh, 0x1810980C, 4) == 5) || - (bcmsdh_reg_read(bus->sdh, 0x1810980C, 4) == 5) || - (bcmsdh_reg_read(bus->sdh, 0x1810C80C, 4) == 5) || - (bcmsdh_reg_read(bus->sdh, 0x1810C80C, 4) == 5)) - { - if (++counter > 10) - { - DHD_ERROR(("Unable to recover the backkplane corruption" - "..Tried %d times.. Exiting\n", counter)); - break; - } - OSL_DELAY(10000); - dhd_sdio_backplane_reset(bus); - /* - Get the information of who accessed the crucial backplane - entities by reading read and write access registers - */ - DHD_ERROR(("%d: Read Value @ 0x18104808 = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18104808, 4))); - DHD_ERROR(("%d: Read Value @ 0x1810480C = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810480C, 4))); - DHD_ERROR(("%d: Read Value @ 0x18106808 = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18106808, 4))); - DHD_ERROR(("%d: Read Value @ 0x1810680C = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810680C, 4))); - DHD_ERROR(("%d: Read Value @ 0x18107808 = %x." 
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18107808, 4))); - DHD_ERROR(("%d: Read Value @ 0x1810780C = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810780C, 4))); - DHD_ERROR(("%d: Read Value @ 0x18108808 = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18108808, 4))); - DHD_ERROR(("%d: Read Value @ 0x1810880C = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810880C, 4))); - DHD_ERROR(("%d: Read Value @ 0x18109808 = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18109808, 4))); - DHD_ERROR(("%d: Read Value @ 0x1810980C = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810980C, 4))); - DHD_ERROR(("%d: Read Value @ 0x1810C808 = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810c808, 4))); - DHD_ERROR(("%d: Read Value @ 0x1810C80C = %x." - "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810c80C, 4))); - } - /* Set the WL ready to indicate BT that we are done with backplane reset */ - DHD_ERROR(("Setting up AXI_OK\n")); - bcmsdh_reg_write(bus->sdh, 0x18000658, 4, 0x3); - temp = bcmsdh_reg_read(bus->sdh, 0x1800065c, 4); - temp |= 0x80000000; - bcmsdh_reg_write(bus->sdh, 0x1800065c, 4, temp); - return TRUE; -} -#endif /* REGON_BP_HANG_FIX */ static bool dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, uint16 devid) @@ -8088,10 +8081,10 @@ dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__)); } -#if defined(DHD_DEBUG) +#if defined(DHD_DEBUG) && !defined(CUSTOMER_HW4_DEBUG) DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n", bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4))); -#endif +#endif /* DHD_DEBUG && !CUSTOMER_HW4_DEBUG */ /* Force PLL off until si_attach() programs PLL control regs */ @@ -8166,11 +8159,6 @@ dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, bus->sih->socitype, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg)); #endif /* DHD_DEBUG */ -#ifdef REGON_BP_HANG_FIX - /* WAR - 
for 43241 B0-B1-B2. B3 onwards do not need this */ - if (((uint16)bus->sih->chip == BCM4324_CHIP_ID) && (bus->sih->chiprev < 3)) - dhdsdio_sdio_hang_war(bus); -#endif /* REGON_BP_HANG_FIX */ bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev); @@ -8224,6 +8212,7 @@ dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, case BCM4354_CHIP_ID: case BCM4356_CHIP_ID: case BCM4358_CHIP_ID: + case BCM43569_CHIP_ID: case BCM4371_CHIP_ID: bus->dongle_ram_base = CR4_4350_RAM_BASE; break; @@ -8236,7 +8225,9 @@ dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE; break; case BCM4349_CHIP_GRPID: - bus->dongle_ram_base = CR4_4349_RAM_BASE; + /* RAM base changed from 4349c0(revid=9) onwards */ + bus->dongle_ram_base = ((bus->sih->chiprev < 9) ? + CR4_4349_RAM_BASE: CR4_4349_RAM_BASE_FROM_REV_9); break; default: bus->dongle_ram_base = 0; @@ -8436,12 +8427,14 @@ dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh) int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, - char *pfw_path, char *pnv_path, char *pconf_path) + char *pfw_path, char *pnv_path, + char *pclm_path, char *pconf_path) { int ret; bus->fw_path = pfw_path; bus->nv_path = pnv_path; + bus->dhd->clm_path = pclm_path; bus->dhd->conf_path = pconf_path; ret = dhdsdio_download_firmware(bus, osh, bus->sdh); @@ -8450,44 +8443,76 @@ dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, return ret; } -static int -dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh) +void +dhd_set_path_params(struct dhd_bus *bus) { - int ret; - - DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n", - __FUNCTION__, bus->fw_path, bus->nv_path)); - DHD_OS_WAKE_LOCK(bus->dhd); - - /* Download the firmware */ - dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); - /* External conf takes precedence if specified */ dhd_conf_preinit(bus->dhd); + + if (bus->dhd->conf_path[0] == '\0') { + dhd_conf_set_path(bus->dhd, 
"config.txt", bus->dhd->conf_path, bus->nv_path); + } + if (bus->dhd->clm_path[0] == '\0') { + dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path); + } +#ifdef CONFIG_PATH_AUTO_SELECT + dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path); +#endif + dhd_conf_read_config(bus->dhd, bus->dhd->conf_path); + dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path); dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path); + dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path); + dhd_conf_set_fw_name_by_mac(bus->dhd, bus->sdh, bus->fw_path); dhd_conf_set_nv_name_by_mac(bus->dhd, bus->sdh, bus->nv_path); + + printf("Final fw_path=%s\n", bus->fw_path); + printf("Final nv_path=%s\n", bus->nv_path); + printf("Final clm_path=%s\n", bus->dhd->clm_path); + printf("Final conf_path=%s\n", bus->dhd->conf_path); + +} + +void +dhd_set_bus_params(struct dhd_bus *bus) +{ if (bus->dhd->conf->dhd_poll >= 0) { - printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll); bus->poll = bus->dhd->conf->dhd_poll; if (!bus->pollrate) bus->pollrate = 1; + printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll); } if (bus->dhd->conf->use_rxchain >= 0) { - printf("%s: set use_rxchain %d\n", __FUNCTION__, bus->dhd->conf->use_rxchain); bus->use_rxchain = (bool)bus->dhd->conf->use_rxchain; + printf("%s: set use_rxchain %d\n", __FUNCTION__, bus->dhd->conf->use_rxchain); + } + if (bus->dhd->conf->txinrx_thres >= 0) { + bus->txinrx_thres = bus->dhd->conf->txinrx_thres; + printf("%s: set txinrx_thres %d\n", __FUNCTION__, bus->txinrx_thres); } if (bus->dhd->conf->txglomsize >= 0) { - printf("%s: set txglomsize %d\n", __FUNCTION__, bus->dhd->conf->txglomsize); bus->txglomsize = bus->dhd->conf->txglomsize; + printf("%s: set txglomsize %d\n", __FUNCTION__, bus->dhd->conf->txglomsize); } - bcmsdh_set_mode(sdh, bus->dhd->conf->txglom_mode); +} - printf("Final fw_path=%s\n", bus->fw_path); - printf("Final nv_path=%s\n", bus->nv_path); - 
printf("Final conf_path=%s\n", bus->dhd->conf_path); +static int +dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh) +{ + int ret; + + + DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n", + __FUNCTION__, bus->fw_path, bus->nv_path)); + DHD_OS_WAKE_LOCK(bus->dhd); + + /* Download the firmware */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + dhd_set_path_params(bus); + dhd_set_bus_params(bus); ret = _dhdsdio_download_firmware(bus); @@ -8618,7 +8643,7 @@ dhdsdio_disconnect(void *ptr) } mutex_lock(&_dhd_sdio_mutex_lock_); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ -#endif +#endif if (bus) { @@ -8643,20 +8668,24 @@ dhdsdio_suspend(void *context) int ret = 0; dhd_bus_t *bus = (dhd_bus_t*)context; +#ifdef SUPPORT_P2P_GO_PS int wait_time = 0; + if (bus->idletime > 0) { wait_time = msecs_to_jiffies(bus->idletime * dhd_watchdog_ms); } - +#endif /* SUPPORT_P2P_GO_PS */ ret = dhd_os_check_wakelock(bus->dhd); +#ifdef SUPPORT_P2P_GO_PS // terence 20141124: fix for suspend issue - if (SLPAUTO_ENAB(bus) && (!ret) && (bus->dhd->up)) { + if (SLPAUTO_ENAB(bus) && (!ret) && (bus->dhd->up) && (bus->dhd->op_mode != DHD_FLAG_HOSTAP_MODE)) { if (wait_event_timeout(bus->bus_sleep, bus->sleeping, wait_time) == 0) { if (!bus->sleeping) { return 1; } } } +#endif /* SUPPORT_P2P_GO_PS */ return ret; } @@ -8668,7 +8697,7 @@ dhdsdio_resume(void *context) if (dhd_os_check_if_up(bus->dhd)) bcmsdh_oob_intr_set(bus->sdh, TRUE); -#endif +#endif return 0; } @@ -8900,26 +8929,6 @@ dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path) return bcmerror; } -/* - EXAMPLE: nvram_array - nvram_arry format: - name=value - Use carriage return at the end of each assignment, and an empty string with - carriage return at the end of array. - - For example: - unsigned char nvram_array[] = {"name1=value1\n", "name2=value2\n", "\n"}; - Hex values start with 0x, and mac addr format: xx:xx:xx:xx:xx:xx. - - Search "EXAMPLE: nvram_array" to see how the array is activated. 
-*/ - -void -dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params) -{ - bus->nvram_params = nvram_params; -} - static int dhdsdio_download_nvram(struct dhd_bus *bus) { @@ -8934,9 +8943,8 @@ dhdsdio_download_nvram(struct dhd_bus *bus) pnv_path = bus->nv_path; nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); - if (!nvram_file_exists && (bus->nvram_params == NULL)) - return (0); + /* For Get nvram from UEFI */ if (nvram_file_exists) { image = dhd_os_open_image(pnv_path); if (image == NULL) { @@ -8952,15 +8960,9 @@ dhdsdio_download_nvram(struct dhd_bus *bus) goto err; } - /* Download variables */ - if (nvram_file_exists) { - len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image); - } - else { - len = strlen(bus->nvram_params); - ASSERT(len <= MAX_NVRAMBUF_SIZE); - memcpy(memblock, bus->nvram_params, len); - } + /* For Get nvram from image or UEFI (when image == NULL ) */ + len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image); + if (len > 0 && len < MAX_NVRAMBUF_SIZE) { bufp = (char *)memblock; bufp[len] = 0; @@ -9050,10 +9052,6 @@ _dhdsdio_download_firmware(struct dhd_bus *bus) goto err; } - /* EXAMPLE: nvram_array */ - /* If a valid nvram_arry is specified as above, it can be passed down to dongle */ - /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */ - /* External nvram takes precedence if specified */ if (dhdsdio_download_nvram(bus)) { DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__)); @@ -9138,6 +9136,18 @@ dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf return ret; } +uint8 +dhd_bus_is_ioready(struct dhd_bus *bus) +{ + uint8 enable; + bcmsdh_info_t *sdh; + ASSERT(bus); + ASSERT(bus->sih != NULL); + enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2); + sdh = bus->sdh; + return (enable == bcmsdh_cfg_read(sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL)); +} + uint dhd_bus_chip(struct dhd_bus *bus) { @@ -9208,7 +9218,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, 
uint8 flag) dhd_enable_oob_intr(bus, FALSE); bcmsdh_oob_intr_set(bus->sdh, FALSE); bcmsdh_oob_intr_unregister(bus->sdh); -#endif +#endif /* Clean tx/rx buffer pointers, detach from the dongle */ dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE); @@ -9251,7 +9261,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) bcmsdh_oob_intr_set(bus->sdh, TRUE); #elif defined(FORCE_WOWLAN) dhd_enable_oob_intr(bus, TRUE); -#endif +#endif bus->dhd->dongle_reset = FALSE; bus->dhd->up = TRUE; @@ -9259,7 +9269,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) #if !defined(IGNORE_ETH0_DOWN) /* Restore flow control */ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); -#endif +#endif dhd_os_wd_timer(dhdp, dhd_watchdog_ms); DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__)); @@ -9268,12 +9278,19 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, FALSE); } - } else + } else { + DHD_ERROR(("%s Failed to download binary to the dongle\n", + __FUNCTION__)); + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } bcmerror = BCME_SDIO_ERROR; + } } else bcmerror = BCME_SDIO_ERROR; - dhd_os_sdunlock(dhdp); + dhd_os_sdunlock(dhdp); } else { bcmerror = BCME_SDIO_ERROR; printf("%s called when dongle is not in reset\n", @@ -9352,31 +9369,14 @@ dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint si return dhdsdio_membytes(bus, set, address, data, size); } -#if defined(NDISVER) && (NDISVER >= 0x0630) -void -dhd_bus_reject_ioreqs(dhd_pub_t *dhdp, bool reject) -{ - - DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - - bcmsdh_reject_ioreqs(dhdp->bus->sdh, reject); -} void -dhd_bus_waitfor_iodrain(dhd_pub_t *dhdp) -{ - - DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - - bcmsdh_waitfor_iodrain(dhdp->bus->sdh); -} -#endif /* (NDISVER) && (NDISVER >= 0x0630) */ - -void -dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path, char *pconf_path) +dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, 
char *pnv_path, + char *pclm_path, char *pconf_path) { bus->fw_path = pfw_path; bus->nv_path = pnv_path; + bus->dhd->clm_path = pclm_path; bus->dhd->conf_path = pconf_path; } @@ -9494,28 +9494,3 @@ void dhd_sdio_reg_write(void *h, uint32 addr, uint32 val) dhd_os_sdunlock(bus->dhd); } #endif /* DEBUGGER */ - -#if defined(SOFTAP_TPUT_ENHANCE) -void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time) -{ - if (!dhdp || !dhdp->bus) { - DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__)); - return; - } - dhdp->bus->idletime = idle_time; -} - -void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time) -{ - if (!dhdp || !dhdp->bus) { - DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__)); - return; - } - - if (!idle_time) { - DHD_ERROR(("%s:Arg idle_time is NULL\n", __FUNCTION__)); - return; - } - *idle_time = dhdp->bus->idletime; -} -#endif /* SOFTAP_TPUT_ENHANCE */ diff --git a/drivers/amlogic/wifi/bcmdhd/dhd_static_buf.c b/drivers/amlogic/wifi/bcmdhd/dhd_static_buf.c new file mode 100644 index 0000000000000..36cca5b32551a --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dhd_static_buf.c @@ -0,0 +1,380 @@ +#include +#include +#include +#include +#include +#include +#include + +enum dhd_prealloc_index { + DHD_PREALLOC_PROT = 0, + DHD_PREALLOC_RXBUF, + DHD_PREALLOC_DATABUF, + DHD_PREALLOC_OSL_BUF, + DHD_PREALLOC_SKB_BUF, + DHD_PREALLOC_WIPHY_ESCAN0 = 5, + DHD_PREALLOC_WIPHY_ESCAN1 = 6, + DHD_PREALLOC_DHD_INFO = 7, + DHD_PREALLOC_DHD_WLFC_INFO = 8, + DHD_PREALLOC_IF_FLOW_LKUP = 9, + DHD_PREALLOC_MEMDUMP_BUF = 10, + DHD_PREALLOC_MEMDUMP_RAM = 11, + DHD_PREALLOC_DHD_WLFC_HANGER = 12, + DHD_PREALLOC_MAX +}; + +#define STATIC_BUF_MAX_NUM 20 +#define STATIC_BUF_SIZE (PAGE_SIZE*2) + +#define DHD_PREALLOC_PROT_SIZE (16 * 1024) +#define DHD_PREALLOC_RXBUF_SIZE (24 * 1024) +#define DHD_PREALLOC_DATABUF_SIZE (64 * 1024) +#define DHD_PREALLOC_OSL_BUF_SIZE (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) +#define DHD_PREALLOC_WIPHY_ESCAN0_SIZE (64 * 1024) +#define DHD_PREALLOC_DHD_INFO_SIZE (24 * 1024) 
+#define DHD_PREALLOC_DHD_WLFC_HANGER_SIZE (64 * 1024) +#ifdef CONFIG_64BIT +#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024 * 2) +#else +#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024) +#endif + +#if defined(CONFIG_64BIT) +#define WLAN_DHD_INFO_BUF_SIZE (24 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) +#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024) +#else +#define WLAN_DHD_INFO_BUF_SIZE (16 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (24 * 1024) +#define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024) +#endif /* CONFIG_64BIT */ +#define WLAN_DHD_MEMDUMP_SIZE (800 * 1024) + +#ifdef CONFIG_BCMDHD_PCIE +#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1) +#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2) +#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4) + +#define DHD_SKB_1PAGE_BUF_NUM 0 +#define DHD_SKB_2PAGE_BUF_NUM 64 +#define DHD_SKB_4PAGE_BUF_NUM 0 +#else +#define DHD_SKB_HDRSIZE 336 +#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE) +#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE) +#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE) + +#define DHD_SKB_1PAGE_BUF_NUM 8 +#define DHD_SKB_2PAGE_BUF_NUM 8 +#define DHD_SKB_4PAGE_BUF_NUM 1 +#endif /* CONFIG_BCMDHD_PCIE */ + +/* The number is defined in linux_osl.c + * WLAN_SKB_1_2PAGE_BUF_NUM => STATIC_PKT_1_2PAGE_NUM + * WLAN_SKB_BUF_NUM => STATIC_PKT_MAX_NUM + */ +#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \ + (DHD_SKB_2PAGE_BUF_NUM)) +#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + (DHD_SKB_4PAGE_BUF_NUM)) + +void *wlan_static_prot = NULL; +void *wlan_static_rxbuf = NULL; +void *wlan_static_databuf = NULL; +void *wlan_static_osl_buf = NULL; +void *wlan_static_scan_buf0 = NULL; +void *wlan_static_scan_buf1 = NULL; +void *wlan_static_dhd_info_buf = NULL; +void *wlan_static_dhd_wlfc_info_buf = NULL; +void *wlan_static_if_flow_lkup = NULL; +void *wlan_static_dhd_wlfc_hanger_buf = NULL; + +static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; + +void *dhd_wlan_mem_prealloc(int 
section, unsigned long size) +{ + printk("%s: sectoin %d, %ld\n", __FUNCTION__, section, size); + if (section == DHD_PREALLOC_PROT) + return wlan_static_prot; + + if (section == DHD_PREALLOC_RXBUF) + return wlan_static_rxbuf; + + if (section == DHD_PREALLOC_DATABUF) + return wlan_static_databuf; + + if (section == DHD_PREALLOC_SKB_BUF) + return wlan_static_skb; + + if (section == DHD_PREALLOC_WIPHY_ESCAN0) + return wlan_static_scan_buf0; + + if (section == DHD_PREALLOC_WIPHY_ESCAN1) + return wlan_static_scan_buf1; + + if (section == DHD_PREALLOC_OSL_BUF) { + if (size > DHD_PREALLOC_OSL_BUF_SIZE) { + pr_err("request OSL_BUF(%lu) is bigger than static size(%ld).\n", + size, DHD_PREALLOC_OSL_BUF_SIZE); + return NULL; + } + return wlan_static_osl_buf; + } + + if (section == DHD_PREALLOC_DHD_INFO) { + if (size > DHD_PREALLOC_DHD_INFO_SIZE) { + pr_err("request DHD_INFO size(%lu) is bigger than static size(%d).\n", + size, DHD_PREALLOC_DHD_INFO_SIZE); + return NULL; + } + return wlan_static_dhd_info_buf; + } + if (section == DHD_PREALLOC_DHD_WLFC_INFO) { + if (size > WLAN_DHD_WLFC_BUF_SIZE) { + pr_err("request DHD_WLFC_INFO size(%lu) is bigger than static size(%d).\n", + size, WLAN_DHD_WLFC_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_info_buf; + } + if (section == DHD_PREALLOC_IF_FLOW_LKUP) { + if (size > DHD_PREALLOC_IF_FLOW_LKUP_SIZE) { + pr_err("request DHD_IF_FLOW_LKUP size(%lu) is bigger than static size(%d).\n", + size, DHD_PREALLOC_IF_FLOW_LKUP_SIZE); + return NULL; + } + + return wlan_static_if_flow_lkup; + } + if (section == DHD_PREALLOC_DHD_WLFC_HANGER) { + if (size > DHD_PREALLOC_DHD_WLFC_HANGER_SIZE) { + pr_err("request DHD_WLFC_HANGER size(%lu) is bigger than static size(%d).\n", + size, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_hanger_buf; + } + if ((section < 0) || (section > DHD_PREALLOC_MAX)) + pr_err("request section id(%d) is out of max index %d\n", + section, DHD_PREALLOC_MAX); + + pr_err("%s: 
failed to alloc section %d, size=%ld\n", __FUNCTION__, section, size); + + return NULL; +} +EXPORT_SYMBOL(dhd_wlan_mem_prealloc); + +static int dhd_init_wlan_mem(void) +{ + int i; + int j; + + for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) { + wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE); + if (!wlan_static_skb[i]) { + goto err_skb_alloc; + } + printk("%s: sectoin %d skb[%d], size=%ld\n", __FUNCTION__, DHD_PREALLOC_SKB_BUF, i, DHD_SKB_1PAGE_BUFSIZE); + } + + for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) { + wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE); + if (!wlan_static_skb[i]) { + goto err_skb_alloc; + } + printk("%s: sectoin %d skb[%d], size=%ld\n", __FUNCTION__, DHD_PREALLOC_SKB_BUF, i, DHD_SKB_2PAGE_BUFSIZE); + } + +#if !defined(CONFIG_BCMDHD_PCIE) + wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE); + if (!wlan_static_skb[i]) { + goto err_skb_alloc; + } +#endif /* !CONFIG_BCMDHD_PCIE */ + + wlan_static_prot = kmalloc(DHD_PREALLOC_PROT_SIZE, GFP_KERNEL); + if (!wlan_static_prot) { + pr_err("Failed to alloc wlan_static_prot\n"); + goto err_mem_alloc; + } + printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_PROT, DHD_PREALLOC_PROT_SIZE); + +#if defined(CONFIG_BCMDHD_SDIO) + wlan_static_rxbuf = kmalloc(DHD_PREALLOC_RXBUF_SIZE, GFP_KERNEL); + if (!wlan_static_rxbuf) { + pr_err("Failed to alloc wlan_static_rxbuf\n"); + goto err_mem_alloc; + } + printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_RXBUF, DHD_PREALLOC_RXBUF_SIZE); + + wlan_static_databuf = kmalloc(DHD_PREALLOC_DATABUF_SIZE, GFP_KERNEL); + if (!wlan_static_databuf) { + pr_err("Failed to alloc wlan_static_databuf\n"); + goto err_mem_alloc; + } + printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_DATABUF, DHD_PREALLOC_DATABUF_SIZE); +#endif + + wlan_static_osl_buf = kmalloc(DHD_PREALLOC_OSL_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_osl_buf) { + pr_err("Failed to alloc wlan_static_osl_buf\n"); + goto err_mem_alloc; + } 
+ printk("%s: sectoin %d, size=%ld\n", __FUNCTION__, DHD_PREALLOC_OSL_BUF, DHD_PREALLOC_OSL_BUF_SIZE); + + wlan_static_scan_buf0 = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL); + if (!wlan_static_scan_buf0) { + pr_err("Failed to alloc wlan_static_scan_buf0\n"); + goto err_mem_alloc; + } + printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_WIPHY_ESCAN0, DHD_PREALLOC_WIPHY_ESCAN0_SIZE); + + wlan_static_dhd_info_buf = kmalloc(DHD_PREALLOC_DHD_INFO_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_info_buf) { + pr_err("Failed to alloc wlan_static_dhd_info_buf\n"); + goto err_mem_alloc; + } + printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_DHD_INFO, DHD_PREALLOC_DHD_INFO_SIZE); + + wlan_static_dhd_wlfc_info_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_wlfc_info_buf) { + pr_err("Failed to alloc wlan_static_dhd_wlfc_info_buf\n"); + goto err_mem_alloc; + } + printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_DHD_WLFC_INFO, WLAN_DHD_WLFC_BUF_SIZE); + + wlan_static_dhd_wlfc_hanger_buf = kmalloc(DHD_PREALLOC_DHD_WLFC_HANGER_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_wlfc_hanger_buf) { + pr_err("Failed to alloc wlan_static_dhd_wlfc_hanger_buf\n"); + goto err_mem_alloc; + } + printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_DHD_WLFC_HANGER, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE); + +#ifdef CONFIG_BCMDHD_PCIE + wlan_static_if_flow_lkup = kmalloc(DHD_PREALLOC_IF_FLOW_LKUP_SIZE, GFP_KERNEL); + if (!wlan_static_if_flow_lkup) { + pr_err("Failed to alloc wlan_static_if_flow_lkup\n"); + goto err_mem_alloc; + } +#endif /* CONFIG_BCMDHD_PCIE */ + + return 0; + +err_mem_alloc: + + if (wlan_static_prot) + kfree(wlan_static_prot); + +#if defined(CONFIG_BCMDHD_SDIO) + if (wlan_static_rxbuf) + kfree(wlan_static_rxbuf); + + if (wlan_static_databuf) + kfree(wlan_static_databuf); +#endif + + if (wlan_static_dhd_info_buf) + kfree(wlan_static_dhd_info_buf); + + if (wlan_static_dhd_wlfc_info_buf) + 
kfree(wlan_static_dhd_wlfc_info_buf); + + if (wlan_static_dhd_wlfc_hanger_buf) + kfree(wlan_static_dhd_wlfc_hanger_buf); + + if (wlan_static_scan_buf1) + kfree(wlan_static_scan_buf1); + + if (wlan_static_scan_buf0) + kfree(wlan_static_scan_buf0); + + if (wlan_static_osl_buf) + kfree(wlan_static_osl_buf); + +#ifdef CONFIG_BCMDHD_PCIE + if (wlan_static_if_flow_lkup) + kfree(wlan_static_if_flow_lkup); +#endif + pr_err("Failed to mem_alloc for WLAN\n"); + + i = WLAN_SKB_BUF_NUM; + +err_skb_alloc: + pr_err("Failed to skb_alloc for WLAN\n"); + for (j = 0; j < i; j++) { + dev_kfree_skb(wlan_static_skb[j]); + } + + return -ENOMEM; +} + +static int __init +dhd_static_buf_init(void) +{ + printk(KERN_ERR "%s()\n", __FUNCTION__); + + dhd_init_wlan_mem(); + + return 0; +} + +static void __exit +dhd_static_buf_exit(void) +{ + int i; + + printk(KERN_ERR "%s()\n", __FUNCTION__); + + for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) { + if (wlan_static_skb[i]) + dev_kfree_skb(wlan_static_skb[i]); + } + + for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) { + if (wlan_static_skb[i]) + dev_kfree_skb(wlan_static_skb[i]); + } + +#if !defined(CONFIG_BCMDHD_PCIE) + if (wlan_static_skb[i]) + dev_kfree_skb(wlan_static_skb[i]); +#endif /* !CONFIG_BCMDHD_PCIE */ + + if (wlan_static_prot) + kfree(wlan_static_prot); + +#if defined(CONFIG_BCMDHD_SDIO) + if (wlan_static_rxbuf) + kfree(wlan_static_rxbuf); + + if (wlan_static_databuf) + kfree(wlan_static_databuf); +#endif + + if (wlan_static_osl_buf) + kfree(wlan_static_osl_buf); + + if (wlan_static_scan_buf0) + kfree(wlan_static_scan_buf0); + + if (wlan_static_dhd_info_buf) + kfree(wlan_static_dhd_info_buf); + + if (wlan_static_dhd_wlfc_info_buf) + kfree(wlan_static_dhd_wlfc_info_buf); + + if (wlan_static_dhd_wlfc_hanger_buf) + kfree(wlan_static_dhd_wlfc_hanger_buf); + + if (wlan_static_scan_buf1) + kfree(wlan_static_scan_buf1); + +#ifdef CONFIG_BCMDHD_PCIE + if (wlan_static_if_flow_lkup) + kfree(wlan_static_if_flow_lkup); +#endif + 
return; +} + +module_init(dhd_static_buf_init); + +module_exit(dhd_static_buf_exit); diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.c b/drivers/amlogic/wifi/bcmdhd/dhd_wlfc.c similarity index 78% rename from drivers/net/wireless/bcmdhd/dhd_wlfc.c rename to drivers/amlogic/wifi/bcmdhd/dhd_wlfc.c index a04cd53801bb7..81cf51fcc4ee8 100644 --- a/drivers/net/wireless/bcmdhd/dhd_wlfc.c +++ b/drivers/amlogic/wifi/bcmdhd/dhd_wlfc.c @@ -1,12 +1,34 @@ /* * DHD PROP_TXSTATUS Module. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd_wlfc.c 501046 2014-09-06 01:25:16Z $ + * + * <> + * + * $Id: dhd_wlfc.c 579277 2015-08-14 04:49:50Z $ * */ + #include #include @@ -17,13 +39,18 @@ #include #include + #include +#include -#ifdef PROP_TXSTATUS +#ifdef PROP_TXSTATUS /* a form of flow control between host and dongle */ #include #include #endif + +#ifdef DHDTCPACK_SUPPRESS #include +#endif /* DHDTCPACK_SUPPRESS */ /* @@ -35,18 +62,30 @@ * */ +#if defined(DHD_WLFC_THREAD) +#define WLFC_THREAD_QUICK_RETRY_WAIT_MS 10 /* 10 msec */ +#define WLFC_THREAD_RETRY_WAIT_MS 10000 /* 10 sec */ +#endif /* defined (DHD_WLFC_THREAD) */ + #ifdef PROP_TXSTATUS -#ifdef QMONITOR -#define DHD_WLFC_QMON_COMPLETE(entry) dhd_qmon_txcomplete(&entry->qmon) -#else #define DHD_WLFC_QMON_COMPLETE(entry) -#endif /* QMONITOR */ #define LIMIT_BORROW +/** reordering related */ + +#if defined(DHD_WLFC_THREAD) +static void +_dhd_wlfc_thread_wakeup(dhd_pub_t *dhdp) +{ + dhdp->wlfc_thread_go = TRUE; + wake_up_interruptible(&dhdp->wlfc_wqhead); +} +#endif /* DHD_WLFC_THREAD */ + static uint16 _dhd_wlfc_adjusted_seq(void* p, uint8 current_seq) { @@ -65,6 +104,16 @@ _dhd_wlfc_adjusted_seq(void* p, uint8 current_seq) return seq; } +/** + * Enqueue a caller supplied packet on a caller supplied precedence queue, optionally reorder + * suppressed packets. + * @param[in] pq caller supplied packet queue to enqueue the packet on + * @param[in] prec precedence of the to-be-queued packet + * @param[in] p transmit packet to enqueue + * @param[in] qHead if TRUE, enqueue to head instead of tail. Used to maintain d11 seq order. 
+ * @param[in] current_seq + * @param[in] reOrder reOrder on odd precedence (=suppress queue) + */ static void _dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead, uint8 current_seq, bool reOrder) @@ -76,9 +125,9 @@ _dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead, if (!p) return; - ASSERT(prec >= 0 && prec < pq->num_prec); - ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ + /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */ + ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p))); ASSERT(!pktq_full(pq)); ASSERT(!pktq_pfull(pq, prec)); @@ -144,15 +193,17 @@ _dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead, if (pq->hi_prec < prec) pq->hi_prec = (uint8)prec; -} +} /* _dhd_wlfc_prec_enque */ -/* Create a place to store all packet pointers submitted to the firmware until - a status comes back, suppress or otherwise. - - hang-er: noun, a contrivance on which things are hung, as a hook. -*/ +/** + * Create a place to store all packet pointers submitted to the firmware until a status comes back, + * suppress or otherwise. + * + * hang-er: noun, a contrivance on which things are hung, as a hook. 
+ */ +/** @deprecated soon */ static void* -_dhd_wlfc_hanger_create(osl_t *osh, int max_items) +_dhd_wlfc_hanger_create(dhd_pub_t *dhd, int max_items) { int i; wlfc_hanger_t* hanger; @@ -160,9 +211,10 @@ _dhd_wlfc_hanger_create(osl_t *osh, int max_items) /* allow only up to a specific size for now */ ASSERT(max_items == WLFC_HANGER_MAXITEMS); - if ((hanger = (wlfc_hanger_t*)MALLOC(osh, WLFC_HANGER_SIZE(max_items))) == NULL) + if ((hanger = (wlfc_hanger_t*)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_HANGER, + WLFC_HANGER_SIZE(max_items))) == NULL) { return NULL; - + } memset(hanger, 0, WLFC_HANGER_SIZE(max_items)); hanger->max_items = max_items; @@ -172,18 +224,20 @@ _dhd_wlfc_hanger_create(osl_t *osh, int max_items) return hanger; } +/** @deprecated soon */ static int -_dhd_wlfc_hanger_delete(osl_t *osh, void* hanger) +_dhd_wlfc_hanger_delete(dhd_pub_t *dhd, void* hanger) { wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; if (h) { - MFREE(osh, h, WLFC_HANGER_SIZE(h->max_items)); + DHD_OS_PREFREE(dhd, h, WLFC_HANGER_SIZE(h->max_items)); return BCME_OK; } return BCME_BADARG; } +/** @deprecated soon */ static uint16 _dhd_wlfc_hanger_get_free_slot(void* hanger) { @@ -209,6 +263,7 @@ _dhd_wlfc_hanger_get_free_slot(void* hanger) return WLFC_HANGER_MAXITEMS; } +/** @deprecated soon */ static int _dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen) { @@ -222,19 +277,23 @@ _dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen) return BCME_NOTFOUND; if (h) { - if ((h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) || - (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) { + if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) { *gen = h->items[slot_id].gen; } else { + DHD_ERROR(("Error: %s():%d item not used\n", + __FUNCTION__, __LINE__)); rc = BCME_NOTFOUND; } - } - else + + } else { rc = BCME_BADARG; + } + return rc; } +/** @deprecated soon */ static int _dhd_wlfc_hanger_pushpkt(void* hanger, void* 
pkt, uint32 slot_id) { @@ -248,23 +307,26 @@ _dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id) h->items[slot_id].pkt_state = 0; h->items[slot_id].pkt_txstatus = 0; h->pushed++; - } - else { + } else { h->failed_to_push++; rc = BCME_NOTFOUND; } - } - else + } else { rc = BCME_BADARG; + } + return rc; } +/** @deprecated soon */ static int _dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, bool remove_from_hanger) { int rc = BCME_OK; wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + *pktout = NULL; + /* this packet was not pushed at the time it went to the firmware */ if (slot_id == WLFC_HANGER_MAXITEMS) return BCME_NOTFOUND; @@ -280,17 +342,18 @@ _dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, bool remove h->items[slot_id].identifier = 0; h->popped++; } - } - else { + } else { h->failed_to_pop++; rc = BCME_NOTFOUND; } - } - else + } else { rc = BCME_BADARG; + } + return rc; } +/** @deprecated soon */ static int _dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen) { @@ -304,17 +367,18 @@ _dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen) h->items[slot_id].gen = gen; if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) { h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED; - } - else + } else { rc = BCME_BADARG; - } - else + } + } else { rc = BCME_BADARG; + } return rc; } -/* remove reference of specific packet in hanger */ +/** remove reference of specific packet in hanger */ +/** @deprecated soon */ static bool _dhd_wlfc_hanger_remove_reference(wlfc_hanger_t* h, void* pkt) { @@ -324,23 +388,25 @@ _dhd_wlfc_hanger_remove_reference(wlfc_hanger_t* h, void* pkt) return FALSE; } - for (i = 0; i < h->max_items; i++) { - if (pkt == h->items[i].pkt) { - if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) || - (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) { - h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE; - h->items[i].pkt = NULL; - 
h->items[i].gen = 0xff; - h->items[i].identifier = 0; - } + i = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(pkt))); + + if ((i < h->max_items) && (pkt == h->items[i].pkt)) { + if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) { + h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE; + h->items[i].pkt = NULL; + h->items[i].gen = 0xff; + h->items[i].identifier = 0; return TRUE; + } else { + DHD_ERROR(("Error: %s():%d item not suppressed\n", + __FUNCTION__, __LINE__)); } } return FALSE; } - +/** afq = At Firmware Queue, queue containing packets pending in the dongle */ static int _dhd_wlfc_enque_afq(athost_wl_status_info_t* ctx, void *p) { @@ -360,6 +426,7 @@ _dhd_wlfc_enque_afq(athost_wl_status_info_t* ctx, void *p) return BCME_OK; } +/** afq = At Firmware Queue, queue containing packets pending in the dongle */ static int _dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint8 prec, void **pktout) @@ -413,6 +480,8 @@ _dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint return BCME_ERROR; } + bcm_pkt_validate_chk(p); + if (!b) { /* head packet is matched */ if ((q->head = PKTLINK(p)) == NULL) { @@ -439,8 +508,22 @@ _dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint } return BCME_OK; -} +} /* _dhd_wlfc_deque_afq */ +/** + * Flow control information piggy backs on packets, in the form of one or more TLVs. This function + * pushes one or more TLVs onto a packet that is going to be sent towards the dongle. + * + * @param[in] ctx + * @param[in/out] packet + * @param[in] tim_signal TRUE if parameter 'tim_bmp' is valid + * @param[in] tim_bmp + * @param[in] mac_handle + * @param[in] htodtag + * @param[in] htodseq d11 seqno for seqno reuse, only used if 'seq reuse' was agreed upon + * earlier between host and firmware. 
+ * @param[in] skip_wlfc_hdr + */ static int _dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void** packet, bool tim_signal, uint8 tim_bmp, uint8 mac_handle, uint32 htodtag, uint16 htodseq, bool skip_wlfc_hdr) @@ -478,7 +561,7 @@ _dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void** packet, bool tim_signa wlh[TLV_TAG_OFF] = WLFC_CTL_TYPE_PKTTAG; wlh[TLV_LEN_OFF] = WLFC_CTL_VALUE_LEN_PKTTAG; - memcpy(&wlh[TLV_HDR_LEN], &wl_pktinfo, sizeof(uint32)); + memcpy(&wlh[TLV_HDR_LEN] /* dst */, &wl_pktinfo, sizeof(uint32)); if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { uint16 wl_seqinfo = htol16(htodseq); @@ -499,7 +582,6 @@ _dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void** packet, bool tim_signa memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers); push_bdc_hdr: - PKTPUSH(ctx->osh, p, BDC_HEADER_LEN); h = (struct bdc_header *)PKTDATA(ctx->osh, p); h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT); @@ -513,8 +595,12 @@ _dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void** packet, bool tim_signa BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p))); *packet = p; return BCME_OK; -} +} /* _dhd_wlfc_pushheader */ +/** + * Removes (PULLs) flow control related headers from the caller supplied packet, is invoked eg + * when a packet is about to be freed. + */ static int _dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf) { @@ -541,6 +627,9 @@ _dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf) return BCME_OK; } +/** + * @param[in/out] p packet + */ static wlfc_mac_descriptor_t* _dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p) { @@ -559,7 +648,8 @@ _dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p) * STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations * have their own entry. 
*/ - if ((DHD_IF_ROLE_STA(iftype) || ETHER_ISMULTI(dstn)) && + if ((iftype == WLC_E_IF_ROLE_STA || ETHER_ISMULTI(dstn) || + iftype == WLC_E_IF_ROLE_P2P_CLIENT) && (ctx->destination_entries.interfaces[ifid].occupied)) { entry = &ctx->destination_entries.interfaces[ifid]; } @@ -586,8 +676,16 @@ _dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p) DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry); return entry; -} - +} /* _dhd_wlfc_find_table_entry */ + +/** + * In case a packet must be dropped (because eg the queues are full), various tallies have to be + * be updated. Called from several other functions. + * @param[in] dhdp pointer to public DHD structure + * @param[in] prec precedence of the packet + * @param[in] p the packet to be dropped + * @param[in] bPktInQ TRUE if packet is part of a queue + */ static int _dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ) { @@ -634,6 +732,7 @@ _dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ) if (bPktInQ) { ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; ctx->pkt_cnt_per_ac[prec>>1]--; + ctx->pkt_cnt_in_psq--; } ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][DHD_PKTTAG_FIFO(PKTTAG(p))]--; @@ -644,8 +743,15 @@ _dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ) PKTFREE(ctx->osh, p, TRUE); return 0; -} - +} /* _dhd_wlfc_prec_drop */ + +/** + * Called when eg the host handed a new packet over to the driver, or when the dongle reported + * that a packet could currently not be transmitted (=suppressed). This function enqueues a transmit + * packet in the host driver to be (re)transmitted at a later opportunity. 
+ * @param[in] dhdp pointer to public DHD structure + * @param[in] qHead When TRUE, queue packet at head instead of tail, to preserve d11 sequence + */ static bool _dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int prec, bool qHead, uint8 current_seq) @@ -667,9 +773,9 @@ _dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int pr } /* Determine precedence from which to evict packet, if any */ - if (pktq_pfull(pq, prec)) + if (pktq_pfull(pq, prec)) { eprec = prec; - else if (pktq_full(pq)) { + } else if (pktq_full(pq)) { p = pktq_peek_tail(pq, &eprec); if (!p) { DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); @@ -698,23 +804,26 @@ _dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int pr WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)); ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(pkt))][prec>>1]++; ctx->pkt_cnt_per_ac[prec>>1]++; + ctx->pkt_cnt_in_psq++; return TRUE; -} - +} /* _dhd_wlfc_prec_enq_with_drop */ +/** + * Called during eg the 'committing' of a transmit packet from the OS layer to a lower layer, in + * the event that this 'commit' failed. + */ static int _dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx, void* p, ewlfc_packet_state_t pkt_type, uint32 hslot) { /* - put the packet back to the head of queue - - - suppressed packet goes back to suppress sub-queue - - pull out the header, if new or delayed packet - - Note: hslot is used only when header removal is done. - */ + * put the packet back to the head of queue + * - suppressed packet goes back to suppress sub-queue + * - pull out the header, if new or delayed packet + * + * Note: hslot is used only when header removal is done. 
+ */ wlfc_mac_descriptor_t* entry; int rc = BCME_OK; int prec, fifo_id; @@ -755,17 +864,19 @@ _dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx, DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); rc = BCME_ERROR; } + exit: if (rc != BCME_OK) { ctx->stats.rollback_failed++; _dhd_wlfc_prec_drop(ctx->dhdp, fifo_id, p, FALSE); - } - else + } else { ctx->stats.rollback++; + } return rc; -} +} /* _dhd_wlfc_rollback_packet_toq */ +/** Returns TRUE if host OS -> DHD flow control is allowed on the caller supplied interface */ static bool _dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid) { @@ -789,7 +900,7 @@ _dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid) uint32 curr_t = OSL_SYSUPTIME(); if (ctx->fc_defer_timestamp == 0) { - /* first signle ac scenario */ + /* first single ac scenario */ ctx->fc_defer_timestamp = curr_t; return FALSE; } @@ -807,8 +918,12 @@ _dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid) } return ctx->allow_fc; -} +} /* _dhd_wlfc_allow_fc */ +/** + * Starts or stops the flow of transmit packets from the host OS towards the DHD, depending on + * low/high watermarks. + */ static void _dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id) { @@ -855,7 +970,7 @@ _dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint } return; -} +} /* _dhd_wlfc_flow_control_check */ static int _dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, @@ -892,16 +1007,25 @@ _dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descript _dhd_wlfc_pullheader(ctx, p); PKTFREE(ctx->osh, p, TRUE); } - } - else { + } else { DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n", __FUNCTION__, dummylen)); rc = BCME_NOMEM; + dhdp->tx_pktgetfail++; } + return rc; -} +} /* _dhd_wlfc_send_signalonly_packet */ -/* Return TRUE if traffic availability changed */ +/** + * Called on eg receiving 'mac close' indication from dongle. 
Updates the per-MAC administration + * maintained in caller supplied parameter 'entry'. + * + * @param[in/out] entry administration about a remote MAC entity + * @param[in] prec precedence queue for this remote MAC entitity + * + * Return value: TRUE if traffic availability changed + */ static bool _dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, int prec) @@ -911,14 +1035,14 @@ _dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descripto if (entry->state == WLFC_STATE_CLOSE) { if ((pktq_plen(&entry->psq, (prec << 1)) == 0) && (pktq_plen(&entry->psq, ((prec << 1) + 1)) == 0)) { - + /* no packets in both 'normal' and 'suspended' queues */ if (entry->traffic_pending_bmp & NBITVAL(prec)) { rc = TRUE; entry->traffic_pending_bmp = entry->traffic_pending_bmp & ~ NBITVAL(prec); } - } - else { + } else { + /* packets are queued in host for transmission to dongle */ if (!(entry->traffic_pending_bmp & NBITVAL(prec))) { rc = TRUE; entry->traffic_pending_bmp = @@ -926,6 +1050,7 @@ _dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descripto } } } + if (rc) { /* request a TIM update to firmware at the next piggyback opportunity */ if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) { @@ -933,14 +1058,18 @@ _dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descripto _dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp); entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; entry->send_tim_signal = 0; - } - else { + } else { rc = FALSE; } } + return rc; -} +} /* _dhd_wlfc_traffic_pending_check */ +/** + * Called on receiving a 'd11 suppressed' or 'wl suppressed' tx status from the firmware. Enqueues + * the packet to transmit to firmware again at a later opportunity. 
+ */ static int _dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p) { @@ -971,6 +1100,16 @@ _dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p) return BCME_OK; } +/** + * Called when a transmit packet is about to be 'committed' from the OS layer to a lower layer + * towards the dongle (eg the DBUS layer). Updates wlfc administration. May modify packet. + * + * @param[in/out] ctx driver specific flow control administration + * @param[in/out] entry The remote MAC entity for which the packet is destined. + * @param[in/out] packet Packet to send. This function optionally adds TLVs to the packet. + * @param[in] header_needed True if packet is 'new' to flow control + * @param[out] slot Handle to container in which the packet was 'parked' + */ static int _dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, void** packet, int header_needed, uint32* slot) @@ -980,7 +1119,7 @@ _dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx, bool send_tim_update = FALSE; uint32 htod = 0; uint16 htodseq = 0; - uint8 free_ctr, flags = 0; + uint8 free_ctr; int gen = 0xff; dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; void * p = *packet; @@ -997,6 +1136,7 @@ _dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx, } if (entry->send_tim_signal) { + /* sends a traffic indication bitmap to the dongle */ send_tim_update = TRUE; entry->send_tim_signal = 0; entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; @@ -1035,46 +1175,62 @@ _dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx, return BCME_ERROR; } - flags = WLFC_PKTFLAG_PKTFROMHOST; + WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr); + WL_TXSTATUS_SET_HSLOT(htod, hslot); + WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p))); + WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST); + WL_TXSTATUS_SET_GENERATION(htod, gen); + DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1); + if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) { /* Indicate that this packet is being sent in 
response to an explicit request from the firmware side. */ - flags |= WLFC_PKTFLAG_PKT_REQUESTED; - } - if (pkt_is_dhcp(ctx->osh, p)) { - flags |= WLFC_PKTFLAG_PKT_FORCELOWRATE; + WLFC_PKTFLAG_SET_PKTREQUESTED(htod); + } else { + WLFC_PKTFLAG_CLR_PKTREQUESTED(htod); } - WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr); - WL_TXSTATUS_SET_HSLOT(htod, hslot); - WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p))); - WL_TXSTATUS_SET_FLAGS(htod, flags); - WL_TXSTATUS_SET_GENERATION(htod, gen); - DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1); - rc = _dhd_wlfc_pushheader(ctx, &p, send_tim_update, entry->traffic_lastreported_bmp, entry->mac_handle, htod, htodseq, FALSE); if (rc == BCME_OK) { DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod); - if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && header_needed) { - /* - a new header was created for this packet. - push to hanger slot and scrub q. Since bus - send succeeded, increment seq number as well. - */ - rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot); - if (rc == BCME_OK) { + if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + wlfc_hanger_t *h = (wlfc_hanger_t*)(ctx->hanger); + if (header_needed) { + /* + a new header was created for this packet. + push to hanger slot and scrub q. Since bus + send succeeded, increment seq number as well. 
+ */ + rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot); + if (rc == BCME_OK) { #ifdef PROP_TXSTATUS_DEBUG - ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].push_time = - OSL_SYSUPTIME(); + h->items[hslot].push_time = + OSL_SYSUPTIME(); #endif + } else { + DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n", + __FUNCTION__, rc)); + } } else { - DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n", - __FUNCTION__, rc)); + /* clear hanger state */ + if (((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt != p) + DHD_ERROR(("%s() pkt not match: cur %p, hanger pkt %p\n", + __FUNCTION__, p, h->items[hslot].pkt)); + ASSERT(h->items[hslot].pkt == p); + bcm_object_feature_set(h->items[hslot].pkt, + BCM_OBJECT_FEATURE_PKT_STATE, 0); + h->items[hslot].pkt_state = 0; + h->items[hslot].pkt_txstatus = 0; + h->items[hslot].state = WLFC_HANGER_ITEM_STATE_INUSE; } + } else if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + /* clear hanger state */ + ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt_state = 0; + ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt_txstatus = 0; } if ((rc == BCME_OK) && header_needed) { @@ -1085,8 +1241,12 @@ _dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx, *slot = hslot; *packet = p; return rc; -} +} /* _dhd_wlfc_pretx_pktprocess */ +/** + * A remote wireless mac may be temporarily 'closed' due to power management. Returns '1' if remote + * mac is in the 'open' state, otherwise '0'. + */ static int _dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, int prec) @@ -1095,6 +1255,7 @@ _dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx, ASSERT(&ctx->destination_entries.other == entry); return 1; } + if (ctx->destination_entries.interfaces[entry->interface_id].iftype == WLC_E_IF_ROLE_P2P_GO) { /* - destination interface is of type p2p GO. @@ -1108,6 +1269,7 @@ _dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx, return 0; } } + /* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. 
entry */ if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) && (entry->requested_packet == 0)) || @@ -1116,22 +1278,33 @@ _dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx, } return 1; -} - +} /* _dhd_wlfc_is_destination_open */ + +/** + * Dequeues a suppressed or delayed packet from a queue + * @param[in/out] ctx Driver specific flow control administration + * @param[in] prec Precedence of queue to dequeue from + * @param[out] ac_credit_spent Boolean, returns 0 or 1 + * @param[out] needs_hdr Boolean, returns 0 or 1 + * @param[out] entry_out The remote MAC for which the packet is destined + * @param[in] only_no_credit If TRUE, searches all entries instead of just the active ones + * + * Return value: the dequeued packet + */ static void* _dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec, uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out, bool only_no_credit) { - dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; wlfc_mac_descriptor_t* entry; int total_entries; void* p = NULL; int i; + uint8 credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ? 0 : 1; *entry_out = NULL; /* most cases a packet will count against FIFO credit */ - *ac_credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ? 
0 : 1; + *ac_credit_spent = credit_spent; /* search all entries, include nodes as well as interfaces */ if (only_no_credit) { @@ -1150,14 +1323,10 @@ _dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec, } ASSERT(entry); - if (entry->transit_count < 0) { - DHD_ERROR(("Error: %s():%d transit_count %d < 0\n", - __FUNCTION__, __LINE__, entry->transit_count)); - continue; - } if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) && (entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) && - !(WLFC_GET_REORDERSUPP(dhdp->wlfc_mode) && entry->suppressed)) { + (!entry->suppressed)) { + *ac_credit_spent = credit_spent; if (entry->state == WLFC_STATE_CLOSE) { *ac_credit_spent = 0; } @@ -1168,16 +1337,13 @@ _dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec, p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec)); *needs_hdr = 0; if (p == NULL) { - if (entry->suppressed == TRUE) { - /* skip this entry */ - continue; - } /* De-Q from delay Q */ p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec)); *needs_hdr = 1; } if (p != NULL) { + bcm_pkt_validate_chk(p); /* did the packet come from suppress sub-queue? */ if (entry->requested_credit > 0) { entry->requested_credit--; @@ -1192,20 +1358,22 @@ _dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec, *entry_out = entry; ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--; ctx->pkt_cnt_per_ac[prec]--; + ctx->pkt_cnt_in_psq--; _dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p))); /* - A packet has been picked up, update traffic - availability bitmap, if applicable - */ + * A packet has been picked up, update traffic availability bitmap, + * if applicable. 
+ */ _dhd_wlfc_traffic_pending_check(ctx, entry, prec); return p; } } } return NULL; -} +} /* _dhd_wlfc_deque_delayedq */ +/** Enqueues caller supplied packet on either a 'suppressed' or 'delayed' queue */ static int _dhd_wlfc_enque_delayq(athost_wl_status_info_t* ctx, void* pktbuf, int prec) { @@ -1231,20 +1399,15 @@ _dhd_wlfc_enque_delayq(athost_wl_status_info_t* ctx, void* pktbuf, int prec) return BCME_ERROR; } -#ifdef QMONITOR - dhd_qmon_tx(&entry->qmon); -#endif - /* - A packet has been pushed, update traffic availability bitmap, - if applicable - */ + /* A packet has been pushed, update traffic availability bitmap, if applicable */ _dhd_wlfc_traffic_pending_check(ctx, entry, prec); } return BCME_OK; -} +} /* _dhd_wlfc_enque_delayq */ +/** Returns TRUE if caller supplied packet is destined for caller supplied interface */ static bool _dhd_wlfc_ifpkt_fn(void* p, void *p_ifid) { if (!p || !p_ifid) @@ -1253,6 +1416,7 @@ static bool _dhd_wlfc_ifpkt_fn(void* p, void *p_ifid) return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (*((uint8 *)p_ifid) == DHD_PKTTAG_IF(PKTTAG(p)))); } +/** Returns TRUE if caller supplied packet is destined for caller supplied remote MAC */ static bool _dhd_wlfc_entrypkt_fn(void* p, void *entry) { if (!p || !entry) @@ -1265,6 +1429,7 @@ static void _dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt) { dhd_pub_t *dhdp; + bool credit_return = FALSE; if (!wlfc || !pkt) { return; @@ -1276,6 +1441,8 @@ _dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt) int lender, credit_returned = 0; uint8 fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt)); + credit_return = TRUE; + /* Note that borrower is fifo_id */ /* Return credits to highest priority lender first */ for (lender = AC_COUNT; lender >= 0; lender--) { @@ -1291,8 +1458,16 @@ _dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt) wlfc->FIFO_credit[fifo_id]++; } } + + BCM_REFERENCE(credit_return); +#if defined(DHD_WLFC_THREAD) + if (credit_return) { + 
_dhd_wlfc_thread_wakeup(dhdp); + } +#endif /* defined(DHD_WLFC_THREAD) */ } +/** Removes and frees a packet from the hanger. Called during eg tx complete. */ static void _dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 pkt_state, int pkt_txstatus) @@ -1311,15 +1486,13 @@ _dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 p return; item = &hanger->items[slot_id]; - item->pkt_state |= pkt_state; - if (pkt_txstatus != -1) { - item->pkt_txstatus = pkt_txstatus; - } if (item->pkt) { - if ((item->pkt_state & WLFC_HANGER_PKT_STATE_TXCOMPLETE) && - (item->pkt_state & (WLFC_HANGER_PKT_STATE_TXSTATUS | - WLFC_HANGER_PKT_STATE_CLEANUP))) { + item->pkt_state |= pkt_state; + if (pkt_txstatus != -1) + item->pkt_txstatus = (uint8)pkt_txstatus; + bcm_object_feature_set(item->pkt, BCM_OBJECT_FEATURE_PKT_STATE, item->pkt_state); + if (item->pkt_state == WLFC_HANGER_PKT_STATE_COMPLETE) { void *p = NULL; void *pkt = item->pkt; uint8 old_state = item->state; @@ -1327,24 +1500,13 @@ _dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 p BCM_REFERENCE(ret); BCM_REFERENCE(pkt); ASSERT((ret == BCME_OK) && p && (pkt == p)); - - /* free packet */ - if (!(item->pkt_state & WLFC_HANGER_PKT_STATE_TXSTATUS)) { - /* cleanup case */ - wlfc_mac_descriptor_t *entry = _dhd_wlfc_find_table_entry(wlfc, p); - - ASSERT(entry); - entry->transit_count--; - if (entry->suppressed && - (--entry->suppr_transit_count == 0)) { - entry->suppressed = FALSE; - } - _dhd_wlfc_return_implied_credit(wlfc, p); - wlfc->stats.cleanup_fw_cnt++; - /* slot not freeable yet */ - item->state = old_state; + if (old_state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) { + printf("ERROR: free a suppressed pkt %p state %d pkt_state %d\n", + pkt, old_state, item->pkt_state); } + ASSERT(old_state != WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED); + /* free packet */ wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))] [DHD_PKTTAG_FIFO(PKTTAG(p))]--; 
wlfc->stats.pktout++; @@ -1352,16 +1514,15 @@ _dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 p PKTFREE(wlfc->osh, p, TRUE); } } else { - if (item->pkt_state & WLFC_HANGER_PKT_STATE_TXSTATUS) { - /* free slot */ - if (item->state == WLFC_HANGER_ITEM_STATE_FREE) - DHD_ERROR(("Error: %s():%d get multi TXSTATUS for one packet???\n", - __FUNCTION__, __LINE__)); - item->state = WLFC_HANGER_ITEM_STATE_FREE; - } + /* free slot */ + if (item->state == WLFC_HANGER_ITEM_STATE_FREE) + DHD_ERROR(("Error: %s():%d Multiple TXSTATUS or BUSRETURNED: %d (%d)\n", + __FUNCTION__, __LINE__, item->pkt_state, pkt_state)); + item->state = WLFC_HANGER_ITEM_STATE_FREE; } -} +} /* _dhd_wlfc_hanger_free_pkt */ +/** Called during eg detach() */ static void _dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq, bool dir, f_processpkt_t fn, void *arg, q_type_t q_type) @@ -1378,7 +1539,6 @@ _dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq, return; } - for (prec = 0; prec < pq->num_prec; prec++) { struct pktq_prec *q; void *p, *prev = NULL; @@ -1386,6 +1546,7 @@ _dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq, q = &pq->q[prec]; p = q->head; while (p) { + bcm_pkt_validate_chk(p); if (fn == NULL || (*fn)(p, arg)) { bool head = (p == q->head); if (head) @@ -1398,6 +1559,7 @@ _dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq, } ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; ctx->pkt_cnt_per_ac[prec>>1]--; + ctx->pkt_cnt_in_psq--; ctx->stats.cleanup_psq_cnt++; if (!(prec & 1)) { /* pkt in delayed q, so fake push BDC header for @@ -1418,10 +1580,14 @@ _dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq, } else if (q_type == Q_TYPE_AFQ) { wlfc_mac_descriptor_t* entry = _dhd_wlfc_find_table_entry(ctx, p); - entry->transit_count--; - if (entry->suppressed && - (--entry->suppr_transit_count == 0)) { - entry->suppressed = FALSE; + if (entry->transit_count) + entry->transit_count--; + if 
(entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; } _dhd_wlfc_return_implied_credit(ctx, p); ctx->stats.cleanup_fw_cnt++; @@ -1452,8 +1618,10 @@ _dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq, if (fn == NULL) ASSERT(pq->len == 0); -} +} /* _dhd_wlfc_pktq_flush */ + +/** !BCMDBUS specific function. Dequeues a packet from the caller supplied queue. */ static void* _dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void *arg) { @@ -1476,6 +1644,8 @@ _dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void * if (p == NULL) return NULL; + bcm_pkt_validate_chk(p); + if (prev == NULL) { if ((q->head = PKTLINK(p)) == NULL) { q->tail = NULL; @@ -1496,6 +1666,7 @@ _dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void * return p; } +/** !BCMDBUS specific function */ static void _dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) { @@ -1538,10 +1709,14 @@ _dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) DHD_ERROR(("%s: can't find pkt(%p) in hanger, free it anyway\n", __FUNCTION__, pkt)); } - entry->transit_count--; - if (entry->suppressed && - (--entry->suppr_transit_count == 0)) { - entry->suppressed = FALSE; + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; } _dhd_wlfc_return_implied_credit(wlfc, pkt); wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pkt))][DHD_PKTTAG_FIFO(PKTTAG(pkt))]--; @@ -1550,8 +1725,9 @@ _dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) dhd_txcomplete(dhd, pkt, FALSE); PKTFREE(wlfc->osh, pkt, TRUE); } -} +} /* _dhd_wlfc_cleanup_txq */ +/** called during eg detach */ void 
_dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) { @@ -1564,13 +1740,13 @@ _dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) wlfc->stats.cleanup_txq_cnt = 0; wlfc->stats.cleanup_psq_cnt = 0; wlfc->stats.cleanup_fw_cnt = 0; + /* - * flush sequence shoulde be txq -> psq -> hanger/afq, hanger has to be last one + * flush sequence should be txq -> psq -> hanger/afq, hanger has to be last one */ /* flush bus->txq */ _dhd_wlfc_cleanup_txq(dhd, fn, arg); - /* flush psq, search all entries, include nodes as well as interfaces */ total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t); table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries; @@ -1613,16 +1789,16 @@ _dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) || (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) { if (fn == NULL || (*fn)(h->items[i].pkt, arg)) { - _dhd_wlfc_hanger_free_pkt(wlfc, i, - WLFC_HANGER_PKT_STATE_CLEANUP, FALSE); + h->items[i].state = WLFC_HANGER_ITEM_STATE_FLUSHED; } } } } return; -} +} /* _dhd_wlfc_cleanup */ +/** Called after eg the dongle signalled a new remote MAC that it connected with to the DHD */ static int _dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, uint8 action, uint8 ifid, uint8 iftype, uint8* ea, @@ -1630,9 +1806,6 @@ _dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* { int rc = BCME_OK; -#ifdef QMONITOR - dhd_qmon_reset(&entry->qmon); -#endif if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || (action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) { entry->occupied = 1; @@ -1641,6 +1814,7 @@ _dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry->interface_id = ifid; entry->iftype = iftype; entry->ac_bitmap = 0xff; /* update this when handling APSD */ + /* for an interface entry we may not care about the MAC address */ if (ea != NULL) memcpy(&entry->ea[0], ea, 
ETHER_ADDR_LEN); @@ -1649,17 +1823,14 @@ _dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry->suppressed = FALSE; entry->transit_count = 0; entry->suppr_transit_count = 0; + entry->onbus_pkts_count = 0; } -#ifdef P2PONEINT - if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || - ((action == eWLFC_MAC_ENTRY_ACTION_UPDATE) && (entry->psq.num_prec == 0))) -#else - if (action == eWLFC_MAC_ENTRY_ACTION_ADD) -#endif - { + if (action == eWLFC_MAC_ENTRY_ACTION_ADD) { dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp); + pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN); + if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { pktq_init(&entry->afq, WLFC_AFQ_PREC_COUNT, WLFC_PSQ_LEN); } @@ -1671,7 +1842,6 @@ _dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* ctx->active_entry_head->prev->next = entry; ctx->active_entry_head->prev = entry; entry->next = ctx->active_entry_head; - } else { ASSERT(ctx->active_entry_count == 0); entry->prev = entry->next = entry; @@ -1714,9 +1884,12 @@ _dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* } } return rc; -} +} /* _dhd_wlfc_mac_entry_update */ + #ifdef LIMIT_BORROW + +/** LIMIT_BORROW specific function */ static int _dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, int highest_lender_ac, int borrower_ac, bool bBorrowAll) @@ -1748,6 +1921,7 @@ _dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, int highest_lender_ac, int return rc; } +/** LIMIT_BORROW specific function */ static int _dhd_wlfc_return_credit(athost_wl_status_info_t* ctx, int lender_ac, int borrower_ac) { if ((ctx == NULL) || (lender_ac < 0) || (lender_ac > AC_COUNT) || @@ -1763,8 +1937,13 @@ static int _dhd_wlfc_return_credit(athost_wl_status_info_t* ctx, int lender_ac, return BCME_OK; } + #endif /* LIMIT_BORROW */ +/** + * Called on an interface event (WLC_E_IF) indicated by firmware. 
+ * @param action : eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD + */ static int _dhd_wlfc_interface_entry_update(void* state, uint8 action, uint8 ifid, uint8 iftype, uint8* ea) @@ -1781,6 +1960,10 @@ _dhd_wlfc_interface_entry_update(void* state, _dhd_wlfc_ifpkt_fn, &ifid); } +/** + * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast + * specific) + */ static int _dhd_wlfc_BCMCCredit_support_update(void* state) { @@ -1790,6 +1973,7 @@ _dhd_wlfc_BCMCCredit_support_update(void* state) return BCME_OK; } +/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */ static int _dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits) { @@ -1822,6 +2006,16 @@ _dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits) return BCME_OK; } +/** + * Called during committing of a transmit packet from the OS DHD layer to the next layer towards + * the dongle (eg the DBUS layer). All transmit packets flow via this function to the next layer. 
+ * + * @param[in/out] ctx Driver specific flow control administration + * @param[in] ac Access Category (QoS) of called supplied packet + * @param[in] commit_info Contains eg the packet to send + * @param[in] fcommit Function pointer to transmit function of next software layer + * @param[in] commit_ctx Opaque context used when calling next layer + */ static int _dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac, dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx) @@ -1866,6 +2060,7 @@ _dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac, commit_info->mac_entry->suppr_transit_count++; } commit_info->mac_entry->transit_count++; + commit_info->mac_entry->onbus_pkts_count++; } else if (commit_info->needs_hdr) { if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { void *pout = NULL; @@ -1891,10 +2086,11 @@ _dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac, } return rc; -} +} /* _dhd_wlfc_handle_packet_commit */ +/** Returns remote MAC descriptor for caller supplied MAC address */ static uint8 -_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8* ea) +_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8 *ea) { wlfc_mac_descriptor_t* table = ((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes; @@ -1910,13 +2106,101 @@ _dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8* ea) return WLFC_MAC_DESC_ID_INVALID; } +/** + * Called when the host receives a WLFC_CTL_TYPE_TXSTATUS event from the dongle, indicating the + * status of a frame that the dongle attempted to transmit over the wireless medium. 
+ */ +static int +dhd_wlfc_suppressed_acked_update(dhd_pub_t *dhd, uint16 hslot, uint8 prec, uint8 hcnt) +{ + athost_wl_status_info_t* ctx; + wlfc_mac_descriptor_t* entry = NULL; + struct pktq *pq; + struct pktq_prec *q; + void *p, *b; + + if (!dhd) { + DHD_ERROR(("%s: dhd(%p)\n", __FUNCTION__, dhd)); + return BCME_BADARG; + } + ctx = (athost_wl_status_info_t*)dhd->wlfc_state; + if (!ctx) { + DHD_ERROR(("%s: ctx(%p)\n", __FUNCTION__, ctx)); + return BCME_ERROR; + } + + ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1)); + + if (hslot < WLFC_MAC_DESC_TABLE_SIZE) + entry = &ctx->destination_entries.nodes[hslot]; + else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM)) + entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE]; + else + entry = &ctx->destination_entries.other; + + pq = &entry->psq; + + ASSERT(((prec << 1) + 1) < pq->num_prec); + + q = &pq->q[((prec << 1) + 1)]; + + b = NULL; + p = q->head; + + while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))))) { + b = p; + p = PKTLINK(p); + } + + if (p == NULL) { + /* none is matched */ + if (b) { + DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt)); + } else { + DHD_ERROR(("%s: queue is empty\n", __FUNCTION__)); + } + + return BCME_ERROR; + } + + if (!b) { + /* head packet is matched */ + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + /* middle packet is matched */ + PKTSETLINK(b, PKTLINK(p)); + if (PKTLINK(p) == NULL) { + q->tail = b; + } + } + + q->len--; + pq->len--; + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--; + ctx->pkt_cnt_per_ac[prec]--; + + PKTSETLINK(p, NULL); + + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_enque_afq(ctx, p); + } else { + _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot); + } + + entry->transit_count++; + + return BCME_OK; +} + static int _dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, void** p_mac) { - uint8 status_flag; + uint8 
status_flag_ori, status_flag; uint32 status; int ret = BCME_OK; - int remove_from_hanger = 1; + int remove_from_hanger_ori, remove_from_hanger = 1; void* pktbuf = NULL; uint8 fifo_id = 0, gen = 0, count = 0, hcnt; uint16 hslot; @@ -1925,6 +2209,7 @@ _dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, uint16 seq = 0, seq_fromfw = 0, seq_num = 0; memcpy(&status, pkt_info, sizeof(uint32)); + status = ltoh32(status); status_flag = WL_TXSTATUS_GET_FLAGS(status); hcnt = WL_TXSTATUS_GET_FREERUNCTR(status); hslot = WL_TXSTATUS_GET_HSLOT(status); @@ -1933,6 +2218,7 @@ _dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { memcpy(&seq, pkt_info + WLFC_CTL_VALUE_LEN_TXSTATUS, WLFC_CTL_VALUE_LEN_SEQ); + seq = ltoh16(seq); seq_fromfw = WL_SEQ_GET_FROMFW(seq); seq_num = WL_SEQ_GET_NUM(seq); } @@ -1941,24 +2227,20 @@ _dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) { wlfc->stats.pkt_freed += len; - } - - else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) { + } else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) { wlfc->stats.pkt_freed += len; - } - - else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) { + } else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) { wlfc->stats.d11_suppress += len; remove_from_hanger = 0; - } - - else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) { + } else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) { wlfc->stats.wl_suppress += len; remove_from_hanger = 0; + } else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) { + wlfc->stats.wlc_tossed_pkts += len; } - else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) { - wlfc->stats.wlc_tossed_pkts += len; + else if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) { + wlfc->stats.pkt_freed += len; } if (dhd->proptxstatus_txstatus_ignore) { @@ -1968,15 +2250,29 @@ _dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, 
uint8* pkt_info, uint8 len, return BCME_OK; } + status_flag_ori = status_flag; + remove_from_hanger_ori = remove_from_hanger; + while (count < len) { + if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) { + dhd_wlfc_suppressed_acked_update(dhd, hslot, fifo_id, hcnt); + } if (WLFC_GET_AFQ(dhd->wlfc_mode)) { ret = _dhd_wlfc_deque_afq(wlfc, hslot, hcnt, fifo_id, &pktbuf); } else { + status_flag = status_flag_ori; + remove_from_hanger = remove_from_hanger_ori; ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, hslot, &pktbuf, FALSE); if (!pktbuf) { _dhd_wlfc_hanger_free_pkt(wlfc, hslot, WLFC_HANGER_PKT_STATE_TXSTATUS, -1); goto cont; + } else { + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + if (h->items[hslot].state == WLFC_HANGER_ITEM_STATE_FLUSHED) { + status_flag = WLFC_CTL_PKTFLAG_DISCARD; + remove_from_hanger = 1; + } } } @@ -1984,6 +2280,8 @@ _dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, goto cont; } + bcm_pkt_validate_chk(pktbuf); + /* set fifo_id to correct value because not all FW does that */ fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf)); @@ -2038,8 +2336,12 @@ _dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, if this packet did not count against FIFO credit, it must have taken a requested_credit from the destination entry (for pspoll etc.) 
*/ - if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf))) + if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf))) { entry->requested_credit++; +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* DHD_WLFC_THREAD */ + } #ifdef PROP_TXSTATUS_DEBUG entry->dstncredit_acks++; #endif @@ -2085,9 +2387,14 @@ _dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, } } /* pkt back from firmware side */ - entry->transit_count--; - if (entry->suppressed && (--entry->suppr_transit_count == 0)) { - entry->suppressed = FALSE; + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; } cont: @@ -2102,9 +2409,14 @@ _dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, count++; } + return BCME_OK; -} +} /* _dhd_wlfc_compressed_txstatus_update */ +/** + * Called when eg host receives a 'WLFC_CTL_TYPE_FIFO_CREDITBACK' event from the dongle. + * @param[in] credits caller supplied credit that will be added to the host credit. 
+ */ static int _dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits) { @@ -2129,8 +2441,7 @@ _dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits) wlfc->FIFO_credit[lender] += wlfc->credits_borrowed[i][lender]; wlfc->credits_borrowed[i][lender] = 0; - } - else { + } else { wlfc->credits_borrowed[i][lender] -= credits[i]; wlfc->FIFO_credit[lender] += credits[i]; credits[i] = 0; @@ -2149,9 +2460,15 @@ _dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits) } } +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* defined(DHD_WLFC_THREAD) */ + return BCME_OK; -} +} /* _dhd_wlfc_fifocreditback_indicate */ + +/** !BCMDBUS specific function */ static void _dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) { @@ -2185,11 +2502,20 @@ _dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) PKTSETLINK(pkt, NULL); entry = _dhd_wlfc_find_table_entry(wlfc, pkt); + if (entry) { + if (entry->onbus_pkts_count > 0) + entry->onbus_pkts_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; + } /* fake a suppression txstatus */ htod = DHD_PKTTAG_H2DTAG(PKTTAG(pkt)); WL_TXSTATUS_SET_FLAGS(htod, WLFC_CTL_PKTFLAG_WLSUPPRESS); WL_TXSTATUS_SET_GENERATION(htod, entry->generation); + htod = htol32(htod); memcpy(results, &htod, WLFC_CTL_VALUE_LEN_TXSTATUS); if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(pkt)); @@ -2197,6 +2523,7 @@ _dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) WL_SEQ_SET_FROMFW(htodseq, 1); WL_SEQ_SET_FROMDRV(htodseq, 0); } + htodseq = htol16(htodseq); memcpy(results + WLFC_CTL_VALUE_LEN_TXSTATUS, &htodseq, WLFC_CTL_VALUE_LEN_SEQ); } @@ -2215,8 +2542,7 @@ _dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) if (bCreditUpdate) { _dhd_wlfc_fifocreditback_indicate(dhd, credits); } -} - +} /* _dhd_wlfc_suppress_txq */ static int 
_dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value) @@ -2226,6 +2552,7 @@ _dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value) (void)dhd; bcopy(&value[2], ×tamp, sizeof(uint32)); + timestamp = ltoh32(timestamp); DHD_INFO(("RXPKT: SEQ: %d, timestamp %d\n", value[1], timestamp)); return BCME_OK; } @@ -2260,6 +2587,7 @@ _dhd_wlfc_add_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor } } +/** called on eg receiving 'mac open' event from the dongle. */ static void _dhd_wlfc_remove_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry) { @@ -2287,6 +2615,7 @@ _dhd_wlfc_remove_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descrip } } +/** called on eg receiving a WLFC_CTL_TYPE_MACDESC_ADD TLV from the dongle */ static int _dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type) { @@ -2349,15 +2678,16 @@ _dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type) } BCM_REFERENCE(rc); return BCME_OK; -} +} /* _dhd_wlfc_mac_table_update */ +/** Called on a 'mac open' or 'mac close' event indicated by the dongle */ static int _dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type) { /* Handle PS on/off indication */ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; wlfc_mac_descriptor_t* table; - wlfc_mac_descriptor_t* desc; + wlfc_mac_descriptor_t* desc; /* a table maps from mac handle to mac descriptor */ uint8 mac_handle = value[0]; int i; @@ -2371,24 +2701,22 @@ _dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type) desc->requested_credit = 0; desc->requested_packet = 0; _dhd_wlfc_remove_requested_entry(wlfc, desc); - } - else { + } else { desc->state = WLFC_STATE_CLOSE; DHD_WLFC_CTRINC_MAC_CLOSE(desc); - /* - Indicate to firmware if there is any traffic pending. - */ + /* Indicate to firmware if there is any traffic pending. 
*/ for (i = 0; i < AC_COUNT; i++) { _dhd_wlfc_traffic_pending_check(wlfc, desc, i); } } - } - else { + } else { wlfc->stats.psmode_update_failed++; } + return BCME_OK; -} +} /* _dhd_wlfc_psmode_update */ +/** called upon receiving 'interface open' or 'interface close' event from the dongle */ static int _dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type) { @@ -2403,8 +2731,7 @@ _dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type) if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) { table[if_id].state = WLFC_STATE_OPEN; /* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */ - } - else { + } else { table[if_id].state = WLFC_STATE_CLOSE; /* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */ } @@ -2416,6 +2743,7 @@ _dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type) return BCME_OK; } +/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_CREDIT TLV from the dongle */ static int _dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value) { @@ -2435,13 +2763,19 @@ _dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value) desc->ac_bitmap = value[2] & (~(1<stats.credit_request_failed++; } + return BCME_OK; } +/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_PACKET TLV from the dongle */ static int _dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value) { @@ -2461,13 +2795,19 @@ _dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value) desc->ac_bitmap = value[2] & (~(1<stats.packet_request_failed++; } + return BCME_OK; } +/** Called when host receives a WLFC_CTL_TYPE_HOST_REORDER_RXPKTS TLV from the dongle */ static void _dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len) { @@ -2475,9 +2815,9 @@ _dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *inf if (info_buf) { bcopy(val, info_buf, len); *info_len = len; - } - else + } else { *info_len = 0; + } } } @@ -2539,7 +2879,7 @@ int dhd_wlfc_enable(dhd_pub_t *dhd) wlfc->dhdp = dhd; if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { - wlfc->hanger = 
_dhd_wlfc_hanger_create(dhd->osh, WLFC_HANGER_MAXITEMS); + wlfc->hanger = _dhd_wlfc_hanger_create(dhd, WLFC_HANGER_MAXITEMS); if (wlfc->hanger == NULL) { DHD_OS_PREFREE(dhd, dhd->wlfc_state, sizeof(athost_wl_status_info_t)); @@ -2551,13 +2891,11 @@ int dhd_wlfc_enable(dhd_pub_t *dhd) dhd->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT; /* default to check rx pkt */ + dhd->wlfc_rxpkt_chk = TRUE; if (dhd->op_mode & DHD_FLAG_IBSS_MODE) { dhd->wlfc_rxpkt_chk = FALSE; - } else { - dhd->wlfc_rxpkt_chk = TRUE; } - /* initialize all interfaces to accept traffic */ for (i = 0; i < WLFC_MAX_IFNUM; i++) { wlfc->hostif_flow_state[i] = OFF; @@ -2572,72 +2910,66 @@ int dhd_wlfc_enable(dhd_pub_t *dhd) exit: + DHD_ERROR(("%s: ret=%d\n", __FUNCTION__, rc)); dhd_os_wlfc_unblock(dhd); return rc; -} +} /* dhd_wlfc_enable */ + #ifdef SUPPORT_P2P_GO_PS + +/** + * Called when the host platform enters a lower power mode, eg right before a system hibernate. + * SUPPORT_P2P_GO_PS specific function. + */ int dhd_wlfc_suspend(dhd_pub_t *dhd) { - - uint32 iovbuf[4]; /* Room for "tlv" + '\0' + parameter */ uint32 tlv = 0; DHD_TRACE(("%s: masking wlfc events\n", __FUNCTION__)); if (!dhd->wlfc_enabled) return -1; - bcm_mkiovar("tlv", NULL, 0, (char*)iovbuf, sizeof(iovbuf)); - if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) { - DHD_ERROR(("%s: failed to get bdcv2 tlv signaling\n", __FUNCTION__)); + if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0)) return -1; - } - tlv = iovbuf[0]; if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == 0) return 0; tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS); - bcm_mkiovar("tlv", (char *)&tlv, 4, (char*)iovbuf, sizeof(iovbuf)); - if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) { - DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n", - __FUNCTION__, tlv)); + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) return -1; - } 
return 0; } - int +/** + * Called when the host platform resumes from a power management operation, eg resume after a + * system hibernate. SUPPORT_P2P_GO_PS specific function. + */ +int dhd_wlfc_resume(dhd_pub_t *dhd) { - uint32 iovbuf[4]; /* Room for "tlv" + '\0' + parameter */ uint32 tlv = 0; DHD_TRACE(("%s: unmasking wlfc events\n", __FUNCTION__)); if (!dhd->wlfc_enabled) return -1; - bcm_mkiovar("tlv", NULL, 0, (char*)iovbuf, sizeof(iovbuf)); - if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) { - DHD_ERROR(("%s: failed to get bdcv2 tlv signaling\n", __FUNCTION__)); + if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0)) return -1; - } - tlv = iovbuf[0]; if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) return 0; tlv |= (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS); - bcm_mkiovar("tlv", (char *)&tlv, 4, (char*)iovbuf, sizeof(iovbuf)); - if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, (char*)iovbuf, sizeof(iovbuf), TRUE, 0) < 0) { - DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n", - __FUNCTION__, tlv)); + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) return -1; - } return 0; } + #endif /* SUPPORT_P2P_GO_PS */ +/** A flow control header was received from firmware, containing one or more TLVs */ int dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf, uint *reorder_info_len) @@ -2706,8 +3038,7 @@ dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar if (type == WLFC_CTL_TYPE_TXSTATUS) { _dhd_wlfc_compressed_txstatus_update(dhd, value, 1, &entry); - } - else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) { + } else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) { uint8 compcnt_offset = WLFC_CTL_VALUE_LEN_TXSTATUS; if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { @@ -2715,31 +3046,23 @@ dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int 
tlv_hdr_len, uchar } _dhd_wlfc_compressed_txstatus_update(dhd, value, value[compcnt_offset], &entry); - } - else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK) + } else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK) { _dhd_wlfc_fifocreditback_indicate(dhd, value); - - else if (type == WLFC_CTL_TYPE_RSSI) + } else if (type == WLFC_CTL_TYPE_RSSI) { _dhd_wlfc_rssi_indicate(dhd, value); - - else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT) + } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT) { _dhd_wlfc_credit_request(dhd, value); - - else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET) + } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET) { _dhd_wlfc_packet_request(dhd, value); - - else if ((type == WLFC_CTL_TYPE_MAC_OPEN) || - (type == WLFC_CTL_TYPE_MAC_CLOSE)) + } else if ((type == WLFC_CTL_TYPE_MAC_OPEN) || + (type == WLFC_CTL_TYPE_MAC_CLOSE)) { _dhd_wlfc_psmode_update(dhd, value, type); - - else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) || - (type == WLFC_CTL_TYPE_MACDESC_DEL)) + } else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) || + (type == WLFC_CTL_TYPE_MACDESC_DEL)) { _dhd_wlfc_mac_table_update(dhd, value, type); - - else if (type == WLFC_CTL_TYPE_TRANS_ID) + } else if (type == WLFC_CTL_TYPE_TRANS_ID) { _dhd_wlfc_dbg_senum_check(dhd, value); - - else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) || + } else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) || (type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) { _dhd_wlfc_interface_update(dhd, value, type); } @@ -2748,28 +3071,30 @@ dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar /* suppress all packets for this mac entry from bus->txq */ _dhd_wlfc_suppress_txq(dhd, _dhd_wlfc_entrypkt_fn, entry); } - } + } /* while */ + if (remainder != 0 && wlfc) { /* trouble..., something is not right */ wlfc->stats.tlv_parse_failed++; } - } + } /* if */ if (wlfc) wlfc->stats.dhd_hdrpulls++; dhd_os_wlfc_unblock(dhd); return BCME_OK; -} +} /* dhd_wlfc_parse_header_info */ -int -dhd_wlfc_commit_packets(dhd_pub_t *dhdp, 
f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf, - bool need_toggle_host_if) +KERNEL_THREAD_RETURN_TYPE +dhd_wlfc_transfer_packets(void *data) { + dhd_pub_t *dhdp = (dhd_pub_t *)data; int ac, single_ac = 0, rc = BCME_OK; dhd_wlfc_commit_info_t commit_info; athost_wl_status_info_t* ctx; int bus_retry_count = 0; + int pkt_send = 0; uint8 tx_map = 0; /* packets (send + in queue), Bitmask for 4 ACs + BC/MC */ uint8 rx_map = 0; /* received packets, Bitmask for 4 ACs + BC/MC */ @@ -2778,35 +3103,32 @@ dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx int lender; - if ((dhdp == NULL) || (fcommit == NULL)) { - DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); - return BCME_BADARG; - } - - dhd_os_wlfc_block(dhdp); - - if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { - if (pktbuf) { - DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0); +#if defined(DHD_WLFC_THREAD) + /* wait till someone wakeup me up, will change it at running time */ + int wait_msec = msecs_to_jiffies(0xFFFFFFFF); +#endif /* defined(DHD_WLFC_THREAD) */ + +#if defined(DHD_WLFC_THREAD) + while (1) { + bus_retry_count = 0; + pkt_send = 0; + tx_map = 0; + rx_map = 0; + packets_map = 0; + wait_msec = wait_event_interruptible_timeout(dhdp->wlfc_wqhead, + dhdp->wlfc_thread_go, wait_msec); + if (kthread_should_stop()) { + break; } - rc = WLFC_UNSUPPORTED; - goto exit2; - } - - ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + dhdp->wlfc_thread_go = FALSE; - - if (dhdp->proptxstatus_module_ignore) { - if (pktbuf) { - uint32 htod = 0; - WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST); - _dhd_wlfc_pushheader(ctx, &pktbuf, FALSE, 0, 0, htod, 0, FALSE); - if (fcommit(commit_ctx, pktbuf)) - PKTFREE(ctx->osh, pktbuf, TRUE); - rc = BCME_OK; - } - goto exit; - } + dhd_os_wlfc_block(dhdp); +#endif /* defined(DHD_WLFC_THREAD) */ + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; +#if defined(DHD_WLFC_THREAD) + if (!ctx) + goto exit; +#endif /* 
defined(DHD_WLFC_THREAD) */ memset(&commit_info, 0, sizeof(commit_info)); @@ -2823,19 +3145,6 @@ dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx low priority packet starvation. */ - if (pktbuf) { - DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 1); - ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf)); - /* en-queue the packets to respective queue. */ - rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac); - if (rc) { - _dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE); - } else { - ctx->stats.pktin++; - ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))][ac]++; - } - } - for (ac = AC_COUNT; ac >= 0; ac--) { if (dhdp->wlfc_rxpkt_chk) { /* check rx packet */ @@ -2850,6 +3159,7 @@ dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx if (ctx->pkt_cnt_per_ac[ac] == 0) { continue; } + tx_map |= (1 << ac); single_ac = ac + 1; while (FALSE == dhdp->proptxstatus_txoff) { @@ -2895,11 +3205,12 @@ dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx ASSERT(ctx->FIFO_credit[ac] >= commit_info.ac_fifo_credit_spent); } /* here we can ensure have credit or no credit needed */ - rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, fcommit, - commit_ctx); + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + ctx->fcommit, ctx->commit_ctx); /* Bus commits may fail (e.g. flow control); abort after retries */ if (rc == BCME_OK) { + pkt_send++; if (commit_info.ac_fifo_credit_spent && (lender == -1)) { ctx->FIFO_credit[ac]--; } @@ -3000,11 +3311,11 @@ dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx eWLFC_PKTTYPE_SUPPRESSED; rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, - fcommit, commit_ctx); + ctx->fcommit, ctx->commit_ctx); /* Bus commits may fail (e.g. 
flow control); abort after retries */ if (rc == BCME_OK) { - + pkt_send++; if (commit_info.ac_fifo_credit_spent) { #ifndef LIMIT_BORROW ctx->FIFO_credit[ac]--; @@ -3026,20 +3337,120 @@ dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx } } + BCM_REFERENCE(pkt_send); + exit: - if (need_toggle_host_if && ctx->toggle_host_if) { - ctx->toggle_host_if = 0; +#if defined(DHD_WLFC_THREAD) + dhd_os_wlfc_unblock(dhdp); + if (ctx && ctx->pkt_cnt_in_psq && pkt_send) { + wait_msec = msecs_to_jiffies(WLFC_THREAD_QUICK_RETRY_WAIT_MS); + } else { + wait_msec = msecs_to_jiffies(WLFC_THREAD_RETRY_WAIT_MS); + } + } + return 0; +#else + return rc; +#endif /* defined(DHD_WLFC_THREAD) */ +} + +/** + * Enqueues a transmit packet in the next layer towards the dongle, eg the DBUS layer. Called by + * eg dhd_sendpkt(). + * @param[in] dhdp Pointer to public DHD structure + * @param[in] fcommit Pointer to transmit function of next layer + * @param[in] commit_ctx Opaque context used when calling next layer + * @param[in] pktbuf Packet to send + * @param[in] need_toggle_host_if If TRUE, resets flag ctx->toggle_host_if + */ +int +dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf, + bool need_toggle_host_if) +{ + int rc = BCME_OK; + athost_wl_status_info_t* ctx; + +#if defined(DHD_WLFC_THREAD) + if (!pktbuf) + return BCME_OK; +#endif /* defined(DHD_WLFC_THREAD) */ + + if ((dhdp == NULL) || (fcommit == NULL)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + if (pktbuf) { + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0); + } + rc = WLFC_UNSUPPORTED; + goto exit; + } + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + + + if (dhdp->proptxstatus_module_ignore) { + if (pktbuf) { + uint32 htod = 0; + WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST); + _dhd_wlfc_pushheader(ctx, 
&pktbuf, FALSE, 0, 0, htod, 0, FALSE); + if (fcommit(commit_ctx, pktbuf)) { + /* free it if failed, otherwise do it in tx complete cb */ + PKTFREE(ctx->osh, pktbuf, TRUE); + } + rc = BCME_OK; + } + goto exit; + } + + if (pktbuf) { + int ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf)); + ASSERT(ac <= AC_COUNT); + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 1); + /* en-queue the packets to respective queue. */ + rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac); + if (rc) { + _dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE); + } else { + ctx->stats.pktin++; + ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))][ac]++; + } + } + + if (!ctx->fcommit) { + ctx->fcommit = fcommit; + } else { + ASSERT(ctx->fcommit == fcommit); + } + if (!ctx->commit_ctx) { + ctx->commit_ctx = commit_ctx; + } else { + ASSERT(ctx->commit_ctx == commit_ctx); } -exit2: +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhdp); +#else + dhd_wlfc_transfer_packets(dhdp); +#endif /* defined(DHD_WLFC_THREAD) */ + +exit: dhd_os_wlfc_unblock(dhdp); return rc; -} +} /* dhd_wlfc_commit_packets */ +/** + * Called when the (lower) DBUS layer indicates completion (succesfull or not) of a transmit packet + */ int dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success) { athost_wl_status_info_t* wlfc; + wlfc_mac_descriptor_t *entry; void* pout = NULL; int rtn = BCME_OK; if ((dhd == NULL) || (txp == NULL)) { @@ -3047,6 +3458,8 @@ dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success) return BCME_BADARG; } + bcm_pkt_validate_chk(txp); + dhd_os_wlfc_block(dhd); if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { @@ -3065,9 +3478,10 @@ dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success) goto EXIT; } - if (!success || dhd->proptxstatus_txstatus_ignore) { - wlfc_mac_descriptor_t *entry = _dhd_wlfc_find_table_entry(wlfc, txp); + entry = _dhd_wlfc_find_table_entry(wlfc, txp); + ASSERT(entry); + if (!success || dhd->proptxstatus_txstatus_ignore) { WLFC_DBGMESG(("At: %s():%d, 
bus_complete() failure for %p, htod_tag:0x%08x\n", __FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp)))); if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { @@ -3082,10 +3496,10 @@ dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success) /* return the credit, if necessary */ _dhd_wlfc_return_implied_credit(wlfc, txp); - entry->transit_count--; - if (entry->suppressed && (--entry->suppr_transit_count == 0)) { - entry->suppressed = FALSE; - } + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) + entry->suppr_transit_count--; wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(txp))][DHD_PKTTAG_FIFO(PKTTAG(txp))]--; wlfc->stats.pktout++; PKTFREE(wlfc->osh, txp, TRUE); @@ -3096,19 +3510,25 @@ dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success) } else { int hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(txp))); _dhd_wlfc_hanger_free_pkt(wlfc, hslot, - WLFC_HANGER_PKT_STATE_TXCOMPLETE, -1); + WLFC_HANGER_PKT_STATE_BUSRETURNED, -1); } } + ASSERT(entry->onbus_pkts_count > 0); + if (entry->onbus_pkts_count > 0) + entry->onbus_pkts_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; EXIT: dhd_os_wlfc_unblock(dhd); return rtn; -} +} /* dhd_wlfc_txcomplete */ int dhd_wlfc_init(dhd_pub_t *dhd) { - char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */ /* enable all signals & indicate host proptxstatus logic is active */ uint32 tlv, mode, fw_caps; int ret = 0; @@ -3140,11 +3560,7 @@ dhd_wlfc_init(dhd_pub_t *dhd) */ /* enable proptxtstatus signaling by default */ - bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf)); - if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) { - DHD_ERROR(("dhd_wlfc_init(): failed to enable/disable bdcv2 tlv signaling\n")); - } - else { + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { /* Leaving the message for now, it should be removed after a while; once the tlv situation 
is stable. @@ -3153,15 +3569,12 @@ dhd_wlfc_init(dhd_pub_t *dhd) dhd->wlfc_enabled?"enabled":"disabled", tlv)); } + mode = 0; + /* query caps */ - ret = bcm_mkiovar("wlfc_mode", (char *)&mode, 4, iovbuf, sizeof(iovbuf)); - if (ret > 0) { - ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0); - } + ret = dhd_wl_ioctl_get_intiovar(dhd, "wlfc_mode", &fw_caps, WLC_GET_VAR, FALSE, 0); - if (ret >= 0) { - fw_caps = *((uint32 *)iovbuf); - mode = 0; + if (!ret) { DHD_ERROR(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps)); if (WLFC_IS_OLD_DEF(fw_caps)) { @@ -3172,10 +3585,7 @@ dhd_wlfc_init(dhd_pub_t *dhd) WLFC_SET_REUSESEQ(mode, WLFC_GET_REUSESEQ(fw_caps)); WLFC_SET_REORDERSUPP(mode, WLFC_GET_REORDERSUPP(fw_caps)); } - ret = bcm_mkiovar("wlfc_mode", (char *)&mode, 4, iovbuf, sizeof(iovbuf)); - if (ret > 0) { - ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); - } + ret = dhd_wl_ioctl_set_intiovar(dhd, "wlfc_mode", mode, WLC_SET_VAR, TRUE, 0); } dhd_os_wlfc_block(dhd); @@ -3188,6 +3598,7 @@ dhd_wlfc_init(dhd_pub_t *dhd) dhd->wlfc_mode = mode; } } + DHD_ERROR(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret)); dhd_os_wlfc_unblock(dhd); @@ -3196,12 +3607,12 @@ dhd_wlfc_init(dhd_pub_t *dhd) dhd->plat_init((void *)dhd); return BCME_OK; -} +} /* dhd_wlfc_init */ +/** AMPDU host reorder specific function */ int dhd_wlfc_hostreorder_init(dhd_pub_t *dhd) { - char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */ /* enable only ampdu hostreorder here */ uint32 tlv; @@ -3215,12 +3626,10 @@ dhd_wlfc_hostreorder_init(dhd_pub_t *dhd) tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE; /* enable proptxtstatus signaling by default */ - bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf)); - if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) { + if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { DHD_ERROR(("%s(): failed to enable/disable bdcv2 tlv 
signaling\n", __FUNCTION__)); - } - else { + } else { /* Leaving the message for now, it should be removed after a while; once the tlv situation is stable. @@ -3232,6 +3641,8 @@ dhd_wlfc_hostreorder_init(dhd_pub_t *dhd) dhd_os_wlfc_block(dhd); dhd->proptxstatus_mode = WLFC_ONLY_AMPDU_HOSTREORDER; dhd_os_wlfc_unblock(dhd); + /* terence 20161229: enable ampdu_hostreorder if tlv enable hostreorder */ + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE); return BCME_OK; } @@ -3258,7 +3669,7 @@ dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) return BCME_OK; } -/* release all packet resources */ +/** release all packet resources */ int dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) { @@ -3284,12 +3695,10 @@ dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) int dhd_wlfc_deinit(dhd_pub_t *dhd) { - char iovbuf[32]; /* Room for "ampdu_hostreorder" or "tlv" + '\0' + parameter */ /* cleanup all psq related resources */ athost_wl_status_info_t* wlfc; uint32 tlv = 0; uint32 hostreorder = 0; - int ret = BCME_OK; if (dhd == NULL) { DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); @@ -3302,19 +3711,13 @@ dhd_wlfc_deinit(dhd_pub_t *dhd) dhd_os_wlfc_unblock(dhd); return BCME_OK; } + dhd->wlfc_enabled = FALSE; dhd_os_wlfc_unblock(dhd); /* query ampdu hostreorder */ - bcm_mkiovar("ampdu_hostreorder", NULL, 0, iovbuf, sizeof(iovbuf)); - ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0); - if (ret == BCME_OK) - hostreorder = *((uint32 *)iovbuf); - else { - hostreorder = 0; - DHD_ERROR(("%s():%d, ampdu_hostreorder get failed Err = %d\n", - __FUNCTION__, __LINE__, ret)); - } + (void) dhd_wl_ioctl_get_intiovar(dhd, "ampdu_hostreorder", + &hostreorder, WLC_GET_VAR, FALSE, 0); if (hostreorder) { tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE; @@ -3323,20 +3726,7 @@ dhd_wlfc_deinit(dhd_pub_t *dhd) } /* Disable proptxtstatus signaling for deinit */ - bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, 
sizeof(iovbuf)); - ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); - - if (ret == BCME_OK) { - /* - Leaving the message for now, it should be removed after a while; once - the tlv situation is stable. - */ - DHD_ERROR(("%s():%d successfully %s bdcv2 tlv signaling, %d\n", - __FUNCTION__, __LINE__, - dhd->wlfc_enabled?"enabled":"disabled", tlv)); - } else - DHD_ERROR(("%s():%d failed to enable/disable bdcv2 tlv signaling Err = %d\n", - __FUNCTION__, __LINE__, ret)); + (void) dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0); dhd_os_wlfc_block(dhd); @@ -3347,26 +3737,20 @@ dhd_wlfc_deinit(dhd_pub_t *dhd) wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; -#ifdef PROP_TXSTATUS_DEBUG - if (!WLFC_GET_AFQ(dhd->wlfc_mode)) - { + _dhd_wlfc_cleanup(dhd, NULL, NULL); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { int i; wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; for (i = 0; i < h->max_items; i++) { if (h->items[i].state != WLFC_HANGER_ITEM_STATE_FREE) { - WLFC_DBGMESG(("%s() pkt[%d] = 0x%p, FIFO_credit_used:%d\n", - __FUNCTION__, i, h->items[i].pkt, - DHD_PKTTAG_CREDITCHECK(PKTTAG(h->items[i].pkt)))); + _dhd_wlfc_hanger_free_pkt(wlfc, i, + WLFC_HANGER_PKT_STATE_COMPLETE, TRUE); } } - } -#endif - - _dhd_wlfc_cleanup(dhd, NULL, NULL); - if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { /* delete hanger */ - _dhd_wlfc_hanger_delete(dhd->osh, wlfc->hanger); + _dhd_wlfc_hanger_delete(dhd, h); } @@ -3377,13 +3761,20 @@ dhd_wlfc_deinit(dhd_pub_t *dhd) dhd->proptxstatus_mode = hostreorder ? 
WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE; + DHD_ERROR(("%s: wlfc_mode=0x%x, tlv=%d\n", __FUNCTION__, dhd->wlfc_mode, tlv)); + dhd_os_wlfc_unblock(dhd); if (dhd->plat_deinit) dhd->plat_deinit((void *)dhd); return BCME_OK; -} +} /* dhd_wlfc_init */ +/** + * Called on an interface event (WLC_E_IF) indicated by firmware + * @param[in] dhdp Pointer to public DHD structure + * @param[in] action eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD + */ int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea) { int rc; @@ -3406,6 +3797,7 @@ int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 if return rc; } +/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */ int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data) { int rc; @@ -3429,6 +3821,10 @@ int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data) return rc; } +/** + * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast + * specific) + */ int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp) { int rc; @@ -3451,6 +3847,7 @@ int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp) return rc; } +/** debug specific function */ int dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) { @@ -3543,14 +3940,17 @@ dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) iftype_desc, ((wlfc->hostif_flow_state[i] == OFF) ? " OFF":" ON")); - bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit),(trans,supp_trans)" - "= (%d,%s,%d),(%d,%d)\n", + bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit)," + "(trans,supp_trans,onbus)" + "= (%d,%s,%d),(%d,%d,%d)\n", i, interfaces[i].psq.len, ((interfaces[i].state == WLFC_STATE_OPEN) ? 
"OPEN":"CLOSE"), interfaces[i].requested_credit, - interfaces[i].transit_count, interfaces[i].suppr_transit_count); + interfaces[i].transit_count, + interfaces[i].suppr_transit_count, + interfaces[i].onbus_pkts_count); bcm_bprintf(strbuf, "INTERFACE[%d].PSQ" "(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2)," @@ -3584,14 +3984,17 @@ dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) ea[0], ea[1], ea[2], ea[3], ea[4], ea[5], mac_table[i].interface_id); - bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit),(trans,supp_trans)" - "= (%d,%s,%d),(%d,%d)\n", + bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit)," + "(trans,supp_trans,onbus)" + "= (%d,%s,%d),(%d,%d,%d)\n", i, mac_table[i].psq.len, ((mac_table[i].state == WLFC_STATE_OPEN) ? " OPEN":"CLOSE"), mac_table[i].requested_credit, - mac_table[i].transit_count, mac_table[i].suppr_transit_count); + mac_table[i].transit_count, + mac_table[i].suppr_transit_count, + mac_table[i].onbus_pkts_count); #ifdef PROP_TXSTATUS_DEBUG bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n", i, mac_table[i].opened_ct, mac_table[i].closed_ct); @@ -3757,7 +4160,7 @@ dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) dhd_os_wlfc_unblock(dhdp); return BCME_OK; -} +} /* dhd_wlfc_dump */ int dhd_wlfc_clear_counts(dhd_pub_t *dhd) { @@ -3795,6 +4198,7 @@ int dhd_wlfc_clear_counts(dhd_pub_t *dhd) return BCME_OK; } +/** returns TRUE if flow control is enabled */ int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val) { if (!dhd || !val) { @@ -3811,6 +4215,7 @@ int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val) return BCME_OK; } +/** Called via an IOVAR */ int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val) { if (!dhd || !val) { @@ -3827,6 +4232,7 @@ int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val) return BCME_OK; } +/** Called via an IOVAR */ int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val) { if (!dhd) { @@ -3845,6 +4251,7 @@ int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val) return BCME_OK; } +/** Called when rx 
frame is received from the dongle */ bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf) { athost_wl_status_info_t* wlfc; @@ -3901,9 +4308,14 @@ int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock) dhd_os_wlfc_unblock(dhdp); } +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* defined(DHD_WLFC_THREAD) */ + return BCME_OK; } +/** Called when eg an rx frame is received from the dongle */ int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio) { athost_wl_status_info_t* wlfc; @@ -3936,6 +4348,7 @@ int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio) return BCME_OK; } +/** called via an IOVAR */ int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val) { if (!dhd || !val) { @@ -3952,9 +4365,9 @@ int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val) return BCME_OK; } +/** called via an IOVAR */ int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val) { - char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */ uint32 tlv = 0; bool bChanged = FALSE; @@ -3984,12 +4397,10 @@ int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val) if (bChanged) { /* select enable proptxtstatus signaling */ - bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf)); - if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) { + if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n", __FUNCTION__, tlv)); - } - else { + } else { DHD_ERROR(("%s: successfully set bdcv2 tlv signaling to 0x%x\n", __FUNCTION__, tlv)); } @@ -3997,6 +4408,7 @@ int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val) return BCME_OK; } +/** called via an IOVAR */ int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val) { if (!dhd || !val) { @@ -4013,6 +4425,7 @@ int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val) return BCME_OK; } +/** called via an IOVAR */ int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val) { if (!dhd) { @@ -4029,6 +4442,7 
@@ int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val) return BCME_OK; } +/** called via an IOVAR */ int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val) { if (!dhd || !val) { @@ -4045,6 +4459,7 @@ int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val) return BCME_OK; } +/** called via an IOVAR */ int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val) { if (!dhd) { @@ -4061,6 +4476,7 @@ int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val) return BCME_OK; } +/** called via an IOVAR */ int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val) { if (!dhd || !val) { @@ -4077,6 +4493,7 @@ int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val) return BCME_OK; } +/** called via an IOVAR */ int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val) { if (!dhd) { @@ -4092,4 +4509,5 @@ int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val) return BCME_OK; } + #endif /* PROP_TXSTATUS */ diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.h b/drivers/amlogic/wifi/bcmdhd/dhd_wlfc.h similarity index 70% rename from drivers/net/wireless/bcmdhd/dhd_wlfc.h rename to drivers/amlogic/wifi/bcmdhd/dhd_wlfc.h index 05365421edf14..a6fd465e35fda 100644 --- a/drivers/net/wireless/bcmdhd/dhd_wlfc.h +++ b/drivers/amlogic/wifi/bcmdhd/dhd_wlfc.h @@ -1,17 +1,41 @@ /* -* $Copyright Open 2009 Broadcom Corporation$ -* $Id: dhd_wlfc.h 501046 2014-09-06 01:25:16Z $ -* -*/ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * 
you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_wlfc.h 557035 2015-05-15 18:48:57Z $ + * + */ #ifndef __wlfc_host_driver_definitions_h__ #define __wlfc_host_driver_definitions_h__ -#ifdef QMONITOR -#include -#endif /* #define OOO_DEBUG */ +#define KERNEL_THREAD_RETURN_TYPE int + +typedef int (*f_commitpkt_t)(void* ctx, void* p); +typedef bool (*f_processpkt_t)(void* p, void* arg); + #define WLFC_UNSUPPORTED -9999 #define WLFC_NO_TRAFFIC -1 @@ -19,26 +43,28 @@ #define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */ -/* 16 bits will provide an absolute max of 65536 slots */ +/** 16 bits will provide an absolute max of 65536 slots */ #define WLFC_HANGER_MAXITEMS 3072 #define WLFC_HANGER_ITEM_STATE_FREE 1 #define WLFC_HANGER_ITEM_STATE_INUSE 2 #define WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED 3 +#define WLFC_HANGER_ITEM_STATE_FLUSHED 4 #define WLFC_HANGER_PKT_STATE_TXSTATUS 1 -#define WLFC_HANGER_PKT_STATE_TXCOMPLETE 2 -#define WLFC_HANGER_PKT_STATE_CLEANUP 4 +#define WLFC_HANGER_PKT_STATE_BUSRETURNED 2 +#define WLFC_HANGER_PKT_STATE_COMPLETE \ + (WLFC_HANGER_PKT_STATE_TXSTATUS | WLFC_HANGER_PKT_STATE_BUSRETURNED) typedef enum { - Q_TYPE_PSQ, - Q_TYPE_AFQ + Q_TYPE_PSQ, /**< Power Save Queue, contains both delayed and suppressed packets */ + Q_TYPE_AFQ /**< At Firmware Queue */ } q_type_t; typedef enum ewlfc_packet_state { - eWLFC_PKTTYPE_NEW, - eWLFC_PKTTYPE_DELAYED, - eWLFC_PKTTYPE_SUPPRESSED, + eWLFC_PKTTYPE_NEW, /**< unused in the code (Jan 2015) */ + 
eWLFC_PKTTYPE_DELAYED, /**< packet did not enter wlfc yet */ + eWLFC_PKTTYPE_SUPPRESSED, /**< packet entered wlfc and was suppressed by the dongle */ eWLFC_PKTTYPE_MAX } ewlfc_packet_state_t; @@ -52,7 +78,7 @@ typedef enum ewlfc_mac_entry_action { typedef struct wlfc_hanger_item { uint8 state; uint8 gen; - uint8 pkt_state; + uint8 pkt_state; /**< bitmask containing eg WLFC_HANGER_PKT_STATE_TXCOMPLETE */ uint8 pkt_txstatus; uint32 identifier; void* pkt; @@ -62,6 +88,7 @@ typedef struct wlfc_hanger_item { struct wlfc_hanger_item *next; } wlfc_hanger_item_t; +/** hanger contains packets that have been posted by the dhd to the dongle and are expected back */ typedef struct wlfc_hanger { int max_items; uint32 pushed; @@ -76,10 +103,10 @@ typedef struct wlfc_hanger { #define WLFC_HANGER_SIZE(n) ((sizeof(wlfc_hanger_t) - \ sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t))) -#define WLFC_STATE_OPEN 1 -#define WLFC_STATE_CLOSE 2 +#define WLFC_STATE_OPEN 1 /**< remote MAC is able to receive packets */ +#define WLFC_STATE_CLOSE 2 /**< remote MAC is in power save mode */ -#define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /* 2 for each AC traffic and bc/mc */ +#define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /**< 2 for each AC traffic and bc/mc */ #define WLFC_AFQ_PREC_COUNT (AC_COUNT + 1) #define WLFC_PSQ_LEN 2048 @@ -87,43 +114,48 @@ typedef struct wlfc_hanger { #define WLFC_FLOWCONTROL_HIWATER (2048 - 256) #define WLFC_FLOWCONTROL_LOWATER 256 +#if (WLFC_FLOWCONTROL_HIWATER >= (WLFC_PSQ_LEN - 256)) +#undef WLFC_FLOWCONTROL_HIWATER +#define WLFC_FLOWCONTROL_HIWATER (WLFC_PSQ_LEN - 256) +#undef WLFC_FLOWCONTROL_LOWATER +#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER / 4) +#endif + #define WLFC_LOG_BUF_SIZE (1024*1024) +/** Properties related to a remote MAC entity */ typedef struct wlfc_mac_descriptor { - uint8 occupied; + uint8 occupied; /**< if 0, this descriptor is unused and thus can be (re)used */ uint8 interface_id; - uint8 iftype; - uint8 state; - 
uint8 ac_bitmap; /* for APSD */ + uint8 iftype; /**< eg WLC_E_IF_ROLE_STA */ + uint8 state; /**< eg WLFC_STATE_OPEN */ + uint8 ac_bitmap; /**< automatic power save delivery (APSD) */ uint8 requested_credit; - uint8 requested_packet; + uint8 requested_packet; /**< unit: [number of packets] */ uint8 ea[ETHER_ADDR_LEN]; - /* - maintain (MAC,AC) based seq count for - packets going to the device. As well as bc/mc. - */ + + /** maintain (MAC,AC) based seq count for packets going to the device. As well as bc/mc. */ uint8 seq[AC_COUNT + 1]; - uint8 generation; - struct pktq psq; - /* packets at firmware */ + uint8 generation; /**< toggles between 0 and 1 */ + struct pktq psq; /**< contains both 'delayed' and 'suppressed' packets */ + /** packets at firmware queue */ struct pktq afq; - /* The AC pending bitmap that was reported to the fw at last change */ + /** The AC pending bitmap that was reported to the fw at last change */ uint8 traffic_lastreported_bmp; - /* The new AC pending bitmap */ + /** The new AC pending bitmap */ uint8 traffic_pending_bmp; - /* 1= send on next opportunity */ + /** 1= send on next opportunity */ uint8 send_tim_signal; - uint8 mac_handle; - /* Number of packets at dongle for this entry. */ + uint8 mac_handle; /**< mac handles are assigned by the dongle */ + /** Number of packets at dongle for this entry. */ int transit_count; - /* Numbe of suppression to wait before evict from delayQ */ + /** Number of suppression to wait before evict from delayQ */ int suppr_transit_count; - /* flag. TRUE when in suppress state */ + /** pkt sent to bus but no bus TX complete yet */ + int onbus_pkts_count; + /** flag. 
TRUE when remote MAC is in suppressed state */ uint8 suppressed; -#ifdef QMONITOR - dhd_qmon_t qmon; -#endif /* QMONITOR */ #ifdef PROP_TXSTATUS_DEBUG uint32 dstncredit_sent_packets; @@ -135,6 +167,7 @@ typedef struct wlfc_mac_descriptor { struct wlfc_mac_descriptor* next; } wlfc_mac_descriptor_t; +/** A 'commit' is the hand over of a packet from the host OS layer to the layer below (eg DBUS) */ typedef struct dhd_wlfc_commit_info { uint8 needs_hdr; uint8 ac_fifo_credit_spent; @@ -185,7 +218,7 @@ typedef struct athost_wl_stat_counters { uint32 drop_pkts[WLFC_PSQ_PREC_COUNT]; uint32 ooo_pkts[AC_COUNT + 1]; #ifdef PROP_TXSTATUS_DEBUG - /* all pkt2bus -> txstatus latency accumulated */ + /** all pkt2bus -> txstatus latency accumulated */ uint32 latency_sample_count; uint32 total_status_latency; uint32 latency_most_recent; @@ -220,87 +253,100 @@ typedef struct athost_wl_stat_counters { #define WLFC_FCMODE_EXPLICIT_CREDIT 2 #define WLFC_ONLY_AMPDU_HOSTREORDER 3 -/* Reserved credits ratio when borrowed by hihger priority */ +/** Reserved credits ratio when borrowed by hihger priority */ #define WLFC_BORROW_LIMIT_RATIO 4 -/* How long to defer borrowing in milliseconds */ +/** How long to defer borrowing in milliseconds */ #define WLFC_BORROW_DEFER_PERIOD_MS 100 -/* How long to defer flow control in milliseconds */ +/** How long to defer flow control in milliseconds */ #define WLFC_FC_DEFER_PERIOD_MS 200 -/* How long to detect occurance per AC in miliseconds */ +/** How long to detect occurance per AC in miliseconds */ #define WLFC_RX_DETECTION_THRESHOLD_MS 100 -/* Mask to represent available ACs (note: BC/MC is ignored */ +/** Mask to represent available ACs (note: BC/MC is ignored) */ #define WLFC_AC_MASK 0xF +/** flow control specific information, only 1 instance during driver lifetime */ typedef struct athost_wl_status_info { uint8 last_seqid_to_wlc; - /* OSL handle */ - osl_t* osh; - /* dhd pub */ - void* dhdp; + /** OSL handle */ + osl_t *osh; + /** dhd public struct 
pointer */ + void *dhdp; + + f_commitpkt_t fcommit; + void* commit_ctx; - /* stats */ + /** statistics */ athost_wl_stat_counters_t stats; + /** incremented on eg receiving a credit map event from the dongle */ int Init_FIFO_credit[AC_COUNT + 2]; - - /* the additional ones are for bc/mc and ATIM FIFO */ + /** the additional ones are for bc/mc and ATIM FIFO */ int FIFO_credit[AC_COUNT + 2]; - - /* Credit borrow counts for each FIFO from each of the other FIFOs */ + /** Credit borrow counts for each FIFO from each of the other FIFOs */ int credits_borrowed[AC_COUNT + 2][AC_COUNT + 2]; - /* packet hanger and MAC->handle lookup table */ - void* hanger; + /** packet hanger and MAC->handle lookup table */ + void *hanger; + struct { - /* table for individual nodes */ + /** table for individual nodes */ wlfc_mac_descriptor_t nodes[WLFC_MAC_DESC_TABLE_SIZE]; - /* table for interfaces */ + /** table for interfaces */ wlfc_mac_descriptor_t interfaces[WLFC_MAX_IFNUM]; /* OS may send packets to unknown (unassociated) destinations */ - /* A place holder for bc/mc and packets to unknown destinations */ + /** A place holder for bc/mc and packets to unknown destinations */ wlfc_mac_descriptor_t other; } destination_entries; - wlfc_mac_descriptor_t *active_entry_head; + wlfc_mac_descriptor_t *active_entry_head; /**< a chain of MAC descriptors */ int active_entry_count; - wlfc_mac_descriptor_t* requested_entry[WLFC_MAC_DESC_TABLE_SIZE]; + wlfc_mac_descriptor_t *requested_entry[WLFC_MAC_DESC_TABLE_SIZE]; int requested_entry_count; /* pkt counts for each interface and ac */ int pkt_cnt_in_q[WLFC_MAX_IFNUM][AC_COUNT+1]; int pkt_cnt_per_ac[AC_COUNT+1]; int pkt_cnt_in_drv[WLFC_MAX_IFNUM][AC_COUNT+1]; - uint8 allow_fc; + int pkt_cnt_in_psq; + uint8 allow_fc; /**< Boolean */ uint32 fc_defer_timestamp; uint32 rx_timestamp[AC_COUNT+1]; - /* ON/OFF state for flow control to the host network interface */ + + /** ON/OFF state for flow control to the host network interface */ uint8 
hostif_flow_state[WLFC_MAX_IFNUM]; uint8 host_ifidx; - /* to flow control an OS interface */ + + /** to flow control an OS interface */ uint8 toggle_host_if; - /* To borrow credits */ + /** To borrow credits */ uint8 allow_credit_borrow; - /* ac number for the first single ac traffic */ + /** ac number for the first single ac traffic */ uint8 single_ac; - /* Timestamp for the first single ac traffic */ + /** Timestamp for the first single ac traffic */ uint32 single_ac_timestamp; bool bcmc_credit_supported; } athost_wl_status_info_t; -/* Please be mindful that total pkttag space is 32 octets only */ +/** Please be mindful that total pkttag space is 32 octets only */ typedef struct dhd_pkttag { - /* + +#ifdef BCM_OBJECT_TRACE + /* if use this field, keep it at the first 4 bytes */ + uint32 sn; +#endif /* BCM_OBJECT_TRACE */ + + /** b[15] - 1 = wlfc packet b[14:13] - encryption exemption b[12 ] - 1 = event channel @@ -317,34 +363,33 @@ typedef struct dhd_pkttag { b[3:0] - interface index */ uint16 if_flags; - /* destination MAC address for this packet so that not every - module needs to open the packet to find this - */ + + /** + * destination MAC address for this packet so that not every module needs to open the packet + * to find this + */ uint8 dstn_ether[ETHER_ADDR_LEN]; - /* - This 32-bit goes from host to device for every packet. - */ + + /** This 32-bit goes from host to device for every packet. */ uint32 htod_tag; - /* - This 16-bit is original seq number for every suppress packet. - */ + /** This 16-bit is original seq number for every suppress packet. */ uint16 htod_seq; - /* - This address is mac entry for every packet. - */ - void* entry; - /* bus specific stuff */ + /** This address is mac entry for every packet. 
*/ + void *entry; + + /** bus specific stuff */ union { struct { - void* stuff; + void *stuff; uint32 thing1; uint32 thing2; } sd; + struct { - void* bus; - void* urb; + void *bus; + void *urb; } usb; } bus_specific; } dhd_pkttag_t; @@ -453,9 +498,6 @@ typedef struct dhd_pkttag { #define PSQ_SUP_IDX(x) (x * 2 + 1) #define PSQ_DLY_IDX(x) (x * 2) -typedef int (*f_commitpkt_t)(void* ctx, void* p); -typedef bool (*f_processpkt_t)(void* p, void* arg); - #ifdef PROP_TXSTATUS_DEBUG #define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do { (entry)->closed_ct++; } while (0) #define DHD_WLFC_CTRINC_MAC_OPEN(entry) do { (entry)->opened_ct++; } while (0) @@ -464,9 +506,15 @@ typedef bool (*f_processpkt_t)(void* p, void* arg); #define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0) #endif +#ifdef BCM_OBJECT_TRACE +#define DHD_PKTTAG_SET_SN(tag, val) ((dhd_pkttag_t*)(tag))->sn = (val) +#define DHD_PKTTAG_SN(tag) (((dhd_pkttag_t*)(tag))->sn) +#endif /* BCM_OBJECT_TRACE */ + /* public functions */ int dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf, uint *reorder_info_len); +KERNEL_THREAD_RETURN_TYPE dhd_wlfc_transfer_packets(void *data); int dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf, bool need_toggle_host_if); int dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success); @@ -502,4 +550,5 @@ int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val); int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val); int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val); + #endif /* __wlfc_host_driver_definitions_h__ */ diff --git a/drivers/amlogic/wifi/bcmdhd/dngl_stats.h b/drivers/amlogic/wifi/bcmdhd/dngl_stats.h new file mode 100644 index 0000000000000..66e4f4528f7d4 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dngl_stats.h @@ -0,0 +1,283 @@ +/* + * Common stats definitions for clients of dongle + * ports + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute 
a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dngl_stats.h 523030 2014-12-25 17:28:07Z $ + */ + +#ifndef _dngl_stats_h_ +#define _dngl_stats_h_ + +#include +#include + +typedef struct { + unsigned long rx_packets; /* total packets received */ + unsigned long tx_packets; /* total packets transmitted */ + unsigned long rx_bytes; /* total bytes received */ + unsigned long tx_bytes; /* total bytes transmitted */ + unsigned long rx_errors; /* bad packets received */ + unsigned long tx_errors; /* packet transmit problems */ + unsigned long rx_dropped; /* packets dropped by dongle */ + unsigned long tx_dropped; /* packets dropped by dongle */ + unsigned long multicast; /* multicast packets received */ +} dngl_stats_t; + +typedef int32 wifi_radio; +typedef int32 wifi_channel; +typedef int32 wifi_rssi; +typedef struct { uint16 version; uint16 length; } ver_len; + +typedef enum wifi_channel_width { + WIFI_CHAN_WIDTH_20 = 0, + WIFI_CHAN_WIDTH_40 = 1, + WIFI_CHAN_WIDTH_80 = 2, + WIFI_CHAN_WIDTH_160 = 3, + WIFI_CHAN_WIDTH_80P80 = 4, + WIFI_CHAN_WIDTH_5 = 5, + WIFI_CHAN_WIDTH_10 = 6, + WIFI_CHAN_WIDTH_INVALID = -1 +} wifi_channel_width_t; + +typedef enum { + WIFI_DISCONNECTED = 0, + WIFI_AUTHENTICATING = 1, + WIFI_ASSOCIATING = 2, + WIFI_ASSOCIATED = 3, + WIFI_EAPOL_STARTED = 4, /* if done by firmware/driver */ + WIFI_EAPOL_COMPLETED = 5, /* if done by firmware/driver */ +} wifi_connection_state; + +typedef enum { + WIFI_ROAMING_IDLE = 0, + WIFI_ROAMING_ACTIVE = 1 +} wifi_roam_state; + +typedef enum { + WIFI_INTERFACE_STA = 0, + WIFI_INTERFACE_SOFTAP = 1, + WIFI_INTERFACE_IBSS = 2, + WIFI_INTERFACE_P2P_CLIENT = 3, + WIFI_INTERFACE_P2P_GO = 4, + WIFI_INTERFACE_NAN = 5, + WIFI_INTERFACE_MESH = 6 +} wifi_interface_mode; + +#define WIFI_CAPABILITY_QOS 0x00000001 /* set for QOS association */ +#define WIFI_CAPABILITY_PROTECTED 0x00000002 /* set for protected association (802.11 + * beacon frame control protected bit set) + */ +#define WIFI_CAPABILITY_INTERWORKING 0x00000004 /* set if 802.11 Extended 
Capabilities + * element interworking bit is set + */ +#define WIFI_CAPABILITY_HS20 0x00000008 /* set for HS20 association */ +#define WIFI_CAPABILITY_SSID_UTF8 0x00000010 /* set is 802.11 Extended Capabilities + * element UTF-8 SSID bit is set + */ +#define WIFI_CAPABILITY_COUNTRY 0x00000020 /* set is 802.11 Country Element is present */ + +typedef struct { + wifi_interface_mode mode; /* interface mode */ + uint8 mac_addr[6]; /* interface mac address (self) */ + wifi_connection_state state; /* connection state (valid for STA, CLI only) */ + wifi_roam_state roaming; /* roaming state */ + uint32 capabilities; /* WIFI_CAPABILITY_XXX (self) */ + uint8 ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated SSID */ + uint8 bssid[ETHER_ADDR_LEN]; /* bssid */ + uint8 ap_country_str[3]; /* country string advertised by AP */ + uint8 country_str[3]; /* country string for this association */ +} wifi_interface_info; + +typedef wifi_interface_info *wifi_interface_handle; + +/* channel information */ +typedef struct { + wifi_channel_width_t width; /* channel width (20, 40, 80, 80+80, 160) */ + wifi_channel center_freq; /* primary 20 MHz channel */ + wifi_channel center_freq0; /* center frequency (MHz) first segment */ + wifi_channel center_freq1; /* center frequency (MHz) second segment */ +} wifi_channel_info; + +/* wifi rate */ +typedef struct { + uint32 preamble; /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */ + uint32 nss; /* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */ + uint32 bw; /* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */ + uint32 rateMcsIdx; /* OFDM/CCK rate code would be as per ieee std + * in the units of 0.5mbps + */ + /* HT/VHT it would be mcs index */ + uint32 reserved; /* reserved */ + uint32 bitrate; /* units of 100 Kbps */ +} wifi_rate; + +/* channel statistics */ +typedef struct { + wifi_channel_info channel; /* channel */ + uint32 on_time; /* msecs the radio is awake (32 bits number + * accruing over time) + */ + uint32 cca_busy_time; /* msecs the CCA register is busy (32 bits number + 
* accruing over time) + */ +} wifi_channel_stat; + +/* radio statistics */ +typedef struct { + struct { + uint16 version; + uint16 length; + }; + wifi_radio radio; /* wifi radio (if multiple radio supported) */ + uint32 on_time; /* msecs the radio is awake (32 bits number + * accruing over time) + */ + uint32 tx_time; /* msecs the radio is transmitting (32 bits + * number accruing over time) + */ + uint32 rx_time; /* msecs the radio is in active receive (32 bits + * number accruing over time) + */ + uint32 on_time_scan; /* msecs the radio is awake due to all scan (32 bits + * number accruing over time) + */ + uint32 on_time_nbd; /* msecs the radio is awake due to NAN (32 bits + * number accruing over time) + */ + uint32 on_time_gscan; /* msecs the radio is awake due to G?scan (32 bits + * number accruing over time) + */ + uint32 on_time_roam_scan; /* msecs the radio is awake due to roam?scan (32 bits + * number accruing over time) + */ + uint32 on_time_pno_scan; /* msecs the radio is awake due to PNO scan (32 bits + * number accruing over time) + */ + uint32 on_time_hs20; /* msecs the radio is awake due to HS2.0 scans and + * GAS exchange (32 bits number accruing over time) + */ + uint32 num_channels; /* number of channels */ + wifi_channel_stat channels[1]; /* channel statistics */ +} wifi_radio_stat; + +/* per rate statistics */ +typedef struct { + struct { + uint16 version; + uint16 length; + }; + uint32 tx_mpdu; /* number of successfully transmitted data pkts (ACK rcvd) */ + uint32 rx_mpdu; /* number of received data pkts */ + uint32 mpdu_lost; /* number of data packet losses (no ACK) */ + uint32 retries; /* total number of data pkt retries */ + uint32 retries_short; /* number of short data pkt retries */ + uint32 retries_long; /* number of long data pkt retries */ + wifi_rate rate; /* rate information */ +} wifi_rate_stat; + +/* access categories */ +typedef enum { + WIFI_AC_VO = 0, + WIFI_AC_VI = 1, + WIFI_AC_BE = 2, + WIFI_AC_BK = 3, + WIFI_AC_MAX = 4 +} 
wifi_traffic_ac; + +/* wifi peer type */ +typedef enum +{ + WIFI_PEER_STA, + WIFI_PEER_AP, + WIFI_PEER_P2P_GO, + WIFI_PEER_P2P_CLIENT, + WIFI_PEER_NAN, + WIFI_PEER_TDLS, + WIFI_PEER_INVALID +} wifi_peer_type; + +/* per peer statistics */ +typedef struct { + wifi_peer_type type; /* peer type (AP, TDLS, GO etc.) */ + uint8 peer_mac_address[6]; /* mac address */ + uint32 capabilities; /* peer WIFI_CAPABILITY_XXX */ + uint32 num_rate; /* number of rates */ + wifi_rate_stat rate_stats[1]; /* per rate statistics, number of entries = num_rate */ +} wifi_peer_info; + +/* per access category statistics */ +typedef struct { + wifi_traffic_ac ac; /* access category (VI, VO, BE, BK) */ + uint32 tx_mpdu; /* number of successfully transmitted unicast data pkts + * (ACK rcvd) + */ + uint32 rx_mpdu; /* number of received unicast mpdus */ + uint32 tx_mcast; /* number of succesfully transmitted multicast + * data packets + */ + /* STA case: implies ACK received from AP for the + * unicast packet in which mcast pkt was sent + */ + uint32 rx_mcast; /* number of received multicast data packets */ + uint32 rx_ampdu; /* number of received unicast a-mpdus */ + uint32 tx_ampdu; /* number of transmitted unicast a-mpdus */ + uint32 mpdu_lost; /* number of data pkt losses (no ACK) */ + uint32 retries; /* total number of data pkt retries */ + uint32 retries_short; /* number of short data pkt retries */ + uint32 retries_long; /* number of long data pkt retries */ + uint32 contention_time_min; /* data pkt min contention time (usecs) */ + uint32 contention_time_max; /* data pkt max contention time (usecs) */ + uint32 contention_time_avg; /* data pkt avg contention time (usecs) */ + uint32 contention_num_samples; /* num of data pkts used for contention statistics */ +} wifi_wmm_ac_stat; + +/* interface statistics */ +typedef struct { + wifi_interface_handle iface; /* wifi interface */ + wifi_interface_info info; /* current state of the interface */ + uint32 beacon_rx; /* access point beacon 
received count from + * connected AP + */ + uint32 mgmt_rx; /* access point mgmt frames received count from + * connected AP (including Beacon) + */ + uint32 mgmt_action_rx; /* action frames received count */ + uint32 mgmt_action_tx; /* action frames transmit count */ + wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI + * (averaged) + */ + wifi_rssi rssi_data; /* access Point Data Frames RSSI (averaged) from + * connected AP + */ + wifi_rssi rssi_ack; /* access Point ACK RSSI (averaged) from + * connected AP + */ + wifi_wmm_ac_stat ac[WIFI_AC_MAX]; /* per ac data packet statistics */ + uint32 num_peers; /* number of peers */ + wifi_peer_info peer_info[1]; /* per peer statistics */ +} wifi_iface_stat; + +#endif /* _dngl_stats_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/dngl_wlhdr.h b/drivers/amlogic/wifi/bcmdhd/dngl_wlhdr.h new file mode 100644 index 0000000000000..93e0b5a5b69dc --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/dngl_wlhdr.h @@ -0,0 +1,43 @@ +/* + * Dongle WL Header definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dngl_wlhdr.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _dngl_wlhdr_h_ +#define _dngl_wlhdr_h_ + +typedef struct wl_header { + uint8 type; /* Header type */ + uint8 version; /* Header version */ + int8 rssi; /* RSSI */ + uint8 pad; /* Unused */ +} wl_header_t; + +#define WL_HEADER_LEN sizeof(wl_header_t) +#define WL_HEADER_TYPE 0 +#define WL_HEADER_VER 1 +#endif /* _dngl_wlhdr_h_ */ diff --git a/drivers/net/wireless/bcmdhd/hnd_pktpool.c b/drivers/amlogic/wifi/bcmdhd/hnd_pktpool.c similarity index 55% rename from drivers/net/wireless/bcmdhd/hnd_pktpool.c rename to drivers/amlogic/wifi/bcmdhd/hnd_pktpool.c index 242f4322117a2..f3555e40ce914 100644 --- a/drivers/net/wireless/bcmdhd/hnd_pktpool.c +++ b/drivers/amlogic/wifi/bcmdhd/hnd_pktpool.c @@ -1,16 +1,51 @@ /* * HND generic packet pool operation primitives * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: $ + * + * <> + * + * $Id: hnd_pktpool.c 591285 2015-10-07 11:56:29Z $ */ #include #include +#include #include #include +/* mutex macros for thread safe */ +#ifdef HND_PKTPOOL_THREAD_SAFE +#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex) +#define HND_PKTPOOL_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex) +#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec) +#define HND_PKTPOOL_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex) +#else +#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS +#endif + /* Registry size is one larger than max pools, as slot #0 is reserved */ #define PKTPOOLREG_RSVD_ID (0U) #define PKTPOOLREG_RSVD_PTR (POOLPTR(0xdeaddead)) @@ -35,6 +70,12 @@ #define PKTPOOL_REGISTRY_FOREACH(id) \ for ((id) = 1U; (id) <= pktpools_max; (id)++) +enum pktpool_empty_cb_state { + EMPTYCB_ENABLED = 0, /* Enable callback when new packets are added to pool */ + EMPTYCB_DISABLED, /* Disable callback when new packets are added to pool */ + EMPTYCB_SKIPPED /* Packet was added to pool when callback was disabled */ +}; + uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */ pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */ @@ -42,7 +83,18 @@ pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */ static int pktpool_register(pktpool_t * poolptr); static int pktpool_deregister(pktpool_t * poolptr); +/** add declaration */ +static int 
pktpool_avail_notify(pktpool_t *pktp); + /** accessor functions required when ROMming this file, forced into RAM */ + + +pktpool_t * +BCMRAMFN(get_pktpools_registry)(int id) +{ + return pktpools_registry[id]; +} + static void BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp) { @@ -185,6 +237,10 @@ pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx, uint8 pktp->plen = (uint16)plen; pktp->type = type; + if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) { + return BCME_ERROR; + } + pktp->maxlen = PKTPOOL_LEN_MAX; pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen); @@ -269,6 +325,9 @@ pktpool_deinit(osl_t *osh, pktpool_t *pktp) pktpool_deregister(pktp); /* release previously acquired unique pool id */ POOLSETID(pktp, PKTPOOL_INVALID_ID); + if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + pktp->inited = FALSE; /* Are there still pending pkts? */ @@ -284,6 +343,10 @@ pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal) int err = 0; int len, psize, maxlen; + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + ASSERT(pktp->plen != 0); maxlen = pktp->maxlen; @@ -304,13 +367,22 @@ pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal) } } + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (pktp->cbcnt) { + if (pktp->empty == FALSE) + pktpool_avail_notify(pktp); + } + return err; } static void * pktpool_deq(pktpool_t *pktp) { - void *p; + void *p = NULL; if (pktp->avail == 0) return NULL; @@ -358,6 +430,8 @@ pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg) ASSERT(cb != NULL); + if (pktp == NULL) + return BCME_ERROR; ASSERT(pktp->rxcplidfn.cb == NULL); pktp->rxcplidfn.cb = cb; pktp->rxcplidfn.arg = arg; @@ -389,39 +463,63 @@ pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg) 
int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) { + int err = 0; int i; + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + ASSERT(cb != NULL); i = pktp->cbcnt; - if (i == PKTPOOL_CB_MAX) - return BCME_ERROR; + if (i == PKTPOOL_CB_MAX_AVL) { + err = BCME_ERROR; + goto done; + } ASSERT(pktp->cbs[i].cb == NULL); pktp->cbs[i].cb = cb; pktp->cbs[i].arg = arg; pktp->cbcnt++; - return 0; +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; } int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) { + int err = 0; int i; + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + ASSERT(cb != NULL); i = pktp->ecbcnt; - if (i == PKTPOOL_CB_MAX) - return BCME_ERROR; + if (i == PKTPOOL_CB_MAX) { + err = BCME_ERROR; + goto done; + } ASSERT(pktp->ecbs[i].cb == NULL); pktp->ecbs[i].cb = cb; pktp->ecbs[i].arg = arg; pktp->ecbcnt++; - return 0; +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; } static int @@ -443,20 +541,32 @@ pktpool_empty_notify(pktpool_t *pktp) int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) { + int err = 0; int i; + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + ASSERT(cb); i = pktp->dbg_cbcnt; - if (i == PKTPOOL_CB_MAX) - return BCME_ERROR; + if (i == PKTPOOL_CB_MAX) { + err = BCME_ERROR; + goto done; + } ASSERT(pktp->dbg_cbs[i].cb == NULL); pktp->dbg_cbs[i].cb = cb; pktp->dbg_cbs[i].arg = arg; pktp->dbg_cbcnt++; - return 0; +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + 
return err; } int pktpool_dbg_notify(pktpool_t *pktp); @@ -466,11 +576,19 @@ pktpool_dbg_notify(pktpool_t *pktp) { int i; + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + for (i = 0; i < pktp->dbg_cbcnt; i++) { ASSERT(pktp->dbg_cbs[i].cb); pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg); } + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + return 0; } @@ -479,6 +597,10 @@ pktpool_dbg_dump(pktpool_t *pktp) { int i; + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen); for (i = 0; i < pktp->dbg_qlen; i++) { ASSERT(pktp->dbg_q[i].p); @@ -486,6 +608,10 @@ pktpool_dbg_dump(pktpool_t *pktp) pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p)); } + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + return 0; } @@ -495,6 +621,10 @@ pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats) int i; int state; + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + bzero(stats, sizeof(pktpool_stats_t)); for (i = 0; i < pktp->dbg_qlen; i++) { ASSERT(pktp->dbg_q[i].p != NULL); @@ -518,6 +648,10 @@ pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats) } } + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + return 0; } @@ -526,8 +660,12 @@ pktpool_start_trigger(pktpool_t *pktp, void *p) { uint32 cycles, i; + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + if (!PKTPOOL(OSH_NULL, p)) - return 0; + goto done; OSL_GETCYCLES(cycles); 
@@ -540,6 +678,11 @@ pktpool_start_trigger(pktpool_t *pktp, void *p) } } +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + return 0; } @@ -549,8 +692,12 @@ pktpool_stop_trigger(pktpool_t *pktp, void *p) { uint32 cycles, i; + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + if (!PKTPOOL(OSH_NULL, p)) - return 0; + goto done; OSL_GETCYCLES(cycles); @@ -572,6 +719,11 @@ pktpool_stop_trigger(pktpool_t *pktp, void *p) } } +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + return 0; } #endif /* BCMDBG_POOL */ @@ -580,7 +732,17 @@ int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp) { ASSERT(pktp); + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + pktp->availcb_excl = NULL; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + return 0; } @@ -588,8 +750,14 @@ int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb) { int i; + int err; ASSERT(pktp); + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + ASSERT(pktp->availcb_excl == NULL); for (i = 0; i < pktp->cbcnt; i++) { if (cb == pktp->cbs[i].cb) { @@ -599,9 +767,15 @@ pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb) } if (pktp->availcb_excl == NULL) - return BCME_ERROR; + err = BCME_ERROR; else - return 0; + err = 0; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; } static int @@ -643,6 +817,11 @@ pktpool_get(pktpool_t *pktp) { void *p; + /* protect shared resource */ + if 
(HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + p = pktpool_deq(pktp); if (p == NULL) { @@ -652,15 +831,25 @@ pktpool_get(pktpool_t *pktp) p = pktpool_deq(pktp); if (p == NULL) - return NULL; + goto done; } + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return NULL; + return p; } void pktpool_free(pktpool_t *pktp, void *p) { + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + ASSERT(p != NULL); #ifdef BCMDBG_POOL /* pktpool_stop_trigger(pktp, p); */ @@ -668,22 +857,51 @@ pktpool_free(pktpool_t *pktp, void *p) pktpool_enq(pktp, p); - if (pktp->emptycb_disable) - return; - + /** + * Feed critical DMA with freshly freed packets, to avoid DMA starvation. + * If any avail callback functions are registered, send a notification + * that a new packet is available in the pool. + */ if (pktp->cbcnt) { - if (pktp->empty == FALSE) - pktpool_avail_notify(pktp); + /* To more efficiently use the cpu cycles, callbacks can be temporarily disabled. + * This allows to feed on burst basis as opposed to inefficient per-packet basis. + */ + if (pktp->emptycb_disable == EMPTYCB_ENABLED) { + /** + * If the call originated from pktpool_empty_notify, the just freed packet + * is needed in pktpool_get. + * Therefore don't call pktpool_avail_notify. + */ + if (pktp->empty == FALSE) + pktpool_avail_notify(pktp); + } else { + /** + * The callback is temporarily disabled, log that a packet has been freed. 
+ */ + pktp->emptycb_disable = EMPTYCB_SKIPPED; + } } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return; } int pktpool_add(pktpool_t *pktp, void *p) { + int err = 0; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + ASSERT(p != NULL); - if (pktp->len == pktp->maxlen) - return BCME_RANGE; + if (pktp->len == pktp->maxlen) { + err = BCME_RANGE; + goto done; + } /* pkts in pool have same length */ ASSERT(pktp->plen == PKTLEN(OSH_NULL, p)); @@ -696,7 +914,12 @@ pktpool_add(pktpool_t *pktp, void *p) pktp->dbg_q[pktp->dbg_qlen++].p = p; #endif - return 0; +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; } /* Force pktpool_setmaxlen () into RAM as it uses a constant @@ -705,6 +928,10 @@ pktpool_add(pktpool_t *pktp, void *p) int BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen) { + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + if (maxlen > PKTPOOL_LEN_MAX) maxlen = PKTPOOL_LEN_MAX; @@ -714,6 +941,10 @@ BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen) */ pktp->maxlen = (pktp->len > maxlen) ? pktp->len : maxlen; + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + return pktp->maxlen; } @@ -722,12 +953,179 @@ pktpool_emptycb_disable(pktpool_t *pktp, bool disable) { ASSERT(pktp); - pktp->emptycb_disable = disable; + /** + * To more efficiently use the cpu cycles, callbacks can be temporarily disabled. + * If callback is going to be re-enabled, check if any packet got + * freed and added back to the pool while callback was disabled. 
+ * When this is the case do the callback now, provided that callback functions + * are registered and this call did not originate from pktpool_empty_notify. + */ + if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) && + (pktp->emptycb_disable == EMPTYCB_SKIPPED)) { + pktpool_avail_notify(pktp); + } + + /* Enable or temporarily disable callback when packet becomes available. */ + pktp->emptycb_disable = disable ? EMPTYCB_DISABLED : EMPTYCB_ENABLED; } bool pktpool_emptycb_disabled(pktpool_t *pktp) { ASSERT(pktp); - return pktp->emptycb_disable; + return pktp->emptycb_disable != EMPTYCB_ENABLED; +} + +#ifdef BCMPKTPOOL +#include + +pktpool_t *pktpool_shared = NULL; + +#ifdef BCMFRAGPOOL +pktpool_t *pktpool_shared_lfrag = NULL; +#endif /* BCMFRAGPOOL */ + +pktpool_t *pktpool_shared_rxlfrag = NULL; + +static osl_t *pktpool_osh = NULL; + +void +hnd_pktpool_init(osl_t *osh) +{ + int n; + + /* Construct a packet pool registry before initializing packet pools */ + n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID); + if (n != PKTPOOL_MAXIMUM_ID) { + ASSERT(0); + return; + } + + pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t)); + if (pktpool_shared == NULL) { + ASSERT(0); + goto error1; + } + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t)); + if (pktpool_shared_lfrag == NULL) { + ASSERT(0); + goto error2; + } +#endif + +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t)); + if (pktpool_shared_rxlfrag == NULL) { + ASSERT(0); + goto error3; + } +#endif + + + /* + * At this early stage, there's not enough memory to allocate all + * requested pkts in the shared pool. Need to add to the pool + * after reclaim + * + * n = NRXBUFPOST + SDPCMD_RXBUFS; + * + * Initialization of packet pools may fail (BCME_ERROR), if the packet pool + * registry is not initialized or the registry is depleted. 
+ * + * A BCME_NOMEM error only indicates that the requested number of packets + * were not filled into the pool. + */ + n = 1; + if (pktpool_init(osh, pktpool_shared, + &n, PKTBUFSZ, FALSE, lbuf_basic) == BCME_ERROR) { + ASSERT(0); + goto error4; + } + pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN); + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + n = 1; + if (pktpool_init(osh, pktpool_shared_lfrag, + &n, PKTFRAGSZ, TRUE, lbuf_frag) == BCME_ERROR) { + ASSERT(0); + goto error5; + } + pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN); +#endif +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + n = 1; + if (pktpool_init(osh, pktpool_shared_rxlfrag, + &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag) == BCME_ERROR) { + ASSERT(0); + goto error6; + } + pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN); +#endif + + pktpool_osh = osh; + + return; + +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) +error6: +#endif + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + pktpool_deinit(osh, pktpool_shared_lfrag); +error5: +#endif + +#if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \ + (defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)) + pktpool_deinit(osh, pktpool_shared); +#endif + +error4: +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + hnd_free(pktpool_shared_rxlfrag); + pktpool_shared_rxlfrag = (pktpool_t *)NULL; +error3: +#endif /* BCMRXFRAGPOOL */ + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + hnd_free(pktpool_shared_lfrag); + pktpool_shared_lfrag = (pktpool_t *)NULL; +error2: +#endif /* BCMFRAGPOOL */ + + hnd_free(pktpool_shared); + pktpool_shared = (pktpool_t *)NULL; + +error1: + pktpool_dettach(osh); +} + +void +hnd_pktpool_fill(pktpool_t *pktpool, bool minimal) +{ + pktpool_fill(pktpool_osh, pktpool, minimal); +} + +/* refill pktpools after reclaim */ +void +hnd_pktpool_refill(bool minimal) +{ + if (POOL_ENAB(pktpool_shared)) { + 
pktpool_fill(pktpool_osh, pktpool_shared, minimal); + } +/* fragpool reclaim */ +#ifdef BCMFRAGPOOL + if (POOL_ENAB(pktpool_shared_lfrag)) { + pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal); + } +#endif /* BCMFRAGPOOL */ +/* rx fragpool reclaim */ +#ifdef BCMRXFRAGPOOL + if (POOL_ENAB(pktpool_shared_rxlfrag)) { + pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal); + } +#endif } +#endif /* BCMPKTPOOL */ diff --git a/drivers/net/wireless/bcmdhd/hnd_pktq.c b/drivers/amlogic/wifi/bcmdhd/hnd_pktq.c similarity index 51% rename from drivers/net/wireless/bcmdhd/hnd_pktq.c rename to drivers/amlogic/wifi/bcmdhd/hnd_pktq.c index 91039bb2d85a7..71de6af41098e 100644 --- a/drivers/net/wireless/bcmdhd/hnd_pktq.c +++ b/drivers/amlogic/wifi/bcmdhd/hnd_pktq.c @@ -1,16 +1,51 @@ /* * HND generic pktq operation primitives * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: $ + * + * <> + * + * $Id: hnd_pktq.c 605726 2015-12-11 07:08:16Z $ */ #include #include +#include #include #include +/* mutex macros for thread safe */ +#ifdef HND_PKTQ_THREAD_SAFE +#define HND_PKTQ_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex) +#define HND_PKTQ_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex) +#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec) +#define HND_PKTQ_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex) +#else +#define HND_PKTQ_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS +#endif + /* * osl multiple-precedence packet queue * hi_prec is always >= the number of the highest non-empty precedence @@ -20,8 +55,13 @@ pktq_penq(struct pktq *pq, int prec, void *p) { struct pktq_prec *q; + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + ASSERT(prec >= 0 && prec < pq->num_prec); - ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ + /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */ + ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p))); ASSERT(!pktq_full(pq)); ASSERT(!pktq_pfull(pq, prec)); @@ -41,6 +81,10 @@ pktq_penq(struct pktq *pq, int prec, void *p) if (pq->hi_prec < prec) pq->hi_prec = (uint8)prec; + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + return p; } @@ -49,8 +93,13 @@ pktq_penq_head(struct pktq *pq, int prec, void *p) { struct pktq_prec *q; + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, 
OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + ASSERT(prec >= 0 && prec < pq->num_prec); - ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ + /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */ + ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p))); ASSERT(!pktq_full(pq)); ASSERT(!pktq_pfull(pq, prec)); @@ -69,6 +118,10 @@ pktq_penq_head(struct pktq *pq, int prec, void *p) if (pq->hi_prec < prec) pq->hi_prec = (uint8)prec; + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + return p; } @@ -81,11 +134,15 @@ pktq_append(struct pktq *pq, int prec, struct spktq *list) struct pktq_prec *q; struct pktq_prec *list_q; + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + list_q = &list->q[0]; /* empty list check */ if (list_q->head == NULL) - return; + goto done; ASSERT(prec >= 0 && prec < pq->num_prec); ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */ @@ -111,6 +168,11 @@ pktq_append(struct pktq *pq, int prec, struct spktq *list) list_q->tail = NULL; list_q->len = 0; list->len = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; } /* @@ -122,11 +184,15 @@ pktq_prepend(struct pktq *pq, int prec, struct spktq *list) struct pktq_prec *q; struct pktq_prec *list_q; + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + list_q = &list->q[0]; /* empty list check */ if (list_q->head == NULL) - return; + goto done; ASSERT(prec >= 0 && prec < pq->num_prec); ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */ @@ -158,6 +224,11 @@ pktq_prepend(struct pktq *pq, int prec, struct spktq *list) list_q->tail = NULL; list_q->len = 0; list->len = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; } void 
* BCMFASTPATH @@ -166,12 +237,16 @@ pktq_pdeq(struct pktq *pq, int prec) struct pktq_prec *q; void *p; + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + ASSERT(prec >= 0 && prec < pq->num_prec); q = &pq->q[prec]; if ((p = q->head) == NULL) - return NULL; + goto done; if ((q->head = PKTLINK(p)) == NULL) q->tail = NULL; @@ -182,6 +257,11 @@ pktq_pdeq(struct pktq *pq, int prec) PKTSETLINK(p, NULL); +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + return p; } @@ -189,17 +269,21 @@ void * BCMFASTPATH pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p) { struct pktq_prec *q; - void *p; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; ASSERT(prec >= 0 && prec < pq->num_prec); q = &pq->q[prec]; if (prev_p == NULL) - return NULL; + goto done; if ((p = PKTLINK(prev_p)) == NULL) - return NULL; + goto done; q->len--; @@ -208,6 +292,11 @@ pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p) PKTSETLINK(prev_p, PKTLINK(p)); PKTSETLINK(p, NULL); +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + return p; } @@ -217,6 +306,10 @@ pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg) struct pktq_prec *q; void *p, *prev = NULL; + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + ASSERT(prec >= 0 && prec < pq->num_prec); q = &pq->q[prec]; @@ -231,7 +324,7 @@ pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg) } } if (p == NULL) - return NULL; + goto done; if (prev == NULL) { if ((q->head = PKTLINK(p)) == NULL) { @@ -250,6 +343,11 @@ pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg) PKTSETLINK(p, NULL); +done: + /* protect shared resource 
*/ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + return p; } @@ -259,12 +357,16 @@ pktq_pdeq_tail(struct pktq *pq, int prec) struct pktq_prec *q; void *p, *prev; + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + ASSERT(prec >= 0 && prec < pq->num_prec); q = &pq->q[prec]; if ((p = q->head) == NULL) - return NULL; + goto done; for (prev = NULL; p != q->tail; p = PKTLINK(p)) prev = p; @@ -279,6 +381,11 @@ pktq_pdeq_tail(struct pktq *pq, int prec) pq->len--; +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + return p; } @@ -286,45 +393,60 @@ void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg) { struct pktq_prec *q; - void *p, *prev = NULL; + void *p, *next, *prev = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; q = &pq->q[prec]; p = q->head; while (p) { + next = PKTLINK(p); if (fn == NULL || (*fn)(p, arg)) { bool head = (p == q->head); if (head) - q->head = PKTLINK(p); + q->head = next; else - PKTSETLINK(prev, PKTLINK(p)); + PKTSETLINK(prev, next); PKTSETLINK(p, NULL); PKTFREE(osh, p, dir); q->len--; pq->len--; - p = (head ? q->head : PKTLINK(prev)); } else { prev = p; - p = PKTLINK(p); } + p = next; } + q->tail = prev; + if (q->head == NULL) { ASSERT(q->len == 0); - q->tail = NULL; + ASSERT(q->tail == NULL); } + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; } bool BCMFASTPATH pktq_pdel(struct pktq *pq, void *pktbuf, int prec) { + bool ret = FALSE; struct pktq_prec *q; - void *p; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; ASSERT(prec >= 0 && prec < pq->num_prec); /* Should this just assert pktbuf? 
*/ if (!pktbuf) - return FALSE; + goto done; q = &pq->q[prec]; @@ -335,7 +457,7 @@ pktq_pdel(struct pktq *pq, void *pktbuf, int prec) for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p)) ; if (p == NULL) - return FALSE; + goto done; PKTSETLINK(p, PKTLINK(pktbuf)); if (q->tail == pktbuf) @@ -345,14 +467,24 @@ pktq_pdel(struct pktq *pq, void *pktbuf, int prec) q->len--; pq->len--; PKTSETLINK(pktbuf, NULL); - return TRUE; + ret = TRUE; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; } -void +bool pktq_init(struct pktq *pq, int num_prec, int max_len) { int prec; + if (HND_PKTQ_MUTEX_CREATE("pktq", &pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC); /* pq is variable size; only zero out what's requested */ @@ -364,6 +496,17 @@ pktq_init(struct pktq *pq, int num_prec, int max_len) for (prec = 0; prec < num_prec; prec++) pq->q[prec].max = pq->max; + + return TRUE; +} + +bool +pktq_deinit(struct pktq *pq) +{ + if (HND_PKTQ_MUTEX_DELETE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return TRUE; } void @@ -371,27 +514,39 @@ pktq_set_max_plen(struct pktq *pq, int prec, int max_len) { ASSERT(prec >= 0 && prec < pq->num_prec); + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + if (prec < pq->num_prec) pq->q[prec].max = (uint16)max_len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; } void * BCMFASTPATH pktq_deq(struct pktq *pq, int *prec_out) { struct pktq_prec *q; - void *p; + void *p = NULL; int prec; - if (pq->len == 0) + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) return NULL; + if (pq->len == 0) + goto done; + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) pq->hi_prec--; q = &pq->q[prec]; if ((p = q->head) == 
NULL) - return NULL; + goto done; if ((q->head = PKTLINK(p)) == NULL) q->tail = NULL; @@ -405,6 +560,11 @@ pktq_deq(struct pktq *pq, int *prec_out) PKTSETLINK(p, NULL); +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + return p; } @@ -412,12 +572,16 @@ void * BCMFASTPATH pktq_deq_tail(struct pktq *pq, int *prec_out) { struct pktq_prec *q; - void *p, *prev; + void *p = NULL, *prev; int prec; - if (pq->len == 0) + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) return NULL; + if (pq->len == 0) + goto done; + for (prec = 0; prec < pq->hi_prec; prec++) if (pq->q[prec].head) break; @@ -425,7 +589,7 @@ pktq_deq_tail(struct pktq *pq, int *prec_out) q = &pq->q[prec]; if ((p = q->head) == NULL) - return NULL; + goto done; for (prev = NULL; p != q->tail; p = PKTLINK(p)) prev = p; @@ -445,6 +609,11 @@ pktq_deq_tail(struct pktq *pq, int *prec_out) PKTSETLINK(p, NULL); +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + return p; } @@ -452,27 +621,44 @@ void * pktq_peek(struct pktq *pq, int *prec_out) { int prec; + void *p = NULL; - if (pq->len == 0) + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) return NULL; + if (pq->len == 0) + goto done; + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) pq->hi_prec--; if (prec_out) *prec_out = prec; - return (pq->q[prec].head); + p = pq->q[prec].head; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; } void * pktq_peek_tail(struct pktq *pq, int *prec_out) { int prec; + void *p = NULL; - if (pq->len == 0) + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) return NULL; + if (pq->len == 0) + goto done; + for (prec = 0; prec < 
pq->hi_prec; prec++) if (pq->q[prec].head) break; @@ -480,7 +666,14 @@ pktq_peek_tail(struct pktq *pq, int *prec_out) if (prec_out) *prec_out = prec; - return (pq->q[prec].tail); + p = pq->q[prec].tail; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; } void @@ -488,17 +681,25 @@ pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg) { int prec; + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + /* Optimize flush, if pktq len = 0, just return. * pktq len of 0 means pktq's prec q's are all empty. */ - if (pq->len == 0) { - return; - } + if (pq->len == 0) + goto done; for (prec = 0; prec < pq->num_prec; prec++) pktq_pflush(osh, pq, prec, dir, fn, arg); if (fn == NULL) ASSERT(pq->len == 0); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; } /* Return sum of lengths of a specific set of precedences */ @@ -507,12 +708,20 @@ pktq_mlen(struct pktq *pq, uint prec_bmp) { int prec, len; + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + len = 0; for (prec = 0; prec <= pq->hi_prec; prec++) if (prec_bmp & (1 << prec)) len += pq->q[prec].len; + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + return len; } @@ -521,28 +730,36 @@ void * BCMFASTPATH pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out) { struct pktq_prec *q; - void *p; + void *p = NULL; int prec; - if (pq->len == 0) - { + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) return NULL; - } + + if (pq->len == 0) + goto done; + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) pq->hi_prec--; while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL) if (prec-- == 0) - 
return NULL; + goto done; q = &pq->q[prec]; if ((p = q->head) == NULL) - return NULL; + goto done; if (prec_out) *prec_out = prec; +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + return p; } /* Priority dequeue from a specific set of precedences */ @@ -550,23 +767,27 @@ void * BCMFASTPATH pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) { struct pktq_prec *q; - void *p; + void *p = NULL; int prec; - if (pq->len == 0) + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) return NULL; + if (pq->len == 0) + goto done; + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) pq->hi_prec--; while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0)) if (prec-- == 0) - return NULL; + goto done; q = &pq->q[prec]; if ((p = q->head) == NULL) - return NULL; + goto done; if ((q->head = PKTLINK(p)) == NULL) q->tail = NULL; @@ -586,5 +807,88 @@ pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) PKTSETLINK(p, NULL); +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + return p; } + +#ifdef HND_PKTQ_THREAD_SAFE +int +pktq_pavail(struct pktq *pq, int prec) +{ + int ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + ret = pq->q[prec].max - pq->q[prec].len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return ret; +} + +bool +pktq_pfull(struct pktq *pq, int prec) +{ + bool ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + ret = pq->q[prec].len >= pq->q[prec].max; + + /* protect shared resource */ + if 
(HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} + +int +pktq_avail(struct pktq *pq) +{ + int ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + ret = pq->max - pq->len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return ret; +} + +bool +pktq_full(struct pktq *pq) +{ + bool ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ret = pq->len >= pq->max; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} +#endif /* HND_PKTQ_THREAD_SAFE */ diff --git a/drivers/net/wireless/bcmdhd/hndpmu.c b/drivers/amlogic/wifi/bcmdhd/hndpmu.c similarity index 78% rename from drivers/net/wireless/bcmdhd/hndpmu.c rename to drivers/amlogic/wifi/bcmdhd/hndpmu.c index b551754a4bb3f..c0c658203dda1 100644 --- a/drivers/net/wireless/bcmdhd/hndpmu.c +++ b/drivers/amlogic/wifi/bcmdhd/hndpmu.c @@ -2,9 +2,30 @@ * Misc utility routines for accessing PMU corerev specific features * of the SiliconBackplane-based Broadcom chips. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: hndpmu.c 475037 2014-05-02 23:55:49Z $ + * + * <> + * + * $Id: hndpmu.c 530092 2015-01-29 04:44:58Z $ */ @@ -160,23 +181,28 @@ static const sdiod_drive_str_t sdiod_drive_strength_tab7_1v8[] = { void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength) { - chipcregs_t *cc = NULL; - uint origidx, intr_val = 0; sdiod_drive_str_t *str_tab = NULL; uint32 str_mask = 0; /* only alter desired bits in PMU chipcontrol 1 register */ uint32 str_shift = 0; uint32 str_ovr_pmuctl = PMU_CHIPCTL0; /* PMU chipcontrol register containing override bit */ uint32 str_ovr_pmuval = 0; /* position of bit within this register */ + pmuregs_t *pmu; + uint origidx; if (!(sih->cccaps & CC_CAP_PMU)) { return; } - /* Remember original core before switch to chipc */ - if (CHIPID(sih->chip) == BCM43362_CHIP_ID) - cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val); + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); - switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) { + switch (SDIOD_DRVSTR_KEY(CHIPID(sih->chip), sih->pmurev)) { case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1): str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1; str_mask = 0x30000000; @@ -230,40 +256,12 @@ si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength) break; default: PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", - bcm_chipname(sih->chip, chn, 8), sih->chiprev, sih->pmurev)); + bcm_chipname( + CHIPID(sih->chip), chn, 8), CHIPREV(sih->chiprev), sih->pmurev)); break; } - if (CHIPID(sih->chip) == BCM43362_CHIP_ID) { - if (str_tab != NULL && cc != NULL) { - uint32 cc_data_temp; - int i; - - /* Pick the lowest available drive strength equal or greater than the - * requested strength. Drive strength of 0 requests tri-state. 
- */ - for (i = 0; drivestrength < str_tab[i].strength; i++) - ; - - if (i > 0 && drivestrength > str_tab[i].strength) - i--; - - W_REG(osh, &cc->chipcontrol_addr, PMU_CHIPCTL1); - cc_data_temp = R_REG(osh, &cc->chipcontrol_data); - cc_data_temp &= ~str_mask; - cc_data_temp |= str_tab[i].sel << str_shift; - W_REG(osh, &cc->chipcontrol_data, cc_data_temp); - if (str_ovr_pmuval) { /* enables the selected drive strength */ - W_REG(osh, &cc->chipcontrol_addr, str_ovr_pmuctl); - OR_REG(osh, &cc->chipcontrol_data, str_ovr_pmuval); - } - PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n", - drivestrength, str_tab[i].strength)); - } - /* Return to original core */ - si_restore_core(sih, origidx, intr_val); - } - else if (str_tab != NULL) { + if (str_tab != NULL) { uint32 cc_data_temp; int i; @@ -276,16 +274,19 @@ si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength) if (i > 0 && drivestrength > str_tab[i].strength) i--; - W_REG(osh, PMUREG(sih, chipcontrol_addr), PMU_CHIPCTL1); - cc_data_temp = R_REG(osh, PMUREG(sih, chipcontrol_data)); + W_REG(osh, &pmu->chipcontrol_addr, PMU_CHIPCTL1); + cc_data_temp = R_REG(osh, &pmu->chipcontrol_data); cc_data_temp &= ~str_mask; cc_data_temp |= str_tab[i].sel << str_shift; - W_REG(osh, PMUREG(sih, chipcontrol_data), cc_data_temp); + W_REG(osh, &pmu->chipcontrol_data, cc_data_temp); if (str_ovr_pmuval) { /* enables the selected drive strength */ - W_REG(osh, PMUREG(sih, chipcontrol_addr), str_ovr_pmuctl); - OR_REG(osh, PMUREG(sih, chipcontrol_data), str_ovr_pmuval); + W_REG(osh, &pmu->chipcontrol_addr, str_ovr_pmuctl); + OR_REG(osh, &pmu->chipcontrol_data, str_ovr_pmuval); } PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n", drivestrength, str_tab[i].strength)); } + + /* Return to original core */ + si_setcoreidx(sih, origidx); } /* si_sdiod_drive_strength_init */ diff --git a/drivers/net/wireless/bcmdhd/include/aidmp.h b/drivers/amlogic/wifi/bcmdhd/include/aidmp.h similarity index 83% rename 
from drivers/net/wireless/bcmdhd/include/aidmp.h rename to drivers/amlogic/wifi/bcmdhd/include/aidmp.h index 6a7b78de9b247..6654364b91036 100644 --- a/drivers/net/wireless/bcmdhd/include/aidmp.h +++ b/drivers/amlogic/wifi/bcmdhd/include/aidmp.h @@ -1,9 +1,30 @@ /* * Broadcom AMBA Interconnect definitions. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: aidmp.h 456346 2014-02-18 16:48:52Z $ + * + * <> + * + * $Id: aidmp.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _AIDMP_H @@ -40,6 +61,7 @@ #define ER_ADD 4 #define ER_END 0xe #define ER_BAD 0xffffffff +#define ER_SZ_MAX 4096 /* 4KB */ /* EROM CompIdentA */ #define CIA_MFG_MASK 0xfff00000 @@ -298,14 +320,14 @@ typedef volatile struct _aidmp { #define AI_RESETREADID 0x808 #define AI_RESETWRITEID 0x80c -#define AI_ERRLOGCTRL 0xa00 -#define AI_ERRLOGDONE 0xa04 -#define AI_ERRLOGSTATUS 0xa08 -#define AI_ERRLOGADDRLO 0xa0c -#define AI_ERRLOGADDRHI 0xa10 -#define AI_ERRLOGID 0xa14 -#define AI_ERRLOGUSER 0xa18 -#define AI_ERRLOGFLAGS 0xa1c +#define AI_ERRLOGCTRL 0x900 +#define AI_ERRLOGDONE 0x904 +#define AI_ERRLOGSTATUS 0x908 +#define AI_ERRLOGADDRLO 0x90c +#define AI_ERRLOGADDRHI 0x910 +#define AI_ERRLOGID 0x914 +#define AI_ERRLOGUSER 0x918 +#define AI_ERRLOGFLAGS 0x91c #define AI_INTSTATUS 0xa00 #define AI_CONFIG 0xe00 #define AI_ITCR 0xf00 @@ -342,6 +364,17 @@ typedef volatile struct _aidmp { /* resetctrl */ #define AIRC_RESET 1 +/* errlogctrl */ +#define AIELC_TO_EXP_MASK 0x0000001f0 /* backplane timeout exponent */ +#define AIELC_TO_EXP_SHIFT 4 +#define AIELC_TO_ENAB_SHIFT 9 /* backplane timeout enable */ + +/* errlogdone */ +#define AIELD_ERRDONE_MASK 0x3 + +/* errlogstatus */ +#define AIELS_TIMEOUT_MASK 0x3 + /* config */ #define AICFG_OOB 0x00000020 #define AICFG_IOS 0x00000010 @@ -364,5 +397,6 @@ typedef volatile struct _aidmp { #define AI_OOBSEL_5_SHIFT 8 #define AI_OOBSEL_6_SHIFT 16 #define AI_OOBSEL_7_SHIFT 24 +#define AI_IOCTRL_ENABLE_D11_PME (1 << 14) #endif /* _AIDMP_H */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/bcm_cfg.h b/drivers/amlogic/wifi/bcmdhd/include/bcm_cfg.h new file mode 100644 index 0000000000000..e71f5c82da6c2 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/bcm_cfg.h @@ -0,0 +1,32 @@ +/* + * BCM common config options + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a 
separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcm_cfg.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _bcm_cfg_h_ +#define _bcm_cfg_h_ +#endif /* _bcm_cfg_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h b/drivers/amlogic/wifi/bcmdhd/include/bcm_mpool_pub.h similarity index 89% rename from drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h rename to drivers/amlogic/wifi/bcmdhd/include/bcm_mpool_pub.h index 0375285ee1aa0..79ae0f5d4a9cd 100644 --- a/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcm_mpool_pub.h @@ -35,9 +35,30 @@ * and instrumentation on top of the heap, without modifying the heap * allocation implementation. 
* - * $Copyright Open Broadcom Corporation$ - * - * $Id: bcm_mpool_pub.h 407097 2013-06-11 18:43:16Z $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcm_mpool_pub.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _BCM_MPOOL_PUB_H diff --git a/drivers/amlogic/wifi/bcmdhd/include/bcm_ring.h b/drivers/amlogic/wifi/bcmdhd/include/bcm_ring.h new file mode 100644 index 0000000000000..5f1b38c65e3cd --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/bcm_ring.h @@ -0,0 +1,616 @@ +#ifndef __bcm_ring_included__ +#define __bcm_ring_included__ + +/* + * +---------------------------------------------------------------------------- + * + * bcm_ring.h : Ring context abstraction + * + * The ring context tracks the WRITE and READ indices where elements may be + * produced and consumed respectively. All elements in the ring need to be + * fixed size. 
+ * + * NOTE: A ring of size N, may only hold N-1 elements. + * + * +---------------------------------------------------------------------------- + * + * API Notes: + * + * Ring manipulation API allows for: + * Pending operations: Often before some work can be completed, it may be + * desired that several resources are available, e.g. space for production in + * a ring. Approaches such as, #1) reserve resources one by one and return them + * if another required resource is not available, or #2) employ a two pass + * algorithm of first testing whether all resources are available, have a + * an impact on performance critical code. The approach taken here is more akin + * to approach #2, where a test for resource availability essentially also + * provides the index for production in an un-committed state. + * The same approach is taken for the consumer side. + * + * - Pending production: Fetch the next index where a ring element may be + * produced. The caller may not commit the WRITE of the element. + * - Pending consumption: Fetch the next index where a ring element may be + * consumed. The caller may not commut the READ of the element. 
+ * + * Producer side API: + * - bcm_ring_is_full : Test whether ring is full + * - bcm_ring_prod : Fetch index where an element may be produced (commit) + * - bcm_ring_prod_pend: Fetch index where an element may be produced (pending) + * - bcm_ring_prod_done: Commit a previous pending produce fetch + * - bcm_ring_prod_avail: Fetch total number free slots eligible for production + * + * Consumer side API: + * - bcm_ring_is_empty : Test whether ring is empty + * - bcm_ring_cons : Fetch index where an element may be consumed (commit) + * - bcm_ring_cons_pend: Fetch index where an element may be consumed (pending) + * - bcm_ring_cons_done: Commit a previous pending consume fetch + * - bcm_ring_cons_avail: Fetch total number elements eligible for consumption + * + * - bcm_ring_sync_read: Sync read offset in peer ring, from local ring + * - bcm_ring_sync_write: Sync write offset in peer ring, from local ring + * + * +---------------------------------------------------------------------------- + * + * Design Notes: + * Following items are not tracked in a ring context (design decision) + * - width of a ring element. + * - depth of the ring. + * - base of the buffer, where the elements are stored. + * - count of number of free slots in the ring + * + * Implementation Notes: + * - When BCM_RING_DEBUG is enabled, need explicit bcm_ring_init(). + * - BCM_RING_EMPTY and BCM_RING_FULL are (-1) + * + * +---------------------------------------------------------------------------- + * + * Usage Notes: + * An application may incarnate a ring of some fixed sized elements, by defining + * - a ring data buffer to store the ring elements. + * - depth of the ring (max number of elements managed by ring context). + * Preferrably, depth may be represented as a constant. + * - width of a ring element: to be used in pointer arithmetic with the ring's + * data buffer base and an index to fetch the ring element. 
+ * + * Use bcm_workq_t to instantiate a pair of workq constructs, one for the + * producer and the other for the consumer, both pointing to the same circular + * buffer. The producer may operate on it's own local workq and flush the write + * index to the consumer. Likewise the consumer may use its local workq and + * flush the read index to the producer. This way we do not repeatedly access + * the peer's context. The two peers may reside on different CPU cores with a + * private L1 data cache. + * +---------------------------------------------------------------------------- + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * <> + * + * $Id: bcm_ring.h 591283 2015-10-07 11:52:00Z $ + * + * -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- + * vim: set ts=4 noet sw=4 tw=80: + * + * +---------------------------------------------------------------------------- + */ + +#ifdef ____cacheline_aligned +#define __ring_aligned ____cacheline_aligned +#else +#define __ring_aligned +#endif + +/* Conditional compile for debug */ +/* #define BCM_RING_DEBUG */ + +#define BCM_RING_EMPTY (-1) +#define BCM_RING_FULL (-1) +#define BCM_RING_NULL ((bcm_ring_t *)NULL) + +#if defined(BCM_RING_DEBUG) +#define RING_ASSERT(exp) ASSERT(exp) +#define BCM_RING_IS_VALID(ring) (((ring) != BCM_RING_NULL) && \ + ((ring)->self == (ring))) +#else /* ! BCM_RING_DEBUG */ +#define RING_ASSERT(exp) do {} while (0) +#define BCM_RING_IS_VALID(ring) ((ring) != BCM_RING_NULL) +#endif /* ! BCM_RING_DEBUG */ + +#define BCM_RING_SIZE_IS_VALID(ring_size) ((ring_size) > 0) + +/* + * +---------------------------------------------------------------------------- + * Ring Context + * +---------------------------------------------------------------------------- + */ +typedef struct bcm_ring { /* Ring context */ +#if defined(BCM_RING_DEBUG) + struct bcm_ring *self; /* ptr to self for IS VALID test */ +#endif /* BCM_RING_DEBUG */ + int write __ring_aligned; /* WRITE index in a circular ring */ + int read __ring_aligned; /* READ index in a circular ring */ +} bcm_ring_t; + + +static INLINE void bcm_ring_init(bcm_ring_t *ring); +static INLINE void bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from); +static INLINE bool bcm_ring_is_empty(bcm_ring_t *ring); + +static INLINE int __bcm_ring_next_write(bcm_ring_t *ring, const int ring_size); + +static INLINE bool __bcm_ring_full(bcm_ring_t *ring, int next_write); +static INLINE bool bcm_ring_is_full(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_prod_done(bcm_ring_t *ring, int write); +static INLINE int bcm_ring_prod_pend(bcm_ring_t *ring, int 
*pend_write, + const int ring_size); +static INLINE int bcm_ring_prod(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_cons_done(bcm_ring_t *ring, int read); +static INLINE int bcm_ring_cons_pend(bcm_ring_t *ring, int *pend_read, + const int ring_size); +static INLINE int bcm_ring_cons(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self); +static INLINE void bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self); + +static INLINE int bcm_ring_prod_avail(const bcm_ring_t *ring, + const int ring_size); +static INLINE int bcm_ring_cons_avail(const bcm_ring_t *ring, + const int ring_size); +static INLINE void bcm_ring_cons_all(bcm_ring_t *ring); + + +/** + * bcm_ring_init - initialize a ring context. + * @ring: pointer to a ring context + */ +static INLINE void +bcm_ring_init(bcm_ring_t *ring) +{ + ASSERT(ring != (bcm_ring_t *)NULL); +#if defined(BCM_RING_DEBUG) + ring->self = ring; +#endif /* BCM_RING_DEBUG */ + ring->write = 0; + ring->read = 0; +} + +/** + * bcm_ring_copy - copy construct a ring + * @to: pointer to the new ring context + * @from: pointer to orig ring context + */ +static INLINE void +bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from) +{ + bcm_ring_init(to); + + to->write = from->write; + to->read = from->read; +} + +/** + * bcm_ring_is_empty - "Boolean" test whether ring is empty. + * @ring: pointer to a ring context + * + * PS. does not return BCM_RING_EMPTY value. + */ +static INLINE bool +bcm_ring_is_empty(bcm_ring_t *ring) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring)); + return (ring->read == ring->write); +} + + +/** + * __bcm_ring_next_write - determine the index where the next write may occur + * (with wrap-around). + * @ring: pointer to a ring context + * @ring_size: size of the ring + * + * PRIVATE INTERNAL USE ONLY. 
+ */ +static INLINE int +__bcm_ring_next_write(bcm_ring_t *ring, const int ring_size) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + return ((ring->write + 1) % ring_size); +} + + +/** + * __bcm_ring_full - support function for ring full test. + * @ring: pointer to a ring context + * @next_write: next location in ring where an element is to be produced + * + * PRIVATE INTERNAL USE ONLY. + */ +static INLINE bool +__bcm_ring_full(bcm_ring_t *ring, int next_write) +{ + return (next_write == ring->read); +} + + +/** + * bcm_ring_is_full - "Boolean" test whether a ring is full. + * @ring: pointer to a ring context + * @ring_size: size of the ring + * + * PS. does not return BCM_RING_FULL value. + */ +static INLINE bool +bcm_ring_is_full(bcm_ring_t *ring, const int ring_size) +{ + int next_write; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + next_write = __bcm_ring_next_write(ring, ring_size); + return __bcm_ring_full(ring, next_write); +} + + +/** + * bcm_ring_prod_done - commit a previously pending index where production + * was requested. + * @ring: pointer to a ring context + * @write: index into ring upto where production was done. + * +---------------------------------------------------------------------------- + */ +static INLINE void +bcm_ring_prod_done(bcm_ring_t *ring, int write) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring)); + ring->write = write; +} + + +/** + * bcm_ring_prod_pend - Fetch in "pend" mode, the index where an element may be + * produced. 
+ * @ring: pointer to a ring context + * @pend_write: next index, after the returned index + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_prod_pend(bcm_ring_t *ring, int *pend_write, const int ring_size) +{ + int rtn; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + *pend_write = __bcm_ring_next_write(ring, ring_size); + if (__bcm_ring_full(ring, *pend_write)) { + *pend_write = BCM_RING_FULL; + rtn = BCM_RING_FULL; + } else { + /* production is not committed, caller needs to explicitly commit */ + rtn = ring->write; + } + return rtn; +} + + +/** + * bcm_ring_prod - Fetch and "commit" the next index where a ring element may + * be produced. + * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_prod(bcm_ring_t *ring, const int ring_size) +{ + int next_write, prod_write; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + + next_write = __bcm_ring_next_write(ring, ring_size); + if (__bcm_ring_full(ring, next_write)) { + prod_write = BCM_RING_FULL; + } else { + prod_write = ring->write; + bcm_ring_prod_done(ring, next_write); /* "commit" production */ + } + return prod_write; +} + + +/** + * bcm_ring_cons_done - commit a previously pending read + * @ring: pointer to a ring context + * @read: index upto which elements have been consumed. + */ +static INLINE void +bcm_ring_cons_done(bcm_ring_t *ring, int read) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring)); + ring->read = read; +} + + +/** + * bcm_ring_cons_pend - fetch in "pend" mode, the next index where a ring + * element may be consumed. + * @ring: pointer to a ring context + * @pend_read: index into ring upto which elements may be consumed. 
+ * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_cons_pend(bcm_ring_t *ring, int *pend_read, const int ring_size) +{ + int rtn; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (bcm_ring_is_empty(ring)) { + *pend_read = BCM_RING_EMPTY; + rtn = BCM_RING_EMPTY; + } else { + *pend_read = (ring->read + 1) % ring_size; + /* production is not committed, caller needs to explicitly commit */ + rtn = ring->read; + } + return rtn; +} + + +/** + * bcm_ring_cons - fetch and "commit" the next index where a ring element may + * be consumed. + * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_cons(bcm_ring_t *ring, const int ring_size) +{ + int cons_read; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (bcm_ring_is_empty(ring)) { + cons_read = BCM_RING_EMPTY; + } else { + cons_read = ring->read; + ring->read = (ring->read + 1) % ring_size; /* read is committed */ + } + return cons_read; +} + + +/** + * bcm_ring_sync_read - on consumption, update peer's read index. + * @peer: pointer to peer's producer ring context + * @self: pointer to consumer's ring context + */ +static INLINE void +bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self) +{ + RING_ASSERT(BCM_RING_IS_VALID(peer)); + RING_ASSERT(BCM_RING_IS_VALID(self)); + peer->read = self->read; /* flush read update to peer producer */ +} + + +/** + * bcm_ring_sync_write - on consumption, update peer's write index. + * @peer: pointer to peer's consumer ring context + * @self: pointer to producer's ring context + */ +static INLINE void +bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self) +{ + RING_ASSERT(BCM_RING_IS_VALID(peer)); + RING_ASSERT(BCM_RING_IS_VALID(self)); + peer->write = self->write; /* flush write update to peer consumer */ +} + + +/** + * bcm_ring_prod_avail - fetch total number of available empty slots in the + * ring for production. 
+ * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_prod_avail(const bcm_ring_t *ring, const int ring_size) +{ + int prod_avail; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (ring->write >= ring->read) { + prod_avail = (ring_size - (ring->write - ring->read) - 1); + } else { + prod_avail = (ring->read - (ring->write + 1)); + } + ASSERT(prod_avail < ring_size); + return prod_avail; +} + + +/** + * bcm_ring_cons_avail - fetch total number of available elements for consumption. + * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_cons_avail(const bcm_ring_t *ring, const int ring_size) +{ + int cons_avail; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (ring->read == ring->write) { + cons_avail = 0; + } else if (ring->read > ring->write) { + cons_avail = ((ring_size - ring->read) + ring->write); + } else { + cons_avail = ring->write - ring->read; + } + ASSERT(cons_avail < ring_size); + return cons_avail; +} + + +/** + * bcm_ring_cons_all - set ring in state where all elements are consumed. + * @ring: pointer to a ring context + */ +static INLINE void +bcm_ring_cons_all(bcm_ring_t *ring) +{ + ring->read = ring->write; +} + + +/** + * Work Queue + * A work Queue is composed of a ring of work items, of a specified depth. + * It HAS-A bcm_ring object, comprising of a RD and WR offset, to implement a + * producer/consumer circular ring. + */ + +struct bcm_workq { + bcm_ring_t ring; /* Ring context abstraction */ + struct bcm_workq *peer; /* Peer workq context */ + void *buffer; /* Buffer storage for work items in workQ */ + int ring_size; /* Depth of workQ */ +} __ring_aligned; + +typedef struct bcm_workq bcm_workq_t; + + +/* #define BCM_WORKQ_DEBUG */ +#if defined(BCM_WORKQ_DEBUG) +#define WORKQ_ASSERT(exp) ASSERT(exp) +#else /* ! 
BCM_WORKQ_DEBUG */ +#define WORKQ_ASSERT(exp) do {} while (0) +#endif /* ! BCM_WORKQ_DEBUG */ + +#define WORKQ_AUDIT(workq) \ + WORKQ_ASSERT((workq) != BCM_WORKQ_NULL); \ + WORKQ_ASSERT(WORKQ_PEER(workq) != BCM_WORKQ_NULL); \ + WORKQ_ASSERT((workq)->buffer == WORKQ_PEER(workq)->buffer); \ + WORKQ_ASSERT((workq)->ring_size == WORKQ_PEER(workq)->ring_size); + +#define BCM_WORKQ_NULL ((bcm_workq_t *)NULL) + +#define WORKQ_PEER(workq) ((workq)->peer) +#define WORKQ_RING(workq) (&((workq)->ring)) +#define WORKQ_PEER_RING(workq) (&((workq)->peer->ring)) + +#define WORKQ_ELEMENT(__elem_type, __workq, __index) ({ \ + WORKQ_ASSERT((__workq) != BCM_WORKQ_NULL); \ + WORKQ_ASSERT((__index) < ((__workq)->ring_size)); \ + ((__elem_type *)((__workq)->buffer)) + (__index); \ +}) + + +static INLINE void bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer, + void *buffer, int ring_size); + +static INLINE bool bcm_workq_is_empty(bcm_workq_t *workq_prod); + +static INLINE void bcm_workq_prod_sync(bcm_workq_t *workq_prod); +static INLINE void bcm_workq_cons_sync(bcm_workq_t *workq_cons); + +static INLINE void bcm_workq_prod_refresh(bcm_workq_t *workq_prod); +static INLINE void bcm_workq_cons_refresh(bcm_workq_t *workq_cons); + +/** + * bcm_workq_init - initialize a workq + * @workq: pointer to a workq context + * @buffer: pointer to a pre-allocated circular buffer to serve as a ring + * @ring_size: size of the ring in terms of max number of elements. 
+ */ +static INLINE void +bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer, + void *buffer, int ring_size) +{ + ASSERT(workq != BCM_WORKQ_NULL); + ASSERT(workq_peer != BCM_WORKQ_NULL); + ASSERT(buffer != NULL); + ASSERT(ring_size > 0); + + WORKQ_PEER(workq) = workq_peer; + WORKQ_PEER(workq_peer) = workq; + + bcm_ring_init(WORKQ_RING(workq)); + bcm_ring_init(WORKQ_RING(workq_peer)); + + workq->buffer = workq_peer->buffer = buffer; + workq->ring_size = workq_peer->ring_size = ring_size; +} + +/** + * bcm_workq_empty - test whether there is work + * @workq_prod: producer's workq + */ +static INLINE bool +bcm_workq_is_empty(bcm_workq_t *workq_prod) +{ + return bcm_ring_is_empty(WORKQ_RING(workq_prod)); +} + +/** + * bcm_workq_prod_sync - Commit the producer write index to peer workq's ring + * @workq_prod: producer's workq whose write index must be synced to peer + */ +static INLINE void +bcm_workq_prod_sync(bcm_workq_t *workq_prod) +{ + WORKQ_AUDIT(workq_prod); + + /* cons::write <--- prod::write */ + bcm_ring_sync_write(WORKQ_PEER_RING(workq_prod), WORKQ_RING(workq_prod)); +} + +/** + * bcm_workq_cons_sync - Commit the consumer read index to the peer workq's ring + * @workq_cons: consumer's workq whose read index must be synced to peer + */ +static INLINE void +bcm_workq_cons_sync(bcm_workq_t *workq_cons) +{ + WORKQ_AUDIT(workq_cons); + + /* prod::read <--- cons::read */ + bcm_ring_sync_read(WORKQ_PEER_RING(workq_cons), WORKQ_RING(workq_cons)); +} + + +/** + * bcm_workq_prod_refresh - Fetch the updated consumer's read index + * @workq_prod: producer's workq whose read index must be refreshed from peer + */ +static INLINE void +bcm_workq_prod_refresh(bcm_workq_t *workq_prod) +{ + WORKQ_AUDIT(workq_prod); + + /* prod::read <--- cons::read */ + bcm_ring_sync_read(WORKQ_RING(workq_prod), WORKQ_PEER_RING(workq_prod)); +} + +/** + * bcm_workq_cons_refresh - Fetch the updated producer's write index + * @workq_cons: consumer's workq whose write index must be 
refreshed from peer + */ +static INLINE void +bcm_workq_cons_refresh(bcm_workq_t *workq_cons) +{ + WORKQ_AUDIT(workq_cons); + + /* cons::write <--- prod::write */ + bcm_ring_sync_write(WORKQ_RING(workq_cons), WORKQ_PEER_RING(workq_cons)); +} + + +#endif /* ! __bcm_ring_h_included__ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmcdc.h b/drivers/amlogic/wifi/bcmdhd/include/bcmcdc.h similarity index 78% rename from drivers/net/wireless/bcmdhd/include/bcmcdc.h rename to drivers/amlogic/wifi/bcmdhd/include/bcmcdc.h index 76788d48edadb..a95dc31c27bdd 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmcdc.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmcdc.h @@ -4,9 +4,30 @@ * * Definitions subject to change without notice. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: bcmcdc.h 318308 2012-03-02 02:23:42Z $ + * + * <> + * + * $Id: bcmcdc.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _bcmcdc_h_ #define _bcmcdc_h_ diff --git a/drivers/net/wireless/bcmdhd/include/bcmdefs.h b/drivers/amlogic/wifi/bcmdhd/include/bcmdefs.h similarity index 75% rename from drivers/net/wireless/bcmdhd/include/bcmdefs.h rename to drivers/amlogic/wifi/bcmdhd/include/bcmdefs.h index 393e2d8c6f1e6..a02499996f615 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmdefs.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmdefs.h @@ -1,9 +1,30 @@ /* * Misc system wide definitions * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: bcmdefs.h 474209 2014-04-30 12:16:47Z $ + * + * <> + * + * $Id: bcmdefs.h 601026 2015-11-20 06:53:19Z $ */ #ifndef _bcmdefs_h_ @@ -58,13 +79,8 @@ #undef BCM47XX_CA9 #ifndef BCMFASTPATH -#if defined(BCM47XX_CA9) -#define BCMFASTPATH __attribute__ ((__section__ (".text.fastpath"))) -#define BCMFASTPATH_HOST __attribute__ ((__section__ (".text.fastpath_host"))) -#else #define BCMFASTPATH #define BCMFASTPATH_HOST -#endif #endif /* BCMFASTPATH */ @@ -124,6 +140,12 @@ #define CHIPREV(rev) (rev) #endif +#ifdef BCMPCIEREV +#define PCIECOREREV(rev) (BCMPCIEREV) +#else +#define PCIECOREREV(rev) (rev) +#endif + /* Defines for DMA Address Width - Shared between OSL and HNDDMA */ #define DMADDR_MASK_32 0x0 /* Address mask for 32-bits */ #define DMADDR_MASK_30 0xc0000000 /* Address mask for 30-bits */ @@ -158,6 +180,10 @@ typedef dma64addr_t dmaaddr_t; #define PHYSADDRHISET(_pa, _val) PHYSADDR64HISET(_pa, _val) #define PHYSADDRLO(_pa) PHYSADDR64LO(_pa) #define PHYSADDRLOSET(_pa, _val) PHYSADDR64LOSET(_pa, _val) +#define PHYSADDRTOULONG(_pa, _ulong) \ + do { \ + _ulong = ((unsigned long)(_pa).hiaddr << 32) | ((_pa).loaddr); \ + } while (0) #else typedef unsigned long dmaaddr_t; @@ -198,11 +224,7 @@ typedef struct { /* add 40 bytes to allow for extra RPC header and info */ #define BCMEXTRAHDROOM 260 #else /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */ -#if defined(BCM47XX_CA9) -#define BCMEXTRAHDROOM 224 -#else #define BCMEXTRAHDROOM 204 -#endif /* linux && BCM47XX_CA9 */ #endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */ /* Packet alignment for most efficient SDIO (can change based on platform) */ @@ -230,7 +252,7 @@ typedef struct { #if defined(BCMASSERT_LOG) #define BCMASSERT_SUPPORT -#endif +#endif /* Macros for doing definition and get/set of bitfields * Usage example, e.g. a three-bit field (bits 4-6): @@ -261,8 +283,13 @@ typedef struct { /* Max. 
nvram variable table size */ #ifndef MAXSZ_NVRAM_VARS -#define MAXSZ_NVRAM_VARS 4096 -#endif +#ifdef LARGE_NVRAM_MAXSZ +#define MAXSZ_NVRAM_VARS LARGE_NVRAM_MAXSZ +#else +/* SROM12 changes */ +#define MAXSZ_NVRAM_VARS 6144 +#endif /* LARGE_NVRAM_MAXSZ */ +#endif /* !MAXSZ_NVRAM_VARS */ @@ -283,18 +310,52 @@ typedef struct { #else #define BCMLFRAG_ENAB() (0) #endif /* BCMLFRAG_ENAB */ +#define RXMODE1 1 /* descriptor split */ +#define RXMODE2 2 /* descriptor split + classification */ +#define RXMODE3 3 /* fifo split + classification */ +#define RXMODE4 4 /* fifo split + classification + hdr conversion */ + #ifdef BCMSPLITRX /* BCMLFRAG support enab macros */ extern bool _bcmsplitrx; + extern uint8 _bcmsplitrx_mode; #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) #define BCMSPLITRX_ENAB() (_bcmsplitrx) + #define BCMSPLITRX_MODE() (_bcmsplitrx_mode) #elif defined(BCMSPLITRX_DISABLED) #define BCMSPLITRX_ENAB() (0) + #define BCMSPLITRX_MODE() (0) #else #define BCMSPLITRX_ENAB() (1) + #define BCMSPLITRX_MODE() (_bcmsplitrx_mode) #endif #else #define BCMSPLITRX_ENAB() (0) + #define BCMSPLITRX_MODE() (0) #endif /* BCMSPLITRX */ + +#ifdef BCMPCIEDEV /* BCMPCIEDEV support enab macros */ +extern bool _pciedevenab; + #if defined(WL_ENAB_RUNTIME_CHECK) + #define BCMPCIEDEV_ENAB() (_pciedevenab) + #elif defined(BCMPCIEDEV_ENABLED) + #define BCMPCIEDEV_ENAB() 1 + #else + #define BCMPCIEDEV_ENAB() 0 + #endif +#else + #define BCMPCIEDEV_ENAB() 0 +#endif /* BCMPCIEDEV */ + +#define SPLIT_RXMODE1() ((BCMSPLITRX_MODE() == RXMODE1)) +#define SPLIT_RXMODE2() ((BCMSPLITRX_MODE() == RXMODE2)) +#define SPLIT_RXMODE3() ((BCMSPLITRX_MODE() == RXMODE3)) +#define SPLIT_RXMODE4() ((BCMSPLITRX_MODE() == RXMODE4)) + +#define PKT_CLASSIFY() (SPLIT_RXMODE2() || SPLIT_RXMODE3() || SPLIT_RXMODE4()) +#define RXFIFO_SPLIT() (SPLIT_RXMODE3() || SPLIT_RXMODE4()) +#define HDR_CONV() (SPLIT_RXMODE4()) + +#define PKT_CLASSIFY_EN(x) ((PKT_CLASSIFY()) && (PKT_CLASSIFY_FIFO == (x))) #ifdef 
BCM_SPLITBUF extern bool _bcmsplitbuf; #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) @@ -307,6 +368,7 @@ typedef struct { #else #define BCM_SPLITBUF_ENAB() (0) #endif /* BCM_SPLITBUF */ + /* Max size for reclaimable NVRAM array */ #ifdef DL_NVRAM #define NVRAM_ARRAY_MAXSIZE DL_NVRAM diff --git a/drivers/net/wireless/bcmdhd/include/bcmdevs.h b/drivers/amlogic/wifi/bcmdhd/include/bcmdevs.h similarity index 78% rename from drivers/net/wireless/bcmdhd/include/bcmdevs.h rename to drivers/amlogic/wifi/bcmdhd/include/bcmdevs.h index 3474601ebf441..49c1064c24094 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmdevs.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmdevs.h @@ -1,9 +1,30 @@ /* * Broadcom device-specific manifest constants. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: bcmdevs.h 484136 2014-06-12 04:36:10Z $ + * + * <> + * + * $Id: bcmdevs.h 582052 2015-08-26 09:30:53Z $ */ #ifndef _BCMDEVS_H @@ -169,12 +190,24 @@ #define BCM4349_D11AC_ID 0x4349 /* 4349 802.11ac dualband device */ #define BCM4349_D11AC2G_ID 0x43dd /* 4349 802.11ac 2.4G device */ #define BCM4349_D11AC5G_ID 0x43de /* 4349 802.11ac 5G device */ -#define BCM4355_D11AC_ID 0x43d3 /* 4355 802.11ac dualband device */ -#define BCM4355_D11AC2G_ID 0x43d4 /* 4355 802.11ac 2.4G device */ -#define BCM4355_D11AC5G_ID 0x43d5 /* 4355 802.11ac 5G device */ -#define BCM4359_D11AC_ID 0x43d6 /* 4359 802.11ac dualband device */ -#define BCM4359_D11AC2G_ID 0x43d7 /* 4359 802.11ac 2.4G device */ -#define BCM4359_D11AC5G_ID 0x43d8 /* 4359 802.11ac 5G device */ +#define BCM53573_D11AC_ID 0x43b4 /* 53573 802.11ac dualband device */ +#define BCM53573_D11AC2G_ID 0x43b5 /* 53573 802.11ac 2.4G device */ +#define BCM53573_D11AC5G_ID 0x43b6 /* 53573 802.11ac 5G device */ +#define BCM47189_D11AC_ID 0x43c6 /* 47189 802.11ac dualband device */ +#define BCM47189_D11AC2G_ID 0x43c7 /* 47189 802.11ac 2.4G device */ +#define BCM47189_D11AC5G_ID 0x43c8 /* 47189 802.11ac 5G device */ +#define BCM4355_D11AC_ID 0x43dc /* 4355 802.11ac dualband device */ +#define BCM4355_D11AC2G_ID 0x43fc /* 4355 802.11ac 2.4G device */ +#define BCM4355_D11AC5G_ID 0x43fd /* 4355 802.11ac 5G device */ +#define BCM4359_D11AC_ID 0x43ef /* 4359 802.11ac dualband device */ +#define BCM4359_D11AC2G_ID 0x43fe /* 4359 802.11ac 2.4G device */ +#define BCM4359_D11AC5G_ID 0x43ff /* 4359 802.11ac 5G device */ +#define BCM43596_D11AC_ID 0x4415 /* 43596 802.11ac dualband device */ +#define BCM43596_D11AC2G_ID 0x4416 /* 43596 802.11ac 2.4G device */ +#define BCM43596_D11AC5G_ID 0x4417 /* 43596 802.11ac 5G device */ +#define BCM43909_D11AC_ID 0x43d0 /* 43909 802.11ac dualband device */ +#define BCM43909_D11AC2G_ID 0x43d1 /* 43909 802.11ac 2.4G device */ +#define BCM43909_D11AC5G_ID 0x43d2 /* 43909 802.11ac 5G device */ /* PCI 
Subsystem ID */ #define BCM943228HMB_SSID_VEN1 0x0607 @@ -222,6 +255,14 @@ #define BCM43430_D11N2G_ID 0x43e2 /* 43430 802.11n 2.4G device */ +#define BCM4365_D11AC_ID 0x43ca +#define BCM4365_D11AC2G_ID 0x43cb +#define BCM4365_D11AC5G_ID 0x43cc + +#define BCM4366_D11AC_ID 0x43c3 +#define BCM4366_D11AC2G_ID 0x43c4 +#define BCM4366_D11AC5G_ID 0x43c5 + #define BCM43349_D11N_ID 0x43e6 /* 43349 802.11n dualband id */ #define BCM43349_D11N2G_ID 0x43e7 /* 43349 802.11n 2.4Ghz band id */ #define BCM43349_D11N5G_ID 0x43e8 /* 43349 802.11n 5Ghz band id */ @@ -336,6 +377,7 @@ #define BCM4339_CHIP_ID 0x4339 /* 4339 chipcommon chipid */ #define BCM43349_CHIP_ID 43349 /* 43349(0xA955) chipcommon chipid */ #define BCM4360_CHIP_ID 0x4360 /* 4360 chipcommon chipid */ +#define BCM4364_CHIP_ID 0x4364 /* 4364 chipcommon chipid */ #define BCM4352_CHIP_ID 0x4352 /* 4352 chipcommon chipid */ #define BCM43526_CHIP_ID 0xAA06 #define BCM43340_CHIP_ID 43340 /* 43340 chipcommon chipid */ @@ -353,6 +395,7 @@ #define BCM43570_CHIP_ID 0xAA32 /* 43570 chipcommon chipid */ #define BCM4358_CHIP_ID 0x4358 /* 4358 chipcommon chipid */ #define BCM4371_CHIP_ID 0x4371 /* 4371 chipcommon chipid */ +#define BCM43012_CHIP_ID 0xA804 /* 43012 chipcommon chipid */ #define BCM4350_CHIP(chipid) ((CHIPID(chipid) == BCM4350_CHIP_ID) || \ (CHIPID(chipid) == BCM4354_CHIP_ID) || \ (CHIPID(chipid) == BCM4356_CHIP_ID) || \ @@ -366,6 +409,9 @@ (CHIPID(chipid) == BCM4358_CHIP_ID)) /* 4350 variations */ #define BCM4345_CHIP_ID 0x4345 /* 4345 chipcommon chipid */ #define BCM43454_CHIP_ID 43454 /* 43454 chipcommon chipid */ +#define BCM43455_CHIP_ID 43455 /* 43455 chipcommon chipid */ +#define BCM43457_CHIP_ID 43457 /* 43457 chipcommon chipid */ +#define BCM43458_CHIP_ID 43458 /* 43458 chipcommon chipid */ #define BCM43430_CHIP_ID 43430 /* 43430 chipcommon chipid */ #define BCM4349_CHIP_ID 0x4349 /* 4349 chipcommon chipid */ #define BCM4355_CHIP_ID 0x4355 /* 4355 chipcommon chipid */ @@ -373,20 +419,48 @@ #define 
BCM4349_CHIP(chipid) ((CHIPID(chipid) == BCM4349_CHIP_ID) || \ (CHIPID(chipid) == BCM4355_CHIP_ID) || \ (CHIPID(chipid) == BCM4359_CHIP_ID)) + +#define BCM4345_CHIP(chipid) (CHIPID(chipid) == BCM4345_CHIP_ID || \ + CHIPID(chipid) == BCM43454_CHIP_ID || \ + CHIPID(chipid) == BCM43455_CHIP_ID || \ + CHIPID(chipid) == BCM43457_CHIP_ID || \ + CHIPID(chipid) == BCM43458_CHIP_ID) + +#define CASE_BCM4345_CHIP case BCM4345_CHIP_ID: /* fallthrough */ \ + case BCM43454_CHIP_ID: /* fallthrough */ \ + case BCM43455_CHIP_ID: /* fallthrough */ \ + case BCM43457_CHIP_ID: /* fallthrough */ \ + case BCM43458_CHIP_ID + #define BCM4349_CHIP_GRPID BCM4349_CHIP_ID: \ case BCM4355_CHIP_ID: \ case BCM4359_CHIP_ID +#define BCM4365_CHIP_ID 0x4365 /* 4365 chipcommon chipid */ +#define BCM4366_CHIP_ID 0x4366 /* 4366 chipcommon chipid */ + +#define BCM43909_CHIP_ID 0xab85 /* 43909 chipcommon chipid */ + #define BCM43602_CHIP_ID 0xaa52 /* 43602 chipcommon chipid */ #define BCM43462_CHIP_ID 0xa9c6 /* 43462 chipcommon chipid */ +#define BCM43522_CHIP_ID 0xaa02 /* 43522 chipcommon chipid */ +#define BCM43602_CHIP(chipid) ((CHIPID(chipid) == BCM43602_CHIP_ID) || \ + (CHIPID(chipid) == BCM43462_CHIP_ID) || \ + (CHIPID(chipid) == BCM43522_CHIP_ID)) /* 43602 variations */ +#define CASE_BCM43602_CHIP case BCM43602_CHIP_ID: /* fallthrough */ \ + case BCM43462_CHIP_ID: /* fallthrough */ \ + case BCM43522_CHIP_ID #define BCM4342_CHIP_ID 4342 /* 4342 chipcommon chipid (OTP, RBBU) */ #define BCM4402_CHIP_ID 0x4402 /* 4402 chipid */ #define BCM4704_CHIP_ID 0x4704 /* 4704 chipcommon chipid */ #define BCM4706_CHIP_ID 0x5300 /* 4706 chipcommon chipid */ #define BCM4707_CHIP_ID 53010 /* 4707 chipcommon chipid */ +#define BCM47094_CHIP_ID 53030 /* 47094 chipcommon chipid */ #define BCM53018_CHIP_ID 53018 /* 53018 chipcommon chipid */ -#define BCM4707_CHIP(chipid) (((chipid) == BCM4707_CHIP_ID) || ((chipid) == BCM53018_CHIP_ID)) +#define BCM4707_CHIP(chipid) (((chipid) == BCM4707_CHIP_ID) || \ + ((chipid) == 
BCM53018_CHIP_ID) || \ + ((chipid) == BCM47094_CHIP_ID)) #define BCM4710_CHIP_ID 0x4710 /* 4710 chipid */ #define BCM4712_CHIP_ID 0x4712 /* 4712 chipcommon chipid */ #define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */ @@ -401,6 +475,9 @@ #define BCM5356_CHIP_ID 0x5356 /* 5356 chipcommon chipid */ #define BCM5357_CHIP_ID 0x5357 /* 5357 chipcommon chipid */ #define BCM53572_CHIP_ID 53572 /* 53572 chipcommon chipid */ +#define BCM53573_CHIP_ID 53573 /* 53573 chipcommon chipid */ +#define BCM53573_CHIP(chipid) (CHIPID(chipid) == BCM53573_CHIP_ID) +#define BCM53573_CHIP_GRPID BCM53573_CHIP_ID /* Package IDs */ #define BCM4303_PKG_ID 2 /* 4303 package id */ @@ -432,6 +509,8 @@ #define BCM4331TT_PKG_ID 8 /* 4331 12x12 package id */ #define BCM4331TN_PKG_ID 9 /* 4331 12x9 package id */ #define BCM4331TNA0_PKG_ID 0xb /* 4331 12x9 package id */ +#define BCM47189_PKG_ID 1 /* 47189 package id */ +#define BCM53573_PKG_ID 0 /* 53573 package id */ #define BCM4706L_PKG_ID 1 /* 4706L package id */ #define HDLSIM5350_PKG_ID 1 /* HDL simulator package id for a 5350 */ @@ -460,6 +539,7 @@ #define BCM4335_WLBGA_PKG_ID (0x2) /* WLBGA COB/Mobile SDIO/HSIC. */ #define BCM4335_FCBGAD_PKG_ID (0x3) /* FCBGA Debug Debug/Dev All if's. */ #define BCM4335_PKG_MASK (0x3) +#define BCM43602_12x12_PKG_ID (0x1) /* 12x12 pins package, used for e.g. 
router designs */ /* boardflags */ #define BFL_BTC2WIRE 0x00000001 /* old 2wire Bluetooth coexistence, OBSOLETE */ @@ -560,12 +640,15 @@ #define BFL_SROM11_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */ #define BFL_SROM11_EPA_TURNON_TIME 0x00018000 /* 2 bits for different PA turn on times */ #define BFL_SROM11_EPA_TURNON_TIME_SHIFT 15 +#define BFL_SROM11_PRECAL_TX_IDX 0x00040000 /* Dedicated TX IQLOCAL IDX values */ + /* per subband, as derived from 43602A1 MCH5 */ #define BFL_SROM11_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */ #define BFL_SROM11_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */ #define BFL2_SROM11_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */ #define BFL2_SROM11_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are ctrl-ed by analog PA ctrl lines */ #define BFL2_SROM11_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are ctrl-ed by analog PA ctrl lines */ #define BFL2_SROM11_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */ +#define BFL2_SROM11_EPA_ON_DURING_TXIQLOCAL 0x00020000 /* Keep ext. 
PA's on in TX IQLO CAL */ /* boardflags3 */ #define BFL3_FEMCTRL_SUB 0x00000007 /* acphy, subrevs of femctrl on top of srom_femctrl */ @@ -614,6 +697,9 @@ #define BFL3_AVVMID_FROM_NVRAM_SHIFT 30 /* Read Av Vmid from NVRAM */ #define BFL3_VLIN_EN_FROM_NVRAM_SHIFT 31 /* Enable Vlin from NVRAM */ +/* boardflags4 for SROM12 */ +#define BFL4_SROM12_4dBPAD (1 << 0) /* To distinguigh between normal and 4dB pad board */ + /* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */ #define BOARD_GPIO_BTC3W_IN 0x850 /* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */ @@ -633,6 +719,7 @@ #define BOARD_GPIO_2_WLAN_PWR 0x04 /* throttle WLAN power on X29C board */ #define BOARD_GPIO_3_WLAN_PWR 0x08 /* throttle WLAN power on X28 board */ #define BOARD_GPIO_4_WLAN_PWR 0x10 /* throttle WLAN power on X19 board */ +#define BOARD_GPIO_13_WLAN_PWR 0x2000 /* throttle WLAN power on X14 board */ #define GPIO_BTC4W_OUT_4312 0x010 /* bit 4 is BT_IODISABLE */ #define GPIO_BTC4W_OUT_43224 0x020 /* bit 5 is BT_IODISABLE */ @@ -653,380 +740,6 @@ #define MIN_SLOW_CLK 32 /* us Slow clock period */ #define XTAL_ON_DELAY 1000 /* us crystal power-on delay */ -#ifndef LINUX_POSTMOGRIFY_REMOVAL -/* Reference Board Types */ -#define BU4710_BOARD 0x0400 -#define VSIM4710_BOARD 0x0401 -#define QT4710_BOARD 0x0402 - -#define BU4309_BOARD 0x040a -#define BCM94309CB_BOARD 0x040b -#define BCM94309MP_BOARD 0x040c -#define BCM4309AP_BOARD 0x040d - -#define BCM94302MP_BOARD 0x040e - -#define BU4306_BOARD 0x0416 -#define BCM94306CB_BOARD 0x0417 -#define BCM94306MP_BOARD 0x0418 - -#define BCM94710D_BOARD 0x041a -#define BCM94710R1_BOARD 0x041b -#define BCM94710R4_BOARD 0x041c -#define BCM94710AP_BOARD 0x041d - -#define BU2050_BOARD 0x041f - -#define BCM94306P50_BOARD 0x0420 - -#define BCM94309G_BOARD 0x0421 - -#define BU4704_BOARD 0x0423 -#define BU4702_BOARD 0x0424 - -#define BCM94306PC_BOARD 0x0425 /* pcmcia 3.3v 4306 card */ - -#define MPSG4306_BOARD 0x0427 - -#define 
BCM94702MN_BOARD 0x0428 - -/* BCM4702 1U CompactPCI Board */ -#define BCM94702CPCI_BOARD 0x0429 - -/* BCM4702 with BCM95380 VLAN Router */ -#define BCM95380RR_BOARD 0x042a - -/* cb4306 with SiGe PA */ -#define BCM94306CBSG_BOARD 0x042b - -/* cb4306 with SiGe PA */ -#define PCSG94306_BOARD 0x042d - -/* bu4704 with sdram */ -#define BU4704SD_BOARD 0x042e - -/* Dual 11a/11g Router */ -#define BCM94704AGR_BOARD 0x042f - -/* 11a-only minipci */ -#define BCM94308MP_BOARD 0x0430 - -/* 4306/gprs combo */ -#define BCM94306GPRS_BOARD 0x0432 - -/* BCM5365/BCM4704 FPGA Bringup Board */ -#define BU5365_FPGA_BOARD 0x0433 - -#define BU4712_BOARD 0x0444 -#define BU4712SD_BOARD 0x045d -#define BU4712L_BOARD 0x045f - -/* BCM4712 boards */ -#define BCM94712AP_BOARD 0x0445 -#define BCM94712P_BOARD 0x0446 - -/* BCM4318 boards */ -#define BU4318_BOARD 0x0447 -#define CB4318_BOARD 0x0448 -#define MPG4318_BOARD 0x0449 -#define MP4318_BOARD 0x044a -#define SD4318_BOARD 0x044b - -/* BCM4313 boards */ -#define BCM94313BU_BOARD 0x050f -#define BCM94313HM_BOARD 0x0510 -#define BCM94313EPA_BOARD 0x0511 -#define BCM94313HMG_BOARD 0x051C - -/* BCM63XX boards */ -#define BCM96338_BOARD 0x6338 -#define BCM96348_BOARD 0x6348 -#define BCM96358_BOARD 0x6358 -#define BCM96368_BOARD 0x6368 - -/* Another mp4306 with SiGe */ -#define BCM94306P_BOARD 0x044c - -/* mp4303 */ -#define BCM94303MP_BOARD 0x044e - -/* mpsgh4306 */ -#define BCM94306MPSGH_BOARD 0x044f - -/* BRCM 4306 w/ Front End Modules */ -#define BCM94306MPM 0x0450 -#define BCM94306MPL 0x0453 - -/* 4712agr */ -#define BCM94712AGR_BOARD 0x0451 - -/* pcmcia 4303 */ -#define PC4303_BOARD 0x0454 - -/* 5350K */ -#define BCM95350K_BOARD 0x0455 - -/* 5350R */ -#define BCM95350R_BOARD 0x0456 - -/* 4306mplna */ -#define BCM94306MPLNA_BOARD 0x0457 - -/* 4320 boards */ -#define BU4320_BOARD 0x0458 -#define BU4320S_BOARD 0x0459 -#define BCM94320PH_BOARD 0x045a - -/* 4306mph */ -#define BCM94306MPH_BOARD 0x045b - -/* 4306pciv */ -#define BCM94306PCIV_BOARD 
0x045c - -#define BU4712SD_BOARD 0x045d - -#define BCM94320PFLSH_BOARD 0x045e - -#define BU4712L_BOARD 0x045f -#define BCM94712LGR_BOARD 0x0460 -#define BCM94320R_BOARD 0x0461 - -#define BU5352_BOARD 0x0462 - -#define BCM94318MPGH_BOARD 0x0463 - -#define BU4311_BOARD 0x0464 -#define BCM94311MC_BOARD 0x0465 -#define BCM94311MCAG_BOARD 0x0466 - -#define BCM95352GR_BOARD 0x0467 - -/* bcm95351agr */ -#define BCM95351AGR_BOARD 0x0470 - -/* bcm94704mpcb */ -#define BCM94704MPCB_BOARD 0x0472 - -/* 4785 boards */ -#define BU4785_BOARD 0x0478 - -/* 4321 boards */ -#define BU4321_BOARD 0x046b -#define BU4321E_BOARD 0x047c -#define MP4321_BOARD 0x046c -#define CB2_4321_BOARD 0x046d -#define CB2_4321_AG_BOARD 0x0066 -#define MC4321_BOARD 0x046e - -/* 4328 boards */ -#define BU4328_BOARD 0x0481 -#define BCM4328SDG_BOARD 0x0482 -#define BCM4328SDAG_BOARD 0x0483 -#define BCM4328UG_BOARD 0x0484 -#define BCM4328UAG_BOARD 0x0485 -#define BCM4328PC_BOARD 0x0486 -#define BCM4328CF_BOARD 0x0487 - -/* 4325 boards */ -#define BCM94325DEVBU_BOARD 0x0490 -#define BCM94325BGABU_BOARD 0x0491 - -#define BCM94325SDGWB_BOARD 0x0492 - -#define BCM94325SDGMDL_BOARD 0x04aa -#define BCM94325SDGMDL2_BOARD 0x04c6 -#define BCM94325SDGMDL3_BOARD 0x04c9 - -#define BCM94325SDABGWBA_BOARD 0x04e1 - -/* 4322 boards */ -#define BCM94322MC_SSID 0x04a4 -#define BCM94322USB_SSID 0x04a8 /* dualband */ -#define BCM94322HM_SSID 0x04b0 -#define BCM94322USB2D_SSID 0x04bf /* single band discrete front end */ - -/* 4312 boards */ -#define BCM4312MCGSG_BOARD 0x04b5 - -/* 4315 boards */ -#define BCM94315DEVBU_SSID 0x04c2 -#define BCM94315USBGP_SSID 0x04c7 -#define BCM94315BGABU_SSID 0x04ca -#define BCM94315USBGP41_SSID 0x04cb - -/* 4319 boards */ -#define BCM94319DEVBU_SSID 0X04e5 -#define BCM94319USB_SSID 0X04e6 -#define BCM94319SD_SSID 0X04e7 - -/* 4716 boards */ -#define BCM94716NR2_SSID 0x04cd - -/* 4319 boards */ -#define BCM94319DEVBU_SSID 0X04e5 -#define BCM94319USBNP4L_SSID 0X04e6 -#define BCM94319WLUSBN4L_SSID 
0X04e7 -#define BCM94319SDG_SSID 0X04ea -#define BCM94319LCUSBSDN4L_SSID 0X04eb -#define BCM94319USBB_SSID 0x04ee -#define BCM94319LCSDN4L_SSID 0X0507 -#define BCM94319LSUSBN4L_SSID 0X0508 -#define BCM94319SDNA4L_SSID 0X0517 -#define BCM94319SDELNA4L_SSID 0X0518 -#define BCM94319SDELNA6L_SSID 0X0539 -#define BCM94319ARCADYAN_SSID 0X0546 -#define BCM94319WINDSOR_SSID 0x0561 -#define BCM94319MLAP_SSID 0x0562 -#define BCM94319SDNA_SSID 0x058b -#define BCM94319BHEMU3_SSID 0x0563 -#define BCM94319SDHMB_SSID 0x058c -#define BCM94319SDBREF_SSID 0x05a1 -#define BCM94319USBSDB_SSID 0x05a2 - - -/* 4329 boards */ -#define BCM94329AGB_SSID 0X04b9 -#define BCM94329TDKMDL1_SSID 0X04ba -#define BCM94329TDKMDL11_SSID 0X04fc -#define BCM94329OLYMPICN18_SSID 0X04fd -#define BCM94329OLYMPICN90_SSID 0X04fe -#define BCM94329OLYMPICN90U_SSID 0X050c -#define BCM94329OLYMPICN90M_SSID 0X050b -#define BCM94329AGBF_SSID 0X04ff -#define BCM94329OLYMPICX17_SSID 0X0504 -#define BCM94329OLYMPICX17M_SSID 0X050a -#define BCM94329OLYMPICX17U_SSID 0X0509 -#define BCM94329OLYMPICUNO_SSID 0X0564 -#define BCM94329MOTOROLA_SSID 0X0565 -#define BCM94329OLYMPICLOCO_SSID 0X0568 -/* 4336 SDIO board types */ -#define BCM94336SD_WLBGABU_SSID 0x0511 -#define BCM94336SD_WLBGAREF_SSID 0x0519 -#define BCM94336SDGP_SSID 0x0538 -#define BCM94336SDG_SSID 0x0519 -#define BCM94336SDGN_SSID 0x0538 -#define BCM94336SDGFC_SSID 0x056B - -/* 4330 SDIO board types */ -#define BCM94330SDG_SSID 0x0528 -#define BCM94330SD_FCBGABU_SSID 0x052e -#define BCM94330SD_WLBGABU_SSID 0x052f -#define BCM94330SD_FCBGA_SSID 0x0530 -#define BCM94330FCSDAGB_SSID 0x0532 -#define BCM94330OLYMPICAMG_SSID 0x0549 -#define BCM94330OLYMPICAMGEPA_SSID 0x054F -#define BCM94330OLYMPICUNO3_SSID 0x0551 -#define BCM94330WLSDAGB_SSID 0x0547 -#define BCM94330CSPSDAGBB_SSID 0x054A - -/* 43224 boards */ -#define BCM943224X21 0x056e -#define BCM943224X21_FCC 0x00d1 -#define BCM943224X21B 0x00e9 -#define BCM943224M93 0x008b -#define BCM943224M93A 0x0090 
-#define BCM943224X16 0x0093 -#define BCM94322X9 0x008d -#define BCM94322M35e 0x008e - -/* 43228 Boards */ -#define BCM943228BU8_SSID 0x0540 -#define BCM943228BU9_SSID 0x0541 -#define BCM943228BU_SSID 0x0542 -#define BCM943227HM4L_SSID 0x0543 -#define BCM943227HMB_SSID 0x0544 -#define BCM943228HM4L_SSID 0x0545 -#define BCM943228SD_SSID 0x0573 - -/* 43239 Boards */ -#define BCM943239MOD_SSID 0x05ac -#define BCM943239REF_SSID 0x05aa - -/* 4331 boards */ -#define BCM94331X19 0x00D6 /* X19B */ -#define BCM94331X28 0x00E4 /* X28 */ -#define BCM94331X28B 0x010E /* X28B */ -#define BCM94331PCIEBT3Ax_SSID BCM94331X28 -#define BCM94331X12_2G_SSID 0x00EC /* X12 2G */ -#define BCM94331X12_5G_SSID 0x00ED /* X12 5G */ -#define BCM94331X29B 0x00EF /* X29B */ -#define BCM94331X29D 0x010F /* X29D */ -#define BCM94331CSAX_SSID BCM94331X29B -#define BCM94331X19C 0x00F5 /* X19C */ -#define BCM94331X33 0x00F4 /* X33 */ -#define BCM94331BU_SSID 0x0523 -#define BCM94331S9BU_SSID 0x0524 -#define BCM94331MC_SSID 0x0525 -#define BCM94331MCI_SSID 0x0526 -#define BCM94331PCIEBT4_SSID 0x0527 -#define BCM94331HM_SSID 0x0574 -#define BCM94331PCIEDUAL_SSID 0x059B -#define BCM94331MCH5_SSID 0x05A9 -#define BCM94331CS_SSID 0x05C6 -#define BCM94331CD_SSID 0x05DA - -/* 4314 Boards */ -#define BCM94314BU_SSID 0x05b1 - -/* 53572 Boards */ -#define BCM953572BU_SSID 0x058D -#define BCM953572NR2_SSID 0x058E -#define BCM947188NR2_SSID 0x058F -#define BCM953572SDRNR2_SSID 0x0590 - -/* 43236 boards */ -#define BCM943236OLYMPICSULLEY_SSID 0x594 -#define BCM943236PREPROTOBLU2O3_SSID 0x5b9 -#define BCM943236USBELNA_SSID 0x5f8 - -/* 4314 Boards */ -#define BCM94314BUSDIO_SSID 0x05c8 -#define BCM94314BGABU_SSID 0x05c9 -#define BCM94314HMEPA_SSID 0x05ca -#define BCM94314HMEPABK_SSID 0x05cb -#define BCM94314SUHMEPA_SSID 0x05cc -#define BCM94314SUHM_SSID 0x05cd -#define BCM94314HM_SSID 0x05d1 - -/* 4334 Boards */ -#define BCM94334FCAGBI_SSID 0x05df -#define BCM94334WLAGBI_SSID 0x05dd - -/* 4335 Boards */ -#define 
BCM94335X52 0x0114 - -/* 4345 Boards */ -#define BCM94345_SSID 0x0687 - -/* 4360 Boards */ -#define BCM94360X52C 0X0117 -#define BCM94360X52D 0X0137 -#define BCM94360X29C 0X0112 -#define BCM94360X29CP2 0X0134 -#define BCM94360X29CP3 0X013B -#define BCM94360X51 0x0111 -#define BCM94360X51P2 0x0129 -#define BCM94360X51P3 0x0142 -#define BCM94360X51A 0x0135 -#define BCM94360X51B 0x0136 -#define BCM94360CS 0x061B -#define BCM94360J28_D11AC2G 0x0c00 -#define BCM94360J28_D11AC5G 0x0c01 -#define BCM94360USBH5_D11AC5G 0x06aa -#define BCM94360MCM5 0x06d8 - -/* 4350 Boards */ -#define BCM94350X52B 0X0116 -#define BCM94350X14 0X0131 - -/* 43217 Boards */ -#define BCM943217BU_SSID 0x05d5 -#define BCM943217HM2L_SSID 0x05d6 -#define BCM943217HMITR2L_SSID 0x05d7 - -/* 43142 Boards */ -#define BCM943142HM_SSID 0x05e0 -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ /* 43341 Boards */ #define BCM943341WLABGS_SSID 0x062d @@ -1038,7 +751,9 @@ #define BCM943602RSVD1_SSID 0x06a5 #define BCM943602RSVD2_SSID 0x06a6 #define BCM943602X87 0X0133 +#define BCM943602X87P2 0X0143 #define BCM943602X238 0X0132 +#define BCM943602X238D 0X014A /* # of GPIO pins */ #define GPIO_NUMPINS 32 diff --git a/drivers/net/wireless/bcmdhd/include/bcmendian.h b/drivers/amlogic/wifi/bcmdhd/include/bcmendian.h similarity index 85% rename from drivers/net/wireless/bcmdhd/include/bcmendian.h rename to drivers/amlogic/wifi/bcmdhd/include/bcmendian.h index 5dbf675f9ca3f..27f2379473244 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmendian.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmendian.h @@ -1,9 +1,30 @@ /* * Byte order utilities * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with 
the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: bcmendian.h 402715 2013-05-16 18:50:09Z $ + * + * <> + * + * $Id: bcmendian.h 514727 2014-11-12 03:02:48Z $ * * This file by default provides proper behavior on little-endian architectures. * On big-endian architectures, IL_BIGENDIAN should be defined. diff --git a/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h b/drivers/amlogic/wifi/bcmdhd/include/bcmmsgbuf.h similarity index 57% rename from drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h rename to drivers/amlogic/wifi/bcmdhd/include/bcmmsgbuf.h index 7d65beb94fed6..ab1375ea854d6 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmmsgbuf.h @@ -4,20 +4,45 @@ * * Definitions subject to change without notice. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: bcmmsgbuf.h 499474 2014-08-28 21:30:10Z $ + * + * <> + * + * $Id: bcmmsgbuf.h 541060 2015-03-13 23:28:01Z $ */ #ifndef _bcmmsgbuf_h_ #define _bcmmsgbuf_h_ + #include #include #include #define MSGBUF_MAX_MSG_SIZE ETHER_MAX_LEN -#define D2H_EPOCH_MODULO 253 /* sequence number wrap */ -#define D2H_EPOCH_INIT_VAL (D2H_EPOCH_MODULO + 1) +#define D2H_EPOCH_MODULO 253 /* sequence number wrap */ +#define D2H_EPOCH_INIT_VAL (D2H_EPOCH_MODULO + 1) + +#define H2D_EPOCH_MODULO 253 /* sequence number wrap */ +#define H2D_EPOCH_INIT_VAL (H2D_EPOCH_MODULO + 1) #define H2DRING_TXPOST_ITEMSIZE 48 #define H2DRING_RXPOST_ITEMSIZE 32 @@ -27,11 +52,13 @@ #define D2HRING_CTRL_CMPLT_ITEMSIZE 24 #define H2DRING_TXPOST_MAX_ITEM 512 -#define H2DRING_RXPOST_MAX_ITEM 256 -#define H2DRING_CTRL_SUB_MAX_ITEM 20 +#define H2DRING_RXPOST_MAX_ITEM 512 +#define H2DRING_CTRL_SUB_MAX_ITEM 64 #define D2HRING_TXCMPLT_MAX_ITEM 1024 -#define D2HRING_RXCMPLT_MAX_ITEM 256 -#define D2HRING_CTRL_CMPLT_MAX_ITEM 20 +#define D2HRING_RXCMPLT_MAX_ITEM 512 + +#define D2HRING_CTRL_CMPLT_MAX_ITEM 64 + enum { DNGL_TO_HOST_MSGBUF, HOST_TO_DNGL_MSGBUF @@ -76,6 +103,7 @@ enum { #endif /* PCIE_API_REV1 */ /* utility data structures */ + union addr64 { struct { uint32 low; @@ -88,24 +116,24 @@ union addr64 { uint64 u64; } DECLSPEC_ALIGN(8); -typedef union addr64 addr64_t; +typedef union addr64 bcm_addr64_t; /* IOCTL req Hdr */ /* cmn Msg Hdr */ typedef struct cmn_msg_hdr { - /* message type */ + /** message type */ uint8 msg_type; - /* interface index this is valid for */ + /** interface index this is valid for */ uint8 if_id; /* flags */ uint8 flags; - /* sequence number */ + /** sequence number */ uint8 epoch; - /* packet Identifier for the associated host buffer */ + /** packet Identifier for the associated host buffer */ uint32 request_id; } cmn_msg_hdr_t; -/* message type */ +/** message type */ typedef enum bcmpcie_msgtype { MSG_TYPE_GEN_STATUS = 0x1, MSG_TYPE_RING_STATUS = 0x2, @@ -127,6 +155,23 @@ typedef 
enum bcmpcie_msgtype { MSG_TYPE_RX_CMPLT = 0x12, MSG_TYPE_LPBK_DMAXFER = 0x13, MSG_TYPE_LPBK_DMAXFER_CMPLT = 0x14, + MSG_TYPE_FLOW_RING_RESUME = 0x15, + MSG_TYPE_FLOW_RING_RESUME_CMPLT = 0x16, + MSG_TYPE_FLOW_RING_SUSPEND = 0x17, + MSG_TYPE_FLOW_RING_SUSPEND_CMPLT = 0x18, + MSG_TYPE_INFO_BUF_POST = 0x19, + MSG_TYPE_INFO_BUF_CMPLT = 0x1A, + MSG_TYPE_H2D_RING_CREATE = 0x1B, + MSG_TYPE_D2H_RING_CREATE = 0x1C, + MSG_TYPE_H2D_RING_CREATE_CMPLT = 0x1D, + MSG_TYPE_D2H_RING_CREATE_CMPLT = 0x1E, + MSG_TYPE_H2D_RING_CONFIG = 0x1F, + MSG_TYPE_D2H_RING_CONFIG = 0x20, + MSG_TYPE_H2D_RING_CONFIG_CMPLT = 0x21, + MSG_TYPE_D2H_RING_CONFIG_CMPLT = 0x22, + MSG_TYPE_H2D_MAILBOX_DATA = 0x23, + MSG_TYPE_D2H_MAILBOX_DATA = 0x24, + MSG_TYPE_API_MAX_RSVD = 0x3F } bcmpcie_msg_type_t; @@ -138,16 +183,34 @@ typedef enum bcmpcie_msgtype_int { MSG_TYPE_HOST_FETCH = 0x44, MSG_TYPE_LPBK_DMAXFER_PYLD = 0x45, MSG_TYPE_TXMETADATA_PYLD = 0x46, - MSG_TYPE_HOSTDMA_PTRS = 0x47 + MSG_TYPE_INDX_UPDATE = 0x47 } bcmpcie_msgtype_int_t; typedef enum bcmpcie_msgtype_u { MSG_TYPE_TX_BATCH_POST = 0x80, MSG_TYPE_IOCTL_REQ = 0x81, - MSG_TYPE_HOST_EVNT = 0x82, + MSG_TYPE_HOST_EVNT = 0x82, /* console related */ MSG_TYPE_LOOPBACK = 0x83 } bcmpcie_msgtype_u_t; +/** + * D2H ring host wakeup soft doorbell, override the PCIE doorbell. + * Host configures an <32bit address,value> tuple, and dongle uses SBTOPCIE + * Transl0 to write specified value to host address. + * + * Use case: 32bit Address mapped to HW Accelerator Core/Thread Wakeup Register + * and value is Core/Thread context. Host will ensure routing the 32bit address + * offerred to PCIE to the mapped register. 
+ * + * D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL + */ +typedef struct bcmpcie_soft_doorbell { + uint32 value; /* host defined value to be written, eg HW threadid */ + bcm_addr64_t haddr; /* host address, eg thread wakeup register address */ + uint16 items; /* interrupt coalescing: item count before wakeup */ + uint16 msecs; /* interrupt coalescing: timeout in millisecs */ +} bcmpcie_soft_doorbell_t; + /* if_id */ #define BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT 5 @@ -167,59 +230,58 @@ typedef enum bcmpcie_msgtype_u { /* IOCTL request message */ typedef struct ioctl_req_msg { - /* common message header */ + /** common message header */ cmn_msg_hdr_t cmn_hdr; - - /* ioctl command type */ + /** ioctl command type */ uint32 cmd; - /* ioctl transaction ID, to pair with a ioctl response */ + /** ioctl transaction ID, to pair with a ioctl response */ uint16 trans_id; - /* input arguments buffer len */ + /** input arguments buffer len */ uint16 input_buf_len; - /* expected output len */ + /** expected output len */ uint16 output_buf_len; - /* to aling the host address on 8 byte boundary */ + /** to align the host address on 8 byte boundary */ uint16 rsvd[3]; - /* always aling on 8 byte boundary */ - addr64_t host_input_buf_addr; + /** always align on 8 byte boundary */ + bcm_addr64_t host_input_buf_addr; /* rsvd */ uint32 rsvd1[2]; } ioctl_req_msg_t; -/* buffer post messages for device to use to return IOCTL responses, Events */ +/** buffer post messages for device to use to return IOCTL responses, Events */ typedef struct ioctl_resp_evt_buf_post_msg { - /* common message header */ + /** common message header */ cmn_msg_hdr_t cmn_hdr; - /* length of the host buffer supplied */ + /** length of the host buffer supplied */ uint16 host_buf_len; - /* to aling the host address on 8 byte boundary */ + /** to align the host address on 8 byte boundary */ uint16 reserved[3]; - /* always aling on 8 byte boundary */ - addr64_t host_buf_addr; + /** always align on 8 byte boundary */ + 
bcm_addr64_t host_buf_addr; uint32 rsvd[4]; } ioctl_resp_evt_buf_post_msg_t; typedef struct pcie_dma_xfer_params { - /* common message header */ + /** common message header */ cmn_msg_hdr_t cmn_hdr; - /* always aling on 8 byte boundary */ - addr64_t host_input_buf_addr; + /** always align on 8 byte boundary */ + bcm_addr64_t host_input_buf_addr; - /* always aling on 8 byte boundary */ - addr64_t host_ouput_buf_addr; + /** always align on 8 byte boundary */ + bcm_addr64_t host_ouput_buf_addr; - /* length of transfer */ + /** length of transfer */ uint32 xfer_len; - /* delay before doing the src txfer */ + /** delay before doing the src txfer */ uint32 srcdelay; - /* delay before doing the dest txfer */ + /** delay before doing the dest txfer */ uint32 destdelay; uint32 rsvd; } pcie_dma_xfer_params_t; -/* Complete msgbuf hdr for flow ring update from host to dongle */ +/** Complete msgbuf hdr for flow ring update from host to dongle */ typedef struct tx_flowring_create_request { cmn_msg_hdr_t msg; uint8 da[ETHER_ADDR_LEN]; @@ -232,7 +294,7 @@ typedef struct tx_flowring_create_request { uint16 int_vector; uint16 max_items; uint16 len_item; - addr64_t flow_ring_ptr; + bcm_addr64_t flow_ring_ptr; } tx_flowring_create_request_t; typedef struct tx_flowring_delete_request { @@ -249,6 +311,25 @@ typedef struct tx_flowring_flush_request { uint32 rsvd[7]; } tx_flowring_flush_request_t; +/** Subtypes for ring_config_req control message */ +typedef enum ring_config_subtype { + /** Default D2H PCIE doorbell override using ring_config_req msg */ + D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL = 1, /* Software doorbell */ + D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL = 2 /* MSI configuration */ +} ring_config_subtype_t; + +typedef struct ring_config_req { + cmn_msg_hdr_t msg; + uint16 subtype; + uint16 ring_id; + uint32 rsvd; + union { + uint32 data[6]; + /** D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL */ + bcmpcie_soft_doorbell_t soft_doorbell; + }; +} ring_config_req_t; + typedef union 
ctrl_submit_item { ioctl_req_msg_t ioctl_req; ioctl_resp_evt_buf_post_msg_t resp_buf_post; @@ -256,18 +337,19 @@ typedef union ctrl_submit_item { tx_flowring_create_request_t flow_create; tx_flowring_delete_request_t flow_delete; tx_flowring_flush_request_t flow_flush; + ring_config_req_t ring_config_req; unsigned char check[H2DRING_CTRL_SUB_ITEMSIZE]; } ctrl_submit_item_t; -/* Control Completion messages (20 bytes) */ +/** Control Completion messages (20 bytes) */ typedef struct compl_msg_hdr { - /* status for the completion */ + /** status for the completion */ int16 status; - /* submisison flow ring id which generated this status */ + /** submisison flow ring id which generated this status */ uint16 flow_ring_id; } compl_msg_hdr_t; -/* XOR checksum or a magic number to audit DMA done */ +/** XOR checksum or a magic number to audit DMA done */ typedef uint32 dma_done_t; /* completion header status codes */ @@ -284,83 +366,83 @@ typedef uint32 dma_done_t; #define BCMPCIE_MAX_IOCTLRESP_BUF 10 #define BCMPCIE_MAX_EVENT_BUF 11 -/* IOCTL completion response */ +/** IOCTL completion response */ typedef struct ioctl_compl_resp_msg { - /* common message header */ + /** common message header */ cmn_msg_hdr_t cmn_hdr; - /* completion message header */ + /** completion message header */ compl_msg_hdr_t compl_hdr; - /* response buffer len where a host buffer is involved */ + /** response buffer len where a host buffer is involved */ uint16 resp_len; - /* transaction id to pair with a request */ + /** transaction id to pair with a request */ uint16 trans_id; - /* cmd id */ + /** cmd id */ uint32 cmd; - /* XOR checksum or a magic number to audit DMA done */ + /** XOR checksum or a magic number to audit DMA done */ dma_done_t marker; } ioctl_comp_resp_msg_t; -/* IOCTL request acknowledgement */ +/** IOCTL request acknowledgement */ typedef struct ioctl_req_ack_msg { - /* common message header */ + /** common message header */ cmn_msg_hdr_t cmn_hdr; - /* completion message 
header */ + /** completion message header */ compl_msg_hdr_t compl_hdr; - /* cmd id */ + /** cmd id */ uint32 cmd; - uint32 rsvd[1]; - /* XOR checksum or a magic number to audit DMA done */ + uint32 rsvd; + /** XOR checksum or a magic number to audit DMA done */ dma_done_t marker; } ioctl_req_ack_msg_t; -/* WL event message: send from device to host */ +/** WL event message: send from device to host */ typedef struct wlevent_req_msg { - /* common message header */ + /** common message header */ cmn_msg_hdr_t cmn_hdr; - /* completion message header */ + /** completion message header */ compl_msg_hdr_t compl_hdr; - /* event data len valid with the event buffer */ + /** event data len valid with the event buffer */ uint16 event_data_len; - /* sequence number */ + /** sequence number */ uint16 seqnum; - /* rsvd */ + /** rsvd */ uint32 rsvd; - /* XOR checksum or a magic number to audit DMA done */ + /** XOR checksum or a magic number to audit DMA done */ dma_done_t marker; } wlevent_req_msg_t; -/* dma xfer complete message */ +/** dma xfer complete message */ typedef struct pcie_dmaxfer_cmplt { - /* common message header */ + /** common message header */ cmn_msg_hdr_t cmn_hdr; - /* completion message header */ + /** completion message header */ compl_msg_hdr_t compl_hdr; uint32 rsvd[2]; - /* XOR checksum or a magic number to audit DMA done */ + /** XOR checksum or a magic number to audit DMA done */ dma_done_t marker; } pcie_dmaxfer_cmplt_t; -/* general status message */ +/** general status message */ typedef struct pcie_gen_status { - /* common message header */ + /** common message header */ cmn_msg_hdr_t cmn_hdr; - /* completion message header */ + /** completion message header */ compl_msg_hdr_t compl_hdr; uint32 rsvd[2]; - /* XOR checksum or a magic number to audit DMA done */ + /** XOR checksum or a magic number to audit DMA done */ dma_done_t marker; } pcie_gen_status_t; -/* ring status message */ +/** ring status message */ typedef struct pcie_ring_status { - /* 
common message header */ + /** common message header */ cmn_msg_hdr_t cmn_hdr; - /* completion message header */ + /** completion message header */ compl_msg_hdr_t compl_hdr; - /* message which firmware couldn't decode */ + /** message which firmware couldn't decode */ uint16 write_idx; uint16 rsvd[3]; - /* XOR checksum or a magic number to audit DMA done */ + /** XOR checksum or a magic number to audit DMA done */ dma_done_t marker; } pcie_ring_status_t; @@ -368,14 +450,15 @@ typedef struct tx_flowring_create_response { cmn_msg_hdr_t msg; compl_msg_hdr_t cmplt; uint32 rsvd[2]; - /* XOR checksum or a magic number to audit DMA done */ + /** XOR checksum or a magic number to audit DMA done */ dma_done_t marker; } tx_flowring_create_response_t; + typedef struct tx_flowring_delete_response { cmn_msg_hdr_t msg; compl_msg_hdr_t cmplt; uint32 rsvd[2]; - /* XOR checksum or a magic number to audit DMA done */ + /** XOR checksum or a magic number to audit DMA done */ dma_done_t marker; } tx_flowring_delete_response_t; @@ -383,21 +466,31 @@ typedef struct tx_flowring_flush_response { cmn_msg_hdr_t msg; compl_msg_hdr_t cmplt; uint32 rsvd[2]; - /* XOR checksum or a magic number to audit DMA done */ + /** XOR checksum or a magic number to audit DMA done */ dma_done_t marker; } tx_flowring_flush_response_t; -/* Common layout of all d2h control messages */ +/** Common layout of all d2h control messages */ typedef struct ctrl_compl_msg { - /* common message header */ - cmn_msg_hdr_t cmn_hdr; - /* completion message header */ - compl_msg_hdr_t compl_hdr; - uint32 rsvd[2]; - /* XOR checksum or a magic number to audit DMA done */ - dma_done_t marker; + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; } ctrl_compl_msg_t; +typedef struct ring_config_resp { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** 
completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ring_config_resp_t; + typedef union ctrl_completion_item { ioctl_comp_resp_msg_t ioctl_resp; wlevent_req_msg_t event; @@ -409,23 +502,24 @@ typedef union ctrl_completion_item { tx_flowring_delete_response_t txfl_delete_resp; tx_flowring_flush_response_t txfl_flush_resp; ctrl_compl_msg_t ctrl_compl; + ring_config_resp_t ring_config_resp; unsigned char check[D2HRING_CTRL_CMPLT_ITEMSIZE]; } ctrl_completion_item_t; -/* H2D Rxpost ring work items */ +/** H2D Rxpost ring work items */ typedef struct host_rxbuf_post { - /* common message header */ + /** common message header */ cmn_msg_hdr_t cmn_hdr; - /* provided meta data buffer len */ + /** provided meta data buffer len */ uint16 metadata_buf_len; - /* provided data buffer len to receive data */ + /** provided data buffer len to receive data */ uint16 data_buf_len; - /* alignment to make the host buffers start on 8 byte boundary */ + /** alignment to make the host buffers start on 8 byte boundary */ uint32 rsvd; - /* provided meta data buffer */ - addr64_t metadata_buf_addr; - /* provided data buffer to receive data */ - addr64_t data_buf_addr; + /** provided meta data buffer */ + bcm_addr64_t metadata_buf_addr; + /** provided data buffer to receive data */ + bcm_addr64_t data_buf_addr; } host_rxbuf_post_t; typedef union rxbuf_submit_item { @@ -434,25 +528,25 @@ typedef union rxbuf_submit_item { } rxbuf_submit_item_t; -/* D2H Rxcompletion ring work items */ +/** D2H Rxcompletion ring work items */ typedef struct host_rxbuf_cmpl { - /* common message header */ + /** common message header */ cmn_msg_hdr_t cmn_hdr; - /* completion message header */ + /** completion message header */ compl_msg_hdr_t compl_hdr; - /* filled up meta data len */ + /** filled up meta data len */ uint16 metadata_len; - /* filled up buffer len to receive data */ + /** filled up buffer len to 
receive data */ uint16 data_len; - /* offset in the host rx buffer where the data starts */ + /** offset in the host rx buffer where the data starts */ uint16 data_offset; - /* offset in the host rx buffer where the data starts */ + /** offset in the host rx buffer where the data starts */ uint16 flags; - /* rx status */ + /** rx status */ uint32 rx_status_0; uint32 rx_status_1; - /* XOR checksum or a magic number to audit DMA done */ - dma_done_t marker; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; } host_rxbuf_cmpl_t; typedef union rxbuf_complete_item { @@ -462,24 +556,25 @@ typedef union rxbuf_complete_item { typedef struct host_txbuf_post { - /* common message header */ + /** common message header */ cmn_msg_hdr_t cmn_hdr; - /* eth header */ + /** eth header */ uint8 txhdr[ETHER_HDR_LEN]; - /* flags */ + /** flags */ uint8 flags; - /* number of segments */ + /** number of segments */ uint8 seg_cnt; - /* provided meta data buffer for txstatus */ - addr64_t metadata_buf_addr; - /* provided data buffer to receive data */ - addr64_t data_buf_addr; - /* provided meta data buffer len */ + /** provided meta data buffer for txstatus */ + bcm_addr64_t metadata_buf_addr; + /** provided data buffer to receive data */ + bcm_addr64_t data_buf_addr; + /** provided meta data buffer len */ uint16 metadata_buf_len; - /* provided data buffer len to receive data */ + /** provided data buffer len to receive data */ uint16 data_len; - uint32 flag2; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; } host_txbuf_post_t; #define BCMPCIE_PKT_FLAGS_FRAME_802_3 0x01 @@ -492,36 +587,33 @@ typedef struct host_txbuf_post { #define BCMPCIE_PKT_FLAGS_PRIO_SHIFT 5 #define BCMPCIE_PKT_FLAGS_PRIO_MASK (7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT) -/* These are added to fix up teh compile issues */ +/* These are added to fix up compile issues */ #define BCMPCIE_TXPOST_FLAGS_FRAME_802_3 BCMPCIE_PKT_FLAGS_FRAME_802_3 #define 
BCMPCIE_TXPOST_FLAGS_FRAME_802_11 BCMPCIE_PKT_FLAGS_FRAME_802_11 #define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT BCMPCIE_PKT_FLAGS_PRIO_SHIFT #define BCMPCIE_TXPOST_FLAGS_PRIO_MASK BCMPCIE_PKT_FLAGS_PRIO_MASK -#define BCMPCIE_PKT_FLAGS2_FORCELOWRATE_MASK 0x01 -#define BCMPCIE_PKT_FLAGS2_FORCELOWRATE_SHIFT 0 - -/* H2D Txpost ring work items */ +/** H2D Txpost ring work items */ typedef union txbuf_submit_item { host_txbuf_post_t txpost; unsigned char check[H2DRING_TXPOST_ITEMSIZE]; } txbuf_submit_item_t; -/* D2H Txcompletion ring work items */ +/** D2H Txcompletion ring work items */ typedef struct host_txbuf_cmpl { - /* common message header */ + /** common message header */ cmn_msg_hdr_t cmn_hdr; - /* completion message header */ + /** completion message header */ compl_msg_hdr_t compl_hdr; union { struct { - /* provided meta data len */ + /** provided meta data len */ uint16 metadata_len; - /* WLAN side txstatus */ + /** WLAN side txstatus */ uint16 tx_status; }; - /* XOR checksum or a magic number to audit DMA done */ - dma_done_t marker; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; }; } host_txbuf_cmpl_t; @@ -533,19 +625,22 @@ typedef union txbuf_complete_item { #define BCMPCIE_D2H_METADATA_HDRLEN 4 #define BCMPCIE_D2H_METADATA_MINLEN (BCMPCIE_D2H_METADATA_HDRLEN + 4) -/* ret buf struct */ +/** ret buf struct */ typedef struct ret_buf_ptr { uint32 low_addr; uint32 high_addr; } ret_buf_t; + #ifdef PCIE_API_REV1 + /* ioctl specific hdr */ typedef struct ioctl_hdr { uint16 cmd; uint16 retbuf_len; uint32 cmd_id; } ioctl_hdr_t; + typedef struct ioctlptr_hdr { uint16 cmd; uint16 retbuf_len; @@ -553,19 +648,22 @@ typedef struct ioctlptr_hdr { uint16 rsvd; uint32 cmd_id; } ioctlptr_hdr_t; + #else /* PCIE_API_REV1 */ + typedef struct ioctl_req_hdr { - uint32 pkt_id; /* Packet ID */ - uint32 cmd; /* IOCTL ID */ + uint32 pkt_id; /**< Packet ID */ + uint32 cmd; /**< IOCTL ID */ uint16 retbuf_len; uint16 buflen; - uint16 xt_id; /* transaction ID */ 
+ uint16 xt_id; /**< transaction ID */ uint16 rsvd[1]; } ioctl_req_hdr_t; + #endif /* PCIE_API_REV1 */ -/* Complete msgbuf hdr for ioctl from host to dongle */ +/** Complete msgbuf hdr for ioctl from host to dongle */ typedef struct ioct_reqst_hdr { cmn_msg_hdr_t msg; #ifdef PCIE_API_REV1 @@ -575,6 +673,7 @@ typedef struct ioct_reqst_hdr { #endif ret_buf_t ret_buf; } ioct_reqst_hdr_t; + typedef struct ioctptr_reqst_hdr { cmn_msg_hdr_t msg; #ifdef PCIE_API_REV1 @@ -586,7 +685,7 @@ typedef struct ioctptr_reqst_hdr { ret_buf_t ioct_buf; } ioctptr_reqst_hdr_t; -/* ioctl response header */ +/** ioctl response header */ typedef struct ioct_resp_hdr { cmn_msg_hdr_t msg; #ifdef PCIE_API_REV1 @@ -599,7 +698,7 @@ typedef struct ioct_resp_hdr { uint32 inline_data; #ifdef PCIE_API_REV1 #else - uint16 xt_id; /* transaction ID */ + uint16 xt_id; /**< transaction ID */ uint16 rsvd[1]; #endif } ioct_resp_hdr_t; @@ -608,10 +707,10 @@ typedef struct ioct_resp_hdr { /* ret buf hdr will be stripped off inside dongle itself */ typedef struct msgbuf_ioctl_resp { ioct_resp_hdr_t ioct_hdr; - ret_buf_t ret_buf; /* ret buf pointers */ + ret_buf_t ret_buf; /**< ret buf pointers */ } msgbuf_ioct_resp_t; -/* WL evet hdr info */ +/** WL event hdr info */ typedef struct wl_event_hdr { cmn_msg_hdr_t msg; uint16 event; @@ -629,7 +728,7 @@ typedef struct txbatch_lenptr_tup { uint32 pktid; uint16 pktlen; uint16 rsvd; - ret_buf_t ret_buf; /* ret buf pointers */ + ret_buf_t ret_buf; /**< ret buf pointers */ } txbatch_lenptr_tup_t; typedef struct txbatch_cmn_msghdr { @@ -644,14 +743,14 @@ typedef struct txbatch_cmn_msghdr { typedef struct txbatch_msghdr { txbatch_cmn_msghdr_t txcmn; - txbatch_lenptr_tup_t tx_tup[0]; /* Based on packet count */ + txbatch_lenptr_tup_t tx_tup[0]; /**< Based on packet count */ } txbatch_msghdr_t; /* TX desc posting header */ typedef struct tx_lenptr_tup { uint16 pktlen; uint16 rsvd; - ret_buf_t ret_buf; /* ret buf pointers */ + ret_buf_t ret_buf; /**< ret buf pointers */ } 
tx_lenptr_tup_t; typedef struct txdescr_cmn_msghdr { @@ -667,22 +766,24 @@ typedef struct txdescr_msghdr { txdescr_cmn_msghdr_t txcmn; uint8 txhdr[ETHER_HDR_LEN]; uint16 rsvd; - tx_lenptr_tup_t tx_tup[0]; /* Based on descriptor count */ + tx_lenptr_tup_t tx_tup[0]; /**< Based on descriptor count */ } txdescr_msghdr_t; -/* Tx status header info */ +/** Tx status header info */ typedef struct txstatus_hdr { cmn_msg_hdr_t msg; uint32 pktid; } txstatus_hdr_t; -/* RX bufid-len-ptr tuple */ + +/** RX bufid-len-ptr tuple */ typedef struct rx_lenptr_tup { uint32 rxbufid; uint16 len; uint16 rsvd2; - ret_buf_t ret_buf; /* ret buf pointers */ + ret_buf_t ret_buf; /**< ret buf pointers */ } rx_lenptr_tup_t; -/* Rx descr Post hdr info */ + +/** Rx descr Post hdr info */ typedef struct rxdesc_msghdr { cmn_msg_hdr_t msg; uint16 rsvd0; @@ -691,7 +792,7 @@ typedef struct rxdesc_msghdr { rx_lenptr_tup_t rx_tup[0]; } rxdesc_msghdr_t; -/* RX complete tuples */ +/** RX complete tuples */ typedef struct rxcmplt_tup { uint16 retbuf_len; uint16 data_offset; @@ -699,13 +800,15 @@ typedef struct rxcmplt_tup { uint32 rxstatus1; uint32 rxbufid; } rxcmplt_tup_t; -/* RX complete messge hdr */ + +/** RX complete messge hdr */ typedef struct rxcmplt_hdr { cmn_msg_hdr_t msg; uint16 rsvd0; uint16 rxcmpltcnt; rxcmplt_tup_t rx_tup[0]; } rxcmplt_hdr_t; + typedef struct hostevent_hdr { cmn_msg_hdr_t msg; uint32 evnt_pyld; @@ -728,4 +831,33 @@ enum { /* defines for flags */ #define MSGBUF_IOC_ACTION_MASK 0x1 +#define MAX_SUSPEND_REQ 15 + +typedef struct tx_idle_flowring_suspend_request { + cmn_msg_hdr_t msg; + uint16 ring_id[MAX_SUSPEND_REQ]; /**< ring Id's */ + uint16 num; /**< number of flowid's to suspend */ +} tx_idle_flowring_suspend_request_t; + +typedef struct tx_idle_flowring_suspend_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + dma_done_t marker; +} tx_idle_flowring_suspend_response_t; + +typedef struct tx_idle_flowring_resume_request { + cmn_msg_hdr_t msg; + 
uint16 flow_ring_id; + uint16 reason; + uint32 rsvd[7]; +} tx_idle_flowring_resume_request_t; + +typedef struct tx_idle_flowring_resume_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + dma_done_t marker; +} tx_idle_flowring_resume_response_t; + #endif /* _bcmmsgbuf_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/bcmnvram.h b/drivers/amlogic/wifi/bcmdhd/include/bcmnvram.h new file mode 100644 index 0000000000000..e3ba9b4166fb5 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmnvram.h @@ -0,0 +1,310 @@ +/* + * NVRAM variable manipulation + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmnvram.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _bcmnvram_h_ +#define _bcmnvram_h_ + +#ifndef _LANGUAGE_ASSEMBLY + +#include +#include + +struct nvram_header { + uint32 magic; + uint32 len; + uint32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */ + uint32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */ + uint32 config_ncdl; /* ncdl values for memc */ +}; + +struct nvram_tuple { + char *name; + char *value; + struct nvram_tuple *next; +}; + +/* + * Get default value for an NVRAM variable + */ +extern char *nvram_default_get(const char *name); +/* + * validate/restore all per-interface related variables + */ +extern void nvram_validate_all(char *prefix, bool restore); + +/* + * restore specific per-interface variable + */ +extern void nvram_restore_var(char *prefix, char *name); + +/* + * Initialize NVRAM access. May be unnecessary or undefined on certain + * platforms. + */ +extern int nvram_init(void *sih); +extern int nvram_deinit(void *sih); + + +/* + * Append a chunk of nvram variables to the global list + */ +extern int nvram_append(void *si, char *vars, uint varsz); + +extern void nvram_get_global_vars(char **varlst, uint *varsz); + + +/* + * Check for reset button press for restoring factory defaults. + */ +extern int nvram_reset(void *sih); + +/* + * Disable NVRAM access. May be unnecessary or undefined on certain + * platforms. + */ +extern void nvram_exit(void *sih); + +/* + * Get the value of an NVRAM variable. The pointer returned may be + * invalid after a set. + * @param name name of variable to get + * @return value of variable or NULL if undefined + */ +extern char * nvram_get(const char *name); + +/* + * Get the value of an NVRAM variable. The pointer returned may be + * invalid after a set. 
+ * @param name name of variable to get + * @param bit bit value to get + * @return value of variable or NULL if undefined + */ +extern char * nvram_get_bitflag(const char *name, const int bit); + +/* + * Read the reset GPIO value from the nvram and set the GPIO + * as input + */ +extern int nvram_resetgpio_init(void *sih); + +/* + * Get the value of an NVRAM variable. + * @param name name of variable to get + * @return value of variable or NUL if undefined + */ +static INLINE char * +nvram_safe_get(const char *name) +{ + char *p = nvram_get(name); + return p ? p : ""; +} + +/* + * Match an NVRAM variable. + * @param name name of variable to match + * @param match value to compare against value of variable + * @return TRUE if variable is defined and its value is string equal + * to match or FALSE otherwise + */ +static INLINE int +nvram_match(const char *name, const char *match) +{ + const char *value = nvram_get(name); + return (value && !strcmp(value, match)); +} + +/* + * Match an NVRAM variable. + * @param name name of variable to match + * @param bit bit value to get + * @param match value to compare against value of variable + * @return TRUE if variable is defined and its value is string equal + * to match or FALSE otherwise + */ +static INLINE int +nvram_match_bitflag(const char *name, const int bit, const char *match) +{ + const char *value = nvram_get_bitflag(name, bit); + return (value && !strcmp(value, match)); +} + +/* + * Inversely match an NVRAM variable. + * @param name name of variable to match + * @param match value to compare against value of variable + * @return TRUE if variable is defined and its value is not string + * equal to invmatch or FALSE otherwise + */ +static INLINE int +nvram_invmatch(const char *name, const char *invmatch) +{ + const char *value = nvram_get(name); + return (value && strcmp(value, invmatch)); +} + +/* + * Set the value of an NVRAM variable. The name and value strings are + * copied into private storage. 
Pointers to previously set values + * may become invalid. The new value may be immediately + * retrieved but will not be permanently stored until a commit. + * @param name name of variable to set + * @param value value of variable + * @return 0 on success and errno on failure + */ +extern int nvram_set(const char *name, const char *value); + +/* + * Set the value of an NVRAM variable. The name and value strings are + * copied into private storage. Pointers to previously set values + * may become invalid. The new value may be immediately + * retrieved but will not be permanently stored until a commit. + * @param name name of variable to set + * @param bit bit value to set + * @param value value of variable + * @return 0 on success and errno on failure + */ +extern int nvram_set_bitflag(const char *name, const int bit, const int value); +/* + * Unset an NVRAM variable. Pointers to previously set values + * remain valid until a set. + * @param name name of variable to unset + * @return 0 on success and errno on failure + * NOTE: use nvram_commit to commit this change to flash. + */ +extern int nvram_unset(const char *name); + +/* + * Commit NVRAM variables to permanent storage. All pointers to values + * may be invalid after a commit. + * NVRAM values are undefined after a commit. + * @param nvram_corrupt true to corrupt nvram, false otherwise. + * @return 0 on success and errno on failure + */ +extern int nvram_commit_internal(bool nvram_corrupt); + +/* + * Commit NVRAM variables to permanent storage. All pointers to values + * may be invalid after a commit. + * NVRAM values are undefined after a commit. + * @return 0 on success and errno on failure + */ +extern int nvram_commit(void); + +/* + * Get all NVRAM variables (format name=value\0 ... \0\0). 
+ * @param buf buffer to store variables + * @param count size of buffer in bytes + * @return 0 on success and errno on failure + */ +extern int nvram_getall(char *nvram_buf, int count); + +/* + * returns the crc value of the nvram + * @param nvh nvram header pointer + */ +uint8 nvram_calc_crc(struct nvram_header * nvh); + +extern int nvram_space; +#endif /* _LANGUAGE_ASSEMBLY */ + +/* The NVRAM version number stored as an NVRAM variable */ +#define NVRAM_SOFTWARE_VERSION "1" + +#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */ +#define NVRAM_CLEAR_MAGIC 0x0 +#define NVRAM_INVALID_MAGIC 0xFFFFFFFF +#define NVRAM_VERSION 1 +#define NVRAM_HEADER_SIZE 20 +/* This definition is for precommit staging, and will be removed */ +#define NVRAM_SPACE 0x8000 +/* For CFE builds this gets passed in thru the makefile */ +#ifndef MAX_NVRAM_SPACE +#define MAX_NVRAM_SPACE 0x10000 +#endif +#define DEF_NVRAM_SPACE 0x8000 +#define ROM_ENVRAM_SPACE 0x1000 +#define NVRAM_LZMA_MAGIC 0x4c5a4d41 /* 'LZMA' */ + +#define NVRAM_MAX_VALUE_LEN 255 +#define NVRAM_MAX_PARAM_LEN 64 + +#define NVRAM_CRC_START_POSITION 9 /* magic, len, crc8 to be skipped */ +#define NVRAM_CRC_VER_MASK 0xffffff00 /* for crc_ver_init */ + +/* Offsets to embedded nvram area */ +#define NVRAM_START_COMPRESSED 0x400 +#define NVRAM_START 0x1000 + +#define BCM_JUMBO_NVRAM_DELIMIT '\n' +#define BCM_JUMBO_START "Broadcom Jumbo Nvram file" + + +#if (defined(FAILSAFE_UPGRADE) || defined(CONFIG_FAILSAFE_UPGRADE) || \ + defined(__CONFIG_FAILSAFE_UPGRADE_SUPPORT__)) +#define IMAGE_SIZE "image_size" +#define BOOTPARTITION "bootpartition" +#define IMAGE_BOOT BOOTPARTITION +#define PARTIALBOOTS "partialboots" +#define MAXPARTIALBOOTS "maxpartialboots" +#define IMAGE_1ST_FLASH_TRX "flash0.trx" +#define IMAGE_1ST_FLASH_OS "flash0.os" +#define IMAGE_2ND_FLASH_TRX "flash0.trx2" +#define IMAGE_2ND_FLASH_OS "flash0.os2" +#define IMAGE_FIRST_OFFSET "image_first_offset" +#define IMAGE_SECOND_OFFSET "image_second_offset" +#define LINUX_FIRST 
"linux" +#define LINUX_SECOND "linux2" +#endif + +#if (defined(DUAL_IMAGE) || defined(CONFIG_DUAL_IMAGE) || \ + defined(__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__)) +/* Shared by all: CFE, Linux Kernel, and Ap */ +#define IMAGE_BOOT "image_boot" +#define BOOTPARTITION IMAGE_BOOT +/* CFE variables */ +#define IMAGE_1ST_FLASH_TRX "flash0.trx" +#define IMAGE_1ST_FLASH_OS "flash0.os" +#define IMAGE_2ND_FLASH_TRX "flash0.trx2" +#define IMAGE_2ND_FLASH_OS "flash0.os2" +#define IMAGE_SIZE "image_size" + +/* CFE and Linux Kernel shared variables */ +#define IMAGE_FIRST_OFFSET "image_first_offset" +#define IMAGE_SECOND_OFFSET "image_second_offset" + +/* Linux application variables */ +#define LINUX_FIRST "linux" +#define LINUX_SECOND "linux2" +#define POLICY_TOGGLE "toggle" +#define LINUX_PART_TO_FLASH "linux_to_flash" +#define LINUX_FLASH_POLICY "linux_flash_policy" + +#endif /* defined(DUAL_IMAGE||CONFIG_DUAL_IMAGE)||__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__ */ + +#endif /* _bcmnvram_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/bcmpcie.h b/drivers/amlogic/wifi/bcmdhd/include/bcmpcie.h new file mode 100644 index 0000000000000..0c15055a03536 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmpcie.h @@ -0,0 +1,318 @@ +/* + * Broadcom PCIE + * Software-specific definitions shared between device and host side + * Explains the shared area between host and dongle + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, 
provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmpcie.h 604490 2015-12-07 15:48:45Z $ + */ + + +#ifndef _bcmpcie_h_ +#define _bcmpcie_h_ + +#include + +#define ADDR_64(x) (x.addr) +#define HIGH_ADDR_32(x) ((uint32) (((sh_addr_t) x).high_addr)) +#define LOW_ADDR_32(x) ((uint32) (((sh_addr_t) x).low_addr)) + +typedef struct { + uint32 low_addr; + uint32 high_addr; +} sh_addr_t; + + +/* May be overridden by 43xxxxx-roml.mk */ +#if !defined(BCMPCIE_MAX_TX_FLOWS) +#define BCMPCIE_MAX_TX_FLOWS 40 +#endif /* ! BCMPCIE_MAX_TX_FLOWS */ + +/** + * Feature flags enabled in dongle. Advertised by dongle to DHD via the PCIe Shared structure that + * is located in device memory. + */ +#define PCIE_SHARED_VERSION 0x00005 +#define PCIE_SHARED_VERSION_MASK 0x000FF +#define PCIE_SHARED_ASSERT_BUILT 0x00100 +#define PCIE_SHARED_ASSERT 0x00200 +#define PCIE_SHARED_TRAP 0x00400 +#define PCIE_SHARED_IN_BRPT 0x00800 +#define PCIE_SHARED_SET_BRPT 0x01000 +#define PCIE_SHARED_PENDING_BRPT 0x02000 +#define PCIE_SHARED_TXPUSH_SPRT 0x04000 +#define PCIE_SHARED_EVT_SEQNUM 0x08000 +#define PCIE_SHARED_DMA_INDEX 0x10000 + +/** + * There are host types where a device interrupt can 'race ahead' of data written by the device into + * host memory. The dongle can avoid this condition using a variety of techniques (read barrier, + * using PCIe Message Signalled Interrupts, or by using the PCIE_DMA_INDEX feature). Unfortunately + * these techniques have drawbacks on router platforms. 
For these platforms, it was decided to not + * avoid the condition, but to detect the condition instead and act on it. + * D2H M2M DMA Complete Sync mechanism: Modulo-253-SeqNum or XORCSUM + */ +#define PCIE_SHARED_D2H_SYNC_SEQNUM 0x20000 +#define PCIE_SHARED_D2H_SYNC_XORCSUM 0x40000 +#define PCIE_SHARED_D2H_SYNC_MODE_MASK \ + (PCIE_SHARED_D2H_SYNC_SEQNUM | PCIE_SHARED_D2H_SYNC_XORCSUM) +#define PCIE_SHARED_IDLE_FLOW_RING 0x80000 +#define PCIE_SHARED_2BYTE_INDICES 0x100000 + + +#define PCIE_SHARED_D2H_MAGIC 0xFEDCBA09 +#define PCIE_SHARED_H2D_MAGIC 0x12345678 + +/** + * Message rings convey messages between host and device. They are unidirectional, and are located + * in host memory. + * + * This is the minimal set of message rings, known as 'common message rings': + */ +#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT 0 +#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT 1 +#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE 2 +#define BCMPCIE_D2H_MSGRING_TX_COMPLETE 3 +#define BCMPCIE_D2H_MSGRING_RX_COMPLETE 4 +#define BCMPCIE_COMMON_MSGRING_MAX_ID 4 + +#define BCMPCIE_H2D_COMMON_MSGRINGS 2 +#define BCMPCIE_D2H_COMMON_MSGRINGS 3 +#define BCMPCIE_COMMON_MSGRINGS 5 + +#define BCMPCIE_H2D_MSGRINGS(max_tx_flows) \ + (BCMPCIE_H2D_COMMON_MSGRINGS + (max_tx_flows)) + +/** + * H2D and D2H, WR and RD index, are maintained in the following arrays: + * - Array of all H2D WR Indices + * - Array of all H2D RD Indices + * - Array of all D2H WR Indices + * - Array of all D2H RD Indices + * + * The offset of the WR or RD indexes (for common rings) in these arrays are + * listed below. Arrays ARE NOT indexed by a ring's id. 
+ * + * D2H common rings WR and RD index start from 0, even though their ringids + * start from BCMPCIE_H2D_COMMON_MSGRINGS + */ + +#define BCMPCIE_H2D_RING_IDX(h2d_ring_id) (h2d_ring_id) + +enum h2dring_idx { + /* H2D common rings */ + BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT), + BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT), + + /* First TxPost's WR or RD index starts after all H2D common rings */ + BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_COMMON_MSGRINGS) +}; + +#define BCMPCIE_D2H_RING_IDX(d2h_ring_id) \ + ((d2h_ring_id) - BCMPCIE_H2D_COMMON_MSGRINGS) + +enum d2hring_idx { + /* D2H Common Rings */ + BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE), + BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_TX_COMPLETE), + BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_RX_COMPLETE) +}; + +/** + * Macros for managing arrays of RD WR indices: + * rw_index_sz: + * - in dongle, rw_index_sz is known at compile time + * - in host/DHD, rw_index_sz is derived from advertized pci_shared flags + * + * ring_idx: See h2dring_idx and d2hring_idx + */ + +/** Offset of a RD or WR index in H2D or D2H indices array */ +#define BCMPCIE_RW_INDEX_OFFSET(rw_index_sz, ring_idx) \ + ((rw_index_sz) * (ring_idx)) + +/** Fetch the address of RD or WR index in H2D or D2H indices array */ +#define BCMPCIE_RW_INDEX_ADDR(indices_array_base, rw_index_sz, ring_idx) \ + (void *)((uint32)(indices_array_base) + \ + BCMPCIE_RW_INDEX_OFFSET((rw_index_sz), (ring_idx))) + +/** H2D DMA Indices array size: given max flow rings */ +#define BCMPCIE_H2D_RW_INDEX_ARRAY_SZ(rw_index_sz, max_tx_flows) \ + ((rw_index_sz) * BCMPCIE_H2D_MSGRINGS(max_tx_flows)) + +/** D2H DMA Indices array size */ +#define BCMPCIE_D2H_RW_INDEX_ARRAY_SZ(rw_index_sz) \ + 
((rw_index_sz) * BCMPCIE_D2H_COMMON_MSGRINGS) + +/** + * This type is used by a 'message buffer' (which is a FIFO for messages). Message buffers are used + * for host<->device communication and are instantiated on both sides. ring_mem_t is instantiated + * both in host as well as device memory. + */ +typedef struct ring_mem { + uint16 idx; /* ring id */ + uint8 type; + uint8 rsvd; + uint16 max_item; /* Max number of items in flow ring */ + uint16 len_items; /* Items are fixed size. Length in bytes of one item */ + sh_addr_t base_addr; /* 64 bits address, either in host or device memory */ +} ring_mem_t; + + +/** + * Per flow ring, information is maintained in device memory, e.g. at what address the ringmem and + * ringstate are located. The flow ring itself can be instantiated in either host or device memory. + * + * Perhaps this type should be renamed to make clear that it resides in device memory only. + */ +typedef struct ring_info { + uint32 ringmem_ptr; /* ring mem location in dongle memory */ + + /* Following arrays are indexed using h2dring_idx and d2hring_idx, and not + * by a ringid. + */ + + /* 32bit ptr to arrays of WR or RD indices for all rings in dongle memory */ + uint32 h2d_w_idx_ptr; /* Array of all H2D ring's WR indices */ + uint32 h2d_r_idx_ptr; /* Array of all H2D ring's RD indices */ + uint32 d2h_w_idx_ptr; /* Array of all D2H ring's WR indices */ + uint32 d2h_r_idx_ptr; /* Array of all D2H ring's RD indices */ + + /* PCIE_DMA_INDEX feature: Dongle uses mem2mem DMA to sync arrays in host. + * Host may directly fetch WR and RD indices from these host-side arrays. + * + * 64bit ptr to arrays of WR or RD indices for all rings in host memory. 
+ */ + sh_addr_t h2d_w_idx_hostaddr; /* Array of all H2D ring's WR indices */ + sh_addr_t h2d_r_idx_hostaddr; /* Array of all H2D ring's RD indices */ + sh_addr_t d2h_w_idx_hostaddr; /* Array of all D2H ring's WR indices */ + sh_addr_t d2h_r_idx_hostaddr; /* Array of all D2H ring's RD indices */ + + uint16 max_sub_queues; /* maximum number of H2D rings: common + flow */ + uint16 rsvd; +} ring_info_t; + +/** + * A structure located in TCM that is shared between host and device, primarily used during + * initialization. + */ +typedef struct { + /** shared area version captured at flags 7:0 */ + uint32 flags; + + uint32 trap_addr; + uint32 assert_exp_addr; + uint32 assert_file_addr; + uint32 assert_line; + uint32 console_addr; /**< Address of hnd_cons_t */ + + uint32 msgtrace_addr; + + uint32 fwid; + + /* Used for debug/flow control */ + uint16 total_lfrag_pkt_cnt; + uint16 max_host_rxbufs; /* rsvd in spec */ + + uint32 dma_rxoffset; /* rsvd in spec */ + + /** these will be used for sleep request/ack, d3 req/ack */ + uint32 h2d_mb_data_ptr; + uint32 d2h_mb_data_ptr; + + /* information pertinent to host IPC/msgbuf channels */ + /** location in the TCM memory which has the ring_info */ + uint32 rings_info_ptr; + + /** block of host memory for the scratch buffer */ + uint32 host_dma_scratch_buffer_len; + sh_addr_t host_dma_scratch_buffer; + + /** block of host memory for the dongle to push the status into */ + uint32 device_rings_stsblk_len; + sh_addr_t device_rings_stsblk; + + uint32 buzzz; /* BUZZZ state format strings and trace buffer */ + +} pciedev_shared_t; + +extern pciedev_shared_t pciedev_shared; + +/** + * Mailboxes notify a remote party that an event took place, using interrupts. They use hardware + * support. 
+ */ + +/* H2D mail box Data */ +#define H2D_HOST_D3_INFORM 0x00000001 +#define H2D_HOST_DS_ACK 0x00000002 +#define H2D_HOST_DS_NAK 0x00000004 +#define H2D_HOST_CONS_INT 0x80000000 /**< h2d int for console cmds */ +#define H2D_FW_TRAP 0x20000000 /**< dump HW reg info for Livelock issue */ +#define H2D_HOST_D0_INFORM_IN_USE 0x00000008 +#define H2D_HOST_D0_INFORM 0x00000010 + +/* D2H mail box Data */ +#define D2H_DEV_D3_ACK 0x00000001 +#define D2H_DEV_DS_ENTER_REQ 0x00000002 +#define D2H_DEV_DS_EXIT_NOTE 0x00000004 +#define D2H_DEV_FWHALT 0x10000000 +#define D2H_DEV_MB_MASK (D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \ + D2H_DEV_DS_EXIT_NOTE | D2H_DEV_FWHALT) +#define D2H_DEV_MB_INVALIDATED(x) ((!x) || (x & ~D2H_DEV_MB_MASK)) + +/** These macro's operate on type 'inuse_lclbuf_pool_t' and are used by firmware only */ +#define NEXTTXP(i, d) ((((i)+1) >= (d)) ? 0 : ((i)+1)) +#define NTXPACTIVE(r, w, d) (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w))) +#define NTXPAVAIL(r, w, d) (((d) - NTXPACTIVE((r), (w), (d))) > 1) + +/* Function can be used to notify host of FW halt */ +#define READ_AVAIL_SPACE(w, r, d) \ + ((w >= r) ? (w - r) : (d - r)) + +#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) ((w >= r) ? 
(d - w) : (r - w)) +#define WRITE_SPACE_AVAIL(r, w, d) (d - (NTXPACTIVE(r, w, d)) - 1) +#define CHECK_WRITE_SPACE(r, w, d) \ + MIN(WRITE_SPACE_AVAIL(r, w, d), WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d)) + + +#define WRT_PEND(x) ((x)->wr_pending) +#define DNGL_RING_WPTR(msgbuf) (*((msgbuf)->tcm_rs_w_ptr)) +#define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a) (DNGL_RING_WPTR(msgbuf) = (a)) + +#define DNGL_RING_RPTR(msgbuf) (*((msgbuf)->tcm_rs_r_ptr)) +#define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a) (DNGL_RING_RPTR(msgbuf) = (a)) + +#define RING_START_PTR(x) ((x)->ringmem->base_addr.low_addr) +#define RING_MAX_ITEM(x) ((x)->ringmem->max_item) +#define RING_LEN_ITEMS(x) ((x)->ringmem->len_items) + +#endif /* _bcmpcie_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcispi.h b/drivers/amlogic/wifi/bcmdhd/include/bcmpcispi.h similarity index 86% rename from drivers/net/wireless/bcmdhd/include/bcmpcispi.h rename to drivers/amlogic/wifi/bcmdhd/include/bcmpcispi.h index d95f8127166b8..66c783c4aeff0 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmpcispi.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmpcispi.h @@ -1,9 +1,30 @@ /* * Broadcom PCI-SPI Host Controller Register Definitions * - * $ Copyright Open Broadcom Corporation $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: bcmpcispi.h 241182 2011-02-17 21:50:03Z $ + * + * <> + * + * $Id: bcmpcispi.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _BCM_PCI_SPI_H #define _BCM_PCI_SPI_H diff --git a/drivers/amlogic/wifi/bcmdhd/include/bcmperf.h b/drivers/amlogic/wifi/bcmdhd/include/bcmperf.h new file mode 100644 index 0000000000000..823c3b62f09a1 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmperf.h @@ -0,0 +1,39 @@ +/* + * Performance counters software interface. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmperf.h 514727 2014-11-12 03:02:48Z $ + */ +/* essai */ +#ifndef _BCMPERF_H_ +#define _BCMPERF_H_ +/* get cache hits and misses */ +#define BCMPERF_ENABLE_INSTRCOUNT() +#define BCMPERF_ENABLE_ICACHE_MISS() +#define BCMPERF_ENABLE_ICACHE_HIT() +#define BCMPERF_GETICACHE_MISS(x) ((x) = 0) +#define BCMPERF_GETICACHE_HIT(x) ((x) = 0) +#define BCMPERF_GETINSTRCOUNT(x) ((x) = 0) +#endif /* _BCMPERF_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdbus.h b/drivers/amlogic/wifi/bcmdhd/include/bcmsdbus.h similarity index 74% rename from drivers/net/wireless/bcmdhd/include/bcmsdbus.h rename to drivers/amlogic/wifi/bcmdhd/include/bcmsdbus.h index 7cf69bbd5a41c..56ea1d49b40f7 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmsdbus.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmsdbus.h @@ -2,9 +2,30 @@ * Definitions for API from sdio common code (bcmsdh) to individual * host controller drivers. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: bcmsdbus.h 408158 2013-06-17 22:15:35Z $ + * + * <> + * + * $Id: bcmsdbus.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _sdio_api_h_ @@ -32,7 +53,7 @@ #ifdef CUSTOM_MAX_TXGLOM_SIZE #define SDPCM_MAXGLOM_SIZE CUSTOM_MAX_TXGLOM_SIZE #else -#define SDPCM_MAXGLOM_SIZE 40 +#define SDPCM_MAXGLOM_SIZE 36 #endif /* CUSTOM_MAX_TXGLOM_SIZE */ #define SDPCM_TXGLOM_CPY 0 /* SDIO 2.0 should use copy mode */ @@ -50,6 +71,23 @@ #define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE #endif +#ifdef PKT_STATICS +typedef struct pkt_statics { + uint16 event_count; + uint32 event_size; + uint16 ctrl_count; + uint32 ctrl_size; + uint32 data_count; + uint32 data_size; + uint32 glom_cnt[SDPCM_MAXGLOM_SIZE]; + uint16 glom_max; + uint16 glom_count; + uint32 glom_size; + uint16 test_count; + uint32 test_size; +} pkt_statics_t; +#endif + typedef int SDIOH_API_RC; /* SDio Host structure */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh.h b/drivers/amlogic/wifi/bcmdhd/include/bcmsdh.h similarity index 87% rename from drivers/net/wireless/bcmdhd/include/bcmsdh.h rename to drivers/amlogic/wifi/bcmdhd/include/bcmsdh.h index 6f21ff34ad91f..0933d227f374b 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmsdh.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmsdh.h @@ -3,9 +3,30 @@ * export functions to client drivers * abstract OS and BUS specific details of SDIO * - * $ Copyright Open License Broadcom Corporation $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + 
* following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: bcmsdh.h 450676 2014-01-22 22:45:13Z $ + * + * <> + * + * $Id: bcmsdh.h 514727 2014-11-12 03:02:48Z $ */ /** @@ -31,10 +52,6 @@ extern const uint bcmsdh_msglevel; typedef struct bcmsdh_info bcmsdh_info_t; typedef void (*bcmsdh_cb_fn_t)(void *); -#if 0 && (NDISVER >= 0x0630) && 1 -extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, - void **regsva, uint irq, shared_info_t *sh); -#else extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva); /** * BCMSDH API context @@ -49,7 +66,6 @@ struct bcmsdh_info uint32 sbwad; /* Save backplane window address */ void *os_cxt; /* Pointer to per-OS private data */ }; -#endif /* Detach - freeup resources allocated in attach */ extern int bcmsdh_detach(osl_t *osh, void *sdh); @@ -215,7 +231,7 @@ extern int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_ir void* oob_irq_handler_context); extern void bcmsdh_oob_intr_unregister(bcmsdh_info_t *sdh); extern void bcmsdh_oob_intr_set(bcmsdh_info_t *sdh, bool enable); -#endif +#endif extern void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *sdh); extern void bcmsdh_dev_relax(bcmsdh_info_t *sdh); extern bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *sdh); diff --git 
a/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h b/drivers/amlogic/wifi/bcmdhd/include/bcmsdh_sdmmc.h similarity index 95% rename from drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h rename to drivers/amlogic/wifi/bcmdhd/include/bcmsdh_sdmmc.h index 266b44ac077d1..1bd35b527b9d0 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmsdh_sdmmc.h @@ -1,14 +1,14 @@ /* * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel * - * Copyright (C) 1999-2014, Broadcom Corporation - * + * Copyright (C) 1999-2016, Broadcom Corporation + * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: - * + * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that @@ -16,12 +16,15 @@ * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. - * + * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: bcmsdh_sdmmc.h 496576 2014-08-13 15:04:56Z $ + * + * <> + * + * $Id: bcmsdh_sdmmc.h 591160 2015-10-07 06:01:58Z $ */ #ifndef __BCMSDH_SDMMC_H__ @@ -35,7 +38,6 @@ #define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0) #define sd_cost(x) do { if (sd_msglevel & SDH_COST_VAL) printf x; } while (0) - #define sd_sync_dma(sd, read, nbytes) #define sd_init_dma(sd) #define sd_ack_intr(sd) @@ -58,7 +60,7 @@ /* private bus modes */ #define SDIOH_MODE_SD4 2 #define CLIENT_INTR 0x100 /* Get rid of this! */ -#define SDIOH_SDMMC_MAX_SG_ENTRIES (SDPCM_MAXGLOM_SIZE+2) +#define SDIOH_SDMMC_MAX_SG_ENTRIES SDPCM_MAXGLOM_SIZE #if defined(SWTXGLOM) typedef struct glom_buf { diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h b/drivers/amlogic/wifi/bcmdhd/include/bcmsdpcm.h similarity index 89% rename from drivers/net/wireless/bcmdhd/include/bcmsdpcm.h rename to drivers/amlogic/wifi/bcmdhd/include/bcmsdpcm.h index 932686c619fe8..5c0adff8e8ada 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmsdpcm.h @@ -2,9 +2,30 @@ * Broadcom SDIO/PCMCIA * Software-specific definitions shared between device and host side * - * $Copyright Open 2005 Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: bcmsdpcm.h 472405 2014-04-23 23:46:55Z $ + * + * <> + * + * $Id: bcmsdpcm.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _bcmsdpcm_h_ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdspi.h b/drivers/amlogic/wifi/bcmdhd/include/bcmsdspi.h similarity index 76% rename from drivers/net/wireless/bcmdhd/include/bcmsdspi.h rename to drivers/amlogic/wifi/bcmdhd/include/bcmsdspi.h index 9c082ec1364f2..b1831db8b19b7 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmsdspi.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmsdspi.h @@ -1,9 +1,30 @@ /* * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer * - * $ Copyright Open Broadcom Corporation $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: bcmsdspi.h 294363 2011-11-06 23:02:20Z $ + * + * <> + * + * $Id: bcmsdspi.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _BCM_SD_SPI_H #define _BCM_SD_SPI_H diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdstd.h b/drivers/amlogic/wifi/bcmdhd/include/bcmsdstd.h similarity index 88% rename from drivers/net/wireless/bcmdhd/include/bcmsdstd.h rename to drivers/amlogic/wifi/bcmdhd/include/bcmsdstd.h index c1562a6eb3c34..24df8de685d71 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmsdstd.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmsdstd.h @@ -1,9 +1,30 @@ /* * 'Standard' SDIO HOST CONTROLLER driver * - * $ Copyright Open Broadcom Corporation $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: bcmsdstd.h 455390 2014-02-13 22:14:56Z $ + * + * <> + * + * $Id: bcmsdstd.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _BCM_SD_STD_H #define _BCM_SD_STD_H diff --git a/drivers/amlogic/wifi/bcmdhd/include/bcmspi.h b/drivers/amlogic/wifi/bcmdhd/include/bcmspi.h new file mode 100644 index 0000000000000..e9a906e797340 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmspi.h @@ -0,0 +1,43 @@ +/* + * Broadcom SPI Low-Level Hardware Driver API + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmspi.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_SPI_H +#define _BCM_SPI_H + +extern void spi_devintr_off(sdioh_info_t *sd); +extern void spi_devintr_on(sdioh_info_t *sd); +extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor); +extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode); +extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr); +extern bool spi_hw_attach(sdioh_info_t *sd); +extern bool spi_hw_detach(sdioh_info_t *sd); +extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen); +extern void spi_spinbits(sdioh_info_t *sd); +extern void spi_waitbits(sdioh_info_t *sd, bool yield); + +#endif /* _BCM_SPI_H */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/bcmspibrcm.h b/drivers/amlogic/wifi/bcmdhd/include/bcmspibrcm.h new file mode 100644 index 0000000000000..7c2bfc4653c13 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmspibrcm.h @@ -0,0 +1,165 @@ +/* + * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmspibrcm.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_SPI_BRCM_H +#define _BCM_SPI_BRCM_H + +#ifndef SPI_MAX_IOFUNCS +/* Maximum number of I/O funcs */ +#define SPI_MAX_IOFUNCS 4 +#endif +/* global msglevel for debug messages - bitvals come from sdiovar.h */ + +#if defined(DHD_DEBUG) +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0) +#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0) +#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0) +#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0) +#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0) +#else +#define sd_err(x) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) +#endif + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_F1 64 +#define BLOCK_SIZE_F2 2048 +#define BLOCK_SIZE_F3 2048 + +/* internal return code */ +#define SUCCESS 0 +#undef ERROR +#define ERROR 1 +#define ERROR_UF 2 +#define ERROR_OF 3 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + void *bar0; /* BAR0 for PCI Device */ + osl_t *osh; /* osh handler */ + void *controller; /* Pointer to SPI Controller's private data struct */ + uint lockcount; /* nest count of spi_lock() 
calls */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint32 target_dev; /* Target device ID */ + uint32 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + uint32 intrcount; /* Client interrupts */ + uint32 local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SPI_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current transfer */ + uint16 card_rca; /* Current Address */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 card_dstatus; /* 32bit device status */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SPI_MAX_IOFUNCS]; + void *dma_buf; + ulong dma_phys; + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ + uint32 wordlen; /* host processor 16/32bits */ + uint32 prev_fun; + uint32 chip; + uint32 chiprev; + bool resp_delay_all; + bool dwordmode; + bool resp_delay_new; + + struct spierrstats_t spierrstats; +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmspibrcm.c + */ + 
+/* Global message bits */ +extern uint sd_msglevel; + +/************************************************************** + * Internal interfaces: bcmspibrcm.c references to per-port code + */ + +/* Interrupt (de)registration routines */ +extern int spi_register_irq(sdioh_info_t *sd, uint irq); +extern void spi_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void spi_lock(sdioh_info_t *sd); +extern void spi_unlock(sdioh_info_t *sd); + +/* Allocate/init/free per-OS private data */ +extern int spi_osinit(sdioh_info_t *sd); +extern void spi_osfree(sdioh_info_t *sd); + +#define SPI_RW_FLAG_M BITFIELD_MASK(1) /* Bit [31] - R/W Command Bit */ +#define SPI_RW_FLAG_S 31 +#define SPI_ACCESS_M BITFIELD_MASK(1) /* Bit [30] - Fixed/Incr Access */ +#define SPI_ACCESS_S 30 +#define SPI_FUNCTION_M BITFIELD_MASK(2) /* Bit [29:28] - Function Number */ +#define SPI_FUNCTION_S 28 +#define SPI_REG_ADDR_M BITFIELD_MASK(17) /* Bit [27:11] - Address */ +#define SPI_REG_ADDR_S 11 +#define SPI_LEN_M BITFIELD_MASK(11) /* Bit [10:0] - Packet length */ +#define SPI_LEN_S 0 + +#endif /* _BCM_SPI_BRCM_H */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/bcmsrom_fmt.h b/drivers/amlogic/wifi/bcmdhd/include/bcmsrom_fmt.h new file mode 100644 index 0000000000000..a40bd569da34e --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmsrom_fmt.h @@ -0,0 +1,965 @@ +/* + * SROM format definition. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsrom_fmt.h 553280 2015-04-29 07:55:29Z $ + */ + +#ifndef _bcmsrom_fmt_h_ +#define _bcmsrom_fmt_h_ + +#define SROM_MAXREV 13 /* max revision supported by driver */ + +/* Maximum srom: 12 Kilobits == 1536 bytes */ + +#define SROM_MAX 1536 +#define SROM_MAXW 594 + +#ifdef LARGE_NVRAM_MAXSZ +#define VARS_MAX LARGE_NVRAM_MAXSZ +#else +#define VARS_MAX 4096 +#endif /* LARGE_NVRAM_MAXSZ */ + +/* PCI fields */ +#define PCI_F0DEVID 48 + + +#define SROM_WORDS 64 + +#define SROM3_SWRGN_OFF 28 /* s/w region offset in words */ + +#define SROM_SSID 2 +#define SROM_SVID 3 + +#define SROM_WL1LHMAXP 29 + +#define SROM_WL1LPAB0 30 +#define SROM_WL1LPAB1 31 +#define SROM_WL1LPAB2 32 + +#define SROM_WL1HPAB0 33 +#define SROM_WL1HPAB1 34 +#define SROM_WL1HPAB2 35 + +#define SROM_MACHI_IL0 36 +#define SROM_MACMID_IL0 37 +#define SROM_MACLO_IL0 38 +#define SROM_MACHI_ET0 39 +#define SROM_MACMID_ET0 40 +#define SROM_MACLO_ET0 41 +#define SROM_MACHI_ET1 42 +#define SROM_MACMID_ET1 43 +#define SROM_MACLO_ET1 44 +#define SROM3_MACHI 37 +#define SROM3_MACMID 38 +#define SROM3_MACLO 39 + +#define SROM_BXARSSI2G 40 +#define SROM_BXARSSI5G 41 + +#define SROM_TRI52G 42 +#define SROM_TRI5GHL 43 + +#define SROM_RXPO52G 45 + +#define SROM2_ENETPHY 45 + +#define SROM_AABREV 46 +/* Fields in AABREV */ +#define SROM_BR_MASK 0x00ff +#define SROM_CC_MASK 0x0f00 +#define SROM_CC_SHIFT 8 +#define SROM_AA0_MASK 0x3000 +#define SROM_AA0_SHIFT 12 +#define SROM_AA1_MASK 0xc000 +#define SROM_AA1_SHIFT 14 + +#define SROM_WL0PAB0 47 +#define SROM_WL0PAB1 48 +#define SROM_WL0PAB2 49 + +#define SROM_LEDBH10 50 +#define SROM_LEDBH32 51 + +#define SROM_WL10MAXP 52 + +#define SROM_WL1PAB0 53 +#define SROM_WL1PAB1 54 +#define SROM_WL1PAB2 55 + +#define SROM_ITT 56 + +#define SROM_BFL 57 +#define SROM_BFL2 28 +#define SROM3_BFL2 61 + +#define SROM_AG10 58 + +#define SROM_CCODE 59 + +#define SROM_OPO 60 + +#define SROM3_LEDDC 62 + +#define SROM_CRCREV 63 + +/* SROM Rev 4: Reallocate the 
software part of the srom to accomodate + * MIMO features. It assumes up to two PCIE functions and 440 bytes + * of useable srom i.e. the useable storage in chips with OTP that + * implements hardware redundancy. + */ + +#define SROM4_WORDS 220 + +#define SROM4_SIGN 32 +#define SROM4_SIGNATURE 0x5372 + +#define SROM4_BREV 33 + +#define SROM4_BFL0 34 +#define SROM4_BFL1 35 +#define SROM4_BFL2 36 +#define SROM4_BFL3 37 +#define SROM5_BFL0 37 +#define SROM5_BFL1 38 +#define SROM5_BFL2 39 +#define SROM5_BFL3 40 + +#define SROM4_MACHI 38 +#define SROM4_MACMID 39 +#define SROM4_MACLO 40 +#define SROM5_MACHI 41 +#define SROM5_MACMID 42 +#define SROM5_MACLO 43 + +#define SROM4_CCODE 41 +#define SROM4_REGREV 42 +#define SROM5_CCODE 34 +#define SROM5_REGREV 35 + +#define SROM4_LEDBH10 43 +#define SROM4_LEDBH32 44 +#define SROM5_LEDBH10 59 +#define SROM5_LEDBH32 60 + +#define SROM4_LEDDC 45 +#define SROM5_LEDDC 45 + +#define SROM4_AA 46 +#define SROM4_AA2G_MASK 0x00ff +#define SROM4_AA2G_SHIFT 0 +#define SROM4_AA5G_MASK 0xff00 +#define SROM4_AA5G_SHIFT 8 + +#define SROM4_AG10 47 +#define SROM4_AG32 48 + +#define SROM4_TXPID2G 49 +#define SROM4_TXPID5G 51 +#define SROM4_TXPID5GL 53 +#define SROM4_TXPID5GH 55 + +#define SROM4_TXRXC 61 +#define SROM4_TXCHAIN_MASK 0x000f +#define SROM4_TXCHAIN_SHIFT 0 +#define SROM4_RXCHAIN_MASK 0x00f0 +#define SROM4_RXCHAIN_SHIFT 4 +#define SROM4_SWITCH_MASK 0xff00 +#define SROM4_SWITCH_SHIFT 8 + + +/* Per-path fields */ +#define MAX_PATH_SROM 4 +#define SROM4_PATH0 64 +#define SROM4_PATH1 87 +#define SROM4_PATH2 110 +#define SROM4_PATH3 133 + +#define SROM4_2G_ITT_MAXP 0 +#define SROM4_2G_PA 1 +#define SROM4_5G_ITT_MAXP 5 +#define SROM4_5GLH_MAXP 6 +#define SROM4_5G_PA 7 +#define SROM4_5GL_PA 11 +#define SROM4_5GH_PA 15 + +/* Fields in the ITT_MAXP and 5GLH_MAXP words */ +#define B2G_MAXP_MASK 0xff +#define B2G_ITT_SHIFT 8 +#define B5G_MAXP_MASK 0xff +#define B5G_ITT_SHIFT 8 +#define B5GH_MAXP_MASK 0xff +#define B5GL_MAXP_SHIFT 8 + +/* All the 
miriad power offsets */ +#define SROM4_2G_CCKPO 156 +#define SROM4_2G_OFDMPO 157 +#define SROM4_5G_OFDMPO 159 +#define SROM4_5GL_OFDMPO 161 +#define SROM4_5GH_OFDMPO 163 +#define SROM4_2G_MCSPO 165 +#define SROM4_5G_MCSPO 173 +#define SROM4_5GL_MCSPO 181 +#define SROM4_5GH_MCSPO 189 +#define SROM4_CDDPO 197 +#define SROM4_STBCPO 198 +#define SROM4_BW40PO 199 +#define SROM4_BWDUPPO 200 + +#define SROM4_CRCREV 219 + + +/* SROM Rev 8: Make space for a 48word hardware header for PCIe rev >= 6. + * This is acombined srom for both MIMO and SISO boards, usable in + * the .130 4Kilobit OTP with hardware redundancy. + */ + +#define SROM8_SIGN 64 + +#define SROM8_BREV 65 + +#define SROM8_BFL0 66 +#define SROM8_BFL1 67 +#define SROM8_BFL2 68 +#define SROM8_BFL3 69 + +#define SROM8_MACHI 70 +#define SROM8_MACMID 71 +#define SROM8_MACLO 72 + +#define SROM8_CCODE 73 +#define SROM8_REGREV 74 + +#define SROM8_LEDBH10 75 +#define SROM8_LEDBH32 76 + +#define SROM8_LEDDC 77 + +#define SROM8_AA 78 + +#define SROM8_AG10 79 +#define SROM8_AG32 80 + +#define SROM8_TXRXC 81 + +#define SROM8_BXARSSI2G 82 +#define SROM8_BXARSSI5G 83 +#define SROM8_TRI52G 84 +#define SROM8_TRI5GHL 85 +#define SROM8_RXPO52G 86 + +#define SROM8_FEM2G 87 +#define SROM8_FEM5G 88 +#define SROM8_FEM_ANTSWLUT_MASK 0xf800 +#define SROM8_FEM_ANTSWLUT_SHIFT 11 +#define SROM8_FEM_TR_ISO_MASK 0x0700 +#define SROM8_FEM_TR_ISO_SHIFT 8 +#define SROM8_FEM_PDET_RANGE_MASK 0x00f8 +#define SROM8_FEM_PDET_RANGE_SHIFT 3 +#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006 +#define SROM8_FEM_EXTPA_GAIN_SHIFT 1 +#define SROM8_FEM_TSSIPOS_MASK 0x0001 +#define SROM8_FEM_TSSIPOS_SHIFT 0 + +#define SROM8_THERMAL 89 + +/* Temp sense related entries */ +#define SROM8_MPWR_RAWTS 90 +#define SROM8_TS_SLP_OPT_CORRX 91 +/* FOC: freiquency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */ +#define SROM8_FOC_HWIQ_IQSWP 92 + +#define SROM8_EXTLNAGAIN 93 + +/* Temperature delta for PHY calibration */ +#define SROM8_PHYCAL_TEMPDELTA 
94 + +/* Measured power 1 & 2, 0-13 bits at offset 95, MSB 2 bits are unused for now. */ +#define SROM8_MPWR_1_AND_2 95 + + +/* Per-path offsets & fields */ +#define SROM8_PATH0 96 +#define SROM8_PATH1 112 +#define SROM8_PATH2 128 +#define SROM8_PATH3 144 + +#define SROM8_2G_ITT_MAXP 0 +#define SROM8_2G_PA 1 +#define SROM8_5G_ITT_MAXP 4 +#define SROM8_5GLH_MAXP 5 +#define SROM8_5G_PA 6 +#define SROM8_5GL_PA 9 +#define SROM8_5GH_PA 12 + +/* All the miriad power offsets */ +#define SROM8_2G_CCKPO 160 + +#define SROM8_2G_OFDMPO 161 +#define SROM8_5G_OFDMPO 163 +#define SROM8_5GL_OFDMPO 165 +#define SROM8_5GH_OFDMPO 167 + +#define SROM8_2G_MCSPO 169 +#define SROM8_5G_MCSPO 177 +#define SROM8_5GL_MCSPO 185 +#define SROM8_5GH_MCSPO 193 + +#define SROM8_CDDPO 201 +#define SROM8_STBCPO 202 +#define SROM8_BW40PO 203 +#define SROM8_BWDUPPO 204 + +/* SISO PA parameters are in the path0 spaces */ +#define SROM8_SISO 96 + +/* Legacy names for SISO PA paramters */ +#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP) +#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA) +#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1) +#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2) +#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP) +#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP) +#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA) +#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1) +#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2) +#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA) +#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1) +#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2) +#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA) +#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1) +#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2) + +#define SROM8_CRCREV 219 + +/* SROM REV 9 */ +#define SROM9_2GPO_CCKBW20 160 +#define SROM9_2GPO_CCKBW20UL 161 +#define SROM9_2GPO_LOFDMBW20 162 +#define SROM9_2GPO_LOFDMBW20UL 164 + +#define 
SROM9_5GLPO_LOFDMBW20 166 +#define SROM9_5GLPO_LOFDMBW20UL 168 +#define SROM9_5GMPO_LOFDMBW20 170 +#define SROM9_5GMPO_LOFDMBW20UL 172 +#define SROM9_5GHPO_LOFDMBW20 174 +#define SROM9_5GHPO_LOFDMBW20UL 176 + +#define SROM9_2GPO_MCSBW20 178 +#define SROM9_2GPO_MCSBW20UL 180 +#define SROM9_2GPO_MCSBW40 182 + +#define SROM9_5GLPO_MCSBW20 184 +#define SROM9_5GLPO_MCSBW20UL 186 +#define SROM9_5GLPO_MCSBW40 188 +#define SROM9_5GMPO_MCSBW20 190 +#define SROM9_5GMPO_MCSBW20UL 192 +#define SROM9_5GMPO_MCSBW40 194 +#define SROM9_5GHPO_MCSBW20 196 +#define SROM9_5GHPO_MCSBW20UL 198 +#define SROM9_5GHPO_MCSBW40 200 + +#define SROM9_PO_MCS32 202 +#define SROM9_PO_LOFDM40DUP 203 +#define SROM9_EU_EDCRSTH 204 +#define SROM10_EU_EDCRSTH 204 +#define SROM8_RXGAINERR_2G 205 +#define SROM8_RXGAINERR_5GL 206 +#define SROM8_RXGAINERR_5GM 207 +#define SROM8_RXGAINERR_5GH 208 +#define SROM8_RXGAINERR_5GU 209 +#define SROM8_SUBBAND_PPR 210 +#define SROM8_PCIEINGRESS_WAR 211 +#define SROM8_EU_EDCRSTH 212 +#define SROM9_SAR 212 + +#define SROM8_NOISELVL_2G 213 +#define SROM8_NOISELVL_5GL 214 +#define SROM8_NOISELVL_5GM 215 +#define SROM8_NOISELVL_5GH 216 +#define SROM8_NOISELVL_5GU 217 +#define SROM8_NOISECALOFFSET 218 + +#define SROM9_REV_CRC 219 + +#define SROM10_CCKPWROFFSET 218 +#define SROM10_SIGN 219 +#define SROM10_SWCTRLMAP_2G 220 +#define SROM10_CRCREV 229 + +#define SROM10_WORDS 230 +#define SROM10_SIGNATURE SROM4_SIGNATURE + + +/* SROM REV 11 */ +#define SROM11_BREV 65 + +#define SROM11_BFL0 66 +#define SROM11_BFL1 67 +#define SROM11_BFL2 68 +#define SROM11_BFL3 69 +#define SROM11_BFL4 70 +#define SROM11_BFL5 71 + +#define SROM11_MACHI 72 +#define SROM11_MACMID 73 +#define SROM11_MACLO 74 + +#define SROM11_CCODE 75 +#define SROM11_REGREV 76 + +#define SROM11_LEDBH10 77 +#define SROM11_LEDBH32 78 + +#define SROM11_LEDDC 79 + +#define SROM11_AA 80 + +#define SROM11_AGBG10 81 +#define SROM11_AGBG2A0 82 +#define SROM11_AGA21 83 + +#define SROM11_TXRXC 84 + +#define SROM11_FEM_CFG1 
85 +#define SROM11_FEM_CFG2 86 + +/* Masks and offsets for FEM_CFG */ +#define SROM11_FEMCTRL_MASK 0xf800 +#define SROM11_FEMCTRL_SHIFT 11 +#define SROM11_PAPDCAP_MASK 0x0400 +#define SROM11_PAPDCAP_SHIFT 10 +#define SROM11_TWORANGETSSI_MASK 0x0200 +#define SROM11_TWORANGETSSI_SHIFT 9 +#define SROM11_PDGAIN_MASK 0x01f0 +#define SROM11_PDGAIN_SHIFT 4 +#define SROM11_EPAGAIN_MASK 0x000e +#define SROM11_EPAGAIN_SHIFT 1 +#define SROM11_TSSIPOSSLOPE_MASK 0x0001 +#define SROM11_TSSIPOSSLOPE_SHIFT 0 +#define SROM11_GAINCTRLSPH_MASK 0xf800 +#define SROM11_GAINCTRLSPH_SHIFT 11 + +#define SROM11_THERMAL 87 +#define SROM11_MPWR_RAWTS 88 +#define SROM11_TS_SLP_OPT_CORRX 89 +#define SROM11_XTAL_FREQ 90 +#define SROM11_5GB0_4080_W0_A1 91 +#define SROM11_PHYCAL_TEMPDELTA 92 +#define SROM11_MPWR_1_AND_2 93 +#define SROM11_5GB0_4080_W1_A1 94 +#define SROM11_TSSIFLOOR_2G 95 +#define SROM11_TSSIFLOOR_5GL 96 +#define SROM11_TSSIFLOOR_5GM 97 +#define SROM11_TSSIFLOOR_5GH 98 +#define SROM11_TSSIFLOOR_5GU 99 + +/* Masks and offsets for Thermal parameters */ +#define SROM11_TEMPS_PERIOD_MASK 0xf0 +#define SROM11_TEMPS_PERIOD_SHIFT 4 +#define SROM11_TEMPS_HYSTERESIS_MASK 0x0f +#define SROM11_TEMPS_HYSTERESIS_SHIFT 0 +#define SROM11_TEMPCORRX_MASK 0xfc +#define SROM11_TEMPCORRX_SHIFT 2 +#define SROM11_TEMPSENSE_OPTION_MASK 0x3 +#define SROM11_TEMPSENSE_OPTION_SHIFT 0 + +#define SROM11_PDOFF_2G_40M_A0_MASK 0x000f +#define SROM11_PDOFF_2G_40M_A0_SHIFT 0 +#define SROM11_PDOFF_2G_40M_A1_MASK 0x00f0 +#define SROM11_PDOFF_2G_40M_A1_SHIFT 4 +#define SROM11_PDOFF_2G_40M_A2_MASK 0x0f00 +#define SROM11_PDOFF_2G_40M_A2_SHIFT 8 +#define SROM11_PDOFF_2G_40M_VALID_MASK 0x8000 +#define SROM11_PDOFF_2G_40M_VALID_SHIFT 15 + +#define SROM11_PDOFF_2G_40M 100 +#define SROM11_PDOFF_40M_A0 101 +#define SROM11_PDOFF_40M_A1 102 +#define SROM11_PDOFF_40M_A2 103 +#define SROM11_5GB0_4080_W2_A1 103 +#define SROM11_PDOFF_80M_A0 104 +#define SROM11_PDOFF_80M_A1 105 +#define SROM11_PDOFF_80M_A2 106 +#define 
SROM11_5GB1_4080_W0_A1 106 + +#define SROM11_SUBBAND5GVER 107 + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_11 3 +#define SROM11_PATH0 108 +#define SROM11_PATH1 128 +#define SROM11_PATH2 148 + +#define SROM11_2G_MAXP 0 +#define SROM11_5GB1_4080_PA 0 +#define SROM11_2G_PA 1 +#define SROM11_5GB2_4080_PA 2 +#define SROM11_RXGAINS1 4 +#define SROM11_RXGAINS 5 +#define SROM11_5GB3_4080_PA 5 +#define SROM11_5GB1B0_MAXP 6 +#define SROM11_5GB3B2_MAXP 7 +#define SROM11_5GB0_PA 8 +#define SROM11_5GB1_PA 11 +#define SROM11_5GB2_PA 14 +#define SROM11_5GB3_PA 17 + +/* Masks and offsets for rxgains */ +#define SROM11_RXGAINS5GTRELNABYPA_MASK 0x8000 +#define SROM11_RXGAINS5GTRELNABYPA_SHIFT 15 +#define SROM11_RXGAINS5GTRISOA_MASK 0x7800 +#define SROM11_RXGAINS5GTRISOA_SHIFT 11 +#define SROM11_RXGAINS5GELNAGAINA_MASK 0x0700 +#define SROM11_RXGAINS5GELNAGAINA_SHIFT 8 +#define SROM11_RXGAINS2GTRELNABYPA_MASK 0x0080 +#define SROM11_RXGAINS2GTRELNABYPA_SHIFT 7 +#define SROM11_RXGAINS2GTRISOA_MASK 0x0078 +#define SROM11_RXGAINS2GTRISOA_SHIFT 3 +#define SROM11_RXGAINS2GELNAGAINA_MASK 0x0007 +#define SROM11_RXGAINS2GELNAGAINA_SHIFT 0 +#define SROM11_RXGAINS5GHTRELNABYPA_MASK 0x8000 +#define SROM11_RXGAINS5GHTRELNABYPA_SHIFT 15 +#define SROM11_RXGAINS5GHTRISOA_MASK 0x7800 +#define SROM11_RXGAINS5GHTRISOA_SHIFT 11 +#define SROM11_RXGAINS5GHELNAGAINA_MASK 0x0700 +#define SROM11_RXGAINS5GHELNAGAINA_SHIFT 8 +#define SROM11_RXGAINS5GMTRELNABYPA_MASK 0x0080 +#define SROM11_RXGAINS5GMTRELNABYPA_SHIFT 7 +#define SROM11_RXGAINS5GMTRISOA_MASK 0x0078 +#define SROM11_RXGAINS5GMTRISOA_SHIFT 3 +#define SROM11_RXGAINS5GMELNAGAINA_MASK 0x0007 +#define SROM11_RXGAINS5GMELNAGAINA_SHIFT 0 + +/* Power per rate */ +#define SROM11_CCKBW202GPO 168 +#define SROM11_CCKBW20UL2GPO 169 +#define SROM11_MCSBW202GPO 170 +#define SROM11_MCSBW202GPO_1 171 +#define SROM11_MCSBW402GPO 172 +#define SROM11_MCSBW402GPO_1 173 +#define SROM11_DOT11AGOFDMHRBW202GPO 174 +#define SROM11_OFDMLRBW202GPO 175 + +#define 
SROM11_MCSBW205GLPO 176 +#define SROM11_MCSBW205GLPO_1 177 +#define SROM11_MCSBW405GLPO 178 +#define SROM11_MCSBW405GLPO_1 179 +#define SROM11_MCSBW805GLPO 180 +#define SROM11_MCSBW805GLPO_1 181 +#define SROM11_RPCAL_2G 182 +#define SROM11_RPCAL_5GL 183 +#define SROM11_MCSBW205GMPO 184 +#define SROM11_MCSBW205GMPO_1 185 +#define SROM11_MCSBW405GMPO 186 +#define SROM11_MCSBW405GMPO_1 187 +#define SROM11_MCSBW805GMPO 188 +#define SROM11_MCSBW805GMPO_1 189 +#define SROM11_RPCAL_5GM 190 +#define SROM11_RPCAL_5GH 191 +#define SROM11_MCSBW205GHPO 192 +#define SROM11_MCSBW205GHPO_1 193 +#define SROM11_MCSBW405GHPO 194 +#define SROM11_MCSBW405GHPO_1 195 +#define SROM11_MCSBW805GHPO 196 +#define SROM11_MCSBW805GHPO_1 197 +#define SROM11_RPCAL_5GU 198 +#define SROM11_PDOFF_2G_CCK 199 +#define SROM11_MCSLR5GLPO 200 +#define SROM11_MCSLR5GMPO 201 +#define SROM11_MCSLR5GHPO 202 + +#define SROM11_SB20IN40HRPO 203 +#define SROM11_SB20IN80AND160HR5GLPO 204 +#define SROM11_SB40AND80HR5GLPO 205 +#define SROM11_SB20IN80AND160HR5GMPO 206 +#define SROM11_SB40AND80HR5GMPO 207 +#define SROM11_SB20IN80AND160HR5GHPO 208 +#define SROM11_SB40AND80HR5GHPO 209 +#define SROM11_SB20IN40LRPO 210 +#define SROM11_SB20IN80AND160LR5GLPO 211 +#define SROM11_SB40AND80LR5GLPO 212 +#define SROM11_TXIDXCAP2G 212 +#define SROM11_SB20IN80AND160LR5GMPO 213 +#define SROM11_SB40AND80LR5GMPO 214 +#define SROM11_TXIDXCAP5G 214 +#define SROM11_SB20IN80AND160LR5GHPO 215 +#define SROM11_SB40AND80LR5GHPO 216 + +#define SROM11_DOT11AGDUPHRPO 217 +#define SROM11_DOT11AGDUPLRPO 218 + +/* MISC */ +#define SROM11_PCIEINGRESS_WAR 220 +#define SROM11_SAR 221 + +#define SROM11_NOISELVL_2G 222 +#define SROM11_NOISELVL_5GL 223 +#define SROM11_NOISELVL_5GM 224 +#define SROM11_NOISELVL_5GH 225 +#define SROM11_NOISELVL_5GU 226 + +#define SROM11_RXGAINERR_2G 227 +#define SROM11_RXGAINERR_5GL 228 +#define SROM11_RXGAINERR_5GM 229 +#define SROM11_RXGAINERR_5GH 230 +#define SROM11_RXGAINERR_5GU 231 + +#define SROM11_EU_EDCRSTH 232 
+#define SROM12_EU_EDCRSTH 232 + +#define SROM11_SIGN 64 +#define SROM11_CRCREV 233 + +#define SROM11_WORDS 234 +#define SROM11_SIGNATURE 0x0634 + + +/* SROM REV 12 */ +#define SROM12_SIGN 64 +#define SROM12_WORDS 512 +#define SROM12_SIGNATURE 0x8888 +#define SROM12_CRCREV 511 + +#define SROM12_BFL6 486 +#define SROM12_BFL7 487 + +#define SROM12_MCSBW205GX1PO 234 +#define SROM12_MCSBW205GX1PO_1 235 +#define SROM12_MCSBW405GX1PO 236 +#define SROM12_MCSBW405GX1PO_1 237 +#define SROM12_MCSBW805GX1PO 238 +#define SROM12_MCSBW805GX1PO_1 239 +#define SROM12_MCSLR5GX1PO 240 +#define SROM12_SB40AND80LR5GX1PO 241 +#define SROM12_SB20IN80AND160LR5GX1PO 242 +#define SROM12_SB20IN80AND160HR5GX1PO 243 +#define SROM12_SB40AND80HR5GX1PO 244 + +#define SROM12_MCSBW205GX2PO 245 +#define SROM12_MCSBW205GX2PO_1 246 +#define SROM12_MCSBW405GX2PO 247 +#define SROM12_MCSBW405GX2PO_1 248 +#define SROM12_MCSBW805GX2PO 249 +#define SROM12_MCSBW805GX2PO_1 250 +#define SROM12_MCSLR5GX2PO 251 +#define SROM12_SB40AND80LR5GX2PO 252 +#define SROM12_SB20IN80AND160LR5GX2PO 253 +#define SROM12_SB20IN80AND160HR5GX2PO 254 +#define SROM12_SB40AND80HR5GX2PO 255 + +/* MISC */ +#define SROM12_RXGAINS10 483 +#define SROM12_RXGAINS11 484 +#define SROM12_RXGAINS12 485 + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_12 3 +#define SROM12_PATH0 256 +#define SROM12_PATH1 328 +#define SROM12_PATH2 400 + +#define SROM12_5GB42G_MAXP 0 +#define SROM12_2GB0_PA 1 +#define SROM12_2GB0_PA_W0 1 +#define SROM12_2GB0_PA_W1 2 +#define SROM12_2GB0_PA_W2 3 +#define SROM12_2GB0_PA_W3 4 + +#define SROM12_RXGAINS 5 +#define SROM12_5GB1B0_MAXP 6 +#define SROM12_5GB3B2_MAXP 7 + +#define SROM12_5GB0_PA 8 +#define SROM12_5GB0_PA_W0 8 +#define SROM12_5GB0_PA_W1 9 +#define SROM12_5GB0_PA_W2 10 +#define SROM12_5GB0_PA_W3 11 + +#define SROM12_5GB1_PA 12 +#define SROM12_5GB1_PA_W0 12 +#define SROM12_5GB1_PA_W1 13 +#define SROM12_5GB1_PA_W2 14 +#define SROM12_5GB1_PA_W3 15 + +#define SROM12_5GB2_PA 16 +#define 
SROM12_5GB2_PA_W0 16 +#define SROM12_5GB2_PA_W1 17 +#define SROM12_5GB2_PA_W2 18 +#define SROM12_5GB2_PA_W3 19 + +#define SROM12_5GB3_PA 20 +#define SROM12_5GB3_PA_W0 20 +#define SROM12_5GB3_PA_W1 21 +#define SROM12_5GB3_PA_W2 22 +#define SROM12_5GB3_PA_W3 23 + +#define SROM12_5GB4_PA 24 +#define SROM12_5GB4_PA_W0 24 +#define SROM12_5GB4_PA_W1 25 +#define SROM12_5GB4_PA_W2 26 +#define SROM12_5GB4_PA_W3 27 + +#define SROM12_2G40B0_PA 28 +#define SROM12_2G40B0_PA_W0 28 +#define SROM12_2G40B0_PA_W1 29 +#define SROM12_2G40B0_PA_W2 30 +#define SROM12_2G40B0_PA_W3 31 + +#define SROM12_5G40B0_PA 32 +#define SROM12_5G40B0_PA_W0 32 +#define SROM12_5G40B0_PA_W1 33 +#define SROM12_5G40B0_PA_W2 34 +#define SROM12_5G40B0_PA_W3 35 + +#define SROM12_5G40B1_PA 36 +#define SROM12_5G40B1_PA_W0 36 +#define SROM12_5G40B1_PA_W1 37 +#define SROM12_5G40B1_PA_W2 38 +#define SROM12_5G40B1_PA_W3 39 + +#define SROM12_5G40B2_PA 40 +#define SROM12_5G40B2_PA_W0 40 +#define SROM12_5G40B2_PA_W1 41 +#define SROM12_5G40B2_PA_W2 42 +#define SROM12_5G40B2_PA_W3 43 + +#define SROM12_5G40B3_PA 44 +#define SROM12_5G40B3_PA_W0 44 +#define SROM12_5G40B3_PA_W1 45 +#define SROM12_5G40B3_PA_W2 46 +#define SROM12_5G40B3_PA_W3 47 + +#define SROM12_5G40B4_PA 48 +#define SROM12_5G40B4_PA_W0 48 +#define SROM12_5G40B4_PA_W1 49 +#define SROM12_5G40B4_PA_W2 50 +#define SROM12_5G40B4_PA_W3 51 + +#define SROM12_5G80B0_PA 52 +#define SROM12_5G80B0_PA_W0 52 +#define SROM12_5G80B0_PA_W1 53 +#define SROM12_5G80B0_PA_W2 54 +#define SROM12_5G80B0_PA_W3 55 + +#define SROM12_5G80B1_PA 56 +#define SROM12_5G80B1_PA_W0 56 +#define SROM12_5G80B1_PA_W1 57 +#define SROM12_5G80B1_PA_W2 58 +#define SROM12_5G80B1_PA_W3 59 + +#define SROM12_5G80B2_PA 60 +#define SROM12_5G80B2_PA_W0 60 +#define SROM12_5G80B2_PA_W1 61 +#define SROM12_5G80B2_PA_W2 62 +#define SROM12_5G80B2_PA_W3 63 + +#define SROM12_5G80B3_PA 64 +#define SROM12_5G80B3_PA_W0 64 +#define SROM12_5G80B3_PA_W1 65 +#define SROM12_5G80B3_PA_W2 66 +#define SROM12_5G80B3_PA_W3 67 
+ +#define SROM12_5G80B4_PA 68 +#define SROM12_5G80B4_PA_W0 68 +#define SROM12_5G80B4_PA_W1 69 +#define SROM12_5G80B4_PA_W2 70 +#define SROM12_5G80B4_PA_W3 71 + +/* PD offset */ +#define SROM12_PDOFF_2G_CCK 472 + +#define SROM12_PDOFF_20in40M_5G_B0 473 +#define SROM12_PDOFF_20in40M_5G_B1 474 +#define SROM12_PDOFF_20in40M_5G_B2 475 +#define SROM12_PDOFF_20in40M_5G_B3 476 +#define SROM12_PDOFF_20in40M_5G_B4 477 + +#define SROM12_PDOFF_40in80M_5G_B0 478 +#define SROM12_PDOFF_40in80M_5G_B1 479 +#define SROM12_PDOFF_40in80M_5G_B2 480 +#define SROM12_PDOFF_40in80M_5G_B3 481 +#define SROM12_PDOFF_40in80M_5G_B4 482 + +#define SROM12_PDOFF_20in80M_5G_B0 488 +#define SROM12_PDOFF_20in80M_5G_B1 489 +#define SROM12_PDOFF_20in80M_5G_B2 490 +#define SROM12_PDOFF_20in80M_5G_B3 491 +#define SROM12_PDOFF_20in80M_5G_B4 492 + +#define SROM13_PDOFFSET20IN40M5GCORE3 98 +#define SROM13_PDOFFSET20IN40M5GCORE3_1 99 +#define SROM13_PDOFFSET20IN80M5GCORE3 510 +#define SROM13_PDOFFSET20IN80M5GCORE3_1 511 +#define SROM13_PDOFFSET40IN80M5GCORE3 105 +#define SROM13_PDOFFSET40IN80M5GCORE3_1 106 + +#define SROM13_PDOFFSET20IN40M2G 94 +#define SROM13_PDOFFSET20IN40M2GCORE3 95 + +#define SROM12_GPDN_L 91 /* GPIO pull down bits [15:0] */ +#define SROM12_GPDN_H 233 /* GPIO pull down bits [31:16] */ + +#define SROM13_SIGN 64 +#define SROM13_WORDS 590 +#define SROM13_SIGNATURE 0x4d55 +#define SROM13_CRCREV 589 + + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_13 4 +#define SROM13_PATH0 256 +#define SROM13_PATH1 328 +#define SROM13_PATH2 400 +#define SROM13_PATH3 512 +#define SROM13_RXGAINS 5 + +#define SROM13_XTALFREQ 90 + +#define SROM13_PDOFFSET20IN40M2G 94 +#define SROM13_PDOFFSET20IN40M2GCORE3 95 +#define SROM13_SB20IN40HRLRPOX 96 + +#define SROM13_RXGAINS1CORE3 97 + +#define SROM13_PDOFFSET20IN40M5GCORE3 98 +#define SROM13_PDOFFSET20IN40M5GCORE3_1 99 + +#define SROM13_ANTGAIN_BANDBGA 100 + +#define SROM13_RXGAINS2CORE0 101 +#define SROM13_RXGAINS2CORE1 102 +#define SROM13_RXGAINS2CORE2 
103 +#define SROM13_RXGAINS2CORE3 104 + +#define SROM13_PDOFFSET40IN80M5GCORE3 105 +#define SROM13_PDOFFSET40IN80M5GCORE3_1 106 + +/* power per rate */ +#define SROM13_MCS1024QAM2GPO 108 +#define SROM13_MCS1024QAM5GLPO 109 +#define SROM13_MCS1024QAM5GLPO_1 110 +#define SROM13_MCS1024QAM5GMPO 111 +#define SROM13_MCS1024QAM5GMPO_1 112 +#define SROM13_MCS1024QAM5GHPO 113 +#define SROM13_MCS1024QAM5GHPO_1 114 +#define SROM13_MCS1024QAM5GX1PO 115 +#define SROM13_MCS1024QAM5GX1PO_1 116 +#define SROM13_MCS1024QAM5GX2PO 117 +#define SROM13_MCS1024QAM5GX2PO_1 118 + +#define SROM13_MCSBW1605GLPO 119 +#define SROM13_MCSBW1605GLPO_1 120 +#define SROM13_MCSBW1605GMPO 121 +#define SROM13_MCSBW1605GMPO_1 122 +#define SROM13_MCSBW1605GHPO 123 +#define SROM13_MCSBW1605GHPO_1 124 + +#define SROM13_MCSBW1605GX1PO 125 +#define SROM13_MCSBW1605GX1PO_1 126 +#define SROM13_MCSBW1605GX2PO 127 +#define SROM13_MCSBW1605GX2PO_1 128 + +#define SROM13_ULBPPROFFS5GB0 129 +#define SROM13_ULBPPROFFS5GB1 130 +#define SROM13_ULBPPROFFS5GB2 131 +#define SROM13_ULBPPROFFS5GB3 132 +#define SROM13_ULBPPROFFS5GB4 133 +#define SROM13_ULBPPROFFS2G 134 + +#define SROM13_MCS8POEXP 135 +#define SROM13_MCS8POEXP_1 136 +#define SROM13_MCS9POEXP 137 +#define SROM13_MCS9POEXP_1 138 +#define SROM13_MCS10POEXP 139 +#define SROM13_MCS10POEXP_1 140 +#define SROM13_MCS11POEXP 141 +#define SROM13_MCS11POEXP_1 142 +#define SROM13_ULBPDOFFS5GB0A0 143 +#define SROM13_ULBPDOFFS5GB0A1 144 +#define SROM13_ULBPDOFFS5GB0A2 145 +#define SROM13_ULBPDOFFS5GB0A3 146 +#define SROM13_ULBPDOFFS5GB1A0 147 +#define SROM13_ULBPDOFFS5GB1A1 148 +#define SROM13_ULBPDOFFS5GB1A2 149 +#define SROM13_ULBPDOFFS5GB1A3 150 +#define SROM13_ULBPDOFFS5GB2A0 151 +#define SROM13_ULBPDOFFS5GB2A1 152 +#define SROM13_ULBPDOFFS5GB2A2 153 +#define SROM13_ULBPDOFFS5GB2A3 154 +#define SROM13_ULBPDOFFS5GB3A0 155 +#define SROM13_ULBPDOFFS5GB3A1 156 +#define SROM13_ULBPDOFFS5GB3A2 157 +#define SROM13_ULBPDOFFS5GB3A3 158 +#define SROM13_ULBPDOFFS5GB4A0 159 
+#define SROM13_ULBPDOFFS5GB4A1 160 +#define SROM13_ULBPDOFFS5GB4A2 161 +#define SROM13_ULBPDOFFS5GB4A3 162 +#define SROM13_ULBPDOFFS2GA0 163 +#define SROM13_ULBPDOFFS2GA1 164 +#define SROM13_ULBPDOFFS2GA2 165 +#define SROM13_ULBPDOFFS2GA3 166 + +#define SROM13_RPCAL5GB4 199 + +#define SROM13_EU_EDCRSTH 232 + +#define SROM13_SWCTRLMAP4_CFG 493 +#define SROM13_SWCTRLMAP4_TX2G_FEM3TO0 494 +#define SROM13_SWCTRLMAP4_RX2G_FEM3TO0 495 +#define SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0 496 +#define SROM13_SWCTRLMAP4_MISC2G_FEM3TO0 497 +#define SROM13_SWCTRLMAP4_TX5G_FEM3TO0 498 +#define SROM13_SWCTRLMAP4_RX5G_FEM3TO0 499 +#define SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0 500 +#define SROM13_SWCTRLMAP4_MISC5G_FEM3TO0 501 +#define SROM13_SWCTRLMAP4_TX2G_FEM7TO4 502 +#define SROM13_SWCTRLMAP4_RX2G_FEM7TO4 503 +#define SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4 504 +#define SROM13_SWCTRLMAP4_MISC2G_FEM7TO4 505 +#define SROM13_SWCTRLMAP4_TX5G_FEM7TO4 506 +#define SROM13_SWCTRLMAP4_RX5G_FEM7TO4 507 +#define SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4 508 +#define SROM13_SWCTRLMAP4_MISC5G_FEM7TO4 509 + +#define SROM13_PDOFFSET20IN80M5GCORE3 510 +#define SROM13_PDOFFSET20IN80M5GCORE3_1 511 + +#define SROM13_NOISELVLCORE3 584 +#define SROM13_NOISELVLCORE3_1 585 +#define SROM13_RXGAINERRCORE3 586 +#define SROM13_RXGAINERRCORE3_1 587 + + +typedef struct { + uint8 tssipos; /* TSSI positive slope, 1: positive, 0: negative */ + uint8 extpagain; /* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */ + uint8 pdetrange; /* support 32 combinations of different Pdet dynamic ranges */ + uint8 triso; /* TR switch isolation */ + uint8 antswctrllut; /* antswctrl lookup table configuration: 32 possible choices */ +} srom_fem_t; + +#endif /* _bcmsrom_fmt_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/bcmsrom_tbl.h b/drivers/amlogic/wifi/bcmdhd/include/bcmsrom_tbl.h new file mode 100644 index 0000000000000..f2775fbba1c59 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmsrom_tbl.h @@ -0,0 +1,1400 @@ +/* + * 
Table that encodes the srom formats for PCI/PCIe NICs. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsrom_tbl.h 553564 2015-04-30 06:19:30Z $ + */ + +#ifndef _bcmsrom_tbl_h_ +#define _bcmsrom_tbl_h_ + +#include "sbpcmcia.h" +#include "wlioctl.h" +#include + +typedef struct { + const char *name; + uint32 revmask; + uint32 flags; + uint16 off; + uint16 mask; +} sromvar_t; + +#define SRFL_MORE 1 /* value continues as described by the next entry */ +#define SRFL_NOFFS 2 /* value bits can't be all one's */ +#define SRFL_PRHEX 4 /* value is in hexdecimal format */ +#define SRFL_PRSIGN 8 /* value is in signed decimal format */ +#define SRFL_CCODE 0x10 /* value is in country code format */ +#define SRFL_ETHADDR 0x20 /* value is an Ethernet address */ +#define SRFL_LEDDC 0x40 /* value is an LED duty cycle */ +#define SRFL_NOVAR 0x80 /* do not generate a nvram param, entry is for mfgc */ +#define SRFL_ARRAY 0x100 /* value is in an array. All elements EXCEPT FOR THE LAST + * ONE in the array should have this flag set. + */ + + +#define SROM_DEVID_PCIE 48 + +/** + * Assumptions: + * - Ethernet address spans across 3 consecutive words + * + * Table rules: + * - Add multiple entries next to each other if a value spans across multiple words + * (even multiple fields in the same word) with each entry except the last having + * it's SRFL_MORE bit set. + * - Ethernet address entry does not follow above rule and must not have SRFL_MORE + * bit set. Its SRFL_ETHADDR bit implies it takes multiple words. + * - The last entry's name field must be NULL to indicate the end of the table. Other + * entries must have non-NULL name. 
+ */ +static const sromvar_t pci_sromvars[] = { +/* name revmask flags off mask */ +#if defined(CABLECPE) + {"devid", 0xffffff00, SRFL_PRHEX, PCI_F0DEVID, 0xffff}, +#elif defined(BCMPCIEDEV) && defined(BCMPCIEDEV_ENABLED) + {"devid", 0xffffff00, SRFL_PRHEX, SROM_DEVID_PCIE, 0xffff}, +#else + {"devid", 0xffffff00, SRFL_PRHEX|SRFL_NOVAR, PCI_F0DEVID, 0xffff}, +#endif + {"boardrev", 0x0000000e, SRFL_PRHEX, SROM_AABREV, SROM_BR_MASK}, + {"boardrev", 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff}, + {"boardrev", 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff}, + {"boardflags", 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff}, + {"boardflags", 0x00000004, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff}, + {"", 0, 0, SROM_BFL2, 0xffff}, + {"boardflags", 0x00000008, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff}, + {"", 0, 0, SROM3_BFL2, 0xffff}, + {"boardflags", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL0, 0xffff}, + {"", 0, 0, SROM4_BFL1, 0xffff}, + {"boardflags", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL0, 0xffff}, + {"", 0, 0, SROM5_BFL1, 0xffff}, + {"boardflags", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL0, 0xffff}, + {"", 0, 0, SROM8_BFL1, 0xffff}, + {"boardflags2", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL2, 0xffff}, + {"", 0, 0, SROM4_BFL3, 0xffff}, + {"boardflags2", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL2, 0xffff}, + {"", 0, 0, SROM5_BFL3, 0xffff}, + {"boardflags2", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL2, 0xffff}, + {"", 0, 0, SROM8_BFL3, 0xffff}, + {"boardtype", 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff}, + {"subvid", 0xfffffffc, SRFL_PRHEX, SROM_SVID, 0xffff}, + {"boardnum", 0x00000006, 0, SROM_MACLO_IL0, 0xffff}, + {"boardnum", 0x00000008, 0, SROM3_MACLO, 0xffff}, + {"boardnum", 0x00000010, 0, SROM4_MACLO, 0xffff}, + {"boardnum", 0x000000e0, 0, SROM5_MACLO, 0xffff}, + {"boardnum", 0x00000700, 0, SROM8_MACLO, 0xffff}, + {"cc", 0x00000002, 0, SROM_AABREV, SROM_CC_MASK}, + {"regrev", 0x00000008, 0, SROM_OPO, 0xff00}, + {"regrev", 0x00000010, 0, SROM4_REGREV, 0x00ff}, + 
{"regrev", 0x000000e0, 0, SROM5_REGREV, 0x00ff}, + {"regrev", 0x00000700, 0, SROM8_REGREV, 0x00ff}, + {"ledbh0", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff}, + {"ledbh1", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00}, + {"ledbh2", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff}, + {"ledbh3", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00}, + {"ledbh0", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff}, + {"ledbh1", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00}, + {"ledbh2", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff}, + {"ledbh3", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00}, + {"ledbh0", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff}, + {"ledbh1", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00}, + {"ledbh2", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff}, + {"ledbh3", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00}, + {"ledbh0", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff}, + {"ledbh1", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0xff00}, + {"ledbh2", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff}, + {"ledbh3", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0xff00}, + {"pa0b0", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff}, + {"pa0b1", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff}, + {"pa0b2", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff}, + {"pa0itssit", 0x0000000e, 0, SROM_ITT, 0x00ff}, + {"pa0maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0x00ff}, + {"pa0b0", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff}, + {"pa0b1", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff}, + {"pa0b2", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff}, + {"pa0itssit", 0x00000700, 0, SROM8_W0_ITTMAXP, 0xff00}, + {"pa0maxpwr", 0x00000700, 0, SROM8_W0_ITTMAXP, 0x00ff}, + {"opo", 0x0000000c, 0, SROM_OPO, 0x00ff}, + {"opo", 0x00000700, 0, SROM8_2G_OFDMPO, 0x00ff}, + {"aa2g", 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK}, + {"aa2g", 0x000000f0, 0, SROM4_AA, 0x00ff}, + {"aa2g", 0x00000700, 0, SROM8_AA, 0x00ff}, + {"aa5g", 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK}, + {"aa5g", 0x000000f0, 0, SROM4_AA, 0xff00}, + 
{"aa5g", 0x00000700, 0, SROM8_AA, 0xff00}, + {"ag0", 0x0000000e, 0, SROM_AG10, 0x00ff}, + {"ag1", 0x0000000e, 0, SROM_AG10, 0xff00}, + {"ag0", 0x000000f0, 0, SROM4_AG10, 0x00ff}, + {"ag1", 0x000000f0, 0, SROM4_AG10, 0xff00}, + {"ag2", 0x000000f0, 0, SROM4_AG32, 0x00ff}, + {"ag3", 0x000000f0, 0, SROM4_AG32, 0xff00}, + {"ag0", 0x00000700, 0, SROM8_AG10, 0x00ff}, + {"ag1", 0x00000700, 0, SROM8_AG10, 0xff00}, + {"ag2", 0x00000700, 0, SROM8_AG32, 0x00ff}, + {"ag3", 0x00000700, 0, SROM8_AG32, 0xff00}, + {"pa1b0", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff}, + {"pa1b1", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff}, + {"pa1b2", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff}, + {"pa1lob0", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff}, + {"pa1lob1", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff}, + {"pa1lob2", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff}, + {"pa1hib0", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff}, + {"pa1hib1", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff}, + {"pa1hib2", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff}, + {"pa1itssit", 0x0000000e, 0, SROM_ITT, 0xff00}, + {"pa1maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0xff00}, + {"pa1lomaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00}, + {"pa1himaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff}, + {"pa1b0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff}, + {"pa1b1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff}, + {"pa1b2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff}, + {"pa1lob0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_LC, 0xffff}, + {"pa1lob1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_LC, 0xffff}, + {"pa1lob2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_LC, 0xffff}, + {"pa1hib0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_HC, 0xffff}, + {"pa1hib1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_HC, 0xffff}, + {"pa1hib2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_HC, 0xffff}, + {"pa1itssit", 0x00000700, 0, SROM8_W1_ITTMAXP, 0xff00}, + {"pa1maxpwr", 0x00000700, 0, SROM8_W1_ITTMAXP, 0x00ff}, + {"pa1lomaxpwr", 0x00000700, 0, 
SROM8_W1_MAXP_LCHC, 0xff00}, + {"pa1himaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0x00ff}, + {"bxa2g", 0x00000008, 0, SROM_BXARSSI2G, 0x1800}, + {"rssisav2g", 0x00000008, 0, SROM_BXARSSI2G, 0x0700}, + {"rssismc2g", 0x00000008, 0, SROM_BXARSSI2G, 0x00f0}, + {"rssismf2g", 0x00000008, 0, SROM_BXARSSI2G, 0x000f}, + {"bxa2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x1800}, + {"rssisav2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x0700}, + {"rssismc2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x00f0}, + {"rssismf2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x000f}, + {"bxa5g", 0x00000008, 0, SROM_BXARSSI5G, 0x1800}, + {"rssisav5g", 0x00000008, 0, SROM_BXARSSI5G, 0x0700}, + {"rssismc5g", 0x00000008, 0, SROM_BXARSSI5G, 0x00f0}, + {"rssismf5g", 0x00000008, 0, SROM_BXARSSI5G, 0x000f}, + {"bxa5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x1800}, + {"rssisav5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x0700}, + {"rssismc5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x00f0}, + {"rssismf5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x000f}, + {"tri2g", 0x00000008, 0, SROM_TRI52G, 0x00ff}, + {"tri5g", 0x00000008, 0, SROM_TRI52G, 0xff00}, + {"tri5gl", 0x00000008, 0, SROM_TRI5GHL, 0x00ff}, + {"tri5gh", 0x00000008, 0, SROM_TRI5GHL, 0xff00}, + {"tri2g", 0x00000700, 0, SROM8_TRI52G, 0x00ff}, + {"tri5g", 0x00000700, 0, SROM8_TRI52G, 0xff00}, + {"tri5gl", 0x00000700, 0, SROM8_TRI5GHL, 0x00ff}, + {"tri5gh", 0x00000700, 0, SROM8_TRI5GHL, 0xff00}, + {"rxpo2g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff}, + {"rxpo5g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00}, + {"rxpo2g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff}, + {"rxpo5g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00}, + {"txchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_SWITCH_MASK}, + {"txchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_RXCHAIN_MASK}, + 
{"antswitch", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_SWITCH_MASK}, + {"tssipos2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TSSIPOS_MASK}, + {"extpagain2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_EXTPA_GAIN_MASK}, + {"pdetrange2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_PDET_RANGE_MASK}, + {"triso2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TR_ISO_MASK}, + {"antswctl2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_ANTSWLUT_MASK}, + {"tssipos5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TSSIPOS_MASK}, + {"extpagain5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_EXTPA_GAIN_MASK}, + {"pdetrange5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_PDET_RANGE_MASK}, + {"triso5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TR_ISO_MASK}, + {"antswctl5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_ANTSWLUT_MASK}, + {"txpid2ga0", 0x000000f0, 0, SROM4_TXPID2G, 0x00ff}, + {"txpid2ga1", 0x000000f0, 0, SROM4_TXPID2G, 0xff00}, + {"txpid2ga2", 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff}, + {"txpid2ga3", 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00}, + {"txpid5ga0", 0x000000f0, 0, SROM4_TXPID5G, 0x00ff}, + {"txpid5ga1", 0x000000f0, 0, SROM4_TXPID5G, 0xff00}, + {"txpid5ga2", 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff}, + {"txpid5ga3", 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00}, + {"txpid5gla0", 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff}, + {"txpid5gla1", 0x000000f0, 0, SROM4_TXPID5GL, 0xff00}, + {"txpid5gla2", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff}, + {"txpid5gla3", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00}, + {"txpid5gha0", 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff}, + {"txpid5gha1", 0x000000f0, 0, SROM4_TXPID5GH, 0xff00}, + {"txpid5gha2", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff}, + {"txpid5gha3", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00}, + + {"ccode", 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff}, + {"ccode", 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff}, + {"ccode", 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff}, + {"ccode", 0x00000700, SRFL_CCODE, SROM8_CCODE, 0xffff}, + {"macaddr", 0x00000700, SRFL_ETHADDR, SROM8_MACHI, 
0xffff}, + {"macaddr", 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff}, + {"macaddr", 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff}, + {"macaddr", 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff}, + {"il0macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0, 0xffff}, + {"et1macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1, 0xffff}, + {"leddc", 0x00000700, SRFL_NOFFS|SRFL_LEDDC, SROM8_LEDDC, 0xffff}, + {"leddc", 0x000000e0, SRFL_NOFFS|SRFL_LEDDC, SROM5_LEDDC, 0xffff}, + {"leddc", 0x00000010, SRFL_NOFFS|SRFL_LEDDC, SROM4_LEDDC, 0xffff}, + {"leddc", 0x00000008, SRFL_NOFFS|SRFL_LEDDC, SROM3_LEDDC, 0xffff}, + + {"tempthresh", 0x00000700, 0, SROM8_THERMAL, 0xff00}, + {"tempoffset", 0x00000700, 0, SROM8_THERMAL, 0x00ff}, + {"rawtempsense", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0x01ff}, + {"measpower", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0xfe00}, + {"tempsense_slope", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x00ff}, + {"tempcorrx", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0xfc00}, + {"tempsense_option", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x0300}, + {"freqoffset_corr", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x000f}, + {"iqcal_swp_dis", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0010}, + {"hw_iqcal_en", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0020}, + {"elna2g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0x00ff}, + {"elna5g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0xff00}, + {"phycal_tempdelta", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x00ff}, + {"temps_period", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x0f00}, + {"temps_hysteresis", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0xf000}, + {"measpower1", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x007f}, + {"measpower2", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x3f80}, + + {"cck2gpo", 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff}, + {"cck2gpo", 0x00000100, 0, SROM8_2G_CCKPO, 0xffff}, + {"ofdm2gpo", 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_2G_OFDMPO + 1, 0xffff}, + 
{"ofdm5gpo", 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5G_OFDMPO + 1, 0xffff}, + {"ofdm5glpo", 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff}, + {"ofdm5ghpo", 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff}, + {"ofdm2gpo", 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_2G_OFDMPO + 1, 0xffff}, + {"ofdm5gpo", 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5G_OFDMPO + 1, 0xffff}, + {"ofdm5glpo", 0x00000100, SRFL_MORE, SROM8_5GL_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff}, + {"ofdm5ghpo", 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff}, + {"mcs2gpo0", 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff}, + {"mcs2gpo1", 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff}, + {"mcs2gpo2", 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff}, + {"mcs2gpo3", 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff}, + {"mcs2gpo4", 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff}, + {"mcs2gpo5", 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff}, + {"mcs2gpo6", 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff}, + {"mcs2gpo7", 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff}, + {"mcs5gpo0", 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff}, + {"mcs5gpo1", 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff}, + {"mcs5gpo2", 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff}, + {"mcs5gpo3", 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff}, + {"mcs5gpo4", 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff}, + {"mcs5gpo5", 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff}, + {"mcs5gpo6", 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff}, + {"mcs5gpo7", 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff}, + {"mcs5glpo0", 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff}, + {"mcs5glpo1", 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff}, + {"mcs5glpo2", 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff}, + {"mcs5glpo3", 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff}, + {"mcs5glpo4", 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff}, + 
{"mcs5glpo5", 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff}, + {"mcs5glpo6", 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff}, + {"mcs5glpo7", 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff}, + {"mcs5ghpo0", 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff}, + {"mcs5ghpo1", 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff}, + {"mcs5ghpo2", 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff}, + {"mcs5ghpo3", 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff}, + {"mcs5ghpo4", 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff}, + {"mcs5ghpo5", 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff}, + {"mcs5ghpo6", 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff}, + {"mcs5ghpo7", 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff}, + {"mcs2gpo0", 0x00000100, 0, SROM8_2G_MCSPO, 0xffff}, + {"mcs2gpo1", 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff}, + {"mcs2gpo2", 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff}, + {"mcs2gpo3", 0x00000100, 0, SROM8_2G_MCSPO + 3, 0xffff}, + {"mcs2gpo4", 0x00000100, 0, SROM8_2G_MCSPO + 4, 0xffff}, + {"mcs2gpo5", 0x00000100, 0, SROM8_2G_MCSPO + 5, 0xffff}, + {"mcs2gpo6", 0x00000100, 0, SROM8_2G_MCSPO + 6, 0xffff}, + {"mcs2gpo7", 0x00000100, 0, SROM8_2G_MCSPO + 7, 0xffff}, + {"mcs5gpo0", 0x00000100, 0, SROM8_5G_MCSPO, 0xffff}, + {"mcs5gpo1", 0x00000100, 0, SROM8_5G_MCSPO + 1, 0xffff}, + {"mcs5gpo2", 0x00000100, 0, SROM8_5G_MCSPO + 2, 0xffff}, + {"mcs5gpo3", 0x00000100, 0, SROM8_5G_MCSPO + 3, 0xffff}, + {"mcs5gpo4", 0x00000100, 0, SROM8_5G_MCSPO + 4, 0xffff}, + {"mcs5gpo5", 0x00000100, 0, SROM8_5G_MCSPO + 5, 0xffff}, + {"mcs5gpo6", 0x00000100, 0, SROM8_5G_MCSPO + 6, 0xffff}, + {"mcs5gpo7", 0x00000100, 0, SROM8_5G_MCSPO + 7, 0xffff}, + {"mcs5glpo0", 0x00000100, 0, SROM8_5GL_MCSPO, 0xffff}, + {"mcs5glpo1", 0x00000100, 0, SROM8_5GL_MCSPO + 1, 0xffff}, + {"mcs5glpo2", 0x00000100, 0, SROM8_5GL_MCSPO + 2, 0xffff}, + {"mcs5glpo3", 0x00000100, 0, SROM8_5GL_MCSPO + 3, 0xffff}, + {"mcs5glpo4", 0x00000100, 0, SROM8_5GL_MCSPO + 4, 0xffff}, + {"mcs5glpo5", 0x00000100, 0, SROM8_5GL_MCSPO + 5, 0xffff}, + {"mcs5glpo6", 0x00000100, 0, 
SROM8_5GL_MCSPO + 6, 0xffff}, + {"mcs5glpo7", 0x00000100, 0, SROM8_5GL_MCSPO + 7, 0xffff}, + {"mcs5ghpo0", 0x00000100, 0, SROM8_5GH_MCSPO, 0xffff}, + {"mcs5ghpo1", 0x00000100, 0, SROM8_5GH_MCSPO + 1, 0xffff}, + {"mcs5ghpo2", 0x00000100, 0, SROM8_5GH_MCSPO + 2, 0xffff}, + {"mcs5ghpo3", 0x00000100, 0, SROM8_5GH_MCSPO + 3, 0xffff}, + {"mcs5ghpo4", 0x00000100, 0, SROM8_5GH_MCSPO + 4, 0xffff}, + {"mcs5ghpo5", 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff}, + {"mcs5ghpo6", 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff}, + {"mcs5ghpo7", 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff}, + {"cddpo", 0x000000f0, 0, SROM4_CDDPO, 0xffff}, + {"stbcpo", 0x000000f0, 0, SROM4_STBCPO, 0xffff}, + {"bw40po", 0x000000f0, 0, SROM4_BW40PO, 0xffff}, + {"bwduppo", 0x000000f0, 0, SROM4_BWDUPPO, 0xffff}, + {"cddpo", 0x00000100, 0, SROM8_CDDPO, 0xffff}, + {"stbcpo", 0x00000100, 0, SROM8_STBCPO, 0xffff}, + {"bw40po", 0x00000100, 0, SROM8_BW40PO, 0xffff}, + {"bwduppo", 0x00000100, 0, SROM8_BWDUPPO, 0xffff}, + + /* power per rate from sromrev 9 */ + {"cckbw202gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20, 0xffff}, + {"cckbw20ul2gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20UL, 0xffff}, + {"legofdmbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_2GPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_2GPO_LOFDMBW20UL + 1, 0xffff}, + {"legofdmbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_5GLPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GLPO_LOFDMBW20UL + 1, 0xffff}, + {"legofdmbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_5GMPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GMPO_LOFDMBW20UL + 1, 0xffff}, + {"legofdmbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20, 
0xffff}, + {"", 0, 0, SROM9_5GHPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GHPO_LOFDMBW20UL + 1, 0xffff}, + {"mcsbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_2GPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_2GPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw402gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_2GPO_MCSBW40 + 1, 0xffff}, + {"mcsbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_5GLPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GLPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw405glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_5GLPO_MCSBW40 + 1, 0xffff}, + {"mcsbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_5GMPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GMPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw405gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_5GMPO_MCSBW40 + 1, 0xffff}, + {"mcsbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_5GHPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GHPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw405ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_5GHPO_MCSBW40 + 1, 0xffff}, + {"mcs32po", 0x00000600, 0, SROM9_PO_MCS32, 0xffff}, + {"legofdm40duppo", 0x00000600, 0, SROM9_PO_LOFDM40DUP, 0xffff}, + {"pcieingress_war", 0x00000700, 0, SROM8_PCIEINGRESS_WAR, 0xf}, + {"eu_edthresh2g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0xff00}, + {"eu_edthresh2g", 0x00000200, 0, SROM9_EU_EDCRSTH, 
0x00ff}, + {"eu_edthresh5g", 0x00000200, 0, SROM9_EU_EDCRSTH, 0xff00}, + {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f}, + {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f}, + {"rxgainerr2ga1", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x07c0}, + {"rxgainerr2ga2", 0x00000700, 0, SROM8_RXGAINERR_2G, 0xf800}, + {"rxgainerr5gla0", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x003f}, + {"rxgainerr5gla1", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x07c0}, + {"rxgainerr5gla2", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0xf800}, + {"rxgainerr5gma0", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x003f}, + {"rxgainerr5gma1", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x07c0}, + {"rxgainerr5gma2", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0xf800}, + {"rxgainerr5gha0", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x003f}, + {"rxgainerr5gha1", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x07c0}, + {"rxgainerr5gha2", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0xf800}, + {"rxgainerr5gua0", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x003f}, + {"rxgainerr5gua1", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x07c0}, + {"rxgainerr5gua2", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0xf800}, + {"sar2g", 0x00000600, 0, SROM9_SAR, 0x00ff}, + {"sar5g", 0x00000600, 0, SROM9_SAR, 0xff00}, + {"noiselvl2ga0", 0x00000700, 0, SROM8_NOISELVL_2G, 0x001f}, + {"noiselvl2ga1", 0x00000700, 0, SROM8_NOISELVL_2G, 0x03e0}, + {"noiselvl2ga2", 0x00000700, 0, SROM8_NOISELVL_2G, 0x7c00}, + {"noiselvl5gla0", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x001f}, + {"noiselvl5gla1", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x03e0}, + {"noiselvl5gla2", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x7c00}, + {"noiselvl5gma0", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x001f}, + {"noiselvl5gma1", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x03e0}, + {"noiselvl5gma2", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x7c00}, + {"noiselvl5gha0", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x001f}, + {"noiselvl5gha1", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x03e0}, + {"noiselvl5gha2", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x7c00}, + {"noiselvl5gua0", 
0x00000700, 0, SROM8_NOISELVL_5GU, 0x001f}, + {"noiselvl5gua1", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x03e0}, + {"noiselvl5gua2", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x7c00}, + {"noisecaloffset", 0x00000300, 0, SROM8_NOISECALOFFSET, 0x00ff}, + {"noisecaloffset5g", 0x00000300, 0, SROM8_NOISECALOFFSET, 0xff00}, + {"subband5gver", 0x00000700, 0, SROM8_SUBBAND_PPR, 0x7}, + + {"cckPwrOffset", 0x00000400, 0, SROM10_CCKPWROFFSET, 0xffff}, + {"eu_edthresh2g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0xff00}, + /* swctrlmap_2g array, note that the last element doesn't have SRFL_ARRAY flag set */ + {"swctrlmap_2g", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 1, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 2, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 3, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 4, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 5, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 6, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 7, 0xffff}, + {"", 0x00000400, SRFL_PRHEX, SROM10_SWCTRLMAP_2G + 8, 0xffff}, + + /* sromrev 11 */ + {"boardflags3", 0xfffff800, SRFL_PRHEX|SRFL_MORE, SROM11_BFL4, 0xffff}, + {"", 0, 0, SROM11_BFL5, 0xffff}, + {"boardnum", 0xfffff800, 0, SROM11_MACLO, 0xffff}, + {"macaddr", 0xfffff800, SRFL_ETHADDR, SROM11_MACHI, 0xffff}, + {"ccode", 0xfffff800, SRFL_CCODE, SROM11_CCODE, 0xffff}, + {"regrev", 0xfffff800, 0, SROM11_REGREV, 0x00ff}, + {"ledbh0", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0x00ff}, + {"ledbh1", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0xff00}, + {"ledbh2", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0x00ff}, + {"ledbh3", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0xff00}, + {"leddc", 0xfffff800, SRFL_NOFFS|SRFL_LEDDC, SROM11_LEDDC, 0xffff}, 
+ {"aa2g", 0xfffff800, 0, SROM11_AA, 0x00ff}, + {"aa5g", 0xfffff800, 0, SROM11_AA, 0xff00}, + {"agbg0", 0xfffff800, 0, SROM11_AGBG10, 0xff00}, + {"agbg1", 0xfffff800, 0, SROM11_AGBG10, 0x00ff}, + {"agbg2", 0xfffff800, 0, SROM11_AGBG2A0, 0xff00}, + {"aga0", 0xfffff800, 0, SROM11_AGBG2A0, 0x00ff}, + {"aga1", 0xfffff800, 0, SROM11_AGA21, 0xff00}, + {"aga2", 0xfffff800, 0, SROM11_AGA21, 0x00ff}, + {"txchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_SWITCH_MASK}, + + {"tssiposslope2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0001}, + {"epagain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x000e}, + {"pdgain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x01f0}, + {"tworangetssi2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0200}, + {"papdcap2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0400}, + {"femctrl", 0xfffff800, 0, SROM11_FEM_CFG1, 0xf800}, + + {"tssiposslope5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0001}, + {"epagain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x000e}, + {"pdgain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x01f0}, + {"tworangetssi5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0200}, + {"papdcap5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0400}, + {"gainctrlsph", 0xfffff800, 0, SROM11_FEM_CFG2, 0xf800}, + + {"tempthresh", 0xfffff800, 0, SROM11_THERMAL, 0xff00}, + {"tempoffset", 0xfffff800, 0, SROM11_THERMAL, 0x00ff}, + {"rawtempsense", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0x01ff}, + {"measpower", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0xfe00}, + {"tempsense_slope", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x00ff}, + {"tempcorrx", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0xfc00}, + {"tempsense_option", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x0300}, + {"xtalfreq", 0xfffff800, 0, SROM11_XTAL_FREQ, 0xffff}, + /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #1 */ + {"pa5gbw4080a1", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, 
SROM11_5GB0_4080_W0_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W1_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W2_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_4080_W0_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_4080_PA + 2, 0xffff}, + {"phycal_tempdelta", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x00ff}, + {"temps_period", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x0f00}, + {"temps_hysteresis", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0xf000}, + {"measpower1", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x007f}, + {"measpower2", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x3f80}, + {"tssifloor2g", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_2G, 0x03ff}, + {"tssifloor5g", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GL, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GM, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GH, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_5GU, 0x03ff}, + {"pdoffset2g40ma0", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x000f}, + {"pdoffset2g40ma1", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x00f0}, + {"pdoffset2g40ma2", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x0f00}, + {"pdoffset2g40mvalid", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x8000}, + {"pdoffset40ma0", 
0xfffff800, 0, SROM11_PDOFF_40M_A0, 0xffff}, + {"pdoffset40ma1", 0xfffff800, 0, SROM11_PDOFF_40M_A1, 0xffff}, + {"pdoffset40ma2", 0xfffff800, 0, SROM11_PDOFF_40M_A2, 0xffff}, + {"pdoffset80ma0", 0xfffff800, 0, SROM11_PDOFF_80M_A0, 0xffff}, + {"pdoffset80ma1", 0xfffff800, 0, SROM11_PDOFF_80M_A1, 0xffff}, + {"pdoffset80ma2", 0xfffff800, 0, SROM11_PDOFF_80M_A2, 0xffff}, + + {"subband5gver", 0xfffff800, SRFL_PRHEX, SROM11_SUBBAND5GVER, 0xffff}, + {"paparambwver", 0xfffff800, 0, SROM11_MCSLR5GLPO, 0xf000}, + {"rx5ggainwar", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0x2000}, + /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #0 */ + {"pa5gbw4080a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 +SROM11_5GB0_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff}, + /* Special PA Params for 4335 5G Band, 40 MHz BW */ + {"pa5gbw40a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 2, 0xffff}, + 
{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_5GB3_PA + 2, 0xffff}, + /* Special PA Params for 4335 5G Band, 80 MHz BW */ + {"pa5gbw80a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff}, + /* Special PA Params for 4335 2G Band, CCK */ + {"pa2gccka0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA, 0xffff}, + 
{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_2G_PA + 2, 0xffff}, + + /* power per rate */ + {"cckbw202gpo", 0xfffff800, 0, SROM11_CCKBW202GPO, 0xffff}, + {"cckbw20ul2gpo", 0xfffff800, 0, SROM11_CCKBW20UL2GPO, 0xffff}, + {"mcsbw202gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW202GPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW202GPO_1, 0xffff}, + {"mcsbw402gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW402GPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW402GPO_1, 0xffff}, + {"dot11agofdmhrbw202gpo", 0xfffff800, 0, SROM11_DOT11AGOFDMHRBW202GPO, 0xffff}, + {"ofdmlrbw202gpo", 0xfffff800, 0, SROM11_OFDMLRBW202GPO, 0xffff}, + {"mcsbw205glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GLPO_1, 0xffff}, + {"mcsbw405glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GLPO_1, 0xffff}, + {"mcsbw805glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GLPO_1, 0xffff}, + {"mcsbw205gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GMPO_1, 0xffff}, + {"mcsbw405gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GMPO_1, 0xffff}, + {"mcsbw805gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GMPO_1, 0xffff}, + {"mcsbw205ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GHPO_1, 0xffff}, + {"mcsbw405ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GHPO_1, 0xffff}, + {"mcsbw805ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GHPO_1, 0xffff}, + {"mcslr5glpo", 0xfffff800, 0, SROM11_MCSLR5GLPO, 0x0fff}, + {"mcslr5gmpo", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0xffff}, + {"mcslr5ghpo", 0xfffff800, 0, 
SROM11_MCSLR5GHPO, 0xffff}, + {"sb20in40hrpo", 0xfffff800, 0, SROM11_SB20IN40HRPO, 0xffff}, + {"sb20in80and160hr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GLPO, 0xffff}, + {"sb40and80hr5glpo", 0xfffff800, 0, SROM11_SB40AND80HR5GLPO, 0xffff}, + {"sb20in80and160hr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GMPO, 0xffff}, + {"sb40and80hr5gmpo", 0xfffff800, 0, SROM11_SB40AND80HR5GMPO, 0xffff}, + {"sb20in80and160hr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GHPO, 0xffff}, + {"sb40and80hr5ghpo", 0xfffff800, 0, SROM11_SB40AND80HR5GHPO, 0xffff}, + {"sb20in40lrpo", 0xfffff800, 0, SROM11_SB20IN40LRPO, 0xffff}, + {"sb20in80and160lr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GLPO, 0xffff}, + {"sb40and80lr5glpo", 0xfffff800, 0, SROM11_SB40AND80LR5GLPO, 0xffff}, + {"sb20in80and160lr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GMPO, 0xffff}, + {"sb40and80lr5gmpo", 0xfffff800, 0, SROM11_SB40AND80LR5GMPO, 0xffff}, + {"sb20in80and160lr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GHPO, 0xffff}, + {"sb40and80lr5ghpo", 0xfffff800, 0, SROM11_SB40AND80LR5GHPO, 0xffff}, + {"dot11agduphrpo", 0xfffff800, 0, SROM11_DOT11AGDUPHRPO, 0xffff}, + {"dot11agduplrpo", 0xfffff800, 0, SROM11_DOT11AGDUPLRPO, 0xffff}, + + /* Misc */ + {"sar2g", 0xfffff800, 0, SROM11_SAR, 0x00ff}, + {"sar5g", 0xfffff800, 0, SROM11_SAR, 0xff00}, + + {"noiselvl2ga0", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x001f}, + {"noiselvl2ga1", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x03e0}, + {"noiselvl2ga2", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x7c00}, + {"noiselvl5ga0", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x001f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x001f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x001f}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x001f}, + {"noiselvl5ga1", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x03e0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x03e0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x03e0}, + {"", 0xfffff800, 0, 
SROM11_NOISELVL_5GU, 0x03e0}, + {"noiselvl5ga2", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x7c00}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x7c00}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x7c00}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x7c00}, + {"eu_edthresh2g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0xff00}, + + {"rxgainerr2ga0", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x003f}, + {"rxgainerr2ga1", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x07c0}, + {"rxgainerr2ga2", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0xf800}, + {"rxgainerr5ga0", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x003f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x003f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x003f}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x003f}, + {"rxgainerr5ga1", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x07c0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x07c0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x07c0}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x07c0}, + {"rxgainerr5ga2", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0xf800}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0xf800}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0xf800}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0xf800}, + {"rpcal2g", 0xfffff800, 0, SROM11_RPCAL_2G, 0xffff}, + {"rpcal5gb0", 0xfffff800, 0, SROM11_RPCAL_5GL, 0xffff}, + {"rpcal5gb1", 0xfffff800, 0, SROM11_RPCAL_5GM, 0xffff}, + {"rpcal5gb2", 0xfffff800, 0, SROM11_RPCAL_5GH, 0xffff}, + {"rpcal5gb3", 0xfffff800, 0, SROM11_RPCAL_5GU, 0xffff}, + {"txidxcap2g", 0xfffff800, 0, SROM11_TXIDXCAP2G, 0x0ff0}, + {"txidxcap5g", 0xfffff800, 0, SROM11_TXIDXCAP5G, 0x0ff0}, + {"pdoffsetcckma0", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x000f}, + {"pdoffsetcckma1", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x00f0}, + {"pdoffsetcckma2", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x0f00}, + + /* sromrev 12 */ + {"boardflags4", 
0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_BFL6, 0xffff}, + {"", 0, 0, SROM12_BFL7, 0xffff}, + {"pdoffsetcck", 0xfffff000, 0, SROM12_PDOFF_2G_CCK, 0xffff}, + {"pdoffset20in40m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B0, 0xffff}, + {"pdoffset20in40m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B1, 0xffff}, + {"pdoffset20in40m5gb2", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B2, 0xffff}, + {"pdoffset20in40m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B3, 0xffff}, + {"pdoffset20in40m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B4, 0xffff}, + {"pdoffset40in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B0, 0xffff}, + {"pdoffset40in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B1, 0xffff}, + {"pdoffset40in80m5gb2", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B2, 0xffff}, + {"pdoffset40in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B3, 0xffff}, + {"pdoffset40in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B4, 0xffff}, + {"pdoffset20in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B0, 0xffff}, + {"pdoffset20in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B1, 0xffff}, + {"pdoffset20in80m5gb2", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B2, 0xffff}, + {"pdoffset20in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B3, 0xffff}, + {"pdoffset20in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B4, 0xffff}, + + {"pdoffset20in40m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff}, + {"pdoffset20in40m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff}, + {"pdoffset20in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff}, + {"pdoffset20in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff}, + {"pdoffset40in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff}, + {"pdoffset40in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff}, + + {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff}, + {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 
0xffff}, + + /* power per rate */ + {"mcsbw205gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW205GX1PO_1, 0xffff}, + {"mcsbw405gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW405GX1PO_1, 0xffff}, + {"mcsbw805gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW805GX1PO_1, 0xffff}, + {"mcsbw205gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW205GX2PO_1, 0xffff}, + {"mcsbw405gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW405GX2PO_1, 0xffff}, + {"mcsbw805gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW805GX2PO_1, 0xffff}, + + {"sb20in80and160hr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX1PO, 0xffff}, + {"sb40and80hr5gx1po", 0xfffff000, 0, SROM12_SB40AND80HR5GX1PO, 0xffff}, + {"sb20in80and160lr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX1PO, 0xffff}, + {"sb40and80hr5gx1po", 0xfffff000, 0, SROM12_SB40AND80HR5GX1PO, 0xffff}, + {"sb20in80and160hr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX2PO, 0xffff}, + {"sb40and80hr5gx2po", 0xfffff000, 0, SROM12_SB40AND80HR5GX2PO, 0xffff}, + {"sb20in80and160lr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX2PO, 0xffff}, + {"sb40and80hr5gx2po", 0xfffff000, 0, SROM12_SB40AND80HR5GX2PO, 0xffff}, + + {"rxgains5gmelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0007}, + {"rxgains5gmelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0007}, + {"rxgains5gmelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0007}, + {"rxgains5gmtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0078}, + {"rxgains5gmtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0078}, + {"rxgains5gmtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0078}, + {"rxgains5gmtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0080}, + {"rxgains5gmtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0080}, + 
{"rxgains5gmtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0080}, + {"rxgains5ghelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0700}, + {"rxgains5ghelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0700}, + {"rxgains5ghelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0700}, + {"rxgains5ghtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x7800}, + {"rxgains5ghtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x7800}, + {"rxgains5ghtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x7800}, + {"rxgains5ghtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x8000}, + {"rxgains5ghtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x8000}, + {"rxgains5ghtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x8000}, + {"eu_edthresh2g", 0x00001000, 0, SROM12_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00001000, 0, SROM12_EU_EDCRSTH, 0xff00}, + + {"gpdn", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_GPDN_L, 0xffff}, + {"", 0, 0, SROM12_GPDN_H, 0xffff}, + + {"eu_edthresh2g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0xff00}, + + {"agbg3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0xff00}, + {"aga3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0x00ff}, + {"noiselvl2ga3", 0xffffe000, 0, SROM13_NOISELVLCORE3, 0x001f}, + {"noiselvl5ga3", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x03e0}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x7c00}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3_1, 0x001f}, + {"", 0xffffe000, 0, SROM13_NOISELVLCORE3_1, 0x03e0}, + {"rxgainerr2ga3", 0xffffe000, 0, SROM13_RXGAINERRCORE3, 0x001f}, + {"rxgainerr5ga3", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x03e0}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x7c00}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3_1, 0x001f}, + {"", 0xffffe000, 0, SROM13_RXGAINERRCORE3_1, 0x03e0}, + {"rxgains5gmelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0007}, + {"rxgains5gmtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0078}, + {"rxgains5gmtrelnabypa3", 0xffffe000, 0, 
SROM13_RXGAINS1CORE3, 0x0080}, + {"rxgains5ghelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0700}, + {"rxgains5ghtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x7800}, + {"rxgains5ghtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x8000}, + + /* power per rate */ + {"mcs1024qam2gpo", 0xffffe000, 0, SROM13_MCS1024QAM2GPO, 0xffff}, + {"mcs1024qam5glpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GLPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GLPO_1, 0xffff}, + {"mcs1024qam5gmpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GMPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GMPO_1, 0xffff}, + {"mcs1024qam5ghpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GHPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GHPO_1, 0xffff}, + {"mcs1024qam5gx1po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX1PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX1PO_1, 0xffff}, + {"mcs1024qam5gx2po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX2PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX2PO_1, 0xffff}, + + {"mcsbw1605glpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GLPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GLPO_1, 0xffff}, + {"mcsbw1605gmpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GMPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GMPO_1, 0xffff}, + {"mcsbw1605ghpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GHPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GHPO_1, 0xffff}, + {"mcsbw1605gx1po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX1PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GX1PO_1, 0xffff}, + {"mcsbw1605gx2po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX2PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GX2PO_1, 0xffff}, + + {"ulbpproffs2g", 0xffffe000, 0, SROM13_ULBPPROFFS2G, 0xffff}, + + {"mcs8poexp", 0xffffe000, SRFL_MORE, SROM13_MCS8POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS8POEXP_1, 0xffff}, + {"mcs9poexp", 0xffffe000, SRFL_MORE, SROM13_MCS9POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS9POEXP_1, 0xffff}, + {"mcs10poexp", 
0xffffe000, SRFL_MORE, SROM13_MCS10POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS10POEXP_1, 0xffff}, + {"mcs11poexp", 0xffffe000, SRFL_MORE, SROM13_MCS11POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS11POEXP_1, 0xffff}, + + {"ulbpdoffs5gb0a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A0, 0xffff}, + {"ulbpdoffs5gb0a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A1, 0xffff}, + {"ulbpdoffs5gb0a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A2, 0xffff}, + {"ulbpdoffs5gb0a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A3, 0xffff}, + {"ulbpdoffs5gb1a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A0, 0xffff}, + {"ulbpdoffs5gb1a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A1, 0xffff}, + {"ulbpdoffs5gb1a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A2, 0xffff}, + {"ulbpdoffs5gb1a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A3, 0xffff}, + {"ulbpdoffs5gb2a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A0, 0xffff}, + {"ulbpdoffs5gb2a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A1, 0xffff}, + {"ulbpdoffs5gb2a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A2, 0xffff}, + {"ulbpdoffs5gb2a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A3, 0xffff}, + {"ulbpdoffs5gb3a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A0, 0xffff}, + {"ulbpdoffs5gb3a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A1, 0xffff}, + {"ulbpdoffs5gb3a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A2, 0xffff}, + {"ulbpdoffs5gb3a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A3, 0xffff}, + {"ulbpdoffs5gb4a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A0, 0xffff}, + {"ulbpdoffs5gb4a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A1, 0xffff}, + {"ulbpdoffs5gb4a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A2, 0xffff}, + {"ulbpdoffs5gb4a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A3, 0xffff}, + {"ulbpdoffs2ga0", 0xffffe000, 0, SROM13_ULBPDOFFS2GA0, 0xffff}, + {"ulbpdoffs2ga1", 0xffffe000, 0, SROM13_ULBPDOFFS2GA1, 0xffff}, + {"ulbpdoffs2ga2", 0xffffe000, 0, SROM13_ULBPDOFFS2GA2, 0xffff}, + {"ulbpdoffs2ga3", 0xffffe000, 0, SROM13_ULBPDOFFS2GA3, 0xffff}, + + {"rpcal5gb4", 0xffffe000, 0, SROM13_RPCAL5GB4, 0xffff}, + + {"sb20in40hrlrpox", 0xffffe000, 0, 
SROM13_SB20IN40HRLRPOX, 0xffff}, + + {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff}, + {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 0xffff}, + + {"pdoffset20in40m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff}, + {"", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff}, + {"pdoffset40in80m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff}, + {"", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff}, + {"pdoffset20in80m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff}, + {"", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff}, + + {"swctrlmap4_cfg", 0xffffe000, 0, SROM13_SWCTRLMAP4_CFG, 0xffff}, + {"swctrlmap4_TX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM3TO0, 0xffff}, + {"swctrlmap4_RX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM3TO0, 0xffff}, + {"swctrlmap4_RXByp2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0, 0xffff}, + {"swctrlmap4_misc2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM3TO0, 0xffff}, + {"swctrlmap4_TX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM3TO0, 0xffff}, + {"swctrlmap4_RX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM3TO0, 0xffff}, + {"swctrlmap4_RXByp5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0, 0xffff}, + {"swctrlmap4_misc5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM3TO0, 0xffff}, + {"swctrlmap4_TX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM7TO4, 0xffff}, + {"swctrlmap4_RX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM7TO4, 0xffff}, + {"swctrlmap4_RXByp2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4, 0xffff}, + {"swctrlmap4_misc2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM7TO4, 0xffff}, + {"swctrlmap4_TX5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM7TO4, 0xffff}, + {"swctrlmap4_RX5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM7TO4, 0xffff}, + {"swctrlmap4_RXByp5g_fem7to4", 
0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4, 0xffff}, + {"swctrlmap4_misc5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM7TO4, 0xffff}, + {NULL, 0, 0, 0, 0} +}; + +static const sromvar_t perpath_pci_sromvars[] = { + {"maxp2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff}, + {"itt2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00}, + {"itt5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00}, + {"pa2gw0a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff}, + {"pa2gw1a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff}, + {"pa2gw2a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff}, + {"pa2gw3a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff}, + {"maxp5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff}, + {"maxp5gha", 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff}, + {"maxp5gla", 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00}, + {"pa5gw0a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff}, + {"pa5gw1a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff}, + {"pa5gw2a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff}, + {"pa5gw3a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff}, + {"pa5glw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff}, + {"pa5glw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1, 0xffff}, + {"pa5glw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2, 0xffff}, + {"pa5glw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3, 0xffff}, + {"pa5ghw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff}, + {"pa5ghw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1, 0xffff}, + {"pa5ghw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2, 0xffff}, + {"pa5ghw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3, 0xffff}, + {"maxp2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0x00ff}, + {"itt2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0xff00}, + {"itt5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0xff00}, + {"pa2gw0a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA, 0xffff}, + {"pa2gw1a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 1, 0xffff}, + {"pa2gw2a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 2, 0xffff}, + {"maxp5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 
0x00ff}, + {"maxp5gha", 0x00000700, 0, SROM8_5GLH_MAXP, 0x00ff}, + {"maxp5gla", 0x00000700, 0, SROM8_5GLH_MAXP, 0xff00}, + {"pa5gw0a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA, 0xffff}, + {"pa5gw1a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 1, 0xffff}, + {"pa5gw2a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 2, 0xffff}, + {"pa5glw0a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA, 0xffff}, + {"pa5glw1a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 1, 0xffff}, + {"pa5glw2a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 2, 0xffff}, + {"pa5ghw0a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA, 0xffff}, + {"pa5ghw1a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 1, 0xffff}, + {"pa5ghw2a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 2, 0xffff}, + + /* sromrev 11 */ + {"maxp2ga", 0xfffff800, 0, SROM11_2G_MAXP, 0x00ff}, + {"pa2ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX, SROM11_2G_PA + 2, 0xffff}, + {"rxgains5gmelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0007}, + {"rxgains5gmtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x0078}, + {"rxgains5gmtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x0080}, + {"rxgains5ghelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0700}, + {"rxgains5ghtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x7800}, + {"rxgains5ghtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x8000}, + {"rxgains2gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0007}, + {"rxgains2gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x0078}, + {"rxgains2gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x0080}, + {"rxgains5gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0700}, + {"rxgains5gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x7800}, + {"rxgains5gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x8000}, + {"maxp5ga", 0x00000800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0x00ff}, + {"", 0x00000800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0xff00}, + {"", 0x00000800, SRFL_ARRAY, SROM11_5GB3B2_MAXP, 0x00ff}, + {"", 0x00000800, 0, SROM11_5GB3B2_MAXP, 0xff00}, + 
{"pa5ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX, SROM11_5GB3_PA + 2, 0xffff}, + + /* sromrev 12 */ + {"maxp5gb4a", 0xfffff000, 0, SROM12_5GB42G_MAXP, 0x00ff00}, + {"pa2ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_2GB0_PA_W3, 0x00ffff}, + + {"pa2g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_2G40B0_PA_W3, 0x00ffff}, + {"maxp5gb0a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff}, + {"maxp5gb1a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff00}, + {"maxp5gb2a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff}, + {"maxp5gb3a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff00}, + + {"pa5ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W2, 0x00ffff}, + 
{"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5GB4_PA_W3, 0x00ffff}, + + {"pa5g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, 
SROM12_5G40B2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5G40B4_PA_W3, 0x00ffff}, + + {"pa5g80a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, 
SROM12_5G80B3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5G80B4_PA_W3, 0x00ffff}, + /* sromrev 13 */ + {"rxgains2gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0007}, + {"rxgains2gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x0078}, + {"rxgains2gtrelnabypa", 0xffffe000, 0, SROM13_RXGAINS, 0x0080}, + {"rxgains5gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0700}, + {"rxgains5gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x7800}, + {"rxgains5gtrelnabypa", 0xffffe000, 0, SROM13_RXGAINS, 0x8000}, + {NULL, 0, 0, 0, 0} +}; + +#if !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N)) +#define PHY_TYPE_HT 7 /* HT-Phy value */ +#define PHY_TYPE_N 4 /* N-Phy value */ +#endif /* !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N)) */ +#if !defined(PHY_TYPE_AC) +#define PHY_TYPE_AC 11 /* AC-Phy value */ +#endif /* !defined(PHY_TYPE_AC) */ +#if !defined(PHY_TYPE_LCN20) +#define PHY_TYPE_LCN20 12 /* LCN20-Phy value */ +#endif /* !defined(PHY_TYPE_LCN20) */ +#if !defined(PHY_TYPE_NULL) +#define PHY_TYPE_NULL 0xf /* Invalid Phy value */ +#endif /* !defined(PHY_TYPE_NULL) */ + +typedef struct { + uint16 phy_type; + uint16 bandrange; + uint16 chain; + const char *vars; +} pavars_t; + +static const pavars_t pavars[] = { + /* HTPHY */ + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 2, "pa2gw0a2 pa2gw1a2 pa2gw2a2"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 2, "pa5glw0a2 pa5glw1a2 pa5glw2a2"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"}, + 
{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 2, "pa5gw0a2 pa5gw1a2 pa5gw2a2"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 2, "pa5ghw0a2 pa5ghw1a2 pa5ghw2a2"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 0, "pa5gw0a3 pa5gw1a3 pa5gw2a3"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 1, "pa5glw0a3 pa5glw1a3 pa5glw2a3"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 2, "pa5ghw0a3 pa5ghw1a3 pa5ghw2a3"}, + /* NPHY */ + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"}, + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5ga2"}, + /* LCN20PHY */ + {PHY_TYPE_LCN20, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + + +static const pavars_t pavars_SROM12[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 
0, "pa2g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 1, "pa2g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 2, "pa2g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 2, "pa5ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 0, "pa5g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 1, "pa5g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 2, "pa5g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 0, "pa5g80a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 1, "pa5g80a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 2, "pa5g80a2"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +static const pavars_t pavars_SROM13[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 3, "pa2ga3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 0, "pa2g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 1, "pa2g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 2, "pa2g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 3, "pa2g40a3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 2, "pa5ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 3, "pa5ga3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 0, "pa5g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 1, "pa5g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 2, "pa5g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 3, "pa5g40a3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 0, "pa5g80a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 1, "pa5g80a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 2, "pa5g80a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 3, "pa5g80a3"}, + {PHY_TYPE_NULL, 0, 0, 
""} +}; + +/* pavars table when paparambwver is 1 */ +static const pavars_t pavars_bwver_1[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gccka0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5gbw40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw80a0"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +/* pavars table when paparambwver is 2 */ +static const pavars_t pavars_bwver_2[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw4080a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 3, "pa5gbw4080a1"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +/* pavars table when paparambwver is 3 */ +static const pavars_t pavars_bwver_3[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2gccka0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 3, "pa2gccka1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw4080a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 3, "pa5gbw4080a1"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +typedef struct { + uint16 phy_type; + uint16 bandrange; + const char *vars; +} povars_t; + +static const povars_t povars[] = { + /* NPHY */ + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, "mcs2gpo0 mcs2gpo1 mcs2gpo2 mcs2gpo3 " + "mcs2gpo4 mcs2gpo5 mcs2gpo6 mcs2gpo7"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GL, "mcs5glpo0 mcs5glpo1 mcs5glpo2 mcs5glpo3 " + "mcs5glpo4 mcs5glpo5 mcs5glpo6 mcs5glpo7"}, + {PHY_TYPE_N, 
WL_CHAN_FREQ_RANGE_5GM, "mcs5gpo0 mcs5gpo1 mcs5gpo2 mcs5gpo3 " + "mcs5gpo4 mcs5gpo5 mcs5gpo6 mcs5gpo7"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GH, "mcs5ghpo0 mcs5ghpo1 mcs5ghpo2 mcs5ghpo3 " + "mcs5ghpo4 mcs5ghpo5 mcs5ghpo6 mcs5ghpo7"}, + {PHY_TYPE_NULL, 0, ""} +}; + +typedef struct { + uint8 tag; /* Broadcom subtag name */ + uint32 revmask; /* Supported cis_sromrev bitmask. Some of the parameters in + * different tuples have the same name. Therefore, the MFGc tool + * needs to know which tuple to generate when seeing these + * parameters (given that we know sromrev from user input, like the + * nvram file). + */ + uint8 len; /* Length field of the tuple, note that it includes the + * subtag name (1 byte): 1 + tuple content length + */ + const char *params; +} cis_tuple_t; + +#define OTP_RAW (0xff - 1) /* Reserved tuple number for wrvar Raw input */ +#define OTP_VERS_1 (0xff - 2) /* CISTPL_VERS_1 */ +#define OTP_MANFID (0xff - 3) /* CISTPL_MANFID */ +#define OTP_RAW1 (0xff - 4) /* Like RAW, but comes first */ + +/** this array is used by CIS creating/writing applications */ +static const cis_tuple_t cis_hnbuvars[] = { +/* tag revmask len params */ + {OTP_RAW1, 0xffffffff, 0, ""}, /* special case */ + {OTP_VERS_1, 0xffffffff, 0, "smanf sproductname"}, /* special case (non BRCM tuple) */ + {OTP_MANFID, 0xffffffff, 4, "2manfid 2prodid"}, /* special case (non BRCM tuple) */ + /* Unified OTP: tupple to embed USB manfid inside SDIO CIS */ + {HNBU_UMANFID, 0xffffffff, 8, "8usbmanfid"}, + {HNBU_SROMREV, 0xffffffff, 2, "1sromrev"}, + /* NOTE: subdevid is also written to boardtype. + * Need to write HNBU_BOARDTYPE to change it if it is different. 
+ */ + {HNBU_CHIPID, 0xffffffff, 11, "2vendid 2devid 2chiprev 2subvendid 2subdevid"}, + {HNBU_BOARDREV, 0xffffffff, 3, "2boardrev"}, + {HNBU_PAPARMS, 0xffffffff, 10, "2pa0b0 2pa0b1 2pa0b2 1pa0itssit 1pa0maxpwr 1opo"}, + {HNBU_AA, 0xffffffff, 3, "1aa2g 1aa5g"}, + {HNBU_AA, 0xffffffff, 3, "1aa0 1aa1"}, /* backward compatibility */ + {HNBU_AG, 0xffffffff, 5, "1ag0 1ag1 1ag2 1ag3"}, + {HNBU_BOARDFLAGS, 0xffffffff, 21, "4boardflags 4boardflags2 4boardflags3 " + "4boardflags4 4boardflags5 "}, + {HNBU_LEDS, 0xffffffff, 17, "1ledbh0 1ledbh1 1ledbh2 1ledbh3 1ledbh4 1ledbh5 " + "1ledbh6 1ledbh7 1ledbh8 1ledbh9 1ledbh10 1ledbh11 1ledbh12 1ledbh13 1ledbh14 1ledbh15"}, + {HNBU_CCODE, 0xffffffff, 4, "2ccode 1cctl"}, + {HNBU_CCKPO, 0xffffffff, 3, "2cckpo"}, + {HNBU_OFDMPO, 0xffffffff, 5, "4ofdmpo"}, + {HNBU_PAPARMS5G, 0xffffffff, 23, "2pa1b0 2pa1b1 2pa1b2 2pa1lob0 2pa1lob1 2pa1lob2 " + "2pa1hib0 2pa1hib1 2pa1hib2 1pa1itssit " + "1pa1maxpwr 1pa1lomaxpwr 1pa1himaxpwr"}, + {HNBU_RDLID, 0xffffffff, 3, "2rdlid"}, + {HNBU_RSSISMBXA2G, 0xffffffff, 3, "0rssismf2g 0rssismc2g " + "0rssisav2g 0bxa2g"}, /* special case */ + {HNBU_RSSISMBXA5G, 0xffffffff, 3, "0rssismf5g 0rssismc5g " + "0rssisav5g 0bxa5g"}, /* special case */ + {HNBU_XTALFREQ, 0xffffffff, 5, "4xtalfreq"}, + {HNBU_TRI2G, 0xffffffff, 2, "1tri2g"}, + {HNBU_TRI5G, 0xffffffff, 4, "1tri5gl 1tri5g 1tri5gh"}, + {HNBU_RXPO2G, 0xffffffff, 2, "1rxpo2g"}, + {HNBU_RXPO5G, 0xffffffff, 2, "1rxpo5g"}, + {HNBU_BOARDNUM, 0xffffffff, 3, "2boardnum"}, + {HNBU_MACADDR, 0xffffffff, 7, "6macaddr"}, /* special case */ + {HNBU_RDLSN, 0xffffffff, 3, "2rdlsn"}, + {HNBU_BOARDTYPE, 0xffffffff, 3, "2boardtype"}, + {HNBU_LEDDC, 0xffffffff, 3, "2leddc"}, + {HNBU_RDLRNDIS, 0xffffffff, 2, "1rdlndis"}, + {HNBU_CHAINSWITCH, 0xffffffff, 5, "1txchain 1rxchain 2antswitch"}, + {HNBU_REGREV, 0xffffffff, 2, "1regrev"}, + {HNBU_FEM, 0x000007fe, 5, "0antswctl2g 0triso2g 0pdetrange2g 0extpagain2g " + "0tssipos2g 0antswctl5g 0triso5g 0pdetrange5g 0extpagain5g 
0tssipos5g"}, /* special case */ + {HNBU_PAPARMS_C0, 0x000007fe, 31, "1maxp2ga0 1itt2ga0 2pa2gw0a0 2pa2gw1a0 " + "2pa2gw2a0 1maxp5ga0 1itt5ga0 1maxp5gha0 1maxp5gla0 2pa5gw0a0 2pa5gw1a0 2pa5gw2a0 " + "2pa5glw0a0 2pa5glw1a0 2pa5glw2a0 2pa5ghw0a0 2pa5ghw1a0 2pa5ghw2a0"}, + {HNBU_PAPARMS_C1, 0x000007fe, 31, "1maxp2ga1 1itt2ga1 2pa2gw0a1 2pa2gw1a1 " + "2pa2gw2a1 1maxp5ga1 1itt5ga1 1maxp5gha1 1maxp5gla1 2pa5gw0a1 2pa5gw1a1 2pa5gw2a1 " + "2pa5glw0a1 2pa5glw1a1 2pa5glw2a1 2pa5ghw0a1 2pa5ghw1a1 2pa5ghw2a1"}, + {HNBU_PO_CCKOFDM, 0xffffffff, 19, "2cck2gpo 4ofdm2gpo 4ofdm5gpo 4ofdm5glpo " + "4ofdm5ghpo"}, + {HNBU_PO_MCS2G, 0xffffffff, 17, "2mcs2gpo0 2mcs2gpo1 2mcs2gpo2 2mcs2gpo3 " + "2mcs2gpo4 2mcs2gpo5 2mcs2gpo6 2mcs2gpo7"}, + {HNBU_PO_MCS5GM, 0xffffffff, 17, "2mcs5gpo0 2mcs5gpo1 2mcs5gpo2 2mcs5gpo3 " + "2mcs5gpo4 2mcs5gpo5 2mcs5gpo6 2mcs5gpo7"}, + {HNBU_PO_MCS5GLH, 0xffffffff, 33, "2mcs5glpo0 2mcs5glpo1 2mcs5glpo2 2mcs5glpo3 " + "2mcs5glpo4 2mcs5glpo5 2mcs5glpo6 2mcs5glpo7 " + "2mcs5ghpo0 2mcs5ghpo1 2mcs5ghpo2 2mcs5ghpo3 " + "2mcs5ghpo4 2mcs5ghpo5 2mcs5ghpo6 2mcs5ghpo7"}, + {HNBU_CCKFILTTYPE, 0xffffffff, 2, "1cckdigfilttype"}, + {HNBU_PO_CDD, 0xffffffff, 3, "2cddpo"}, + {HNBU_PO_STBC, 0xffffffff, 3, "2stbcpo"}, + {HNBU_PO_40M, 0xffffffff, 3, "2bw40po"}, + {HNBU_PO_40MDUP, 0xffffffff, 3, "2bwduppo"}, + {HNBU_RDLRWU, 0xffffffff, 2, "1rdlrwu"}, + {HNBU_WPS, 0xffffffff, 3, "1wpsgpio 1wpsled"}, + {HNBU_USBFS, 0xffffffff, 2, "1usbfs"}, + {HNBU_ELNA2G, 0xffffffff, 2, "1elna2g"}, + {HNBU_ELNA5G, 0xffffffff, 2, "1elna5g"}, + {HNBU_CUSTOM1, 0xffffffff, 5, "4customvar1"}, + {OTP_RAW, 0xffffffff, 0, ""}, /* special case */ + {HNBU_OFDMPO5G, 0xffffffff, 13, "4ofdm5gpo 4ofdm5glpo 4ofdm5ghpo"}, + {HNBU_USBEPNUM, 0xffffffff, 3, "2usbepnum"}, + {HNBU_CCKBW202GPO, 0xffffffff, 7, "2cckbw202gpo 2cckbw20ul2gpo 2cckbw20in802gpo"}, + {HNBU_LEGOFDMBW202GPO, 0xffffffff, 9, "4legofdmbw202gpo 4legofdmbw20ul2gpo"}, + {HNBU_LEGOFDMBW205GPO, 0xffffffff, 25, "4legofdmbw205glpo 4legofdmbw20ul5glpo " + 
"4legofdmbw205gmpo 4legofdmbw20ul5gmpo 4legofdmbw205ghpo 4legofdmbw20ul5ghpo"}, + {HNBU_MCS2GPO, 0xffffffff, 17, "4mcsbw202gpo 4mcsbw20ul2gpo 4mcsbw402gpo 4mcsbw802gpo"}, + {HNBU_MCS5GLPO, 0xffffffff, 13, "4mcsbw205glpo 4mcsbw20ul5glpo 4mcsbw405glpo"}, + {HNBU_MCS5GMPO, 0xffffffff, 13, "4mcsbw205gmpo 4mcsbw20ul5gmpo 4mcsbw405gmpo"}, + {HNBU_MCS5GHPO, 0xffffffff, 13, "4mcsbw205ghpo 4mcsbw20ul5ghpo 4mcsbw405ghpo"}, + {HNBU_MCS32PO, 0xffffffff, 3, "2mcs32po"}, + {HNBU_LEG40DUPPO, 0xffffffff, 3, "2legofdm40duppo"}, + {HNBU_TEMPTHRESH, 0xffffffff, 7, "1tempthresh 0temps_period 0temps_hysteresis " + "1tempoffset 1tempsense_slope 0tempcorrx 0tempsense_option " + "1phycal_tempdelta"}, /* special case */ + {HNBU_MUXENAB, 0xffffffff, 2, "1muxenab"}, + {HNBU_FEM_CFG, 0xfffff800, 5, "0femctrl 0papdcap2g 0tworangetssi2g 0pdgain2g " + "0epagain2g 0tssiposslope2g 0gainctrlsph 0papdcap5g 0tworangetssi5g 0pdgain5g 0epagain5g " + "0tssiposslope5g"}, /* special case */ + {HNBU_ACPA_C0, 0xfffff800, 39, "2subband5gver 2maxp2ga0 2*3pa2ga0 " + "1*4maxp5ga0 2*12pa5ga0"}, + {HNBU_ACPA_C1, 0xfffff800, 37, "2maxp2ga1 2*3pa2ga1 1*4maxp5ga1 2*12pa5ga1"}, + {HNBU_ACPA_C2, 0xfffff800, 37, "2maxp2ga2 2*3pa2ga2 1*4maxp5ga2 2*12pa5ga2"}, + {HNBU_MEAS_PWR, 0xfffff800, 5, "1measpower 1measpower1 1measpower2 2rawtempsense"}, + {HNBU_PDOFF, 0xfffff800, 13, "2pdoffset40ma0 2pdoffset40ma1 2pdoffset40ma2 " + "2pdoffset80ma0 2pdoffset80ma1 2pdoffset80ma2"}, + {HNBU_ACPPR_2GPO, 0xfffff800, 13, "2dot11agofdmhrbw202gpo 2ofdmlrbw202gpo " + "2sb20in40dot11agofdm2gpo 2sb20in80dot11agofdm2gpo 2sb20in40ofdmlrbw202gpo " + "2sb20in80ofdmlrbw202gpo"}, + {HNBU_ACPPR_5GPO, 0xfffff800, 59, "4mcsbw805glpo 4mcsbw1605glpo 4mcsbw805gmpo " + "4mcsbw1605gmpo 4mcsbw805ghpo 4mcsbw1605ghpo 2mcslr5glpo 2mcslr5gmpo 2mcslr5ghpo " + "4mcsbw80p805glpo 4mcsbw80p805gmpo 4mcsbw80p805ghpo 4mcsbw80p805gx1po 2mcslr5gx1po " + "2mcslr5g80p80po 4mcsbw805gx1po 4mcsbw1605gx1po"}, + {HNBU_MCS5Gx1PO, 0xfffff800, 9, "4mcsbw205gx1po 
4mcsbw405gx1po"}, + {HNBU_ACPPR_SBPO, 0xfffff800, 49, "2sb20in40hrpo 2sb20in80and160hr5glpo " + "2sb40and80hr5glpo 2sb20in80and160hr5gmpo 2sb40and80hr5gmpo 2sb20in80and160hr5ghpo " + "2sb40and80hr5ghpo 2sb20in40lrpo 2sb20in80and160lr5glpo 2sb40and80lr5glpo " + "2sb20in80and160lr5gmpo 2sb40and80lr5gmpo 2sb20in80and160lr5ghpo 2sb40and80lr5ghpo " + "4dot11agduphrpo 4dot11agduplrpo 2sb20in40and80hrpo 2sb20in40and80lrpo " + "2sb20in80and160hr5gx1po 2sb20in80and160lr5gx1po 2sb40and80hr5gx1po 2sb40and80lr5gx1po " + }, + {HNBU_ACPPR_SB8080_PO, 0xfffff800, 23, "2sb2040and80in80p80hr5glpo " + "2sb2040and80in80p80lr5glpo 2sb2040and80in80p80hr5gmpo " + "2sb2040and80in80p80lr5gmpo 2sb2040and80in80p80hr5ghpo 2sb2040and80in80p80lr5ghpo " + "2sb2040and80in80p80hr5gx1po 2sb2040and80in80p80lr5gx1po 2sb20in80p80hr5gpo " + "2sb20in80p80lr5gpo 2dot11agduppo"}, + {HNBU_NOISELVL, 0xfffff800, 16, "1noiselvl2ga0 1noiselvl2ga1 1noiselvl2ga2 " + "1*4noiselvl5ga0 1*4noiselvl5ga1 1*4noiselvl5ga2"}, + {HNBU_RXGAIN_ERR, 0xfffff800, 16, "1rxgainerr2ga0 1rxgainerr2ga1 1rxgainerr2ga2 " + "1*4rxgainerr5ga0 1*4rxgainerr5ga1 1*4rxgainerr5ga2"}, + {HNBU_AGBGA, 0xfffff800, 7, "1agbg0 1agbg1 1agbg2 1aga0 1aga1 1aga2"}, + {HNBU_USBDESC_COMPOSITE, 0xffffffff, 3, "2usbdesc_composite"}, + {HNBU_UUID, 0xffffffff, 17, "16uuid"}, + {HNBU_WOWLGPIO, 0xffffffff, 2, "1wowl_gpio"}, + {HNBU_ACRXGAINS_C0, 0xfffff800, 5, "0rxgains5gtrelnabypa0 0rxgains5gtrisoa0 " + "0rxgains5gelnagaina0 0rxgains2gtrelnabypa0 0rxgains2gtrisoa0 0rxgains2gelnagaina0 " + "0rxgains5ghtrelnabypa0 0rxgains5ghtrisoa0 0rxgains5ghelnagaina0 0rxgains5gmtrelnabypa0 " + "0rxgains5gmtrisoa0 0rxgains5gmelnagaina0"}, /* special case */ + {HNBU_ACRXGAINS_C1, 0xfffff800, 5, "0rxgains5gtrelnabypa1 0rxgains5gtrisoa1 " + "0rxgains5gelnagaina1 0rxgains2gtrelnabypa1 0rxgains2gtrisoa1 0rxgains2gelnagaina1 " + "0rxgains5ghtrelnabypa1 0rxgains5ghtrisoa1 0rxgains5ghelnagaina1 0rxgains5gmtrelnabypa1 " + "0rxgains5gmtrisoa1 0rxgains5gmelnagaina1"}, /* special case 
*/ + {HNBU_ACRXGAINS_C2, 0xfffff800, 5, "0rxgains5gtrelnabypa2 0rxgains5gtrisoa2 " + "0rxgains5gelnagaina2 0rxgains2gtrelnabypa2 0rxgains2gtrisoa2 0rxgains2gelnagaina2 " + "0rxgains5ghtrelnabypa2 0rxgains5ghtrisoa2 0rxgains5ghelnagaina2 0rxgains5gmtrelnabypa2 " + "0rxgains5gmtrisoa2 0rxgains5gmelnagaina2"}, /* special case */ + {HNBU_TXDUTY, 0xfffff800, 9, "2tx_duty_cycle_ofdm_40_5g " + "2tx_duty_cycle_thresh_40_5g 2tx_duty_cycle_ofdm_80_5g 2tx_duty_cycle_thresh_80_5g"}, + {HNBU_PDOFF_2G, 0xfffff800, 3, "0pdoffset2g40ma0 0pdoffset2g40ma1 " + "0pdoffset2g40ma2 0pdoffset2g40mvalid"}, + {HNBU_ACPA_CCK, 0xfffff800, 7, "2*3pa2gccka0"}, + {HNBU_ACPA_40, 0xfffff800, 25, "2*12pa5gbw40a0"}, + {HNBU_ACPA_80, 0xfffff800, 25, "2*12pa5gbw80a0"}, + {HNBU_ACPA_4080, 0xfffff800, 49, "2*12pa5gbw4080a0 2*12pa5gbw4080a1"}, + {HNBU_SUBBAND5GVER, 0xfffff800, 3, "2subband5gver"}, + {HNBU_PAPARAMBWVER, 0xfffff800, 2, "1paparambwver"}, + {HNBU_TXBFRPCALS, 0xfffff800, 11, + "2rpcal2g 2rpcal5gb0 2rpcal5gb1 2rpcal5gb2 2rpcal5gb3"}, /* txbf rpcalvars */ + {HNBU_GPIO_PULL_DOWN, 0xffffffff, 5, "4gpdn"}, + {0xFF, 0xffffffff, 0, ""} +}; + +#endif /* _bcmsrom_tbl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmutils.h b/drivers/amlogic/wifi/bcmdhd/include/bcmutils.h similarity index 80% rename from drivers/net/wireless/bcmdhd/include/bcmutils.h rename to drivers/amlogic/wifi/bcmdhd/include/bcmutils.h index a89a569b2f830..7dc7416973439 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmutils.h +++ b/drivers/amlogic/wifi/bcmdhd/include/bcmutils.h @@ -1,26 +1,52 @@ /* * Misc useful os-independent macros and functions. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: bcmutils.h 504037 2014-09-22 19:03:15Z $ + * + * <> + * + * $Id: bcmutils.h 563776 2015-06-15 15:51:15Z $ */ #ifndef _bcmutils_h_ #define _bcmutils_h_ -#define bcm_strcpy_s(dst, noOfElements, src) strcpy((dst), (src)) -#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count)) -#define bcm_strcat_s(dst, noOfElements, src) strcat((dst), (src)) #ifdef __cplusplus extern "C" { #endif -#ifdef PKTQ_LOG -#include -#endif +#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count)) +#define bcm_strncat_s(dst, noOfElements, src, count) strncat((dst), (src), (count)) +#define bcm_snprintf_s snprintf +#define bcm_sprintf_s snprintf + +/* + * #define bcm_strcpy_s(dst, count, src) strncpy((dst), (src), (count)) + * Use bcm_strcpy_s instead as it is a safer option + * bcm_strcat_s: Use bcm_strncat_s as a safer option + * + */ /* ctype replacement */ #define _BCM_U 0x01 /* upper */ @@ -51,6 +77,8 @@ extern const unsigned char bcm_ctype[]; #define CIRCULAR_ARRAY_FULL(rd_idx, wr_idx, max) ((wr_idx + 1)%max == rd_idx) +#define KB(bytes) (((bytes) + 1023) / 1024) + /* Buffer structure for collecting string-formatted data * using bcm_bprintf() API. * Use bcm_binit() to initialize before use @@ -63,6 +91,9 @@ struct bcmstrbuf { unsigned int origsize; /* unmodified orignal buffer size in bytes */ }; +#define BCMSTRBUF_LEN(b) (b->size) +#define BCMSTRBUF_BUF(b) (b->buf) + /* ** driver-only section ** */ #ifdef BCMDRIVER #include @@ -118,6 +149,7 @@ extern int ether_isnulladdr(const void *ea); #define BCM_RXCPL_CLR_VALID_INFO(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_RXCPLVALID) #define BCM_RXCPL_VALID_INFO(a) (((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_RXCPLVALID) ? 
TRUE : FALSE) +#define UP_TABLE_MAX ((IPV4_TOS_DSCP_MASK >> IPV4_TOS_DSCP_SHIFT) + 1) /* 64 max */ struct reorder_rxcpl_id_list { uint16 head; @@ -200,6 +232,7 @@ extern void pktset8021xprio(void *pkt, int prio); #define DSCP_EF 0x2E extern uint pktsetprio(void *pkt, bool update_vtag); +extern uint pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag); extern bool pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp); /* string */ @@ -248,7 +281,7 @@ extern uint getgpiopin(char *vars, char *pin_name, uint def_pin); #define bcmtslog(tstamp, fmt, a1, a2) #define bcmprinttslogs() #define bcmprinttstamp(us) -#define bcmdumptslog(buf, size) +#define bcmdumptslog(b) extern char *bcm_nvram_vars(uint *length); extern int bcm_nvram_cache(void *sih); @@ -288,7 +321,7 @@ extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool #if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); -#endif +#endif #endif /* BCMDRIVER */ /* Base type definitions */ @@ -392,7 +425,8 @@ extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); #define BCME_MICERR -50 /* Integrity/MIC error */ #define BCME_REPLAY -51 /* Replay */ #define BCME_IE_NOTFOUND -52 /* IE not found */ -#define BCME_LAST BCME_IE_NOTFOUND +#define BCME_DATA_NOTFOUND -53 /* Complete data not found in buffer */ +#define BCME_LAST BCME_DATA_NOTFOUND #define BCME_NOTENABLED BCME_DISABLED @@ -451,6 +485,7 @@ extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); "MIC error", \ "Replay", \ "IE not found", \ + "Data not found", \ } #ifndef ABS @@ -654,12 +689,12 @@ typedef struct bcm_bit_desc_ex { #define ETHER_ADDR_STR_LEN 18 /* 18-bytes of Ethernet address buffer length */ static INLINE uint32 /* 32bit word aligned xor-32 */ -bcm_compute_xor32(volatile uint32 *u32, int num_u32) +bcm_compute_xor32(volatile uint32 *u32_val, int 
num_u32) { - int i; + int idx; uint32 xor32 = 0; - for (i = 0; i < num_u32; i++) - xor32 ^= *(u32 + i); + for (idx = 0; idx < num_u32; idx++) + xor32 ^= *(u32_val + idx); return xor32; } @@ -712,6 +747,8 @@ extern void prhex(const char *msg, uchar *buf, uint len); /* IE parsing */ +/* packing is required if struct is passed across the bus */ +#include /* tag_ID/length/value_buffer tuple */ typedef struct bcm_tlv { uint8 id; @@ -720,11 +757,13 @@ typedef struct bcm_tlv { } bcm_tlv_t; /* bcm tlv w/ 16 bit id/len */ -typedef struct bcm_xtlv { +typedef BWL_PRE_PACKED_STRUCT struct bcm_xtlv { uint16 id; uint16 len; uint8 data[1]; -} bcm_xtlv_t; +} BWL_POST_PACKED_STRUCT bcm_xtlv_t; +#include + /* descriptor of xtlv data src or dst */ typedef struct { @@ -733,34 +772,37 @@ typedef struct { void *ptr; /* ptr to memory location */ } xtlv_desc_t; -/* set a var from xtlv buffer */ -typedef int -(bcm_set_var_from_tlv_cbfn_t)(void *ctx, void **tlv_buf, uint16 type, uint16 len); - -struct bcm_tlvbuf { - uint16 size; - uint8 *head; /* point to head of buffer */ - uint8 *buf; /* current position of buffer */ - /* followed by the allocated buffer */ +/* xtlv options */ +#define BCM_XTLV_OPTION_NONE 0x0000 +#define BCM_XTLV_OPTION_ALIGN32 0x0001 + +typedef uint16 bcm_xtlv_opts_t; +struct bcm_xtlvbuf { + bcm_xtlv_opts_t opts; + uint16 size; + uint8 *head; /* point to head of buffer */ + uint8 *buf; /* current position of buffer */ + /* allocated buffer may follow, but not necessarily */ }; +typedef struct bcm_xtlvbuf bcm_xtlvbuf_t; #define BCM_TLV_MAX_DATA_SIZE (255) #define BCM_XTLV_MAX_DATA_SIZE (65535) #define BCM_TLV_HDR_SIZE (OFFSETOF(bcm_tlv_t, data)) #define BCM_XTLV_HDR_SIZE (OFFSETOF(bcm_xtlv_t, data)) +/* LEN only stores the value's length without padding */ #define BCM_XTLV_LEN(elt) ltoh16_ua(&(elt->len)) #define BCM_XTLV_ID(elt) ltoh16_ua(&(elt->id)) -#define BCM_XTLV_SIZE(elt) (BCM_XTLV_HDR_SIZE + BCM_XTLV_LEN(elt)) +/* entire size of the XTLV including header, data, 
and optional padding */ +#define BCM_XTLV_SIZE(elt, opts) bcm_xtlv_size(elt, opts) +#define bcm_valid_xtlv(elt, buflen, opts) (elt && ((int)(buflen) >= (int)BCM_XTLV_SIZE(elt, opts))) /* Check that bcm_tlv_t fits into the given buflen */ #define bcm_valid_tlv(elt, buflen) (\ ((int)(buflen) >= (int)BCM_TLV_HDR_SIZE) && \ ((int)(buflen) >= (int)(BCM_TLV_HDR_SIZE + (elt)->len))) -#define bcm_valid_xtlv(elt, buflen) (\ - ((int)(buflen) >= (int)BCM_XTLV_HDR_SIZE) && \ - ((int)(buflen) >= (int)BCM_XTLV_SIZE(elt))) extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen); extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key); @@ -779,28 +821,66 @@ extern uint8 *bcm_copy_tlv(const void *src, uint8 *dst); extern uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen); /* xtlv */ -extern bcm_xtlv_t *bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen); -extern struct bcm_tlvbuf *bcm_xtlv_buf_alloc(void *osh, uint16 len); -extern void bcm_xtlv_buf_free(void *osh, struct bcm_tlvbuf *tbuf); -extern uint16 bcm_xtlv_buf_len(struct bcm_tlvbuf *tbuf); -extern uint16 bcm_xtlv_buf_rlen(struct bcm_tlvbuf *tbuf); -extern uint8 *bcm_xtlv_buf(struct bcm_tlvbuf *tbuf); -extern uint8 *bcm_xtlv_head(struct bcm_tlvbuf *tbuf); -extern int bcm_xtlv_put_data(struct bcm_tlvbuf *tbuf, uint16 type, const void *data, uint16 dlen); -extern int bcm_xtlv_put_8(struct bcm_tlvbuf *tbuf, uint16 type, const int8 data); -extern int bcm_xtlv_put_16(struct bcm_tlvbuf *tbuf, uint16 type, const int16 data); -extern int bcm_xtlv_put_32(struct bcm_tlvbuf *tbuf, uint16 type, const int32 data); -extern int bcm_unpack_xtlv_entry(void **tlv_buf, uint16 xpct_type, uint16 xpct_len, void *dst); -extern int bcm_skip_xtlv(void **tlv_buf); -extern int bcm_pack_xtlv_entry(void **tlv_buf, uint16 *buflen, uint16 type, uint16 len, void *src); -extern int bcm_unpack_xtlv_buf(void *ctx, - void *tlv_buf, uint16 buflen, bcm_set_var_from_tlv_cbfn_t *cbfn); -extern int -bcm_unpack_xtlv_buf_to_mem(void *tlv_buf, 
int *buflen, xtlv_desc_t *items); -extern int -bcm_pack_xtlv_buf_from_mem(void **tlv_buf, uint16 *buflen, xtlv_desc_t *items); -extern int -bcm_pack_xtlv_entry_from_hex_string(void **tlv_buf, uint16 *buflen, uint16 type, char *hex); + +/* return the next xtlv element, and update buffer len (remaining). Buffer length + * updated includes padding as specified by options + */ +extern bcm_xtlv_t *bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts); + +/* initialize an xtlv buffer. Use options specified for packing/unpacking using + * the buffer. Caller is responsible for allocating both buffers. + */ +extern int bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len, + bcm_xtlv_opts_t opts); + +extern uint16 bcm_xtlv_buf_len(struct bcm_xtlvbuf *tbuf); +extern uint16 bcm_xtlv_buf_rlen(struct bcm_xtlvbuf *tbuf); +extern uint8 *bcm_xtlv_buf(struct bcm_xtlvbuf *tbuf); +extern uint8 *bcm_xtlv_head(struct bcm_xtlvbuf *tbuf); +extern int bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const void *data, uint16 dlen); +extern int bcm_xtlv_put_8(bcm_xtlvbuf_t *tbuf, uint16 type, const int8 data); +extern int bcm_xtlv_put_16(bcm_xtlvbuf_t *tbuf, uint16 type, const int16 data); +extern int bcm_xtlv_put_32(bcm_xtlvbuf_t *tbuf, uint16 type, const int32 data); +extern int bcm_unpack_xtlv_entry(uint8 **buf, uint16 xpct_type, uint16 xpct_len, + void *dst, bcm_xtlv_opts_t opts); +extern int bcm_pack_xtlv_entry(uint8 **buf, uint16 *buflen, uint16 type, uint16 len, + void *src, bcm_xtlv_opts_t opts); +extern int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts); + +/* callback for unpacking xtlv from a buffer into context. 
*/ +typedef int (bcm_xtlv_unpack_cbfn_t)(void *ctx, uint8 *buf, uint16 type, uint16 len); + +/* unpack a tlv buffer using buffer, options, and callback */ +extern int bcm_unpack_xtlv_buf(void *ctx, uint8 *buf, uint16 buflen, + bcm_xtlv_opts_t opts, bcm_xtlv_unpack_cbfn_t *cbfn); + +/* unpack a set of tlvs from the buffer using provided xtlv desc */ +extern int bcm_unpack_xtlv_buf_to_mem(void *buf, int *buflen, xtlv_desc_t *items, + bcm_xtlv_opts_t opts); + +/* pack a set of tlvs into buffer using provided xtlv desc */ +extern int bcm_pack_xtlv_buf_from_mem(void **buf, uint16 *buflen, xtlv_desc_t *items, + bcm_xtlv_opts_t opts); + +/* return data pointer of a given ID from xtlv buffer + * xtlv data length is given to *datalen_out, if the pointer is valid + */ +extern void *bcm_get_data_from_xtlv_buf(uint8 *tlv_buf, uint16 buflen, uint16 id, + uint16 *datalen_out, bcm_xtlv_opts_t opts); + +/* callback to return next tlv id and len to pack, if there is more tlvs to come and + * options e.g. 
alignment + */ +typedef bool (*bcm_pack_xtlv_next_info_cbfn_t)(void *ctx, uint16 *tlv_id, uint16 *tlv_len); + +/* callback to pack the tlv into length validated buffer */ +typedef void (*bcm_pack_xtlv_pack_next_cbfn_t)(void *ctx, + uint16 tlv_id, uint16 tlv_len, uint8* buf); + +/* pack a set of tlvs into buffer using get_next to interate */ +int bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen, + bcm_xtlv_opts_t opts, bcm_pack_xtlv_next_info_cbfn_t get_next, + bcm_pack_xtlv_pack_next_cbfn_t pack_next, int *outlen); /* bcmerror */ extern const char *bcmerrorstr(int bcmerror); @@ -815,12 +895,13 @@ typedef uint32 mbool; /* generic datastruct to help dump routines */ struct fielddesc { const char *nameandfmt; - uint32 offset; - uint32 len; + uint32 offset; + uint32 len; }; extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size); -extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len); +extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, + const uint8 *buf, int len); extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount); extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes); @@ -836,10 +917,54 @@ extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...); /* power conversion */ extern uint16 bcm_qdbm_to_mw(uint8 qdbm); extern uint8 bcm_mw_to_qdbm(uint16 mw); -extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len); +extern uint bcm_mkiovar(const char *name, char *data, uint datalen, char *buf, uint len); unsigned int process_nvram_vars(char *varbuf, unsigned int len); +/* trace any object allocation / free, with / without features (flags) set to the object */ + +#define BCM_OBJDBG_ADD 1 +#define BCM_OBJDBG_REMOVE 2 +#define BCM_OBJDBG_ADD_PKT 3 + +/* object feature: set or clear flags */ +#define BCM_OBJECT_FEATURE_FLAG 1 +#define BCM_OBJECT_FEATURE_PKT_STATE 2 +/* object feature: flag bits */ +#define 
BCM_OBJECT_FEATURE_0 (1 << 0) +#define BCM_OBJECT_FEATURE_1 (1 << 1) +#define BCM_OBJECT_FEATURE_2 (1 << 2) +/* object feature: clear flag bits field set with this flag */ +#define BCM_OBJECT_FEATURE_CLEAR (1 << 31) +#ifdef BCM_OBJECT_TRACE +#define bcm_pkt_validate_chk(obj) do { \ + void * pkttag; \ + bcm_object_trace_chk(obj, 0, 0, \ + __FUNCTION__, __LINE__); \ + if ((pkttag = PKTTAG(obj))) { \ + bcm_object_trace_chk(obj, 1, DHD_PKTTAG_SN(pkttag), \ + __FUNCTION__, __LINE__); \ + } \ +} while (0) +extern void bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line); +extern void bcm_object_trace_upd(void *obj, void *obj_new); +extern void bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn, + const char *caller, int line); +extern void bcm_object_feature_set(void *obj, uint32 type, uint32 value); +extern int bcm_object_feature_get(void *obj, uint32 type, uint32 value); +extern void bcm_object_trace_init(void); +extern void bcm_object_trace_deinit(void); +#else +#define bcm_pkt_validate_chk(obj) +#define bcm_object_trace_opr(a, b, c, d) +#define bcm_object_trace_upd(a, b) +#define bcm_object_trace_chk(a, b, c, d, e) +#define bcm_object_feature_set(a, b, c) +#define bcm_object_feature_get(a, b, c) +#define bcm_object_trace_init() +#define bcm_object_trace_deinit() +#endif /* BCM_OBJECT_TRACE */ + /* calculate a * b + c */ extern void bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c); /* calculate a / b */ @@ -859,20 +984,20 @@ _CSBTBL[256] = }; static INLINE uint32 /* Uses table _CSBTBL for fast counting of 1's in a u32 */ -bcm_cntsetbits(const uint32 u32) +bcm_cntsetbits(const uint32 u32arg) { /* function local scope declaration of const _CSBTBL[] */ - const uint8 * p = (const uint8 *)&u32; + const uint8 * p = (const uint8 *)&u32arg; return (_CSBTBL[p[0]] + _CSBTBL[p[1]] + _CSBTBL[p[2]] + _CSBTBL[p[3]]); } static INLINE int /* C equivalent count of leading 0's in a u32 */ -C_bcm_count_leading_zeros(uint32 
u32) +C_bcm_count_leading_zeros(uint32 u32arg) { int shifts = 0; - while (u32) { - shifts++; u32 >>= 1; + while (u32arg) { + shifts++; u32arg >>= 1; } return (32U - shifts); } @@ -887,29 +1012,38 @@ C_bcm_count_leading_zeros(uint32 u32) */ #if defined(__arm__) - #if defined(__ARM_ARCH_7M__) /* Cortex M3 */ #define __USE_ASM_CLZ__ #endif /* __ARM_ARCH_7M__ */ - #if defined(__ARM_ARCH_7R__) /* Cortex R4 */ #define __USE_ASM_CLZ__ #endif /* __ARM_ARCH_7R__ */ - #endif /* __arm__ */ static INLINE int -bcm_count_leading_zeros(uint32 u32) +bcm_count_leading_zeros(uint32 u32arg) { #if defined(__USE_ASM_CLZ__) int zeros; - __asm__ volatile("clz %0, %1 \n" : "=r" (zeros) : "r" (u32)); + __asm__ volatile("clz %0, %1 \n" : "=r" (zeros) : "r" (u32arg)); return zeros; #else /* C equivalent */ - return C_bcm_count_leading_zeros(u32); -#endif /* C equivalent */ + return C_bcm_count_leading_zeros(u32arg); +#endif /* C equivalent */ } +/* + * Macro to count leading zeroes + * + */ +#if defined(__GNUC__) +#define CLZ(x) __builtin_clzl(x) +#elif defined(__arm__) +#define CLZ(x) __clz(x) +#else +#define CLZ(x) bcm_count_leading_zeros(x) +#endif /* __GNUC__ */ + /* INTERFACE: Multiword bitmap based small id allocator. */ struct bcm_mwbmap; /* forward declaration for use as an opaque mwbmap handle */ @@ -947,6 +1081,7 @@ extern void bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl); /* INTERFACE: Simple unique 16bit Id Allocator using a stack implementation. */ #define ID16_INVALID ((uint16)(~0)) +#define ID16_UNDEFINED (ID16_INVALID) /* * Construct a 16bit id allocator, managing 16bit ids in the range: @@ -970,7 +1105,6 @@ extern uint32 id16_map_failures(void * id16_map_hndl); /* Audit the 16bit id allocator state. */ extern bool id16_map_audit(void * id16_map_hndl); /* End - Simple 16bit Id Allocator. 
*/ - #endif /* BCMDRIVER */ extern void bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b); @@ -1116,7 +1250,7 @@ dll_delete(dll_t *node_p) node_p->prev_p->next_p = node_p->next_p; node_p->next_p->prev_p = node_p->prev_p; } -#endif /* ! defined(_dll_t_) */ +#endif /* ! defined(_dll_t_) */ /* Elements managed in a double linked list */ @@ -1155,4 +1289,16 @@ typedef struct _counter_tbl_t { void counter_printlog(counter_tbl_t *ctr_tbl); #endif /* DEBUG_COUNTER */ +/* Given a number 'n' returns 'm' that is next larger power of 2 after n */ +static INLINE uint32 next_larger_power2(uint32 num) +{ + num--; + num |= (num >> 1); + num |= (num >> 2); + num |= (num >> 4); + num |= (num >> 8); + num |= (num >> 16); + return (num + 1); +} + #endif /* _bcmutils_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/brcm_nl80211.h b/drivers/amlogic/wifi/bcmdhd/include/brcm_nl80211.h new file mode 100644 index 0000000000000..888863117105e --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/brcm_nl80211.h @@ -0,0 +1,68 @@ +/* + * Definitions for nl80211 vendor command/event access to host driver + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: brcm_nl80211.h 556083 2015-05-12 14:03:00Z $ + * + */ + +#ifndef _brcm_nl80211_h_ +#define _brcm_nl80211_h_ + +#define OUI_BRCM 0x001018 + +enum wl_vendor_subcmd { + BRCM_VENDOR_SCMD_UNSPEC, + BRCM_VENDOR_SCMD_PRIV_STR, + BRCM_VENDOR_SCMD_BCM_STR +}; + + +struct bcm_nlmsg_hdr { + uint cmd; /* common ioctl definition */ + int len; /* expected return buffer length */ + uint offset; /* user buffer offset */ + uint set; /* get or set request optional */ + uint magic; /* magic number for verification */ +}; + +enum bcmnl_attrs { + BCM_NLATTR_UNSPEC, + + BCM_NLATTR_LEN, + BCM_NLATTR_DATA, + + __BCM_NLATTR_AFTER_LAST, + BCM_NLATTR_MAX = __BCM_NLATTR_AFTER_LAST - 1 +}; + +struct nl_prv_data { + int err; /* return result */ + void *data; /* ioctl return buffer pointer */ + uint len; /* ioctl return buffer length */ + struct bcm_nlmsg_hdr *nlioc; /* bcm_nlmsg_hdr header pointer */ +}; + +#endif /* _brcm_nl80211_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/dbus.h b/drivers/amlogic/wifi/bcmdhd/include/dbus.h new file mode 100644 index 0000000000000..b066c67a5dad8 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/dbus.h @@ -0,0 +1,591 @@ +/* + * Dongle BUS interface Abstraction layer + * target serial buses like USB, SDIO, SPI, etc. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dbus.h 553311 2015-04-29 10:23:08Z $ + */ + +#ifndef __DBUS_H__ +#define __DBUS_H__ + +#include "typedefs.h" + +#define DBUSTRACE(args) +#define DBUSERR(args) +#define DBUSINFO(args) +#define DBUSDBGLOCK(args) + +enum { + DBUS_OK = 0, + DBUS_ERR = -200, + DBUS_ERR_TIMEOUT, + DBUS_ERR_DISCONNECT, + DBUS_ERR_NODEVICE, + DBUS_ERR_UNSUPPORTED, + DBUS_ERR_PENDING, + DBUS_ERR_NOMEM, + DBUS_ERR_TXFAIL, + DBUS_ERR_TXTIMEOUT, + DBUS_ERR_TXDROP, + DBUS_ERR_RXFAIL, + DBUS_ERR_RXDROP, + DBUS_ERR_TXCTLFAIL, + DBUS_ERR_RXCTLFAIL, + DBUS_ERR_REG_PARAM, + DBUS_STATUS_CANCELLED, + DBUS_ERR_NVRAM, + DBUS_JUMBO_NOMATCH, + DBUS_JUMBO_BAD_FORMAT, + DBUS_NVRAM_NONTXT, + DBUS_ERR_RXZLP +}; + +#define BCM_OTP_SIZE_43236 84 /* number of 16 bit values */ +#define BCM_OTP_SW_RGN_43236 24 /* start offset of SW config region */ +#define BCM_OTP_ADDR_43236 0x18000800 /* address of otp base */ + +#define ERR_CBMASK_TXFAIL 0x00000001 +#define ERR_CBMASK_RXFAIL 0x00000002 +#define ERR_CBMASK_ALL 0xFFFFFFFF + +#define DBUS_CBCTL_WRITE 0 +#define DBUS_CBCTL_READ 1 +#if defined(INTR_EP_ENABLE) +#define DBUS_CBINTR_POLL 2 +#endif /* defined(INTR_EP_ENABLE) */ + +#define DBUS_TX_RETRY_LIMIT 3 /* retries for failed txirb */ +#define DBUS_TX_TIMEOUT_INTERVAL 250 /* timeout for txirb complete, in ms */ + +#define DBUS_BUFFER_SIZE_TX 32000 +#define DBUS_BUFFER_SIZE_RX 24000 + +#define DBUS_BUFFER_SIZE_TX_NOAGG 2048 +#define DBUS_BUFFER_SIZE_RX_NOAGG 2048 + +/** DBUS types */ +enum { + DBUS_USB, + DBUS_SDIO, + DBUS_SPI, + DBUS_UNKNOWN +}; + +enum dbus_state { + DBUS_STATE_DL_PENDING, + DBUS_STATE_DL_DONE, + DBUS_STATE_UP, + DBUS_STATE_DOWN, + DBUS_STATE_PNP_FWDL, + DBUS_STATE_DISCONNECT, + DBUS_STATE_SLEEP, + DBUS_STATE_DL_NEEDED +}; + +enum dbus_pnp_state { + DBUS_PNP_DISCONNECT, + DBUS_PNP_SLEEP, + DBUS_PNP_RESUME +}; + +enum dbus_file { + DBUS_FIRMWARE, + DBUS_NVFILE +}; + +typedef enum _DEVICE_SPEED { + INVALID_SPEED = -1, + LOW_SPEED = 1, /**< USB 1.1: 1.5 Mbps */ + 
FULL_SPEED, /**< USB 1.1: 12 Mbps */ + HIGH_SPEED, /**< USB 2.0: 480 Mbps */ + SUPER_SPEED, /**< USB 3.0: 4.8 Gbps */ +} DEVICE_SPEED; + +typedef struct { + int bustype; + int vid; + int pid; + int devid; + int chiprev; /**< chip revsion number */ + int mtu; + int nchan; /**< Data Channels */ + int has_2nd_bulk_in_ep; +} dbus_attrib_t; + +/* FIX: Account for errors related to DBUS; + * Let upper layer account for packets/bytes + */ +typedef struct { + uint32 rx_errors; + uint32 tx_errors; + uint32 rx_dropped; + uint32 tx_dropped; +} dbus_stats_t; + +/** + * Configurable BUS parameters + */ +enum { + DBUS_CONFIG_ID_RXCTL_DEFERRES = 1, + DBUS_CONFIG_ID_AGGR_LIMIT +}; + +typedef struct { + uint32 config_id; + union { + bool rxctl_deferrespok; + struct { + int maxrxsf; + int maxrxsize; + int maxtxsf; + int maxtxsize; + } aggr_param; + }; +} dbus_config_t; + +/** + * External Download Info + */ +typedef struct dbus_extdl { + uint8 *fw; + int fwlen; + uint8 *vars; + int varslen; +} dbus_extdl_t; + +struct dbus_callbacks; +struct exec_parms; + +typedef void *(*probe_cb_t)(void *arg, const char *desc, uint32 bustype, uint32 hdrlen); +typedef void (*disconnect_cb_t)(void *arg); +typedef void *(*exec_cb_t)(struct exec_parms *args); + +/** Client callbacks registered during dbus_attach() */ +typedef struct dbus_callbacks { + void (*send_complete)(void *cbarg, void *info, int status); + void (*recv_buf)(void *cbarg, uint8 *buf, int len); + void (*recv_pkt)(void *cbarg, void *pkt); + void (*txflowcontrol)(void *cbarg, bool onoff); + void (*errhandler)(void *cbarg, int err); + void (*ctl_complete)(void *cbarg, int type, int status); + void (*state_change)(void *cbarg, int state); + void *(*pktget)(void *cbarg, uint len, bool send); + void (*pktfree)(void *cbarg, void *p, bool send); +} dbus_callbacks_t; + +struct dbus_pub; +struct bcmstrbuf; +struct dbus_irb; +struct dbus_irb_rx; +struct dbus_irb_tx; +struct dbus_intf_callbacks; + +typedef struct { + void* (*attach)(struct 
dbus_pub *pub, void *cbarg, struct dbus_intf_callbacks *cbs); + void (*detach)(struct dbus_pub *pub, void *bus); + + int (*up)(void *bus); + int (*down)(void *bus); + int (*send_irb)(void *bus, struct dbus_irb_tx *txirb); + int (*recv_irb)(void *bus, struct dbus_irb_rx *rxirb); + int (*cancel_irb)(void *bus, struct dbus_irb_tx *txirb); + int (*send_ctl)(void *bus, uint8 *buf, int len); + int (*recv_ctl)(void *bus, uint8 *buf, int len); + int (*get_stats)(void *bus, dbus_stats_t *stats); + int (*get_attrib)(void *bus, dbus_attrib_t *attrib); + + int (*pnp)(void *bus, int evnt); + int (*remove)(void *bus); + int (*resume)(void *bus); + int (*suspend)(void *bus); + int (*stop)(void *bus); + int (*reset)(void *bus); + + /* Access to bus buffers directly */ + void *(*pktget)(void *bus, int len); + void (*pktfree)(void *bus, void *pkt); + + int (*iovar_op)(void *bus, const char *name, void *params, int plen, void *arg, int len, + bool set); + void (*dump)(void *bus, struct bcmstrbuf *strbuf); + int (*set_config)(void *bus, dbus_config_t *config); + int (*get_config)(void *bus, dbus_config_t *config); + + bool (*device_exists)(void *bus); + bool (*dlneeded)(void *bus); + int (*dlstart)(void *bus, uint8 *fw, int len); + int (*dlrun)(void *bus); + bool (*recv_needed)(void *bus); + + void *(*exec_rxlock)(void *bus, exec_cb_t func, struct exec_parms *args); + void *(*exec_txlock)(void *bus, exec_cb_t func, struct exec_parms *args); + + int (*tx_timer_init)(void *bus); + int (*tx_timer_start)(void *bus, uint timeout); + int (*tx_timer_stop)(void *bus); + + int (*sched_dpc)(void *bus); + int (*lock)(void *bus); + int (*unlock)(void *bus); + int (*sched_probe_cb)(void *bus); + + int (*shutdown)(void *bus); + + int (*recv_stop)(void *bus); + int (*recv_resume)(void *bus); + + int (*recv_irb_from_ep)(void *bus, struct dbus_irb_rx *rxirb, uint ep_idx); + + int (*readreg)(void *bus, uint32 regaddr, int datalen, uint32 *value); + + /* Add from the bottom */ +} dbus_intf_t; + +typedef 
struct dbus_pub { + struct osl_info *osh; + dbus_stats_t stats; + dbus_attrib_t attrib; + enum dbus_state busstate; + DEVICE_SPEED device_speed; + int ntxq, nrxq, rxsize; + void *bus; + struct shared_info *sh; + void *dev_info; +} dbus_pub_t; + +#define BUS_INFO(bus, type) (((type *) bus)->pub->bus) + +#define ALIGNED_LOCAL_VARIABLE(var, align) \ + uint8 buffer[SDALIGN+64]; \ + uint8 *var = (uint8 *)(((uintptr)&buffer[0]) & ~(align-1)) + align; + +/* + * Public Bus Function Interface + */ + +/* + * FIX: Is there better way to pass OS/Host handles to DBUS but still + * maintain common interface for all OS?? + * Under NDIS, param1 needs to be MiniportHandle + * For NDIS60, param2 is WdfDevice + * Under Linux, param1 and param2 are NULL; + */ +extern int dbus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg, + void *param1, void *param2); +extern int dbus_deregister(void); + +extern dbus_pub_t *dbus_attach(struct osl_info *osh, int rxsize, int nrxq, int ntxq, + void *cbarg, dbus_callbacks_t *cbs, dbus_extdl_t *extdl, struct shared_info *sh); +extern void dbus_detach(dbus_pub_t *pub); + +extern int dbus_download_firmware(dbus_pub_t *pub); +extern int dbus_up(dbus_pub_t *pub); +extern int dbus_down(dbus_pub_t *pub); +extern int dbus_stop(dbus_pub_t *pub); +extern int dbus_shutdown(dbus_pub_t *pub); +extern void dbus_flowctrl_rx(dbus_pub_t *pub, bool on); + +extern int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf); +extern int dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info); +extern int dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info); +extern int dbus_send_ctl(dbus_pub_t *pub, uint8 *buf, int len); +extern int dbus_recv_ctl(dbus_pub_t *pub, uint8 *buf, int len); +extern int dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx); +extern int dbus_poll_intr(dbus_pub_t *pub); +extern int dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats); +extern int dbus_get_attrib(dbus_pub_t *pub, dbus_attrib_t *attrib); +extern int 
dbus_get_device_speed(dbus_pub_t *pub); +extern int dbus_set_config(dbus_pub_t *pub, dbus_config_t *config); +extern int dbus_get_config(dbus_pub_t *pub, dbus_config_t *config); +extern void * dbus_get_devinfo(dbus_pub_t *pub); + +extern void *dbus_pktget(dbus_pub_t *pub, int len); +extern void dbus_pktfree(dbus_pub_t *pub, void* pkt); + +extern int dbus_set_errmask(dbus_pub_t *pub, uint32 mask); +extern int dbus_pnp_sleep(dbus_pub_t *pub); +extern int dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload); +extern int dbus_pnp_disconnect(dbus_pub_t *pub); + +extern int dbus_iovar_op(dbus_pub_t *pub, const char *name, + void *params, int plen, void *arg, int len, bool set); + +extern void *dhd_dbus_txq(const dbus_pub_t *pub); +extern uint dhd_dbus_hdrlen(const dbus_pub_t *pub); + +/* + * Private Common Bus Interface + */ + +/** IO Request Block (IRB) */ +typedef struct dbus_irb { + struct dbus_irb *next; /**< it's casted from dbus_irb_tx or dbus_irb_rx struct */ +} dbus_irb_t; + +typedef struct dbus_irb_rx { + struct dbus_irb irb; /* Must be first */ + uint8 *buf; + int buf_len; + int actual_len; + void *pkt; + void *info; + void *arg; +} dbus_irb_rx_t; + +typedef struct dbus_irb_tx { + struct dbus_irb irb; /** Must be first */ + uint8 *buf; /** mutually exclusive with struct member 'pkt' */ + int len; /** length of field 'buf' */ + void *pkt; /** mutually exclusive with struct member 'buf' */ + int retry_count; + void *info; + void *arg; + void *send_buf; /**< linear bufffer for LINUX when aggreagtion is enabled */ +} dbus_irb_tx_t; + +/** + * DBUS interface callbacks are different from user callbacks + * so, internally, different info can be passed to upper layer + */ +typedef struct dbus_intf_callbacks { + void (*send_irb_timeout)(void *cbarg, dbus_irb_tx_t *txirb); + void (*send_irb_complete)(void *cbarg, dbus_irb_tx_t *txirb, int status); + void (*recv_irb_complete)(void *cbarg, dbus_irb_rx_t *rxirb, int status); + void (*errhandler)(void *cbarg, int err); + void 
(*ctl_complete)(void *cbarg, int type, int status); + void (*state_change)(void *cbarg, int state); + bool (*isr)(void *cbarg, bool *wantdpc); + bool (*dpc)(void *cbarg, bool bounded); + void (*watchdog)(void *cbarg); + void *(*pktget)(void *cbarg, uint len, bool send); + void (*pktfree)(void *cbarg, void *p, bool send); + struct dbus_irb* (*getirb)(void *cbarg, bool send); + void (*rxerr_indicate)(void *cbarg, bool on); +} dbus_intf_callbacks_t; + +/* + * Porting: To support new bus, port these functions below + */ + +/* + * Bus specific Interface + * Implemented by dbus_usb.c/dbus_sdio.c + */ +extern int dbus_bus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg, + dbus_intf_t **intf, void *param1, void *param2); +extern int dbus_bus_deregister(void); +extern void dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp); + +/* + * Bus-specific and OS-specific Interface + * Implemented by dbus_usb_[linux/ndis].c/dbus_sdio_[linux/ndis].c + */ +extern int dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, + void *prarg, dbus_intf_t **intf, void *param1, void *param2); +extern int dbus_bus_osl_deregister(void); + +/* + * Bus-specific, OS-specific, HW-specific Interface + * Mainly for SDIO Host HW controller + */ +extern int dbus_bus_osl_hw_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, + void *prarg, dbus_intf_t **intf); +extern int dbus_bus_osl_hw_deregister(void); + +extern uint usbdev_bulkin_eps(void); +#if defined(BCM_REQUEST_FW) +extern void *dbus_get_fw_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, int type, + uint16 boardtype, uint16 boardrev); +extern void dbus_release_fw_nvfile(void *firmware); +#endif /* #if defined(BCM_REQUEST_FW) */ + + +#if defined(EHCI_FASTPATH_TX) || defined(EHCI_FASTPATH_RX) + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + /* Backward compatibility */ + typedef unsigned int gfp_t; + + #define dma_pool pci_pool + #define 
dma_pool_create(name, dev, size, align, alloc) \ + pci_pool_create(name, dev, size, align, alloc, GFP_DMA | GFP_ATOMIC) + #define dma_pool_destroy(pool) pci_pool_destroy(pool) + #define dma_pool_alloc(pool, flags, handle) pci_pool_alloc(pool, flags, handle) + #define dma_pool_free(pool, vaddr, addr) pci_pool_free(pool, vaddr, addr) + + #define dma_map_single(dev, addr, size, dir) pci_map_single(dev, addr, size, dir) + #define dma_unmap_single(dev, hnd, size, dir) pci_unmap_single(dev, hnd, size, dir) + #define DMA_FROM_DEVICE PCI_DMA_FROMDEVICE + #define DMA_TO_DEVICE PCI_DMA_TODEVICE +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */ + +/* Availability of these functions varies (when present, they have two arguments) */ +#ifndef hc32_to_cpu + #define hc32_to_cpu(x) le32_to_cpu(x) + #define cpu_to_hc32(x) cpu_to_le32(x) + typedef unsigned int __hc32; +#else + #error Two-argument functions needed +#endif + +/* Private USB opcode base */ +#define EHCI_FASTPATH 0x31 +#define EHCI_SET_EP_BYPASS EHCI_FASTPATH +#define EHCI_SET_BYPASS_CB (EHCI_FASTPATH + 1) +#define EHCI_SET_BYPASS_DEV (EHCI_FASTPATH + 2) +#define EHCI_DUMP_STATE (EHCI_FASTPATH + 3) +#define EHCI_SET_BYPASS_POOL (EHCI_FASTPATH + 4) +#define EHCI_CLR_EP_BYPASS (EHCI_FASTPATH + 5) + +/* + * EHCI QTD structure (hardware and extension) + * NOTE that is does not need to (and does not) match its kernel counterpart + */ +#define EHCI_QTD_NBUFFERS 5 +#define EHCI_QTD_ALIGN 32 +#define EHCI_BULK_PACKET_SIZE 512 +#define EHCI_QTD_XACTERR_MAX 32 + +struct ehci_qtd { + /* Hardware map */ + volatile uint32_t qtd_next; + volatile uint32_t qtd_altnext; + volatile uint32_t qtd_status; +#define EHCI_QTD_GET_BYTES(x) (((x)>>16) & 0x7fff) +#define EHCI_QTD_IOC 0x00008000 +#define EHCI_QTD_GET_CERR(x) (((x)>>10) & 0x3) +#define EHCI_QTD_SET_CERR(x) ((x) << 10) +#define EHCI_QTD_GET_PID(x) (((x)>>8) & 0x3) +#define EHCI_QTD_SET_PID(x) ((x) << 8) +#define EHCI_QTD_ACTIVE 0x80 +#define EHCI_QTD_HALTED 0x40 +#define 
EHCI_QTD_BUFERR 0x20 +#define EHCI_QTD_BABBLE 0x10 +#define EHCI_QTD_XACTERR 0x08 +#define EHCI_QTD_MISSEDMICRO 0x04 + volatile uint32_t qtd_buffer[EHCI_QTD_NBUFFERS]; + volatile uint32_t qtd_buffer_hi[EHCI_QTD_NBUFFERS]; + + /* Implementation extension */ + dma_addr_t qtd_self; /**< own hardware address */ + struct ehci_qtd *obj_next; /**< software link to the next QTD */ + void *rpc; /**< pointer to the rpc buffer */ + size_t length; /**< length of the data in the buffer */ + void *buff; /**< pointer to the reassembly buffer */ + int xacterrs; /**< retry counter for qtd xact error */ +} __attribute__ ((aligned(EHCI_QTD_ALIGN))); + +#define EHCI_NULL __constant_cpu_to_le32(1) /* HW null pointer shall be odd */ + +#define SHORT_READ_Q(token) (EHCI_QTD_GET_BYTES(token) != 0 && EHCI_QTD_GET_PID(token) == 1) + +/** + * Queue Head + * NOTE This structure is slightly different from the one in the kernel; but needs to stay + * compatible. + */ +struct ehci_qh { + /* Hardware map */ + volatile uint32_t qh_link; + volatile uint32_t qh_endp; + volatile uint32_t qh_endphub; + volatile uint32_t qh_curqtd; + + /* QTD overlay */ + volatile uint32_t ow_next; + volatile uint32_t ow_altnext; + volatile uint32_t ow_status; + volatile uint32_t ow_buffer [EHCI_QTD_NBUFFERS]; + volatile uint32_t ow_buffer_hi [EHCI_QTD_NBUFFERS]; + + /* Extension (should match the kernel layout) */ + dma_addr_t unused0; + void *unused1; + struct list_head unused2; + struct ehci_qtd *dummy; + struct ehci_qh *unused3; + + struct ehci_hcd *unused4; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + struct kref unused5; + unsigned unused6; + + uint8_t unused7; + + /* periodic schedule info */ + uint8_t unused8; + uint8_t unused9; + uint8_t unused10; + uint16_t unused11; + uint16_t unused12; + uint16_t unused13; + struct usb_device *unused14; +#else + unsigned unused5; + + u8 unused6; + + /* periodic schedule info */ + u8 unused7; + u8 unused8; + u8 unused9; + unsigned short unused10; + unsigned short 
unused11; +#define NO_FRAME ((unsigned short)~0) +#ifdef EHCI_QUIRK_FIX + struct usb_device *unused12; +#endif /* EHCI_QUIRK_FIX */ +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ + struct ehci_qtd *first_qtd; + /* Link to the first QTD; this is an optimized equivalent of the qtd_list field */ + /* NOTE that ehci_qh in ehci.h shall reserve this word */ +} __attribute__ ((aligned(EHCI_QTD_ALIGN))); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +/** The corresponding structure in the kernel is used to get the QH */ +struct hcd_dev { /* usb_device.hcpriv points to this */ + struct list_head unused0; + struct list_head unused1; + + /* array of QH pointers */ + void *ep[32]; +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ + +int optimize_qtd_fill_with_rpc(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *rpc, + int token, int len); +int optimize_qtd_fill_with_data(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *data, + int token, int len); +int optimize_submit_async(struct ehci_qtd *qtd, int epn); +void inline optimize_ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma); +struct ehci_qtd *optimize_ehci_qtd_alloc(gfp_t flags); +void optimize_ehci_qtd_free(struct ehci_qtd *qtd); +void optimize_submit_rx_request(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd_in, void *buf); +#endif /* EHCI_FASTPATH_TX || EHCI_FASTPATH_RX */ + +void dbus_flowctrl_tx(void *dbi, bool on); +#endif /* __DBUS_H__ */ diff --git a/drivers/net/wireless/bcmdhd/include/devctrl_if/wlioctl_defs.h b/drivers/amlogic/wifi/bcmdhd/include/devctrl_if/wlioctl_defs.h similarity index 93% rename from drivers/net/wireless/bcmdhd/include/devctrl_if/wlioctl_defs.h rename to drivers/amlogic/wifi/bcmdhd/include/devctrl_if/wlioctl_defs.h index f5bce2950eef5..2fbe8e0a6b505 100644 --- a/drivers/net/wireless/bcmdhd/include/devctrl_if/wlioctl_defs.h +++ b/drivers/amlogic/wifi/bcmdhd/include/devctrl_if/wlioctl_defs.h @@ -4,7 +4,28 @@ * * Definitions 
subject to change without notice. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> * * $Id: wlioctl_defs.h 403826 2013-05-22 16:40:55Z $ */ @@ -14,7 +35,6 @@ #define wlioctl_defs_h -#ifndef LINUX_POSTMOGRIFY_REMOVAL @@ -50,6 +70,9 @@ #define WL_RSPEC_BW_40MHZ 0x00020000 #define WL_RSPEC_BW_80MHZ 0x00030000 #define WL_RSPEC_BW_160MHZ 0x00040000 +#define WL_RSPEC_BW_10MHZ 0x00050000 +#define WL_RSPEC_BW_5MHZ 0x00060000 +#define WL_RSPEC_BW_2P5MHZ 0x00070000 /* Legacy defines for the nrate iovar */ #define OLD_NRATE_MCS_INUSE 0x00000080 /* MSC in use,indicates b0-6 holds an mcs */ @@ -68,10 +91,10 @@ #define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */ -#define GET_PRO_PRIETARY_11N_MCS_NSS(mcs) (1 + ((mcs) - 85) / 8) +#define WLC_11N_N_PROP_MCS 6 +#define WLC_11N_FIRST_PROP_MCS 87 +#define WLC_11N_LAST_PROP_MCS 102 -#define GET_11N_MCS_NSS(mcs) ((mcs) < 32 ? (1 + ((mcs) / 8)) \ - : ((mcs) == 32 ? 1 : GET_PRO_PRIETARY_11N_MCS_NSS(mcs))) #define MAX_CCA_CHANNELS 38 /* Max number of 20 Mhz wide channels */ #define MAX_CCA_SECS 60 /* CCA keeps this many seconds history */ @@ -166,17 +189,19 @@ #define WLC_TXFILTER_OVERRIDE_DISABLED 0 #define WLC_TXFILTER_OVERRIDE_ENABLED 1 -#define WL_IOCTL_ACTION_GET 0x0 -#define WL_IOCTL_ACTION_SET 0x1 +#define WL_IOCTL_ACTION_GET 0x0 +#define WL_IOCTL_ACTION_SET 0x1 #define WL_IOCTL_ACTION_OVL_IDX_MASK 0x1e -#define WL_IOCTL_ACTION_OVL_RSV 0x20 -#define WL_IOCTL_ACTION_OVL 0x40 -#define WL_IOCTL_ACTION_MASK 0x7e -#define WL_IOCTL_ACTION_OVL_SHIFT 1 +#define WL_IOCTL_ACTION_OVL_RSV 0x20 +#define WL_IOCTL_ACTION_OVL 0x40 +#define WL_IOCTL_ACTION_MASK 0x7e +#define WL_IOCTL_ACTION_OVL_SHIFT 1 -#define WL_BSSTYPE_INFRA 1 +/* For WLC_SET_INFRA ioctl & infra_configuration iovar SET/GET operations */ #define WL_BSSTYPE_INDEP 0 -#define WL_BSSTYPE_ANY 2 +#define WL_BSSTYPE_INFRA 1 +#define WL_BSSTYPE_ANY 2 /* deprecated */ +#define WL_BSSTYPE_MESH 3 /* Bitmask for scan_type */ #define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */ @@ -185,6 +210,10 @@ #define 
WL_SCANFLAGS_OFFCHAN 0x08 /* allow scanning/reporting off-channel APs */ #define WL_SCANFLAGS_HOTSPOT 0x10 /* automatic ANQP to hotspot APs */ #define WL_SCANFLAGS_SWTCHAN 0x20 /* Force channel switch for differerent bandwidth */ +#define WL_SCANFLAGS_FORCE_PARALLEL 0x40 /* Force parallel scan even when actcb_fn_t is on. + * by default parallel scan will be disabled if actcb_fn_t + * is provided. + */ /* wl_iscan_results status values */ #define WL_SCAN_RESULTS_SUCCESS 0 @@ -282,16 +311,6 @@ /* current gain setting is maintained */ #define WL_ATTEN_PCL_OFF 2 /* turn off PCL. */ -#define PLC_CMD_FAILOVER 1 -#define PLC_CMD_MAC_COST 2 -#define PLC_CMD_LINK_COST 3 -#define PLC_CMD_NODE_LIST 4 - -#define NODE_TYPE_UNKNOWN 0 /* Unknown link */ -#define NODE_TYPE_WIFI_ONLY 1 /* Pure Wireless STA node */ -#define NODE_TYPE_PLC_ONLY 2 /* Pure PLC only node */ -#define NODE_TYPE_WIFI_PLC 3 /* WiFi PLC capable node */ - /* defines used by poweridx iovar - it controls power in a-band */ /* current gain setting is maintained */ #define WL_PWRIDX_PCL_OFF -2 /* turn off PCL. 
*/ @@ -315,8 +334,6 @@ /* check this magic number */ #define WLC_IOCTL_MAGIC 0x14e46c77 -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ - /* bss_info_cap_t flags */ #define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */ #define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */ @@ -347,14 +364,14 @@ #define CRYPTO_ALGO_AES_CCM 4 #define CRYPTO_ALGO_AES_OCB_MSDU 5 #define CRYPTO_ALGO_AES_OCB_MPDU 6 -#if !defined(BCMCCX) && !defined(BCMEXTCCX) +#if !defined(BCMEXTCCX) #define CRYPTO_ALGO_NALG 7 #else #define CRYPTO_ALGO_CKIP 7 #define CRYPTO_ALGO_CKIP_MMH 8 #define CRYPTO_ALGO_WEP_MMH 9 #define CRYPTO_ALGO_NALG 10 -#endif /* !BCMCCX && !BCMEXTCCX */ +#endif #define CRYPTO_ALGO_SMS4 11 #define CRYPTO_ALGO_PMK 12 /* for 802.1x supp to set PMK before 4-way */ @@ -378,13 +395,13 @@ #define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */ #define WL_PRIMARY_KEY (1 << 1) /* Indicates this key is the primary (ie tx) key */ -#if defined(BCMCCX) || defined(BCMEXTCCX) +#if defined(BCMEXTCCX) #define WL_CKIP_KP (1 << 4) /* CMIC */ #define WL_CKIP_MMH (1 << 5) /* CKIP */ #else #define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */ #define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */ -#endif /* BCMCCX || BCMEXTCCX */ +#endif #define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for a IBSS PEER */ /* wireless security bitvec */ @@ -392,46 +409,19 @@ #define TKIP_ENABLED 0x0002 #define AES_ENABLED 0x0004 #define WSEC_SWFLAG 0x0008 -#ifdef BCMCCX -#define CKIP_KP_ENABLED 0x0010 -#define CKIP_MIC_ENABLED 0x0020 -#endif /* BCMCCX */ #define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */ -#ifdef BCMWAPI_WPI -#define SMS4_ENABLED 0x0100 -#endif /* BCMWAPI_WPI */ /* wsec macros for operating on the above definitions */ #define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED) #define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED) #define WSEC_AES_ENABLED(wsec) ((wsec) & AES_ENABLED) -#ifdef 
BCMCCX -#define WSEC_CKIP_KP_ENABLED(wsec) ((wsec) & CKIP_KP_ENABLED) -#define WSEC_CKIP_MIC_ENABLED(wsec) ((wsec) & CKIP_MIC_ENABLED) -#define WSEC_CKIP_ENABLED(wsec) ((wsec) & (CKIP_KP_ENABLED|CKIP_MIC_ENABLED)) - -#ifdef BCMWAPI_WPI -#define WSEC_ENABLED(wsec) \ - ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | \ - CKIP_MIC_ENABLED | SMS4_ENABLED)) -#else /* BCMWAPI_WPI */ -#define WSEC_ENABLED(wsec) \ - ((wsec) & \ - (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | CKIP_MIC_ENABLED)) -#endif /* BCMWAPI_WPI */ -#else /* defined BCMCCX */ -#ifdef BCMWAPI_WPI -#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED)) -#else /* BCMWAPI_WPI */ #define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED)) -#endif /* BCMWAPI_WPI */ -#endif /* BCMCCX */ #define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED) -#ifdef BCMWAPI_WAI -#define WSEC_SMS4_ENABLED(wsec) ((wsec) & SMS4_ENABLED) -#endif /* BCMWAPI_WAI */ +/* Following macros are not used any more. 
Just kept here to + * avoid build issue in BISON/CARIBOU branch + */ #define MFP_CAPABLE 0x0200 #define MFP_REQUIRED 0x0400 #define MFP_SHA256 0x0800 /* a special configuration for STA for WIFI test tool */ @@ -441,39 +431,34 @@ #define WPA_AUTH_NONE 0x0001 /* none (IBSS) */ #define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */ #define WPA_AUTH_PSK 0x0004 /* Pre-shared key */ -#if defined(BCMCCX) || defined(BCMEXTCCX) +#if defined(BCMEXTCCX) #define WPA_AUTH_CCKM 0x0008 /* CCKM */ #define WPA2_AUTH_CCKM 0x0010 /* CCKM2 */ -#endif /* BCMCCX || BCMEXTCCX */ +#endif /* #define WPA_AUTH_8021X 0x0020 */ /* 802.1x, reserved */ #define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */ #define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */ #define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */ #define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */ -#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI) -#define WPA_AUTH_WAPI 0x0400 -#define WAPI_AUTH_NONE WPA_AUTH_NONE /* none (IBSS) */ -#define WAPI_AUTH_UNSPECIFIED 0x0400 /* over AS */ -#define WAPI_AUTH_PSK 0x0800 /* Pre-shared key */ -#endif /* BCMWAPI_WAI || BCMWAPI_WPI */ -#define WPA2_AUTH_MFP 0x1000 /* MFP (11w) in contrast to CCX */ -#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */ -#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */ +#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */ +#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */ +#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */ +#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */ +/* WPA2_AUTH_SHA256 not used anymore. 
Just kept here to avoid build issue in DINGO */ +#define WPA2_AUTH_SHA256 0x8000 #define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */ /* pmkid */ #define MAXPMKID 16 -#ifdef SROM12 -#define WLC_IOCTL_MAXLEN 10000 /* max length ioctl buffer required */ -#else +/* SROM12 changes */ #define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */ -#endif /* SROM12 */ + #define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */ #define WLC_IOCTL_MEDLEN 1536 /* "med" length ioctl buffer required */ -#if defined(LCNCONF) || defined(LCN40CONF) -#define WLC_SAMPLECOLLECT_MAXLEN 1024 /* Max Sample Collect buffer */ +#if defined(LCNCONF) || defined(LCN40CONF) || defined(LCN20CONF) +#define WLC_SAMPLECOLLECT_MAXLEN 8192 /* Max Sample Collect buffer */ #else #define WLC_SAMPLECOLLECT_MAXLEN 10240 /* Max Sample Collect buffer for two cores */ #endif @@ -625,7 +610,6 @@ #define WLC_SET_LAZYWDS 139 #define WLC_GET_BANDLIST 140 -#ifndef LINUX_POSTMOGRIFY_REMOVAL #define WLC_GET_BAND 141 #define WLC_SET_BAND 142 #define WLC_SCB_DEAUTHENTICATE 143 @@ -665,9 +649,7 @@ /* #define WLC_DUMP_PHYREGS 177 */ /* no longer supported */ #define WLC_GET_PROTECTION_CONTROL 178 #define WLC_SET_PROTECTION_CONTROL 179 -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ #define WLC_GET_PHYLIST 180 -#ifndef LINUX_POSTMOGRIFY_REMOVAL #define WLC_ENCRYPT_STRENGTH 181 /* ndis only */ #define WLC_DECRYPT_STATUS 182 /* ndis only */ #define WLC_GET_KEY_SEQ 183 @@ -688,9 +670,7 @@ /* #define WLC_GET_GMODE_PROTECTION_CTS 198 */ /* no longer supported */ /* #define WLC_SET_GMODE_PROTECTION_CTS 199 */ /* no longer supported */ #define WLC_SET_WSEC_TEST 200 -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ #define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201 -#ifndef LINUX_POSTMOGRIFY_REMOVAL #define WLC_TKIP_COUNTERMEASURES 202 #define WLC_GET_PIOMODE 203 #define WLC_SET_PIOMODE 204 @@ -706,7 +686,6 @@ #define WLC_START_CHANNEL_QA 214 #define WLC_GET_CHANNEL_SEL 215 #define WLC_START_CHANNEL_SEL 216 -#endif 
/* LINUX_POSTMOGRIFY_REMOVAL */ #define WLC_GET_VALID_CHANNELS 217 #define WLC_GET_FAKEFRAG 218 #define WLC_SET_FAKEFRAG 219 @@ -728,7 +707,6 @@ #define WLC_GET_KEY_PRIMARY 235 #define WLC_SET_KEY_PRIMARY 236 -#ifndef LINUX_POSTMOGRIFY_REMOVAL /* #define WLC_DUMP_RADIOREGS 237 */ /* no longer supported */ #define WLC_GET_ACI_ARGS 238 @@ -755,17 +733,13 @@ #define WLC_LEGACY_LINK_BEHAVIOR 259 #define WLC_GET_CHANNELS_IN_COUNTRY 260 #define WLC_GET_COUNTRY_LIST 261 -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ #define WLC_GET_VAR 262 /* get value of named variable */ #define WLC_SET_VAR 263 /* set named variable to value */ -#ifndef LINUX_POSTMOGRIFY_REMOVAL #define WLC_NVRAM_GET 264 /* deprecated */ #define WLC_NVRAM_SET 265 #define WLC_NVRAM_DUMP 266 #define WLC_REBOOT 267 -#endif /* !LINUX_POSTMOGRIFY_REMOVAL */ #define WLC_SET_WSEC_PMK 268 -#ifndef LINUX_POSTMOGRIFY_REMOVAL #define WLC_GET_AUTH_MODE 269 #define WLC_SET_AUTH_MODE 270 #define WLC_GET_WAKEENTRY 271 @@ -936,8 +910,10 @@ #define WL_CHAN_FREQ_RANGE_5G_BAND1 2 #define WL_CHAN_FREQ_RANGE_5G_BAND2 3 #define WL_CHAN_FREQ_RANGE_5G_BAND3 4 +#define WL_CHAN_FREQ_RANGE_5G_4BAND 5 -#ifdef SROM12 + +/* SROM12 */ #define WL_CHAN_FREQ_RANGE_5G_BAND4 5 #define WL_CHAN_FREQ_RANGE_2G_40 6 #define WL_CHAN_FREQ_RANGE_5G_BAND0_40 7 @@ -951,14 +927,10 @@ #define WL_CHAN_FREQ_RANGE_5G_BAND3_80 15 #define WL_CHAN_FREQ_RANGE_5G_BAND4_80 16 -#define WL_CHAN_FREQ_RANGE_5G_4BAND 17 #define WL_CHAN_FREQ_RANGE_5G_5BAND 18 #define WL_CHAN_FREQ_RANGE_5G_5BAND_40 19 #define WL_CHAN_FREQ_RANGE_5G_5BAND_80 20 -#else -#define WL_CHAN_FREQ_RANGE_5G_4BAND 5 -#endif /* SROM12 */ -/* MAC list modes */ + #define WLC_MACMODE_DISABLED 0 /* MAC list disabled */ #define WLC_MACMODE_DENY 1 /* Deny specified (i.e. allow unspecified) */ #define WLC_MACMODE_ALLOW 2 /* Allow specified (i.e. 
deny unspecified) */ @@ -1034,6 +1006,9 @@ #define WLC_BW_40MHZ_BIT (1<<1) #define WLC_BW_80MHZ_BIT (1<<2) #define WLC_BW_160MHZ_BIT (1<<3) +#define WLC_BW_10MHZ_BIT (1<<4) +#define WLC_BW_5MHZ_BIT (1<<5) +#define WLC_BW_2P5MHZ_BIT (1<<6) /* Bandwidth capabilities */ #define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT) @@ -1041,12 +1016,18 @@ #define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) #define WLC_BW_CAP_160MHZ (WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \ WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_2P5MHZ (WLC_BW_2P5MHZ_BIT) +#define WLC_BW_CAP_5MHZ (WLC_BW_5MHZ_BIT) +#define WLC_BW_CAP_10MHZ (WLC_BW_10MHZ_BIT) #define WLC_BW_CAP_UNRESTRICTED 0xFF #define WL_BW_CAP_20MHZ(bw_cap) (((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE) #define WL_BW_CAP_40MHZ(bw_cap) (((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE) #define WL_BW_CAP_80MHZ(bw_cap) (((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE) #define WL_BW_CAP_160MHZ(bw_cap)(((bw_cap) & WLC_BW_160MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_2P5MHZ(bw_cap)(((bw_cap) & WLC_BW_2P5MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_5MHZ(bw_cap) (((bw_cap) & WLC_BW_5MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_10MHZ(bw_cap) (((bw_cap) & WLC_BW_10MHZ_BIT) ? 
TRUE : FALSE) /* values to force tx/rx chain */ #define WLC_N_TXRX_CHAIN0 0 @@ -1102,6 +1083,7 @@ #define WL_OTA_ARG_PARSE_BLK_SIZE 1200 #define WL_OTA_TEST_MAX_NUM_RATE 30 #define WL_OTA_TEST_MAX_NUM_SEQ 100 +#define WL_OTA_TEST_MAX_NUM_RSSI 85 #define WL_THRESHOLD_LO_BAND 70 /* range from 5250MHz - 5350MHz */ @@ -1111,9 +1093,13 @@ #define WL_RADAR_SIMULATED 2 /* force radar detector to declare * detection once */ +#define WL_RADAR_SIMULATED_SC 3 /* force radar detector to declare + * detection once on scan core + * if available and active + */ #define WL_RSSI_ANT_VERSION 1 /* current version of wl_rssi_ant_t */ #define WL_ANT_RX_MAX 2 /* max 2 receive antennas */ -#define WL_ANT_HT_RX_MAX 3 /* max 3 receive antennas/cores */ +#define WL_ANT_HT_RX_MAX 4 /* max 4 receive antennas/cores */ #define WL_ANT_IDX_1 0 /* antenna index 1 */ #define WL_ANT_IDX_2 1 /* antenna index 2 */ @@ -1146,6 +1132,9 @@ #define WL_BW_80MHZ 2 #define WL_BW_160MHZ 3 #define WL_BW_8080MHZ 4 +#define WL_BW_2P5MHZ 5 +#define WL_BW_5MHZ 6 +#define WL_BW_10MHZ 7 /* tx_power_t.flags bits */ #define WL_TX_POWER_F_ENABLED 1 @@ -1155,6 +1144,7 @@ #define WL_TX_POWER_F_HT 0x10 #define WL_TX_POWER_F_VHT 0x20 #define WL_TX_POWER_F_OPENLOOP 0x40 +#define WL_TX_POWER_F_PROP11NRATES 0x80 /* Message levels */ #define WL_ERROR_VAL 0x00000001 @@ -1168,22 +1158,26 @@ #define WL_ASSOC_VAL 0x00000100 #define WL_PRUSR_VAL 0x00000200 #define WL_PS_VAL 0x00000400 -#define WL_TXPWR_VAL 0x00000800 /* retired in TOT on 6/10/2009 */ +#define WL_TXPWR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ #define WL_MODE_SWITCH_VAL 0x00000800 /* Using retired TXPWR val */ #define WL_PORT_VAL 0x00001000 #define WL_DUAL_VAL 0x00002000 #define WL_WSEC_VAL 0x00004000 #define WL_WSEC_DUMP_VAL 0x00008000 #define WL_LOG_VAL 0x00010000 -#define WL_NRSSI_VAL 0x00020000 /* retired in TOT on 6/10/2009 */ -#define WL_LOFT_VAL 0x00040000 /* retired in TOT on 6/10/2009 */ +#define WL_NRSSI_VAL 0x00000000 /* retired in TOT on 6/10/2009 
*/ +#define WL_BCNTRIM_VAL 0x00020000 /* Using retired NRSSI VAL */ +#define WL_LOFT_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_PFN_VAL 0x00040000 /* Using retired LOFT_VAL */ #define WL_REGULATORY_VAL 0x00080000 #define WL_TAF_VAL 0x00100000 -#define WL_RADAR_VAL 0x00200000 /* retired in TOT on 6/10/2009 */ +#define WL_RADAR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_WDI_VAL 0x00200000 /* Using retired WL_RADAR_VAL VAL */ #define WL_MPC_VAL 0x00400000 #define WL_APSTA_VAL 0x00800000 #define WL_DFS_VAL 0x01000000 -#define WL_BA_VAL 0x02000000 /* retired in TOT on 6/14/2010 */ +#define WL_BA_VAL 0x00000000 /* retired in TOT on 6/14/2010 */ +#define WL_MUMIMO_VAL 0x02000000 /* Using retired WL_BA_VAL */ #define WL_ACI_VAL 0x04000000 #define WL_PRMAC_VAL 0x04000000 #define WL_MBSS_VAL 0x04000000 @@ -1196,6 +1190,8 @@ * wl_msg_level2 in wl_dbg.h */ #define WL_DPT_VAL 0x00000001 +/* re-using WL_DPT_VAL */ +#define WL_MESH_VAL 0x00000001 #define WL_SCAN_VAL 0x00000002 #define WL_WOWL_VAL 0x00000004 #define WL_COEX_VAL 0x00000008 @@ -1218,6 +1214,7 @@ #define WL_TXBF_VAL 0x00100000 #define WL_P2PO_VAL 0x00200000 #define WL_TBTT_VAL 0x00400000 +#define WL_FBT_VAL 0x00800000 #define WL_MQ_VAL 0x01000000 /* This level is currently used in Phoenix2 only */ @@ -1227,6 +1224,8 @@ #define WL_PWRSEL_VAL 0x10000000 #define WL_NET_DETECT_VAL 0x20000000 #define WL_PCIE_VAL 0x40000000 +#define WL_PMDUR_VAL 0x80000000 + /* use top-bit for WL_TIME_STAMP_VAL because this is a modifier * rather than a message-type of its own @@ -1266,7 +1265,8 @@ #define WL_LED_NUMBEHAVIOR 25 /* led behavior numeric value format */ -#define WL_LED_BEH_MASK 0x7f /* behavior mask */ +#define WL_LED_BEH_MASK 0x3f /* behavior mask */ +#define WL_LED_PMU_OVERRIDE 0x40 /* need to set PMU Override bit for the GPIO */ #define WL_LED_AL_MASK 0x80 /* activelow (polarity) bit */ /* number of bytes needed to define a proper bit mask for MAC event reporting */ @@ -1352,7 +1352,6 @@ 
#define WL_NUMCHANSPECS 110 #endif - /* WDS link local endpoint WPA role */ #define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */ #define WL_WDS_WPA_ROLE_SUP 1 /* supplicant */ @@ -1413,6 +1412,7 @@ #define WL_PKTENG_PER_MASK 0xff #define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */ +#define WL_PKTENG_SYNCHRONOUS_UNBLK 0x200 /* synchronous unblock flag */ #define WL_PKTENG_MAXPKTSZ 16384 /* max pktsz limit for pkteng */ @@ -1433,6 +1433,10 @@ #define WL_WOWL_M1 (1 << 6) /* Wakeup after PTK refresh */ #define WL_WOWL_EAPID (1 << 7) /* Wakeup after receipt of EAP-Identity Req */ #define WL_WOWL_PME_GPIO (1 << 8) /* Wakeind via PME(0) or GPIO(1) */ +#define WL_WOWL_ULP_BAILOUT (1 << 8) /* wakeind via unknown pkt by basic ULP-offloads - + * WL_WOWL_ULP_BAILOUT - same as WL_WOWL_PME_GPIO used only for DONGLE BUILDS and + * not WLC_HIGH_ONLY case + */ #define WL_WOWL_NEEDTKIP1 (1 << 9) /* need tkip phase 1 key to be updated by the driver */ #define WL_WOWL_GTK_FAILURE (1 << 10) /* enable wakeup if GTK fails */ #define WL_WOWL_EXTMAGPAT (1 << 11) /* support extended magic packets */ @@ -1729,6 +1733,19 @@ #define WL_WNM_NOTIF 0x00000100 #define WL_WNM_MAX 0x00000200 +#ifdef WLWNM_BRCM +#define BRCM_WNM_FEATURE_SET\ + (WL_WNM_PROXYARP | \ + WL_WNM_SLEEP | \ + WL_WNM_FMS | \ + WL_WNM_TFS | \ + WL_WNM_TIMBC | \ + WL_WNM_BSSTRANS | \ + WL_WNM_DMS | \ + WL_WNM_NOTIF | \ + 0) +#endif /* WLWNM_BRCM */ + #ifndef ETHER_MAX_DATA #define ETHER_MAX_DATA 1500 #endif /* ETHER_MAX_DATA */ @@ -1782,13 +1799,6 @@ #define TSPEC_UNKNOWN 3 /* TSPEC unknown */ #define TSPEC_STATUS_MASK 7 /* TSPEC status mask */ -#ifdef BCMCCX -/* "wlan_reason" iovar interface */ -#define WL_WLAN_ASSOC_REASON_NORMAL_NETWORK 0 /* normal WLAN network setup */ -#define WL_WLAN_ASSOC_REASON_ROAM_FROM_CELLULAR_NETWORK 1 /* roam from Cellular network */ -#define WL_WLAN_ASSOC_REASON_ROAM_FROM_LAN 2 /* roam from LAN */ -#define WL_WLAN_ASSOC_REASON_MAX 2 /* largest value allowed */ -#endif /* BCMCCX */ /* Software 
feature flag defines used by wlfeatureflag */ #ifdef WLAFTERBURNER @@ -1919,6 +1929,7 @@ #define WAKE_EVENT_AP_ASSOCIATION_LOST_BIT 2 #define WAKE_EVENT_GTK_HANDSHAKE_ERROR_BIT 4 #define WAKE_EVENT_4WAY_HANDSHAKE_REQUEST_BIT 8 +#define WAKE_EVENT_NET_PACKET_BIT 0x10 #define MAX_NUM_WOL_PATTERN 22 /* LOGO requirements min 22 */ @@ -2024,13 +2035,12 @@ #define NET_DETECT_MAX_CHANNELS 50 #endif /* NET_DETECT */ -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ - /* Bit masks for radio disabled status - returned by WL_GET_RADIO */ #define WL_RADIO_SW_DISABLE (1<<0) #define WL_RADIO_HW_DISABLE (1<<1) #define WL_RADIO_MPC_DISABLE (1<<2) #define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */ +#define WL_RADIO_PERCORE_DISABLE (1<<4) /* Radio diable per core for DVT */ #define WL_SPURAVOID_OFF 0 #define WL_SPURAVOID_ON1 1 @@ -2063,6 +2073,7 @@ #define WLC_PHY_TYPE_LCN 8 #define WLC_PHY_TYPE_LCN40 10 #define WLC_PHY_TYPE_AC 11 +#define WLC_PHY_TYPE_LCN20 12 #define WLC_PHY_TYPE_NULL 0xf /* Values for PM */ diff --git a/drivers/net/wireless/bcmdhd/include/dhdioctl.h b/drivers/amlogic/wifi/bcmdhd/include/dhdioctl.h similarity index 72% rename from drivers/net/wireless/bcmdhd/include/dhdioctl.h rename to drivers/amlogic/wifi/bcmdhd/include/dhdioctl.h index 31000e3269c7b..342d39c076a3a 100644 --- a/drivers/net/wireless/bcmdhd/include/dhdioctl.h +++ b/drivers/amlogic/wifi/bcmdhd/include/dhdioctl.h @@ -5,9 +5,30 @@ * * Definitions subject to change without notice. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhdioctl.h 438755 2013-11-22 23:20:40Z $ + * + * <> + * + * $Id: dhdioctl.h 585723 2015-09-11 06:26:37Z $ */ #ifndef _dhdioctl_h_ @@ -68,15 +89,15 @@ enum { #define DHD_GLOM_VAL 0x0400 #define DHD_EVENT_VAL 0x0800 #define DHD_BTA_VAL 0x1000 -#if 0 && (NDISVER >= 0x0630) && 1 -#define DHD_SCAN_VAL 0x2000 -#else #define DHD_ISCAN_VAL 0x2000 -#endif #define DHD_ARPOE_VAL 0x4000 #define DHD_REORDER_VAL 0x8000 #define DHD_NOCHECKDIED_VAL 0x20000 /* UTF WAR */ #define DHD_PNO_VAL 0x80000 +#define DHD_MSGTRACE_VAL 0x100000 +#define DHD_FWLOG_VAL 0x400000 +#define DHD_RTT_VAL 0x200000 +#define DHD_IOV_INFO_VAL 0x800000 #define DHD_ANDROID_VAL 0x10000 #define DHD_IW_VAL 0x20000 #define DHD_CFG_VAL 0x40000 diff --git a/drivers/amlogic/wifi/bcmdhd/include/epivers.h b/drivers/amlogic/wifi/bcmdhd/include/epivers.h new file mode 100644 index 0000000000000..931576e9eb021 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/epivers.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $ + * +*/ + +#ifndef _epivers_h_ +#define _epivers_h_ + +#define EPI_MAJOR_VERSION 1 + +#define EPI_MINOR_VERSION 363 + +#define EPI_RC_NUMBER 59 + +#define EPI_INCREMENTAL_NUMBER 144 + +#define EPI_BUILD_NUMBER 0 + +#define EPI_VERSION 1, 363, 59, 144 + +#define EPI_VERSION_NUM 0x0116b3b9 + +#define EPI_VERSION_DEV 1.363.59 + +/* Driver Version String, ASCII, 32 chars max */ +#define EPI_VERSION_STR "1.363.59.144.9 (r)" + +#endif /* _epivers_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/event_log.h b/drivers/amlogic/wifi/bcmdhd/include/event_log.h similarity index 52% rename from drivers/net/wireless/bcmdhd/include/event_log.h rename to drivers/amlogic/wifi/bcmdhd/include/event_log.h index 2fa436dce9c08..d06d811cb925f 100644 --- a/drivers/net/wireless/bcmdhd/include/event_log.h +++ b/drivers/amlogic/wifi/bcmdhd/include/event_log.h @@ -1,77 +1,38 @@ /* * EVENT_LOG system definitions * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: event_log.h 241182 2011-02-17 21:50:03Z $ + * + * <> + * + * $Id: event_log.h 591285 2015-10-07 11:56:29Z $ */ #ifndef _EVENT_LOG_H_ #define _EVENT_LOG_H_ #include - -/* Set a maximum number of sets here. It is not dynamic for - * efficiency of the EVENT_LOG calls. - */ -#define NUM_EVENT_LOG_SETS 4 -#define EVENT_LOG_SET_BUS 0 -#define EVENT_LOG_SET_WL 1 -#define EVENT_LOG_SET_PSM 2 -#define EVENT_LOG_SET_DBG 3 - -/* Define new event log tags here */ -#define EVENT_LOG_TAG_NULL 0 /* Special null tag */ -#define EVENT_LOG_TAG_TS 1 /* Special timestamp tag */ -#define EVENT_LOG_TAG_BUS_OOB 2 -#define EVENT_LOG_TAG_BUS_STATE 3 -#define EVENT_LOG_TAG_BUS_PROTO 4 -#define EVENT_LOG_TAG_BUS_CTL 5 -#define EVENT_LOG_TAG_BUS_EVENT 6 -#define EVENT_LOG_TAG_BUS_PKT 7 -#define EVENT_LOG_TAG_BUS_FRAME 8 -#define EVENT_LOG_TAG_BUS_DESC 9 -#define EVENT_LOG_TAG_BUS_SETUP 10 -#define EVENT_LOG_TAG_BUS_MISC 11 -#define EVENT_LOG_TAG_SRSCAN 22 -#define EVENT_LOG_TAG_PWRSTATS_INFO 23 -#define EVENT_LOG_TAG_UCODE_WATCHDOG 26 -#define EVENT_LOG_TAG_UCODE_FIFO 27 -#define EVENT_LOG_TAG_SCAN_TRACE_LOW 28 -#define EVENT_LOG_TAG_SCAN_TRACE_HIGH 29 -#define EVENT_LOG_TAG_SCAN_ERROR 30 -#define EVENT_LOG_TAG_SCAN_WARN 31 -#define EVENT_LOG_TAG_MPF_ERR 32 -#define EVENT_LOG_TAG_MPF_WARN 33 -#define EVENT_LOG_TAG_MPF_INFO 34 -#define EVENT_LOG_TAG_MPF_DEBUG 35 -#define EVENT_LOG_TAG_EVENT_INFO 36 -#define EVENT_LOG_TAG_EVENT_ERR 37 -#define EVENT_LOG_TAG_PWRSTATS_ERROR 38 -#define EVENT_LOG_TAG_EXCESS_PM_ERROR 39 -#define EVENT_LOG_TAG_IOCTL_LOG 40 -#define EVENT_LOG_TAG_PFN_ERR 41 -#define 
EVENT_LOG_TAG_PFN_WARN 42 -#define EVENT_LOG_TAG_PFN_INFO 43 -#define EVENT_LOG_TAG_PFN_DEBUG 44 -#define EVENT_LOG_TAG_BEACON_LOG 45 -#define EVENT_LOG_TAG_WNM_BSSTRANS_INFO 46 -#define EVENT_LOG_TAG_TRACE_CHANSW 47 -#define EVENT_LOG_TAG_PCI_ERROR 48 -#define EVENT_LOG_TAG_PCI_TRACE 49 -#define EVENT_LOG_TAG_PCI_WARN 50 -#define EVENT_LOG_TAG_PCI_INFO 51 -#define EVENT_LOG_TAG_PCI_DBG 52 -#define EVENT_LOG_TAG_PCI_DATA 53 -#define EVENT_LOG_TAG_PCI_RING 54 -#define EVENT_LOG_TAG_MAX 55 /* Set to the same value of last tag, not last tag + 1 */ -/* Note: New event should be added/reserved in trunk before adding it to branches */ - -/* Flags for tag control */ -#define EVENT_LOG_TAG_FLAG_NONE 0 -#define EVENT_LOG_TAG_FLAG_LOG 0x80 -#define EVENT_LOG_TAG_FLAG_PRINT 0x40 -#define EVENT_LOG_TAG_FLAG_MASK 0x3f +#include +#include /* logstrs header */ #define LOGSTRS_MAGIC 0x4C4F4753 @@ -80,10 +41,11 @@ /* We make sure that the block size will fit in a single packet * (allowing for a bit of overhead on each packet */ -#define EVENT_LOG_MAX_BLOCK_SIZE 1400 -#define EVENT_LOG_PSM_BLOCK 0x200 -#define EVENT_LOG_BUS_BLOCK 0x200 -#define EVENT_LOG_DBG_BLOCK 0x100 +#define EVENT_LOG_MAX_BLOCK_SIZE 1400 +#define EVENT_LOG_WL_BLOCK_SIZE 0x200 +#define EVENT_LOG_PSM_BLOCK_SIZE 0x200 +#define EVENT_LOG_BUS_BLOCK_SIZE 0x200 +#define EVENT_LOG_ERROR_BLOCK_SIZE 0x200 /* * There are multiple levels of objects define here: @@ -112,19 +74,6 @@ #define _EL_TOP_PTR struct event_log_top * #endif /* EVENT_LOG_DUMPER */ -/* Each event log entry has a type. The type is the LAST word of the - * event log. The printing code walks the event entries in reverse - * order to find the first entry. 
- */ -typedef union event_log_hdr { - struct { - uint8 tag; /* Event_log entry tag */ - uint8 count; /* Count of 4-byte entries */ - uint16 fmt_num; /* Format number */ - }; - uint32 t; /* Type cheat */ -} event_log_hdr_t; - /* Event log sets (a logical circurlar buffer) consist of one or more * event_log_blocks. The blocks themselves form a logical circular * list. The log entries are placed in each event_log_block until it @@ -186,6 +135,35 @@ typedef struct { uint32 log_magic; /* MAGIC number for verification 'LOGS' */ } logstr_header_t; +/* + * Use the following macros for generating log events. + * + * The FAST versions check the enable of the tag before evaluating the arguments and calling the + * event_log function. This adds 5 instructions. The COMPACT versions evaluate the arguments + * and call the event_log function unconditionally. The event_log function will then skip logging + * if this tag is disabled. + * + * To support easy usage of existing debugging (e.g. msglevel) via macro re-definition there are + * two variants of these macros to help. + * + * First there are the CAST versions. The event_log function normally logs uint32 values or else + * they have to be cast to uint32. The CAST versions blindly cast for you so you don't have to edit + * any existing code. + * + * Second there are the PAREN_ARGS versions. These expect the logging format string and arguments + * to be enclosed in parentheses. This allows us to make the following mapping of an existing + * msglevel macro: + * #define WL_ERROR(args) EVENT_LOG_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args) + * + * The versions of the macros without FAST or COMPACT in their name are just synonyms for the + * COMPACT versions. + * + * You should use the COMPACT macro (or its synonym) in cases where there is some preceding logic + * that prevents the execution of the macro, e.g. WL_ERROR by definition rarely gets executed. + * Use the FAST macro in performance sensitive paths. 
The key concept here is that you should be + * assuming that your macro usage is compiled into ROM and can't be changed ... so choose wisely. + * + */ #ifndef EVENT_LOG_DUMPER @@ -193,6 +171,18 @@ typedef struct { /* Null define if no tracing */ #define EVENT_LOG(format, ...) +#define EVENT_LOG_FAST(tag, fmt, ...) +#define EVENT_LOG_COMPACT(tag, fmt, ...) + +#define EVENT_LOG_CAST(tag, fmt, ...) +#define EVENT_LOG_FAST_CAST(tag, fmt, ...) +#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...) + +#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs) +#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs) +#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs) + +#define EVENT_LOG_IS_LOG_ON(tag) 0 #else /* EVENT_LOG_COMPILE */ @@ -224,6 +214,32 @@ typedef struct { #define _EVENT_LOGE(tag, fmt_num, ...) event_logn(14, tag, fmt_num, __VA_ARGS__) #define _EVENT_LOGF(tag, fmt_num, ...) event_logn(15, tag, fmt_num, __VA_ARGS__) + +/* Casting low level macros */ +#define _EVENT_LOG_CAST0(tag, fmt_num) \ + event_log0(tag, fmt_num) +#define _EVENT_LOG_CAST1(tag, fmt_num, t1) \ + event_log1(tag, fmt_num, (uint32)(t1)) +#define _EVENT_LOG_CAST2(tag, fmt_num, t1, t2) \ + event_log2(tag, fmt_num, (uint32)(t1), (uint32)(t2)) +#define _EVENT_LOG_CAST3(tag, fmt_num, t1, t2, t3) \ + event_log3(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3)) +#define _EVENT_LOG_CAST4(tag, fmt_num, t1, t2, t3, t4) \ + event_log4(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3), (uint32)(t4)) + +/* The rest call the generic routine that takes a count */ +#define _EVENT_LOG_CAST5(tag, fmt_num, ...) _EVENT_LOG5(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST6(tag, fmt_num, ...) _EVENT_LOG6(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST7(tag, fmt_num, ...) _EVENT_LOG7(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST8(tag, fmt_num, ...) _EVENT_LOG8(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST9(tag, fmt_num, ...) _EVENT_LOG9(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTA(tag, fmt_num, ...) 
_EVENT_LOGA(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTB(tag, fmt_num, ...) _EVENT_LOGB(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTC(tag, fmt_num, ...) _EVENT_LOGC(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTD(tag, fmt_num, ...) _EVENT_LOGD(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTE(tag, fmt_num, ...) _EVENT_LOGE(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTF(tag, fmt_num, ...) _EVENT_LOGF(tag, fmt_num, __VA_ARGS__) + /* Hack to make the proper routine call when variadic macros get * passed. Note the max of 15 arguments. More than that can't be * handled by the event_log entries anyways so best to catch it at compile @@ -233,30 +249,70 @@ typedef struct { #define _EVENT_LOG_VA_NUM_ARGS(F, _1, _2, _3, _4, _5, _6, _7, _8, _9, \ _A, _B, _C, _D, _E, _F, N, ...) F ## N -#define _EVENT_LOG(tag, fmt, ...) \ +/* cast = _EVENT_LOG for no casting + * cast = _EVENT_LOG_CAST for casting of fmt arguments to uint32. + * Only first 4 arguments are casted to uint32. event_logn() is called + * if more than 4 arguments are present. This function internally assumes + * all arguments are uint32 + */ +#define _EVENT_LOG(cast, tag, fmt, ...) \ static char logstr[] __attribute__ ((section(".logstrs"))) = fmt; \ static uint32 fmtnum __attribute__ ((section(".lognums"))) = (uint32) &logstr; \ - _EVENT_LOG_VA_NUM_ARGS(_EVENT_LOG, ##__VA_ARGS__, \ + _EVENT_LOG_VA_NUM_ARGS(cast, ##__VA_ARGS__, \ F, E, D, C, B, A, 9, 8, \ 7, 6, 5, 4, 3, 2, 1, 0) \ - (tag, (int) &fmtnum , ## __VA_ARGS__); \ + (tag, (int) &fmtnum , ## __VA_ARGS__) #define EVENT_LOG_FAST(tag, fmt, ...) 
\ - if (event_log_tag_sets != NULL) { \ - uint8 tag_flag = *(event_log_tag_sets + tag); \ - if (tag_flag != 0) { \ - _EVENT_LOG(tag, fmt , ## __VA_ARGS__); \ + do { \ + if (event_log_tag_sets != NULL) { \ + uint8 tag_flag = *(event_log_tag_sets + tag); \ + if (tag_flag != 0) { \ + _EVENT_LOG(_EVENT_LOG, tag, fmt , ## __VA_ARGS__); \ + } \ } \ - } + } while (0) #define EVENT_LOG_COMPACT(tag, fmt, ...) \ - if (1) { \ - _EVENT_LOG(tag, fmt , ## __VA_ARGS__); \ - } + do { \ + _EVENT_LOG(_EVENT_LOG, tag, fmt , ## __VA_ARGS__); \ + } while (0) + +/* Event log macro with casting to uint32 of arguments */ +#define EVENT_LOG_FAST_CAST(tag, fmt, ...) \ + do { \ + if (event_log_tag_sets != NULL) { \ + uint8 tag_flag = *(event_log_tag_sets + tag); \ + if (tag_flag != 0) { \ + _EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__); \ + } \ + } \ + } while (0) + +#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...) \ + do { \ + _EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__); \ + } while (0) + #define EVENT_LOG(tag, fmt, ...) EVENT_LOG_COMPACT(tag, fmt , ## __VA_ARGS__) +#define EVENT_LOG_CAST(tag, fmt, ...) EVENT_LOG_COMPACT_CAST(tag, fmt , ## __VA_ARGS__) + +#define _EVENT_LOG_REMOVE_PAREN(...) 
__VA_ARGS__ +#define EVENT_LOG_REMOVE_PAREN(args) _EVENT_LOG_REMOVE_PAREN args + +#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs) \ + EVENT_LOG_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs)) + +#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs) \ + EVENT_LOG_FAST_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs)) + +#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs) \ + EVENT_LOG_COMPACT_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs)) + + #define EVENT_LOG_IS_LOG_ON(tag) (*(event_log_tag_sets + (tag)) & EVENT_LOG_TAG_FLAG_LOG) #define EVENT_DUMP event_log_buffer @@ -281,7 +337,7 @@ extern void event_log3(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3); extern void event_log4(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3, uint32 t4); extern void event_logn(int num_args, int tag, int fmtNum, ...); -extern void event_log_time_sync(void); +extern void event_log_time_sync(uint32 ms); extern void event_log_buffer(int tag, uint8 *buf, int size); #endif /* EVENT_LOG_DUMPER */ diff --git a/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h b/drivers/amlogic/wifi/bcmdhd/include/hnd_armtrap.h similarity index 51% rename from drivers/net/wireless/bcmdhd/include/hnd_armtrap.h rename to drivers/amlogic/wifi/bcmdhd/include/hnd_armtrap.h index 69738bb1fdca1..baf55724c595b 100644 --- a/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h +++ b/drivers/amlogic/wifi/bcmdhd/include/hnd_armtrap.h @@ -1,9 +1,30 @@ /* * HND arm trap handling. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: hnd_armtrap.h 470663 2014-04-16 00:24:43Z $ + * + * <> + * + * $Id: hnd_armtrap.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _hnd_armtrap_h_ @@ -37,6 +58,7 @@ #define TR_PC TR_REG(15) #define TRAP_T_SIZE 80 +#define ASSERT_TRAP_SVC_NUMBER 255 #ifndef _LANGUAGE_ASSEMBLY diff --git a/drivers/net/wireless/bcmdhd/include/hnd_cons.h b/drivers/amlogic/wifi/bcmdhd/include/hnd_cons.h similarity index 54% rename from drivers/net/wireless/bcmdhd/include/hnd_cons.h rename to drivers/amlogic/wifi/bcmdhd/include/hnd_cons.h index dbc83052d980a..2dee71abefeb5 100644 --- a/drivers/net/wireless/bcmdhd/include/hnd_cons.h +++ b/drivers/amlogic/wifi/bcmdhd/include/hnd_cons.h @@ -1,9 +1,30 @@ /* * Console support for RTE - for host use only. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: hnd_cons.h 473343 2014-04-29 01:45:22Z $ + * + * <> + * + * $Id: hnd_cons.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _hnd_cons_h_ #define _hnd_cons_h_ diff --git a/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h b/drivers/amlogic/wifi/bcmdhd/include/hnd_pktpool.h similarity index 65% rename from drivers/net/wireless/bcmdhd/include/hnd_pktpool.h rename to drivers/amlogic/wifi/bcmdhd/include/hnd_pktpool.h index 3e6878a117471..3cf46727b044f 100644 --- a/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h +++ b/drivers/amlogic/wifi/bcmdhd/include/hnd_pktpool.h @@ -1,36 +1,60 @@ /* * HND generic packet pool operation primitives * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: $ + * + * <> + * + * $Id: hnd_pktpool.h 591285 2015-10-07 11:56:29Z $ */ #ifndef _hnd_pktpool_h_ #define _hnd_pktpool_h_ +#include + #ifdef __cplusplus extern "C" { #endif +/* mutex macros for thread safe */ +#ifdef HND_PKTPOOL_THREAD_SAFE +#define HND_PKTPOOL_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex) +#else +#define HND_PKTPOOL_MUTEX_DECL(mutex) +#endif + #ifdef BCMPKTPOOL #define POOL_ENAB(pool) ((pool) && (pool)->inited) -#define SHARED_POOL (pktpool_shared) #else /* BCMPKTPOOL */ #define POOL_ENAB(bus) 0 -#define SHARED_POOL ((struct pktpool *)NULL) #endif /* BCMPKTPOOL */ -#ifdef BCMFRAGPOOL -#define SHARED_FRAG_POOL (pktpool_shared_lfrag) -#endif -#define SHARED_RXFRAG_POOL (pktpool_shared_rxlfrag) - - #ifndef PKTPOOL_LEN_MAX #define PKTPOOL_LEN_MAX 40 #endif /* PKTPOOL_LEN_MAX */ #define PKTPOOL_CB_MAX 3 +#define PKTPOOL_CB_MAX_AVL 4 + /* forward declaration */ struct pktpool; @@ -40,7 +64,8 @@ typedef struct { pktpool_cb_t cb; void *arg; } pktpool_cbinfo_t; -/* call back fn extension to populate host address in pool pkt */ + +/** PCIe SPLITRX related: call back fn extension to populate host address in pool pkt */ typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg1, void* pkt, bool arg2); typedef struct { pktpool_cb_extn_t cb; @@ -77,26 +102,27 @@ typedef struct { #endif /* BCMDBG_POOL */ typedef struct pktpool { - bool inited; /* pktpool_init was successful */ - uint8 type; /* type of lbuf: basic, frag, etc */ - uint8 id; /* pktpool ID: index in registry */ - bool istx; /* direction: transmit or receive data path */ - - void * freelist; /* free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */ - uint16 avail; /* number of packets in pool's free list */ - uint16 len; /* number of packets managed by pool */ - uint16 maxlen; /* maximum size of pool <= PKTPOOL_LEN_MAX */ - uint16 plen; /* size of pkt buffer, excluding lbuf|lbuf_frag */ + bool inited; /**< pktpool_init was successful */ + uint8 type; /**< type of lbuf: basic, frag, etc */ + 
uint8 id; /**< pktpool ID: index in registry */ + bool istx; /**< direction: transmit or receive data path */ + HND_PKTPOOL_MUTEX_DECL(mutex) /**< thread-safe mutex */ + + void * freelist; /**< free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */ + uint16 avail; /**< number of packets in pool's free list */ + uint16 len; /**< number of packets managed by pool */ + uint16 maxlen; /**< maximum size of pool <= PKTPOOL_LEN_MAX */ + uint16 plen; /**< size of pkt buffer, excluding lbuf|lbuf_frag */ bool empty; uint8 cbtoggle; uint8 cbcnt; uint8 ecbcnt; - bool emptycb_disable; + uint8 emptycb_disable; /**< Value of type enum pktpool_empty_cb_state */ pktpool_cbinfo_t *availcb_excl; - pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX]; + pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX_AVL]; pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX]; - pktpool_cbextn_info_t cbext; + pktpool_cbextn_info_t cbext; /**< PCIe SPLITRX related */ pktpool_cbextn_info_t rxcplidfn; #ifdef BCMDBG_POOL uint8 dbg_cbcnt; @@ -107,11 +133,8 @@ typedef struct pktpool { pktpool_cbinfo_t dmarxfill; } pktpool_t; -extern pktpool_t *pktpool_shared; -#ifdef BCMFRAGPOOL -extern pktpool_t *pktpool_shared_lfrag; -#endif -extern pktpool_t *pktpool_shared_rxlfrag; + +pktpool_t *get_pktpools_registry(int id); /* Incarnate a pktpool registry. On success returns total_pools. 
*/ extern int pktpool_attach(osl_t *osh, uint32 total_pools); @@ -164,13 +187,10 @@ extern int pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void #define PKTPOOL_MAXIMUM_ID (15) /* Registry of pktpool(s) */ -extern pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; - /* Pool ID to/from Pool Pointer converters */ -#define PKTPOOL_ID2PTR(id) (pktpools_registry[id]) +#define PKTPOOL_ID2PTR(id) (get_pktpools_registry(id)) #define PKTPOOL_PTR2ID(pp) (POOLID(pp)) - #ifdef BCMDBG_POOL extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); extern int pktpool_start_trigger(pktpool_t *pktp, void *p); @@ -179,6 +199,25 @@ extern int pktpool_dbg_notify(pktpool_t *pktp); extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats); #endif /* BCMDBG_POOL */ +#ifdef BCMPKTPOOL +#define SHARED_POOL (pktpool_shared) +extern pktpool_t *pktpool_shared; +#ifdef BCMFRAGPOOL +#define SHARED_FRAG_POOL (pktpool_shared_lfrag) +extern pktpool_t *pktpool_shared_lfrag; +#endif + +/** PCIe SPLITRX related */ +#define SHARED_RXFRAG_POOL (pktpool_shared_rxlfrag) +extern pktpool_t *pktpool_shared_rxlfrag; + +void hnd_pktpool_init(osl_t *osh); +void hnd_pktpool_fill(pktpool_t *pktpool, bool minimal); +void hnd_pktpool_refill(bool minimal); +#else /* BCMPKTPOOL */ +#define SHARED_POOL ((struct pktpool *)NULL) +#endif /* BCMPKTPOOL */ + #ifdef __cplusplus } #endif diff --git a/drivers/net/wireless/bcmdhd/include/hnd_pktq.h b/drivers/amlogic/wifi/bcmdhd/include/hnd_pktq.h similarity index 51% rename from drivers/net/wireless/bcmdhd/include/hnd_pktq.h rename to drivers/amlogic/wifi/bcmdhd/include/hnd_pktq.h index c27a21d581bab..1586de3ca5b43 100644 --- a/drivers/net/wireless/bcmdhd/include/hnd_pktq.h +++ b/drivers/amlogic/wifi/bcmdhd/include/hnd_pktq.h @@ -1,18 +1,48 @@ /* * HND generic pktq operation primitives * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a 
separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: $ + * + * <> + * + * $Id: hnd_pktq.h 591283 2015-10-07 11:52:00Z $ */ #ifndef _hnd_pktq_h_ #define _hnd_pktq_h_ +#include + #ifdef __cplusplus extern "C" { #endif +/* mutex macros for thread safe */ +#ifdef HND_PKTQ_THREAD_SAFE +#define HND_PKTQ_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex) +#else +#define HND_PKTQ_MUTEX_DECL(mutex) +#endif + /* osl multi-precedence packet queue */ #define PKTQ_LEN_MAX 0xFFFF /* Max uint16 65535 packets */ #ifndef PKTQ_LEN_DEFAULT @@ -23,68 +53,69 @@ extern "C" { #endif typedef struct pktq_prec { - void *head; /* first packet to dequeue */ - void *tail; /* last packet to dequeue */ - uint16 len; /* number of queued packets */ - uint16 max; /* maximum number of queued packets */ + void *head; /**< first packet to dequeue */ + void *tail; /**< last packet to dequeue */ + uint16 len; /**< number of queued packets */ + uint16 max; /**< maximum number of queued packets */ } pktq_prec_t; #ifdef PKTQ_LOG typedef struct { - uint32 requested; /* packets requested to be stored */ - uint32 stored; /* packets stored */ - uint32 saved; /* packets saved, + uint32 requested; /**< packets requested to be stored */ + uint32 stored; /**< packets stored */ + uint32 saved; /**< packets saved, because a lowest priority queue has given away one packet */ - uint32 selfsaved; /* packets saved, + uint32 selfsaved; /**< packets saved, because an older packet from the same queue has been dropped */ - uint32 full_dropped; /* packets dropped, + uint32 full_dropped; /**< packets dropped, because pktq is full with higher precedence packets */ - uint32 dropped; /* packets dropped because pktq per that precedence is full */ - uint32 sacrificed; /* packets dropped, + uint32 dropped; /**< packets dropped because pktq per that precedence is full */ + uint32 sacrificed; /**< packets dropped, in order to save one from a queue of a highest priority */ - uint32 busy; /* packets droped because of hardware/transmission error */ - uint32 retry; /* packets re-sent 
because they were not received */ - uint32 ps_retry; /* packets retried again prior to moving power save mode */ - uint32 suppress; /* packets which were suppressed and not transmitted */ - uint32 retry_drop; /* packets finally dropped after retry limit */ - uint32 max_avail; /* the high-water mark of the queue capacity for packets - + uint32 busy; /**< packets droped because of hardware/transmission error */ + uint32 retry; /**< packets re-sent because they were not received */ + uint32 ps_retry; /**< packets retried again prior to moving power save mode */ + uint32 suppress; /**< packets which were suppressed and not transmitted */ + uint32 retry_drop; /**< packets finally dropped after retry limit */ + uint32 max_avail; /**< the high-water mark of the queue capacity for packets - goes to zero as queue fills */ - uint32 max_used; /* the high-water mark of the queue utilisation for packets - + uint32 max_used; /**< the high-water mark of the queue utilisation for packets - increases with use ('inverse' of max_avail) */ - uint32 queue_capacity; /* the maximum capacity of the queue */ - uint32 rtsfail; /* count of rts attempts that failed to receive cts */ - uint32 acked; /* count of packets sent (acked) successfully */ - uint32 txrate_succ; /* running total of phy rate of packets sent successfully */ - uint32 txrate_main; /* running totoal of primary phy rate of all packets */ - uint32 throughput; /* actual data transferred successfully */ - uint32 airtime; /* cumulative total medium access delay in useconds */ - uint32 _logtime; /* timestamp of last counter clear */ + uint32 queue_capacity; /**< the maximum capacity of the queue */ + uint32 rtsfail; /**< count of rts attempts that failed to receive cts */ + uint32 acked; /**< count of packets sent (acked) successfully */ + uint32 txrate_succ; /**< running total of phy rate of packets sent successfully */ + uint32 txrate_main; /**< running totoal of primary phy rate of all packets */ + uint32 throughput; /**< 
actual data transferred successfully */ + uint32 airtime; /**< cumulative total medium access delay in useconds */ + uint32 _logtime; /**< timestamp of last counter clear */ } pktq_counters_t; typedef struct { uint32 _prec_log; - pktq_counters_t* _prec_cnt[PKTQ_MAX_PREC]; /* Counters per queue */ + pktq_counters_t* _prec_cnt[PKTQ_MAX_PREC]; /**< Counters per queue */ } pktq_log_t; #endif /* PKTQ_LOG */ #define PKTQ_COMMON \ - uint16 num_prec; /* number of precedences in use */ \ - uint16 hi_prec; /* rapid dequeue hint (>= highest non-empty prec) */ \ - uint16 max; /* total max packets */ \ - uint16 len; /* total number of packets */ + uint16 num_prec; /**< number of precedences in use */ \ + uint16 hi_prec; /**< rapid dequeue hint (>= highest non-empty prec) */ \ + uint16 max; /**< total max packets */ \ + uint16 len; /**< total number of packets */ /* multi-priority pkt queue */ struct pktq { PKTQ_COMMON /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */ struct pktq_prec q[PKTQ_MAX_PREC]; + HND_PKTQ_MUTEX_DECL(mutex) #ifdef PKTQ_LOG pktq_log_t* pktqlog; #endif @@ -95,6 +126,7 @@ struct spktq { PKTQ_COMMON /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */ struct pktq_prec q[1]; + HND_PKTQ_MUTEX_DECL(mutex) }; #define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--) @@ -107,12 +139,16 @@ typedef bool (*ifpkt_cb_t)(void*, int); #define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max)) #define pktq_pmax(pq, prec) ((pq)->q[prec].max) #define pktq_plen(pq, prec) ((pq)->q[prec].len) -#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len) -#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max) #define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0) - #define pktq_ppeek(pq, prec) ((pq)->q[prec].head) #define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail) +#ifdef HND_PKTQ_THREAD_SAFE +extern int pktq_pavail(struct pktq *pq, int prec); +extern bool 
pktq_pfull(struct pktq *pq, int prec); +#else +#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len) +#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max) +#endif /* HND_PKTQ_THREAD_SAFE */ extern void pktq_append(struct pktq *pq, int prec, struct spktq *list); extern void pktq_prepend(struct pktq *pq, int prec, struct spktq *list); @@ -139,9 +175,14 @@ extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out); #define pktq_len(pq) ((int)(pq)->len) #define pktq_max(pq) ((int)(pq)->max) +#define pktq_empty(pq) ((pq)->len == 0) +#ifdef HND_PKTQ_THREAD_SAFE +extern int pktq_avail(struct pktq *pq); +extern bool pktq_full(struct pktq *pq); +#else #define pktq_avail(pq) ((int)((pq)->max - (pq)->len)) #define pktq_full(pq) ((pq)->len >= (pq)->max) -#define pktq_empty(pq) ((pq)->len == 0) +#endif /* HND_PKTQ_THREAD_SAFE */ /* operations for single precedence queues */ #define pktenq(pq, p) pktq_penq(((struct pktq *)(void *)pq), 0, (p)) @@ -150,8 +191,13 @@ extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out); #define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)(void *)pq), 0) #define pktqflush(osh, pq) pktq_flush(osh, ((struct pktq *)(void *)pq), TRUE, NULL, 0) #define pktqinit(pq, len) pktq_init(((struct pktq *)(void *)pq), 1, len) +#define pktqdeinit(pq) pktq_deinit((struct pktq *)(void *)pq) +#define pktqavail(pq) pktq_avail((struct pktq *)(void *)pq) +#define pktqfull(pq) pktq_full((struct pktq *)(void *)pq) + +extern bool pktq_init(struct pktq *pq, int num_prec, int max_len); +extern bool pktq_deinit(struct pktq *pq); -extern void pktq_init(struct pktq *pq, int num_prec, int max_len); extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len); /* prec_out may be NULL if caller is not interested in return value */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/hndpmu.h b/drivers/amlogic/wifi/bcmdhd/include/hndpmu.h new file mode 100644 index 0000000000000..dfc83d3d7fd18 --- /dev/null +++ 
b/drivers/amlogic/wifi/bcmdhd/include/hndpmu.h @@ -0,0 +1,45 @@ +/* + * HND SiliconBackplane PMU support. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hndpmu.h 530150 2015-01-29 08:43:40Z $ + */ + +#ifndef _hndpmu_h_ +#define _hndpmu_h_ + +#include +#include +#include + + +extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask); +extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength); + +extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear); +extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh); +extern void si_pmu_avbtimer_enable(si_t *sih, osl_t *osh, bool set_flag); + +#endif /* _hndpmu_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/hndsoc.h b/drivers/amlogic/wifi/bcmdhd/include/hndsoc.h similarity index 83% rename from drivers/net/wireless/bcmdhd/include/hndsoc.h rename to drivers/amlogic/wifi/bcmdhd/include/hndsoc.h index a44c2f7c2b5f1..36884a088b6fb 100644 --- a/drivers/net/wireless/bcmdhd/include/hndsoc.h +++ b/drivers/amlogic/wifi/bcmdhd/include/hndsoc.h @@ -1,9 +1,30 @@ /* * Broadcom HND chip & on-chip-interconnect-related definitions. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: hndsoc.h 473238 2014-04-28 19:14:56Z $ + * + * <> + * + * $Id: hndsoc.h 517544 2014-11-26 00:40:42Z $ */ #ifndef _HNDSOC_H @@ -33,6 +54,11 @@ #define SI_MAXCORES 32 /* NorthStar has more cores */ #endif /* SI_MAXCORES */ +#define SI_MAXBR 4 /* Max bridges (this is arbitrary, for software + * convenience and could be changed if we + * make any larger chips + */ + #define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */ #define SI_FASTRAM_SWAPPED 0x19800000 @@ -52,6 +78,8 @@ #define SI_ARMCR4_ROM 0x000f0000 /* ARM Cortex-R4 ROM */ #define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */ #define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */ +#define SI_ARMCA7_ROM 0x00000000 /* ARM Cortex-A7 ROM */ +#define SI_ARMCA7_RAM 0x00200000 /* ARM Cortex-A7 RAM */ #define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */ #define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */ @@ -65,6 +93,22 @@ #define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2 * (2 ZettaBytes), high 32 bits */ + +#define SI_BCM53573_NANDFLASH 0x30000000 /* 53573 NAND flash base */ +#define SI_BCM53573_NORFLASH 0x1c000000 /* 53573 NOR flash base */ + +#define SI_BCM53573_NORFLASH_WINDOW 0x01000000 /* only support 16M direct access for + * 3-byte address modes in spi flash + */ +#define SI_BCM53573_BOOTDEV_MASK 0x3 +#define SI_BCM53573_BOOTDEV_NOR 0x0 + +#define SI_BCM53573_DDRTYPE_MASK 0x10 +#define SI_BCM53573_DDRTYPE_DDR3 0x10 + +/* APB bridge code */ +#define APB_BRIDGE_ID 0x135 /* APB Bridge 0, 1, etc. 
*/ + /* core codes */ #define NODEV_CORE_ID 0x700 /* Invalid coreid */ #define CC_CORE_ID 0x800 /* chipcommon core */ @@ -129,6 +173,9 @@ #define ARMCR4_CORE_ID 0x83e /* ARM CR4 CPU */ #define GCI_CORE_ID 0x840 /* GCI Core */ #define M2MDMA_CORE_ID 0x844 /* memory to memory dma */ +#define CMEM_CORE_ID 0x846 /* CNDS DDR2/3 memory controller */ +#define ARMCA7_CORE_ID 0x847 /* ARM CA7 CPU */ +#define SYSMEM_CORE_ID 0x849 /* System memory core */ #define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */ #define AXI_CORE_ID 0x301 /* AXI/GPV core ID */ #define EROM_CORE_ID 0x366 /* EROM core ID */ @@ -217,7 +264,7 @@ #define CCS_HQCLKREQ 0x00000040 /* HQ Clock Required */ #define CCS_USBCLKREQ 0x00000100 /* USB Clock Req */ #define CCS_SECICLKREQ 0x00000100 /* SECI Clock Req */ -#define CCS_ARMFASTCLOCKREQ 0x00000100 /* ARM CR4 fast clock request */ +#define CCS_ARMFASTCLOCKREQ 0x00000100 /* ARM CR4/CA7 fast clock request */ #define CCS_AVBCLKREQ 0x00000400 /* AVB Clock enable request */ #define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */ #define CCS_ERSRC_REQ_SHIFT 8 diff --git a/drivers/net/wireless/bcmdhd/include/linux_osl.h b/drivers/amlogic/wifi/bcmdhd/include/linux_osl.h similarity index 85% rename from drivers/net/wireless/bcmdhd/include/linux_osl.h rename to drivers/amlogic/wifi/bcmdhd/include/linux_osl.h index 5527be2367c52..278fecf6fba04 100644 --- a/drivers/net/wireless/bcmdhd/include/linux_osl.h +++ b/drivers/amlogic/wifi/bcmdhd/include/linux_osl.h @@ -1,9 +1,30 @@ /* * Linux OS Independent Layer * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a 
special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: linux_osl.h 503131 2014-09-17 12:16:08Z $ + * + * <> + * + * $Id: linux_osl.h 601764 2015-11-24 03:47:41Z $ */ #ifndef _linux_osl_h_ @@ -53,19 +74,11 @@ extern void osl_assert(const char *exp, const char *file, int line); #define ASSERT(exp) #endif /* GCC_VERSION > 30100 */ #endif /* __GNUC__ */ -#endif +#endif /* bcm_prefetch_32B */ static inline void bcm_prefetch_32B(const uint8 *addr, const int cachelines_32B) { -#if defined(BCM47XX_CA9) && (__LINUX_ARM_ARCH__ >= 5) - switch (cachelines_32B) { - case 4: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 96) : "cc"); - case 3: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 64) : "cc"); - case 2: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 32) : "cc"); - case 1: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 0) : "cc"); - } -#endif } /* microsecond delay */ @@ -101,13 +114,14 @@ extern uint osl_pcie_domain(osl_t *osh); extern uint osl_pcie_bus(osl_t *osh); extern struct pci_dev *osl_pci_device(osl_t *osh); +#define OSL_ACP_COHERENCE (1<<1L) /* Pkttag flag should be part of public information */ typedef struct { bool pkttag; - bool mmbus; /* Bus supports memory-mapped register accesses */ - pktfree_cb_fn_t tx_fn; /* Callback function for PKTFREE */ - void *tx_ctx; /* Context to the 
callback function */ + bool mmbus; /**< Bus supports memory-mapped register accesses */ + pktfree_cb_fn_t tx_fn; /**< Callback function for PKTFREE */ + void *tx_ctx; /**< Context to the callback function */ void *unused[3]; } osl_pubinfo_t; @@ -150,20 +164,6 @@ extern uint osl_malloc_failed(osl_t *osh); #define DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \ osl_dma_free_consistent((osh), (void*)(va), (size), (pa)) -#if defined(BCMPCIE) -#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING) -#define DMA_ALLOC_CONSISTENT_STATIC(osh, size, align, tot, pap, dmah, idx) \ - osl_dma_alloc_consistent_static((osh), (size), (align), (tot), (pap), (idx)) -#define DMA_FREE_CONSISTENT_STATIC(osh, va, size, pa, dmah, idx) \ - osl_dma_free_consistent_static((osh), (void*)(va), (size), (pa), (idx)) - -extern void *osl_dma_alloc_consistent_static(osl_t *osh, uint size, uint16 align, - uint *tot, dmaaddr_t *pap, uint16 idx); -extern void osl_dma_free_consistent_static(osl_t *osh, void *va, uint size, dmaaddr_t pa, - uint16 idx); -#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */ -#endif /* BCMPCIE */ - extern uint osl_dma_consistent_align(void); extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, uint *tot, dmaaddr_t *pap); @@ -178,12 +178,20 @@ extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t p osl_dma_unmap((osh), (pa), (size), (direction)) extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *txp_dmah); -extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction); +extern void osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction); /* API for DMA addressing capability */ #define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);}) -#if (defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)) +#define OSL_SMP_WMB() smp_wmb() + +/* API for CPU relax */ +extern void osl_cpu_relax(void); 
+#define OSL_CPU_RELAX() osl_cpu_relax() + +#if (!defined(DHD_USE_COHERENT_MEM_FOR_RING) && defined(__ARM_ARCH_7A__)) || \ + (defined(STBLINUX) && defined(__ARM_ARCH_7A__)) || (defined(CONFIG_ARCH_MSM8996) || \ + defined(CONFIG_SOC_EXYNOS8890)) extern void osl_cache_flush(void *va, uint size); extern void osl_cache_inv(void *va, uint size); extern void osl_prefetch(const void *ptr); @@ -193,8 +201,11 @@ extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction); #ifdef __ARM_ARCH_7A__ extern int osl_arch_is_coherent(void); #define OSL_ARCH_IS_COHERENT() osl_arch_is_coherent() + extern int osl_acp_war_enab(void); + #define OSL_ACP_WAR_ENAB() osl_acp_war_enab() #else #define OSL_ARCH_IS_COHERENT() NULL + #define OSL_ACP_WAR_ENAB() NULL #endif /* __ARM_ARCH_7A__ */ #else #define OSL_CACHE_FLUSH(va, len) BCM_REFERENCE(va) @@ -202,7 +213,8 @@ extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction); #define OSL_PREFETCH(ptr) BCM_REFERENCE(ptr) #define OSL_ARCH_IS_COHERENT() NULL -#endif + #define OSL_ACP_WAR_ENAB() NULL +#endif /* register access macros */ #if defined(BCMSDIO) @@ -211,21 +223,7 @@ extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction); (uintptr)(r), sizeof(*(r)), (v))) #define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \ (uintptr)(r), sizeof(*(r)))) -#elif defined(BCM47XX_ACP_WAR) -extern void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size); - -#define OSL_READ_REG(osh, r) \ - ({\ - __typeof(*(r)) __osl_v; \ - osl_pcie_rreg(osh, (uintptr)(r), (void *)&__osl_v, sizeof(*(r))); \ - __osl_v; \ - }) -#endif - -#if defined(BCM47XX_ACP_WAR) - #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;}) - #define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); bus_op;}) -#else +#endif #if defined(BCMSDIO) #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \ @@ -235,8 +233,7 @@ extern void osl_pcie_rreg(osl_t *osh, ulong 
addr, void *v, uint size); #else #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;}) #define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;}) -#endif -#endif /* BCM47XX_ACP_WAR */ +#endif #define OSL_ERROR(bcmerror) osl_error(bcmerror) extern int osl_error(int bcmerror); @@ -268,6 +265,8 @@ extern int osl_error(int bcmerror); /* register access macros */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && \ + defined(CONFIG_X86) #define R_REG(osh, r) (\ SELECT_BUS_READ(osh, \ ({ \ @@ -279,21 +278,54 @@ extern int osl_error(int bcmerror); readw((volatile uint16*)(r)); break; \ case sizeof(uint32): __osl_v = \ readl((volatile uint32*)(r)); break; \ + case sizeof(uint64): __osl_v = \ + readq((volatile uint64*)(r)); break; \ } \ __osl_v; \ }), \ OSL_READ_REG(osh, r)) \ ) - +#else +#define R_REG(osh, r) (\ + SELECT_BUS_READ(osh, \ + ({ \ + __typeof(*(r)) __osl_v; \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): __osl_v = \ + readb((volatile uint8*)(r)); break; \ + case sizeof(uint16): __osl_v = \ + readw((volatile uint16*)(r)); break; \ + case sizeof(uint32): __osl_v = \ + readl((volatile uint32*)(r)); break; \ + } \ + __osl_v; \ + }), \ + OSL_READ_REG(osh, r)) \ +) +#endif /* KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && defined(CONFIG_X86) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && \ + defined(CONFIG_X86) #define W_REG(osh, r, v) do { \ SELECT_BUS_WRITE(osh, \ switch (sizeof(*(r))) { \ case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \ case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \ case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \ + case sizeof(uint64): writeq((uint64)(v), (volatile uint64*)(r)); break; \ }, \ (OSL_WRITE_REG(osh, r, v))); \ } while (0) +#else +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): 
writeb((uint8)(v), (volatile uint8*)(r)); break; \ + case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \ + case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \ + }, \ + (OSL_WRITE_REG(osh, r, v))); \ + } while (0) +#endif /* KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && defined(CONFIG_X86) */ #define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) #define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) @@ -315,7 +347,7 @@ extern int osl_error(int bcmerror); #define OSL_GETCYCLES(x) rdtscl((x)) #else #define OSL_GETCYCLES(x) ((x) = 0) -#endif +#endif /* dereference an address that may cause a bus exception */ #define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; }) @@ -343,12 +375,21 @@ extern int osl_error(int bcmerror); #define PKTGET(osh, len, send) osl_pktget((osh), (len), __LINE__, __FILE__) #define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__) #else +#ifdef BCM_OBJECT_TRACE +#define PKTGET(osh, len, send) osl_pktget((osh), (len), __LINE__, __FUNCTION__) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FUNCTION__) +#else #define PKTGET(osh, len, send) osl_pktget((osh), (len)) #define PKTDUP(osh, skb) osl_pktdup((osh), (skb)) +#endif /* BCM_OBJECT_TRACE */ #endif /* BCMDBG_CTRACE */ #define PKTLIST_DUMP(osh, buf) BCM_REFERENCE(osh) #define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh) +#if defined(BCM_OBJECT_TRACE) +#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__) +#else #define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send)) +#endif /* BCM_OBJECT_TRACE */ #ifdef CONFIG_DHD_USE_STATIC_BUF #define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len)) #define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send)) @@ -404,10 +445,13 @@ extern int osl_error(int bcmerror); #define PKTID(skb) ({BCM_REFERENCE(skb); 0;}) #define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);}) 
#define PKTSHRINK(osh, m) ({BCM_REFERENCE(osh); m;}) -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) -#define PKTORPHAN(skb) skb_orphan(skb) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER) +#define PKTORPHAN(skb, tsq) osl_pkt_orphan_partial(skb, tsq) +extern void osl_pkt_orphan_partial(struct sk_buff *skb, int tsq); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) +#define PKTORPHAN(skb, tsq) skb_orphan(skb, tsq) #else -#define PKTORPHAN(skb) ({BCM_REFERENCE(skb); 0;}) +#define PKTORPHAN(skb, tsq) ({BCM_REFERENCE(skb); 0;}) #endif /* LINUX VERSION >= 3.6 */ @@ -520,7 +564,7 @@ typedef struct ctfpool { #define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->__unused) #endif /* 2.6.22 */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) #define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->ctfpool) #define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->ctfpool)->head) #else @@ -664,7 +708,7 @@ extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt, extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt); #define PKTFRMFWDER(osh, skbs, skb_cnt) \ osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt)) -#endif +#endif /** GMAC Forwarded packet tagging for reduced cache flush/invalidate. 
@@ -819,7 +863,11 @@ extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt); #define PKTCLRFAFREED(skb) BCM_REFERENCE(skb) #endif /* BCMFA */ +#if defined(BCM_OBJECT_TRACE) +extern void osl_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller); +#else extern void osl_pktfree(osl_t *osh, void *skb, bool send); +#endif /* BCM_OBJECT_TRACE */ extern void *osl_pktget_static(osl_t *osh, uint len); extern void osl_pktfree_static(osl_t *osh, void *skb, bool send); extern void osl_pktclone(osl_t *osh, void **pkt); @@ -833,9 +881,14 @@ extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file); struct bcmstrbuf; extern void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b); #else -extern void *osl_pkt_frmnative(osl_t *osh, void *skb); +#ifdef BCM_OBJECT_TRACE +extern void *osl_pktget(osl_t *osh, uint len, int line, const char *caller); +extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller); +#else extern void *osl_pktget(osl_t *osh, uint len); extern void *osl_pktdup(osl_t *osh, void *skb); +#endif /* BCM_OBJECT_TRACE */ +extern void *osl_pkt_frmnative(osl_t *osh, void *skb); #endif /* BCMDBG_CTRACE */ extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt); #ifdef BCMDBG_CTRACE @@ -962,6 +1015,20 @@ typedef struct sec_cma_info { struct sec_mem_elem *sec_alloc_list_tail; } sec_cma_info_t; +/* Current STB 7445D1 doesn't use ACP and it is non-coherrent. + * Adding these dummy values for build apss only + * When we revisit need to change these. 
+ */ +#if defined(STBLINUX) + +#if defined(__ARM_ARCH_7A__) +#define ACP_WAR_ENAB() 0 +#define ACP_WIN_LIMIT 1 +#define arch_is_coherent() 0 +#endif /* __ARM_ARCH_7A__ */ + +#endif /* STBLINUX */ + #ifdef BCM_SECURE_DMA #define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) \ @@ -973,13 +1040,8 @@ typedef struct sec_cma_info { #define SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset) \ osl_sec_dma_unmap((osh), (pa), (size), (direction), (p), (dmah), (pcma), (offset)) #define SECURE_DMA_UNMAP_ALL(osh, pcma) \ -osl_sec_dma_unmap_all((osh), (pcma)) - + osl_sec_dma_unmap_all((osh), (pcma)) #if defined(__ARM_ARCH_7A__) -#define ACP_WAR_ENAB() 0 -#define ACP_WIN_LIMIT 0 -#define arch_is_coherent() 0 - #define CMA_BUFSIZE_4K 4096 #define CMA_BUFSIZE_2K 2048 #define CMA_BUFSIZE_512 512 @@ -999,10 +1061,10 @@ osl_sec_dma_unmap_all((osh), (pcma)) typedef struct sec_mem_elem { size_t size; int direction; - phys_addr_t pa_cma; /* physical address */ - void *va; /* virtual address of driver pkt */ - dma_addr_t dma_handle; /* bus address assign by linux */ - void *vac; /* virtual address of cma buffer */ + phys_addr_t pa_cma; /**< physical address */ + void *va; /**< virtual address of driver pkt */ + dma_addr_t dma_handle; /**< bus address assign by linux */ + void *vac; /**< virtual address of cma buffer */ struct sec_mem_elem *next; } sec_mem_elem_t; @@ -1017,4 +1079,12 @@ extern void osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int extern void osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info); #endif /* BCM_SECURE_DMA */ + +typedef struct sk_buff_head PKT_LIST; +#define PKTLIST_INIT(x) skb_queue_head_init((x)) +#define PKTLIST_ENQ(x, y) skb_queue_head((struct sk_buff_head *)(x), (struct sk_buff *)(y)) +#define PKTLIST_DEQ(x) skb_dequeue((struct sk_buff_head *)(x)) +#define PKTLIST_UNLINK(x, y) skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x)) +#define PKTLIST_FINI(x) skb_queue_purge((struct sk_buff_head 
*)(x)) + #endif /* _linux_osl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/linuxver.h b/drivers/amlogic/wifi/bcmdhd/include/linuxver.h similarity index 90% rename from drivers/net/wireless/bcmdhd/include/linuxver.h rename to drivers/amlogic/wifi/bcmdhd/include/linuxver.h index 7e13746ea2000..7fa3e7b76332b 100644 --- a/drivers/net/wireless/bcmdhd/include/linuxver.h +++ b/drivers/amlogic/wifi/bcmdhd/include/linuxver.h @@ -2,14 +2,41 @@ * Linux-specific abstractions to gain some independence from linux kernel versions. * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: linuxver.h 431983 2013-10-25 06:53:27Z $ + * + * <> + * + * $Id: linuxver.h 604758 2015-12-08 12:01:08Z $ */ #ifndef _linuxver_h_ #define _linuxver_h_ +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-but-set-variable" +#pragma GCC diagnostic ignored "-Wunused-but-set-parameter" +#endif + #include #include #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) @@ -21,11 +48,11 @@ #include #endif #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */ -#include #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)) #include #endif +#include #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)) /* __NO_VERSION__ must be defined for all linkables except one in 2.2 */ @@ -586,10 +613,16 @@ static inline bool binary_sema_up(tsk_ctl_t *tsk) (tsk_ctl)->proc_name = name; \ (tsk_ctl)->terminated = FALSE; \ (tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char*)name); \ - (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \ - spin_lock_init(&((tsk_ctl)->spinlock)); \ - DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \ - (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + if (IS_ERR((tsk_ctl)->p_task)) { \ + (tsk_ctl)->thr_pid = DHD_PID_KT_INVALID; \ + DBG_THR(("%s(): thread:%s:%lx failed\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + } else { \ + (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \ + spin_lock_init(&((tsk_ctl)->spinlock)); \ + DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + } \ } #define PROC_STOP(tsk_ctl) \ @@ -686,7 +719,7 @@ not match our unaligned address for < 2.6.24 #define WL_ISR(i, d, p) wl_isr((i), (d)) #else #define WL_ISR(i, d, p) wl_isr((i), (d), (p)) -#endif /* < 2.6.20 */ +#endif /* < 2.6.20 */ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) #define netdev_priv(dev) dev->priv @@ -716,7 +749,7 @@ not match our unaligned address for < 2.6.24 * Overide latest kfifo functions with 
* older version to work on older kernels */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) #define kfifo_in_spinlocked(a, b, c, d) kfifo_put(a, (u8 *)b, c) #define kfifo_out_spinlocked(a, b, c, d) kfifo_get(a, (u8 *)b, c) #define kfifo_esize(a) 1 @@ -727,4 +760,15 @@ not match our unaligned address for < 2.6.24 #define kfifo_esize(a) 1 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */ +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic pop +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +static inline struct inode *file_inode(const struct file *f) +{ + return f->f_dentry->d_inode; +} +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */ + #endif /* _linuxver_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/miniopt.h b/drivers/amlogic/wifi/bcmdhd/include/miniopt.h similarity index 54% rename from drivers/net/wireless/bcmdhd/include/miniopt.h rename to drivers/amlogic/wifi/bcmdhd/include/miniopt.h index a6fbe3c6c02cc..2eb6d18ea7ca1 100644 --- a/drivers/net/wireless/bcmdhd/include/miniopt.h +++ b/drivers/amlogic/wifi/bcmdhd/include/miniopt.h @@ -1,8 +1,30 @@ /* * Command line options parser. 
* - * $Copyright Open Broadcom Corporation$ - * $Id: miniopt.h 484281 2014-06-12 22:42:26Z $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: miniopt.h 514727 2014-11-12 03:02:48Z $ */ @@ -58,4 +80,4 @@ int miniopt(miniopt_t *t, char **argv); } #endif -#endif /* MINI_OPT_H */ +#endif /* MINI_OPT_H */ diff --git a/drivers/net/wireless/bcmdhd/include/msgtrace.h b/drivers/amlogic/wifi/bcmdhd/include/msgtrace.h similarity index 61% rename from drivers/net/wireless/bcmdhd/include/msgtrace.h rename to drivers/amlogic/wifi/bcmdhd/include/msgtrace.h index 8a3f691dbb0b4..0d67000c9df3f 100644 --- a/drivers/net/wireless/bcmdhd/include/msgtrace.h +++ b/drivers/amlogic/wifi/bcmdhd/include/msgtrace.h @@ -1,9 +1,30 @@ /* * Trace messages sent over HBUS * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: msgtrace.h 439681 2013-11-27 15:39:50Z $ + * + * <> + * + * $Id: msgtrace.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _MSGTRACE_H diff --git a/drivers/net/wireless/bcmdhd/include/osl.h b/drivers/amlogic/wifi/bcmdhd/include/osl.h similarity index 74% rename from drivers/net/wireless/bcmdhd/include/osl.h rename to drivers/amlogic/wifi/bcmdhd/include/osl.h index 5324e5db5fd9b..8a00f9d55a83e 100644 --- a/drivers/net/wireless/bcmdhd/include/osl.h +++ b/drivers/amlogic/wifi/bcmdhd/include/osl.h @@ -1,9 +1,30 @@ /* * OS Abstraction Layer * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: osl.h 503131 2014-09-17 12:16:08Z $ + * + * <> + * + * $Id: osl.h 526460 2015-01-14 08:25:24Z $ */ #ifndef _osl_h_ @@ -22,7 +43,11 @@ typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, +#if defined(WL_UNITTEST) +#include +#else #include +#endif #ifndef PKTDBG_TRACE #define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh) @@ -38,11 +63,11 @@ typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, #ifndef AND_REG #define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) -#endif /* !AND_REG */ +#endif /* !AND_REG */ #ifndef OR_REG #define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) -#endif /* !OR_REG */ +#endif /* !OR_REG */ #if !defined(OSL_SYSUPTIME) #define OSL_SYSUPTIME() (0) @@ -51,6 +76,14 @@ typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, #define OSL_SYSUPTIME_SUPPORT TRUE #endif /* OSL_SYSUPTIME */ +#ifndef OSL_SYS_HALT +#define OSL_SYS_HALT() do {} while (0) +#endif + +#ifndef OSL_MEM_AVAIL +#define OSL_MEM_AVAIL() (0xffffffff) +#endif + #if !defined(PKTC) && !defined(PKTC_DONGLE) #define PKTCGETATTR(skb) (0) #define PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb) @@ -126,7 +159,8 @@ do { \ #define PKTISFRAG(osh, lb) (0) #define PKTFRAGISCHAINED(osh, i) (0) /* TRIM Tail bytes from lfrag */ -#define PKTFRAG_TRIM_TAILBYTES(osh, p, len) PKTSETLEN(osh, p, PKTLEN(osh, p) - len) +#define PKTFRAG_TRIM_TAILBYTES(osh, p, len, type) PKTSETLEN(osh, p, PKTLEN(osh, p) - len) + #ifdef BCM_SECURE_DMA #define SECURE_DMA_ENAB(osh) (1) #else @@ -140,4 +174,5 @@ do { \ #endif + #endif /* _osl_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/osl_decl.h b/drivers/amlogic/wifi/bcmdhd/include/osl_decl.h new file mode 100644 index 0000000000000..6c8d86eeabf17 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/osl_decl.h @@ -0,0 +1,37 @@ +/* + * osl forward declarations + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a 
separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: osl_decl.h 591283 2015-10-07 11:52:00Z $ + */ + +#ifndef _osl_decl_h_ +#define _osl_decl_h_ + +/* osl handle type forward declaration */ +typedef struct osl_info osl_t; +typedef struct osl_dmainfo osldma_t; +extern unsigned int lmtest; /* low memory test */ +#endif diff --git a/drivers/amlogic/wifi/bcmdhd/include/osl_ext.h b/drivers/amlogic/wifi/bcmdhd/include/osl_ext.h new file mode 100644 index 0000000000000..61984e68c4d02 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/osl_ext.h @@ -0,0 +1,697 @@ +/* + * OS Abstraction Layer Extension - the APIs defined by the "extension" API + * are only supported by a subset of all operating systems. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: osl_ext.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _osl_ext_h_ +#define _osl_ext_h_ + + +/* ---- Include Files ---------------------------------------------------- */ + +#if defined(TARGETOS_symbian) + #include + #include +#elif defined(THREADX) + #include +#else + #define OSL_EXT_DISABLED +#endif + +/* Include base operating system abstraction. */ +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* ---- Constants and Types ---------------------------------------------- */ + +/* ----------------------------------------------------------------------- + * Generic OS types. 
+ */ +typedef enum osl_ext_status_t +{ + OSL_EXT_SUCCESS, + OSL_EXT_ERROR, + OSL_EXT_TIMEOUT + +} osl_ext_status_t; +#define OSL_EXT_STATUS_DECL(status) osl_ext_status_t status; + +#define OSL_EXT_TIME_FOREVER ((osl_ext_time_ms_t)(-1)) +typedef unsigned int osl_ext_time_ms_t; + +typedef unsigned int osl_ext_event_bits_t; + +typedef unsigned int osl_ext_interrupt_state_t; + +/* ----------------------------------------------------------------------- + * Timers. + */ +typedef enum +{ + /* One-shot timer. */ + OSL_EXT_TIMER_MODE_ONCE, + + /* Periodic timer. */ + OSL_EXT_TIMER_MODE_REPEAT + +} osl_ext_timer_mode_t; + +/* User registered callback and parameter to invoke when timer expires. */ +typedef void* osl_ext_timer_arg_t; +typedef void (*osl_ext_timer_callback)(osl_ext_timer_arg_t arg); + + +/* ----------------------------------------------------------------------- + * Tasks. + */ + +/* Task entry argument. */ +typedef void* osl_ext_task_arg_t; + +/* Task entry function. */ +typedef void (*osl_ext_task_entry)(osl_ext_task_arg_t arg); + +/* Abstract task priority levels. */ +typedef enum +{ + OSL_EXT_TASK_IDLE_PRIORITY, + OSL_EXT_TASK_LOW_PRIORITY, + OSL_EXT_TASK_LOW_NORMAL_PRIORITY, + OSL_EXT_TASK_NORMAL_PRIORITY, + OSL_EXT_TASK_HIGH_NORMAL_PRIORITY, + OSL_EXT_TASK_HIGHEST_PRIORITY, + OSL_EXT_TASK_TIME_CRITICAL_PRIORITY, + + /* This must be last. */ + OSL_EXT_TASK_NUM_PRIORITES +} osl_ext_task_priority_t; + + +#ifndef OSL_EXT_DISABLED + +/* ---- Variable Externs ------------------------------------------------- */ +/* ---- Function Prototypes ---------------------------------------------- */ + + +/* -------------------------------------------------------------------------- +** Semaphore +*/ + +/**************************************************************************** +* Function: osl_ext_sem_create +* +* Purpose: Creates a counting semaphore object, which can subsequently be +* used for thread notification. 
+* +* Parameters: name (in) Name to assign to the semaphore (must be unique). +* init_cnt (in) Initial count that the semaphore should have. +* sem (out) Newly created semaphore. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was created successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_create(char *name, int init_cnt, osl_ext_sem_t *sem); + +/**************************************************************************** +* Function: osl_ext_sem_delete +* +* Purpose: Destroys a previously created semaphore object. +* +* Parameters: sem (mod) Semaphore object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was deleted successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_delete(osl_ext_sem_t *sem); + +/**************************************************************************** +* Function: osl_ext_sem_give +* +* Purpose: Increments the count associated with the semaphore. This will +* cause one thread blocked on a take to wake up. +* +* Parameters: sem (mod) Semaphore object to give. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was given successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_give(osl_ext_sem_t *sem); + +/**************************************************************************** +* Function: osl_ext_sem_take +* +* Purpose: Decrements the count associated with the semaphore. If the count +* is less than zero, then the calling task will become blocked until +* another thread does a give on the semaphore. This function will only +* block the calling thread for timeout_msec milliseconds, before +* returning with OSL_EXT_TIMEOUT. 
+* +* Parameters: sem (mod) Semaphore object to take. +* timeout_msec (in) Number of milliseconds to wait for the +* semaphore to enter a state where it can be +* taken. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was taken successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_take(osl_ext_sem_t *sem, osl_ext_time_ms_t timeout_msec); + + +/* -------------------------------------------------------------------------- +** Mutex +*/ + +/**************************************************************************** +* Function: osl_ext_mutex_create +* +* Purpose: Creates a mutex object, which can subsequently be used to control +* mutually exclusion of resources. +* +* Parameters: name (in) Name to assign to the mutex (must be unique). +* mutex (out) Mutex object to initialize. +* +* Returns: OSL_EXT_SUCCESS if the mutex was created successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_create(char *name, osl_ext_mutex_t *mutex); + +/**************************************************************************** +* Function: osl_ext_mutex_delete +* +* Purpose: Destroys a previously created mutex object. +* +* Parameters: mutex (mod) Mutex object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the mutex was deleted successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_delete(osl_ext_mutex_t *mutex); + +/**************************************************************************** +* Function: osl_ext_mutex_acquire +* +* Purpose: Acquires the indicated mutual exclusion object. 
If the object is +* currently acquired by another task, then this function will wait +* for timeout_msec milli-seconds before returning with OSL_EXT_TIMEOUT. +* +* Parameters: mutex (mod) Mutex object to acquire. +* timeout_msec (in) Number of milliseconds to wait for the mutex. +* +* Returns: OSL_EXT_SUCCESS if the mutex was acquired successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_acquire(osl_ext_mutex_t *mutex, osl_ext_time_ms_t timeout_msec); + +/**************************************************************************** +* Function: osl_ext_mutex_release +* +* Purpose: Releases the indicated mutual exclusion object. This makes it +* available for another task to acquire. +* +* Parameters: mutex (mod) Mutex object to release. +* +* Returns: OSL_EXT_SUCCESS if the mutex was released successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_release(osl_ext_mutex_t *mutex); + + +/* -------------------------------------------------------------------------- +** Timers +*/ + +/**************************************************************************** +* Function: osl_ext_timer_create +* +* Purpose: Creates a timer object. +* +* Parameters: name (in) Name of timer. +* timeout_msec (in) Invoke callback after this number of milliseconds. +* mode (in) One-shot or periodic timer. +* func (in) Callback function to invoke on timer expiry. +* arg (in) Argument to callback function. +* timer (out) Timer object to create. +* +* Note: The function callback occurs in interrupt context. The application is +* required to provide context switch for the callback if required. +* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. 
+***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_create(char *name, osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode, + osl_ext_timer_callback func, osl_ext_timer_arg_t arg, osl_ext_timer_t *timer); + +/**************************************************************************** +* Function: osl_ext_timer_delete +* +* Purpose: Destroys a previously created timer object. +* +* Parameters: timer (mod) Timer object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_timer_delete(osl_ext_timer_t *timer); + +/**************************************************************************** +* Function: osl_ext_timer_start +* +* Purpose: Start a previously created timer object. +* +* Parameters: timer (in) Timer object. +* timeout_msec (in) Invoke callback after this number of milliseconds. +* mode (in) One-shot or periodic timer. +* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. +***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_start(osl_ext_timer_t *timer, + osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode); + +/**************************************************************************** +* Function: osl_ext_timer_stop +* +* Purpose: Stop a previously created timer object. +* +* Parameters: timer (in) Timer object. +* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. 
+***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_stop(osl_ext_timer_t *timer); + +/**************************************************************************** +* Function: osl_ext_time_get +* +* Purpose: Returns incrementing time counter. +* +* Parameters: None. +* +* Returns: Returns incrementing time counter in msec. +***************************************************************************** +*/ +osl_ext_time_ms_t osl_ext_time_get(void); + +/* -------------------------------------------------------------------------- +** Tasks +*/ + +/**************************************************************************** +* Function: osl_ext_task_create +* +* Purpose: Create a task. +* +* Parameters: name (in) Pointer to task string descriptor. +* stack (in) Pointer to stack. NULL to allocate. +* stack_size (in) Stack size - in bytes. +* priority (in) Abstract task priority. +* func (in) A pointer to the task entry point function. +* arg (in) Value passed into task entry point function. +* task (out) Task to create. +* +* Returns: OSL_EXT_SUCCESS if the task was created successfully, or an +* error code if the task could not be created. +***************************************************************************** +*/ + +#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \ + osl_ext_task_create_ex((name), (stack), (stack_size), (priority), 0, (func), \ + (arg), (task)) + +osl_ext_status_t osl_ext_task_create_ex(char* name, + void *stack, unsigned int stack_size, osl_ext_task_priority_t priority, + osl_ext_time_ms_t timslice_msec, osl_ext_task_entry func, osl_ext_task_arg_t arg, + osl_ext_task_t *task); + +/**************************************************************************** +* Function: osl_ext_task_delete +* +* Purpose: Destroy a task. +* +* Parameters: task (mod) Task to destroy. 
+* +* Returns: OSL_EXT_SUCCESS if the task was created successfully, or an +* error code if the task could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_delete(osl_ext_task_t *task); + + +/**************************************************************************** +* Function: osl_ext_task_is_running +* +* Purpose: Returns current running task. +* +* Parameters: None. +* +* Returns: osl_ext_task_t of current running task. +***************************************************************************** +*/ +osl_ext_task_t *osl_ext_task_current(void); + + +/**************************************************************************** +* Function: osl_ext_task_yield +* +* Purpose: Yield the CPU to other tasks of the same priority that are +* ready-to-run. +* +* Parameters: None. +* +* Returns: OSL_EXT_SUCCESS if successful, else error code. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_yield(void); + + +/**************************************************************************** +* Function: osl_ext_task_enable_stack_check +* +* Purpose: Enable task stack checking. +* +* Parameters: None. +* +* Returns: OSL_EXT_SUCCESS if successful, else error code. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_enable_stack_check(void); + + +/* -------------------------------------------------------------------------- +** Queue +*/ + +/**************************************************************************** +* Function: osl_ext_queue_create +* +* Purpose: Create a queue. +* +* Parameters: name (in) Name to assign to the queue (must be unique). +* buffer (in) Queue buffer. NULL to allocate. +* size (in) Size of the queue. +* queue (out) Newly created queue. 
+* +* Returns: OSL_EXT_SUCCESS if the queue was created successfully, or an +* error code if the queue could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_create(char *name, + void *queue_buffer, unsigned int queue_size, + osl_ext_queue_t *queue); + +/**************************************************************************** +* Function: osl_ext_queue_delete +* +* Purpose: Destroys a previously created queue object. +* +* Parameters: queue (mod) Queue object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the queue was deleted successfully, or an +* error code if the queue could not be deleteed. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_delete(osl_ext_queue_t *queue); + +/**************************************************************************** +* Function: osl_ext_queue_send +* +* Purpose: Send/add data to the queue. This function will not block the +* calling thread if the queue is full. +* +* Parameters: queue (mod) Queue object. +* data (in) Data pointer to be queued. +* +* Returns: OSL_EXT_SUCCESS if the data was queued successfully, or an +* error code if the data could not be queued. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_send(osl_ext_queue_t *queue, void *data); + +/**************************************************************************** +* Function: osl_ext_queue_send_synchronous +* +* Purpose: Send/add data to the queue. This function will block the +* calling thread until the data is dequeued. +* +* Parameters: queue (mod) Queue object. +* data (in) Data pointer to be queued. +* +* Returns: OSL_EXT_SUCCESS if the data was queued successfully, or an +* error code if the data could not be queued. 
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_send_synchronous(osl_ext_queue_t *queue, void *data); + +/**************************************************************************** +* Function: osl_ext_queue_receive +* +* Purpose: Receive/remove data from the queue. This function will only +* block the calling thread for timeout_msec milliseconds, before +* returning with OSL_EXT_TIMEOUT. +* +* Parameters: queue (mod) Queue object. +* timeout_msec (in) Number of milliseconds to wait for the +* data from the queue. +* data (out) Data pointer received/removed from the queue. +* +* Returns: OSL_EXT_SUCCESS if the data was dequeued successfully, or an +* error code if the data could not be dequeued. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_receive(osl_ext_queue_t *queue, + osl_ext_time_ms_t timeout_msec, void **data); + +/**************************************************************************** +* Function: osl_ext_queue_count +* +* Purpose: Returns the number of items in the queue. +* +* Parameters: queue (mod) Queue object. +* count (out) Data pointer received/removed from the queue. +* +* Returns: OSL_EXT_SUCCESS if the count was returned successfully, or an +* error code if the count is invalid. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_count(osl_ext_queue_t *queue, int *count); + + +/* -------------------------------------------------------------------------- +** Event +*/ + +/**************************************************************************** +* Function: osl_ext_event_create +* +* Purpose: Creates a event object, which can subsequently be used to +* notify and trigger tasks. +* +* Parameters: name (in) Name to assign to the event (must be unique). +* event (out) Event object to initialize. 
+* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_create(char *name, osl_ext_event_t *event); + +/**************************************************************************** +* Function: osl_ext_event_delete +* +* Purpose: Destroys a previously created event object. +* +* Parameters: event (mod) Event object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_delete(osl_ext_event_t *event); + +/**************************************************************************** +* Function: osl_ext_event_get +* +* Purpose: Get event from specified event object. +* +* Parameters: event (mod) Event object to get. +* requested (in) Requested event to get. +* timeout_msec (in) Number of milliseconds to wait for the event. +* event_bits (out) Event bits retrieved. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_get(osl_ext_event_t *event, + osl_ext_event_bits_t requested, osl_ext_time_ms_t timeout_msec, + osl_ext_event_bits_t *event_bits); + +/**************************************************************************** +* Function: osl_ext_event_set +* +* Purpose: Set event of specified event object. +* +* Parameters: event (mod) Event object to set. +* event_bits (in) Event bits to set. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. 
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_set(osl_ext_event_t *event, + osl_ext_event_bits_t event_bits); + + +/* -------------------------------------------------------------------------- +** Interrupt +*/ + +/**************************************************************************** +* Function: osl_ext_interrupt_disable +* +* Purpose: Disable CPU interrupt. +* +* Parameters: None. +* +* Returns: The interrupt state before disable for restoring interrupt. +***************************************************************************** +*/ +osl_ext_interrupt_state_t osl_ext_interrupt_disable(void); + + +/**************************************************************************** +* Function: osl_ext_interrupt_restore +* +* Purpose: Restore CPU interrupt state. +* +* Parameters: state (in) Interrupt state to restore returned from +* osl_ext_interrupt_disable(). +* +* Returns: None. +***************************************************************************** +*/ +void osl_ext_interrupt_restore(osl_ext_interrupt_state_t state); + +#else + +/* ---- Constants and Types ---------------------------------------------- */ + +/* Semaphore. */ +#define osl_ext_sem_t +#define OSL_EXT_SEM_DECL(sem) + +/* Mutex. */ +#define osl_ext_mutex_t +#define OSL_EXT_MUTEX_DECL(mutex) + +/* Timer. */ +#define osl_ext_timer_t +#define OSL_EXT_TIMER_DECL(timer) + +/* Task. */ +#define osl_ext_task_t void +#define OSL_EXT_TASK_DECL(task) + +/* Queue. */ +#define osl_ext_queue_t +#define OSL_EXT_QUEUE_DECL(queue) + +/* Event. 
*/ +#define osl_ext_event_t +#define OSL_EXT_EVENT_DECL(event) + +/* ---- Variable Externs ------------------------------------------------- */ +/* ---- Function Prototypes ---------------------------------------------- */ + +#define osl_ext_sem_create(name, init_cnt, sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_delete(sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_give(sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_take(sem, timeout_msec) (OSL_EXT_SUCCESS) + +#define osl_ext_mutex_create(name, mutex) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_delete(mutex) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_acquire(mutex, timeout_msec) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_release(mutex) (OSL_EXT_SUCCESS) + +#define osl_ext_timer_create(name, timeout_msec, mode, func, arg, timer) \ + (OSL_EXT_SUCCESS) +#define osl_ext_timer_delete(timer) (OSL_EXT_SUCCESS) +#define osl_ext_timer_start(timer, timeout_msec, mode) (OSL_EXT_SUCCESS) +#define osl_ext_timer_stop(timer) (OSL_EXT_SUCCESS) +#define osl_ext_time_get() (0) + +#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \ + (OSL_EXT_SUCCESS) +#define osl_ext_task_delete(task) (OSL_EXT_SUCCESS) +#define osl_ext_task_current() (NULL) +#define osl_ext_task_yield() (OSL_EXT_SUCCESS) +#define osl_ext_task_enable_stack_check() (OSL_EXT_SUCCESS) + +#define osl_ext_queue_create(name, queue_buffer, queue_size, queue) \ + (OSL_EXT_SUCCESS) +#define osl_ext_queue_delete(queue) (OSL_EXT_SUCCESS) +#define osl_ext_queue_send(queue, data) (OSL_EXT_SUCCESS) +#define osl_ext_queue_send_synchronous(queue, data) (OSL_EXT_SUCCESS) +#define osl_ext_queue_receive(queue, timeout_msec, data) \ + (OSL_EXT_SUCCESS) +#define osl_ext_queue_count(queue, count) (OSL_EXT_SUCCESS) + +#define osl_ext_event_create(name, event) (OSL_EXT_SUCCESS) +#define osl_ext_event_delete(event) (OSL_EXT_SUCCESS) +#define osl_ext_event_get(event, requested, timeout_msec, event_bits) \ + (OSL_EXT_SUCCESS) +#define osl_ext_event_set(event, event_bits) 
(OSL_EXT_SUCCESS) + +#define osl_ext_interrupt_disable(void) +#define osl_ext_interrupt_restore(state) + +#endif /* OSL_EXT_DISABLED */ + +#ifdef __cplusplus +} +#endif + +#endif /* _osl_ext_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/packed_section_end.h b/drivers/amlogic/wifi/bcmdhd/include/packed_section_end.h new file mode 100644 index 0000000000000..e3a35c7e92709 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/packed_section_end.h @@ -0,0 +1,63 @@ +/* + * Declare directives for structure packing. No padding will be provided + * between the members of packed structures, and therefore, there is no + * guarantee that structure members will be aligned. + * + * Declaring packed structures is compiler specific. In order to handle all + * cases, packed structures should be delared as: + * + * #include + * + * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { + * some_struct_members; + * } BWL_POST_PACKED_STRUCT foobar_t; + * + * #include + * + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: packed_section_end.h 514727 2014-11-12 03:02:48Z $ + */ + + +/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h + * and undefined in packed_section_end.h. If it is NOT defined at this + * point, then there is a missing include of packed_section_start.h. + */ +#ifdef BWL_PACKED_SECTION + #undef BWL_PACKED_SECTION +#else + #error "BWL_PACKED_SECTION is NOT defined!" +#endif + + + + +/* Compiler-specific directives for structure packing are declared in + * packed_section_start.h. This marks the end of the structure packing section, + * so, undef them here. + */ +#undef BWL_PRE_PACKED_STRUCT +#undef BWL_POST_PACKED_STRUCT diff --git a/drivers/amlogic/wifi/bcmdhd/include/packed_section_start.h b/drivers/amlogic/wifi/bcmdhd/include/packed_section_start.h new file mode 100644 index 0000000000000..617176461f759 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/packed_section_start.h @@ -0,0 +1,67 @@ +/* + * Declare directives for structure packing. No padding will be provided + * between the members of packed structures, and therefore, there is no + * guarantee that structure members will be aligned. + * + * Declaring packed structures is compiler specific. 
In order to handle all + * cases, packed structures should be delared as: + * + * #include + * + * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { + * some_struct_members; + * } BWL_POST_PACKED_STRUCT foobar_t; + * + * #include + * + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: packed_section_start.h 514727 2014-11-12 03:02:48Z $ + */ + + +/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h + * and undefined in packed_section_end.h. If it is already defined at this + * point, then there is a missing include of packed_section_end.h. + */ +#ifdef BWL_PACKED_SECTION + #error "BWL_PACKED_SECTION is already defined!" +#else + #define BWL_PACKED_SECTION +#endif + + + + +/* Declare compiler-specific directives for structure packing. 
*/ +#if defined(__GNUC__) || defined(__lint) + #define BWL_PRE_PACKED_STRUCT + #define BWL_POST_PACKED_STRUCT __attribute__ ((packed)) +#elif defined(__CC_ARM) + #define BWL_PRE_PACKED_STRUCT __packed + #define BWL_POST_PACKED_STRUCT +#else + #error "Unknown compiler!" +#endif diff --git a/drivers/amlogic/wifi/bcmdhd/include/pcicfg.h b/drivers/amlogic/wifi/bcmdhd/include/pcicfg.h new file mode 100644 index 0000000000000..be0a92a17847f --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/pcicfg.h @@ -0,0 +1,260 @@ +/* + * pcicfg.h: PCI configuration constants and structures. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: pcicfg.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _h_pcicfg_ +#define _h_pcicfg_ + + +/* pci config status reg has a bit to indicate that capability ptr is present */ + +#define PCI_CAPPTR_PRESENT 0x0010 + +/* A structure for the config registers is nice, but in most + * systems the config space is not memory mapped, so we need + * field offsetts. :-( + */ +#define PCI_CFG_VID 0 +#define PCI_CFG_DID 2 +#define PCI_CFG_CMD 4 +#define PCI_CFG_STAT 6 +#define PCI_CFG_REV 8 +#define PCI_CFG_PROGIF 9 +#define PCI_CFG_SUBCL 0xa +#define PCI_CFG_BASECL 0xb +#define PCI_CFG_CLSZ 0xc +#define PCI_CFG_LATTIM 0xd +#define PCI_CFG_HDR 0xe +#define PCI_CFG_BIST 0xf +#define PCI_CFG_BAR0 0x10 +#define PCI_CFG_BAR1 0x14 +#define PCI_CFG_BAR2 0x18 +#define PCI_CFG_BAR3 0x1c +#define PCI_CFG_BAR4 0x20 +#define PCI_CFG_BAR5 0x24 +#define PCI_CFG_CIS 0x28 +#define PCI_CFG_SVID 0x2c +#define PCI_CFG_SSID 0x2e +#define PCI_CFG_ROMBAR 0x30 +#define PCI_CFG_CAPPTR 0x34 +#define PCI_CFG_INT 0x3c +#define PCI_CFG_PIN 0x3d +#define PCI_CFG_MINGNT 0x3e +#define PCI_CFG_MAXLAT 0x3f +#define PCI_CFG_DEVCTRL 0xd8 + + +/* PCI CAPABILITY DEFINES */ +#define PCI_CAP_POWERMGMTCAP_ID 0x01 +#define PCI_CAP_MSICAP_ID 0x05 +#define PCI_CAP_VENDSPEC_ID 0x09 +#define PCI_CAP_PCIECAP_ID 0x10 + +/* Data structure to define the Message Signalled Interrupt facility + * Valid for PCI and PCIE configurations + */ +typedef struct _pciconfig_cap_msi { + uint8 capID; + uint8 nextptr; + uint16 msgctrl; + uint32 msgaddr; +} pciconfig_cap_msi; +#define MSI_ENABLE 0x1 /* bit 0 of msgctrl */ + +/* Data structure to define the Power managment facility + * Valid for PCI and PCIE configurations + */ +typedef struct _pciconfig_cap_pwrmgmt { + uint8 capID; + uint8 nextptr; + uint16 pme_cap; + uint16 pme_sts_ctrl; + uint8 pme_bridge_ext; + uint8 data; +} pciconfig_cap_pwrmgmt; + +#define PME_CAP_PM_STATES (0x1f << 27) /* Bits 31:27 states that can generate PME */ +#define PME_CSR_OFFSET 0x4 
/* 4-bytes offset */ +#define PME_CSR_PME_EN (1 << 8) /* Bit 8 Enable generating of PME */ +#define PME_CSR_PME_STAT (1 << 15) /* Bit 15 PME got asserted */ + +/* Data structure to define the PCIE capability */ +typedef struct _pciconfig_cap_pcie { + uint8 capID; + uint8 nextptr; + uint16 pcie_cap; + uint32 dev_cap; + uint16 dev_ctrl; + uint16 dev_status; + uint32 link_cap; + uint16 link_ctrl; + uint16 link_status; + uint32 slot_cap; + uint16 slot_ctrl; + uint16 slot_status; + uint16 root_ctrl; + uint16 root_cap; + uint32 root_status; +} pciconfig_cap_pcie; + +/* PCIE Enhanced CAPABILITY DEFINES */ +#define PCIE_EXTCFG_OFFSET 0x100 +#define PCIE_ADVERRREP_CAPID 0x0001 +#define PCIE_VC_CAPID 0x0002 +#define PCIE_DEVSNUM_CAPID 0x0003 +#define PCIE_PWRBUDGET_CAPID 0x0004 + +/* PCIE Extended configuration */ +#define PCIE_ADV_CORR_ERR_MASK 0x114 +#define CORR_ERR_RE (1 << 0) /* Receiver */ +#define CORR_ERR_BT (1 << 6) /* Bad TLP */ +#define CORR_ERR_BD (1 << 7) /* Bad DLLP */ +#define CORR_ERR_RR (1 << 8) /* REPLAY_NUM rollover */ +#define CORR_ERR_RT (1 << 12) /* Reply timer timeout */ +#define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \ + CORR_ERR_RR | CORR_ERR_RT) + +/* PCIE Root Control Register bits (Host mode only) */ +#define PCIE_RC_CORR_SERR_EN 0x0001 +#define PCIE_RC_NONFATAL_SERR_EN 0x0002 +#define PCIE_RC_FATAL_SERR_EN 0x0004 +#define PCIE_RC_PME_INT_EN 0x0008 +#define PCIE_RC_CRS_EN 0x0010 + +/* PCIE Root Capability Register bits (Host mode only) */ +#define PCIE_RC_CRS_VISIBILITY 0x0001 + +/* Header to define the PCIE specific capabilities in the extended config space */ +typedef struct _pcie_enhanced_caphdr { + uint16 capID; + uint16 cap_ver : 4; + uint16 next_ptr : 12; +} pcie_enhanced_caphdr; + + +#define PCI_BAR0_WIN 0x80 /* backplane addres space accessed by BAR0 */ +#define PCI_BAR1_WIN 0x84 /* backplane addres space accessed by BAR1 */ +#define PCI_SPROM_CONTROL 0x88 /* sprom property control */ +#define PCI_BAR1_CONTROL 0x8c /* 
BAR1 region burst control */ +#define PCI_INT_STATUS 0x90 /* PCI and other cores interrupts */ +#define PCI_INT_MASK 0x94 /* mask of PCI and other cores interrupts */ +#define PCI_TO_SB_MB 0x98 /* signal backplane interrupts */ +#define PCI_BACKPLANE_ADDR 0xa0 /* address an arbitrary location on the system backplane */ +#define PCI_BACKPLANE_DATA 0xa4 /* data at the location specified by above address */ +#define PCI_CLK_CTL_ST 0xa8 /* pci config space clock control/status (>=rev14) */ +#define PCI_BAR0_WIN2 0xac /* backplane addres space accessed by second 4KB of BAR0 */ +#define PCI_GPIO_IN 0xb0 /* pci config space gpio input (>=rev3) */ +#define PCI_GPIO_OUT 0xb4 /* pci config space gpio output (>=rev3) */ +#define PCI_GPIO_OUTEN 0xb8 /* pci config space gpio output enable (>=rev3) */ +#define PCI_L1SS_CTRL2 0x24c /* The L1 PM Substates Control register */ + +/* Private Registers */ +#define PCI_STAT_CTRL 0xa80 +#define PCI_L0_EVENTCNT 0xa84 +#define PCI_L0_STATETMR 0xa88 +#define PCI_L1_EVENTCNT 0xa8c +#define PCI_L1_STATETMR 0xa90 +#define PCI_L1_1_EVENTCNT 0xa94 +#define PCI_L1_1_STATETMR 0xa98 +#define PCI_L1_2_EVENTCNT 0xa9c +#define PCI_L1_2_STATETMR 0xaa0 +#define PCI_L2_EVENTCNT 0xaa4 +#define PCI_L2_STATETMR 0xaa8 + +#define PCI_PMCR_REFUP 0x1814 /* Trefup time */ +#define PCI_PMCR_REFUP_EXT 0x1818 /* Trefup extend Max */ +#define PCI_TPOWER_SCALE_MASK 0x3 +#define PCI_TPOWER_SCALE_SHIFT 3 /* 0:1 is scale and 2 is rsvd */ + + +#define PCI_BAR0_SHADOW_OFFSET (2 * 1024) /* bar0 + 2K accesses sprom shadow (in pci core) */ +#define PCI_BAR0_SPROM_OFFSET (4 * 1024) /* bar0 + 4K accesses external sprom */ +#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024) /* bar0 + 6K accesses pci core registers */ +#define PCI_BAR0_PCISBR_OFFSET (4 * 1024) /* pci core SB registers are at the end of the + * 8KB window, so their address is the "regular" + * address plus 4K + */ +/* + * PCIE GEN2 changed some of the above locations for + * Bar0WrapperBase, SecondaryBAR0Window and 
SecondaryBAR0WrapperBase + * BAR0 maps 32K of register space +*/ +#define PCIE2_BAR0_WIN2 0x70 /* backplane addres space accessed by second 4KB of BAR0 */ +#define PCIE2_BAR0_CORE2_WIN 0x74 /* backplane addres space accessed by second 4KB of BAR0 */ +#define PCIE2_BAR0_CORE2_WIN2 0x78 /* backplane addres space accessed by second 4KB of BAR0 */ + +#define PCI_BAR0_WINSZ (16 * 1024) /* bar0 window size Match with corerev 13 */ +/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */ +#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024) /* bar0 + 8K accesses pci/pcie core registers */ +#define PCI_16KB0_CCREGS_OFFSET (12 * 1024) /* bar0 + 12K accesses chipc core registers */ +#define PCI_16KBB0_WINSZ (16 * 1024) /* bar0 window size */ +#define PCI_SECOND_BAR0_OFFSET (16 * 1024) /* secondary bar 0 window */ + + +/* Header types */ +#define PCI_HEADER_MULTI 0x80 +#define PCI_HEADER_MASK 0x7f +typedef enum { + PCI_HEADER_NORMAL, + PCI_HEADER_BRIDGE, + PCI_HEADER_CARDBUS +} pci_header_types; + +#define PCI_CONFIG_SPACE_SIZE 256 + +#define DWORD_ALIGN(x) (x & ~(0x03)) +#define BYTE_POS(x) (x & 0x3) +#define WORD_POS(x) (x & 0x1) + +#define BYTE_SHIFT(x) (8 * BYTE_POS(x)) +#define WORD_SHIFT(x) (16 * WORD_POS(x)) + +#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF) +#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF) + +#define read_pci_cfg_byte(a) \ + (BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xff) + +#define read_pci_cfg_word(a) \ + (WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xffff) + +#define write_pci_cfg_byte(a, val) do { \ + uint32 tmpval; \ + tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFF << BYTE_POS(a)) | \ + val << BYTE_POS(a); \ + OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \ + } while (0) + +#define write_pci_cfg_word(a, val) do { \ + uint32 tmpval; \ + tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFFFF << WORD_POS(a)) | \ + val << WORD_POS(a); \ + OSL_PCI_WRITE_CONFIG(osh, 
DWORD_ALIGN(a), 4, tmpval); \ + } while (0) + +#endif /* _h_pcicfg_ */ diff --git a/drivers/net/wireless/bcmdhd/include/pcie_core.h b/drivers/amlogic/wifi/bcmdhd/include/pcie_core.h similarity index 92% rename from drivers/net/wireless/bcmdhd/include/pcie_core.h rename to drivers/amlogic/wifi/bcmdhd/include/pcie_core.h index 73028da88e6e4..25a156adcb4f1 100644 --- a/drivers/net/wireless/bcmdhd/include/pcie_core.h +++ b/drivers/amlogic/wifi/bcmdhd/include/pcie_core.h @@ -1,9 +1,30 @@ /* * BCM43XX PCIE core hardware definitions. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: pcie_core.h 483003 2014-06-05 19:57:46Z $ + * + * <> + * + * $Id: pcie_core.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _PCIE_CORE_H #define _PCIE_CORE_H @@ -124,7 +145,9 @@ typedef struct sbpcieregs { uint32 ltr_state; /* 0x1A0 */ uint32 pwr_int_status; /* 0x1A4 */ uint32 pwr_int_mask; /* 0x1A8 */ - uint32 PAD[21]; /* 0x1AC - 0x200 */ + uint32 PAD[13]; /* 0x1AC - 0x1DF */ + uint32 clk_ctl_st; /* 0x1E0 */ + uint32 PAD[7]; /* 0x1E4 - 0x1FF */ pcie_devdmaregs_t h2d0_dmaregs; /* 0x200 - 0x23c */ pcie_devdmaregs_t d2h0_dmaregs; /* 0x240 - 0x27c */ pcie_devdmaregs_t h2d1_dmaregs; /* 0x280 - 0x2bc */ @@ -147,6 +170,8 @@ typedef struct sbpcieregs { #define PCIE_DLYPERST 0x100 /* Delay PeRst to CoE Core */ #define PCIE_DISSPROMLD 0x200 /* DisableSpromLoadOnPerst */ #define PCIE_WakeModeL2 0x1000 /* Wake on L2 */ +#define PCIE_PipeIddqDisable0 0x8000 /* Disable assertion of pcie_pipe_iddq during L1.2 and L2 */ +#define PCIE_PipeIddqDisable1 0x10000 /* Disable assertion of pcie_pipe_iddq during L2 */ #define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */ #define PCIE_CFGDATA 0x124 /* offsetof(configdata) */ @@ -197,9 +222,9 @@ typedef struct sbpcieregs { #define PCIE_MB_D2H_MB_MASK \ (PCIE_MB_TOPCIE_D2H0_DB0 | PCIE_MB_TOPCIE_D2H0_DB1 | \ - PCIE_MB_TOPCIE_D2H1_DB1 | PCIE_MB_TOPCIE_D2H1_DB1 | \ - PCIE_MB_TOPCIE_D2H2_DB1 | PCIE_MB_TOPCIE_D2H2_DB1 | \ - PCIE_MB_TOPCIE_D2H3_DB1 | PCIE_MB_TOPCIE_D2H3_DB1) + PCIE_MB_TOPCIE_D2H1_DB0 | PCIE_MB_TOPCIE_D2H1_DB1 | \ + PCIE_MB_TOPCIE_D2H2_DB0 | PCIE_MB_TOPCIE_D2H2_DB1 | \ + PCIE_MB_TOPCIE_D2H3_DB0 | PCIE_MB_TOPCIE_D2H3_DB1) /* SB to PCIE translation masks */ #define SBTOPCIE0_MASK 0xfc000000 @@ -540,6 +565,8 @@ typedef struct sbpcieregs { #define I_F0_B1 (0x1 << 9) /* Mail box interrupt Function 0 interrupt, bit 1 */ #define PCIECFGREG_DEVCONTROL 0xB4 +#define PCIECFGREG_DEVCONTROL_MRRS_SHFT 12 +#define PCIECFGREG_DEVCONTROL_MRRS_MASK (0x7 << PCIECFGREG_DEVCONTROL_MRRS_SHFT) /* SROM hardware region */ #define 
SROM_OFFSET_BAR1_CTRL 52 @@ -603,11 +630,11 @@ typedef struct sbpcieregs { #define PCIEGEN2_IOC_L2_L3_LINK_SHIFT 15 #define PCIEGEN2_IOC_D0_STATE_MASK (1 << PCIEGEN2_IOC_D0_STATE_SHIFT) -#define PCIEGEN2_IOC_D1_STATE_MASK (1 << PCIEGEN2_IOC_D1_STATE_SHIF) -#define PCIEGEN2_IOC_D2_STATE_MASK (1 << PCIEGEN2_IOC_D2_STATE_SHIF) -#define PCIEGEN2_IOC_D3_STATE_MASK (1 << PCIEGEN2_IOC_D3_STATE_SHIF) -#define PCIEGEN2_IOC_L0_LINK_MASK (1 << PCIEGEN2_IOC_L0_LINK_SHIF) -#define PCIEGEN2_IOC_L1_LINK_MASK (1 << PCIEGEN2_IOC_L1_LINK_SHIF) +#define PCIEGEN2_IOC_D1_STATE_MASK (1 << PCIEGEN2_IOC_D1_STATE_SHIFT) +#define PCIEGEN2_IOC_D2_STATE_MASK (1 << PCIEGEN2_IOC_D2_STATE_SHIFT) +#define PCIEGEN2_IOC_D3_STATE_MASK (1 << PCIEGEN2_IOC_D3_STATE_SHIFT) +#define PCIEGEN2_IOC_L0_LINK_MASK (1 << PCIEGEN2_IOC_L0_LINK_SHIFT) +#define PCIEGEN2_IOC_L1_LINK_MASK (1 << PCIEGEN2_IOC_L1_LINK_SHIFT) #define PCIEGEN2_IOC_L1L2_LINK_MASK (1 << PCIEGEN2_IOC_L1L2_LINK_SHIFT) #define PCIEGEN2_IOC_L2_L3_LINK_MASK (1 << PCIEGEN2_IOC_L2_L3_LINK_SHIFT) @@ -619,6 +646,7 @@ typedef struct sbpcieregs { #ifdef BCMDRIVER void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs); +void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs); #endif /* BCMDRIVER */ #endif /* _PCIE_CORE_H */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11.h b/drivers/amlogic/wifi/bcmdhd/include/proto/802.11.h similarity index 83% rename from drivers/net/wireless/bcmdhd/include/proto/802.11.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/802.11.h index 69bfca29f8e47..7aaea5d0596d3 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/802.11.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/802.11.h @@ -1,9 +1,30 @@ /* - * $Copyright Open Broadcom Corporation$ - * * Fundamental types and constants relating to 802.11 * - * $Id: 802.11.h 495738 2014-08-08 03:36:17Z $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written 
software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: 802.11.h 556559 2015-05-14 01:48:17Z $ */ #ifndef _802_11_H_ @@ -223,6 +244,7 @@ BWL_PRE_PACKED_STRUCT struct dot11_management_header { struct ether_addr bssid; /* BSS ID */ uint16 seq; /* sequence control */ } BWL_POST_PACKED_STRUCT; +typedef struct dot11_management_header dot11_management_header_t; #define DOT11_MGMT_HDR_LEN 24 /* d11 management header length */ /* Management frame payloads */ @@ -294,6 +316,16 @@ BWL_PRE_PACKED_STRUCT struct dot11_action_vht_oper_mode { uint8 mode; } BWL_POST_PACKED_STRUCT; +/* These lengths assume 64 MU groups, as specified in 802.11ac-2013 */ +#define DOT11_ACTION_GID_MEMBERSHIP_LEN 8 /* bytes */ +#define DOT11_ACTION_GID_USER_POS_LEN 16 /* bytes */ +BWL_PRE_PACKED_STRUCT struct dot11_action_group_id { + uint8 category; + uint8 action; + uint8 membership_status[DOT11_ACTION_GID_MEMBERSHIP_LEN]; + uint8 user_position[DOT11_ACTION_GID_USER_POS_LEN]; +} BWL_POST_PACKED_STRUCT; + #define SM_PWRSAVE_ENABLE 1 #define SM_PWRSAVE_MODE 2 @@ -364,6 +396,7 @@ BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr { uint8 action; uint8 data[1]; } BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_frmhdr dot11_action_frmhdr_t; #define DOT11_ACTION_FRMHDR_LEN 2 /** CSA IE data structure */ @@ -478,8 +511,6 @@ BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie { } BWL_POST_PACKED_STRUCT; typedef struct dot11_extcap_ie dot11_extcap_ie_t; -#define DOT11_EXTCAP_LEN_MAX 8 - #define DOT11_EXTCAP_LEN_COEX 1 #define DOT11_EXTCAP_LEN_BT 3 #define DOT11_EXTCAP_LEN_IW 4 @@ -499,11 +530,6 @@ typedef struct dot11_extcap_ie dot11_extcap_ie_t; #define DOT11_EXTCAP_LEN_TDLS_WBW 8 #define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION 8 -BWL_PRE_PACKED_STRUCT struct dot11_extcap { - uint8 extcap[DOT11_EXTCAP_LEN_MAX]; -} BWL_POST_PACKED_STRUCT; -typedef struct dot11_extcap dot11_extcap_t; - /* TDLS Capabilities */ #define DOT11_TDLS_CAP_TDLS 37 /* TDLS support */ #define DOT11_TDLS_CAP_PU_BUFFER_STA 28 /* TDLS Peer U-APSD buffer STA support */ @@ 
-517,17 +543,24 @@ typedef struct dot11_extcap dot11_extcap_t; /* 802.11h/802.11k Measurement Request/Report IEs */ /* Measurement Type field */ -#define DOT11_MEASURE_TYPE_BASIC 0 /* d11 measurement basic type */ -#define DOT11_MEASURE_TYPE_CCA 1 /* d11 measurement CCA type */ -#define DOT11_MEASURE_TYPE_RPI 2 /* d11 measurement RPI type */ -#define DOT11_MEASURE_TYPE_CHLOAD 3 /* d11 measurement Channel Load type */ -#define DOT11_MEASURE_TYPE_NOISE 4 /* d11 measurement Noise Histogram type */ -#define DOT11_MEASURE_TYPE_BEACON 5 /* d11 measurement Beacon type */ -#define DOT11_MEASURE_TYPE_FRAME 6 /* d11 measurement Frame type */ -#define DOT11_MEASURE_TYPE_STAT 7 /* d11 measurement STA Statistics type */ -#define DOT11_MEASURE_TYPE_LCI 8 /* d11 measurement LCI type */ -#define DOT11_MEASURE_TYPE_TXSTREAM 9 /* d11 measurement TX Stream type */ -#define DOT11_MEASURE_TYPE_PAUSE 255 /* d11 measurement pause type */ +#define DOT11_MEASURE_TYPE_BASIC 0 /* d11 measurement basic type */ +#define DOT11_MEASURE_TYPE_CCA 1 /* d11 measurement CCA type */ +#define DOT11_MEASURE_TYPE_RPI 2 /* d11 measurement RPI type */ +#define DOT11_MEASURE_TYPE_CHLOAD 3 /* d11 measurement Channel Load type */ +#define DOT11_MEASURE_TYPE_NOISE 4 /* d11 measurement Noise Histogram type */ +#define DOT11_MEASURE_TYPE_BEACON 5 /* d11 measurement Beacon type */ +#define DOT11_MEASURE_TYPE_FRAME 6 /* d11 measurement Frame type */ +#define DOT11_MEASURE_TYPE_STAT 7 /* d11 measurement STA Statistics type */ +#define DOT11_MEASURE_TYPE_LCI 8 /* d11 measurement LCI type */ +#define DOT11_MEASURE_TYPE_TXSTREAM 9 /* d11 measurement TX Stream type */ +#define DOT11_MEASURE_TYPE_MCDIAGS 10 /* d11 measurement multicast diagnostics */ +#define DOT11_MEASURE_TYPE_CIVICLOC 11 /* d11 measurement location civic */ +#define DOT11_MEASURE_TYPE_LOC_ID 12 /* d11 measurement location identifier */ +#define DOT11_MEASURE_TYPE_DIRCHANQ 13 /* d11 measurement dir channel quality */ +#define DOT11_MEASURE_TYPE_DIRMEAS 
14 /* d11 measurement directional */ +#define DOT11_MEASURE_TYPE_DIRSTATS 15 /* d11 measurement directional stats */ +#define DOT11_MEASURE_TYPE_FTMRANGE 16 /* d11 measurement Fine Timing */ +#define DOT11_MEASURE_TYPE_PAUSE 255 /* d11 measurement pause type */ /* Measurement Request Modes */ #define DOT11_MEASURE_MODE_PARALLEL (1<<0) /* d11 measurement parallel */ @@ -561,6 +594,53 @@ typedef struct dot11_meas_req dot11_meas_req_t; /* length of Measure Request IE data not including variable len */ #define DOT11_MNG_IE_MREQ_FIXED_LEN 3 /* d11 measurement request IE fixed length */ +BWL_PRE_PACKED_STRUCT struct dot11_meas_req_loc { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + BWL_PRE_PACKED_STRUCT union + { + BWL_PRE_PACKED_STRUCT struct { + uint8 subject; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT lci; + BWL_PRE_PACKED_STRUCT struct { + uint8 subject; + uint8 type; /* type of civic location */ + uint8 siu; /* service interval units */ + uint16 si; /* service interval */ + uint8 data[1]; + } BWL_POST_PACKED_STRUCT civic; + BWL_PRE_PACKED_STRUCT struct { + uint16 max_init_delay; /* maximum random initial delay */ + uint8 min_ap_count; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT ftm_range; + } BWL_POST_PACKED_STRUCT req; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_req_loc dot11_meas_req_loc_t; +#define DOT11_MNG_IE_MREQ_MIN_LEN 4 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREQ_LCI_FIXED_LEN 4 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREQ_CIVIC_FIXED_LEN 8 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREQ_FRNG_FIXED_LEN 6 /* d11 measurement report IE length */ + +BWL_PRE_PACKED_STRUCT struct dot11_lci_subelement { + uint8 subelement; + uint8 length; + uint8 lci_data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_lci_subelement dot11_lci_subelement_t; + +BWL_PRE_PACKED_STRUCT struct dot11_civic_subelement { + uint8 type; /* type of civic location */ + uint8 subelement; + 
uint8 length; + uint8 civic_data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_civic_subelement dot11_civic_subelement_t; + BWL_PRE_PACKED_STRUCT struct dot11_meas_rep { uint8 id; uint8 len; @@ -575,10 +655,30 @@ BWL_PRE_PACKED_STRUCT struct dot11_meas_rep { uint16 duration; uint8 map; } BWL_POST_PACKED_STRUCT basic; + BWL_PRE_PACKED_STRUCT struct { + uint8 subelement; + uint8 length; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT lci; + BWL_PRE_PACKED_STRUCT struct { + uint8 type; /* type of civic location */ + uint8 subelement; + uint8 length; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT civic; + BWL_PRE_PACKED_STRUCT struct { + uint8 entry_count; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT ftm_range; uint8 data[1]; } BWL_POST_PACKED_STRUCT rep; } BWL_POST_PACKED_STRUCT; typedef struct dot11_meas_rep dot11_meas_rep_t; +#define DOT11_MNG_IE_MREP_MIN_LEN 5 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_LCI_FIXED_LEN 5 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_CIVIC_FIXED_LEN 6 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_BASIC_FIXED_LEN 15 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_FRNG_FIXED_LEN 4 /* length of Measure Report IE data not including variable len */ #define DOT11_MNG_IE_MREP_FIXED_LEN 3 /* d11 measurement response IE fixed length */ @@ -642,6 +742,7 @@ typedef uint8 ac_bitmap_t; /* AC bitmap of (1 << AC_xx) */ #define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac)))) #define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac)))) + /** WME Information Element (IE) */ BWL_PRE_PACKED_STRUCT struct wme_ie { uint8 oui[3]; @@ -1020,7 +1121,10 @@ typedef struct ti_ie ti_ie_t; */ #define DOT11_RC_BAD_PC 10 /* Unacceptable power capability element */ #define DOT11_RC_BAD_CHANNELS 11 /* Unacceptable supported channels element */ -/* 12 is unused */ + +/* 12 is unused by STA but could be used by AP/GO */ +#define DOT11_RC_DISASSOC_BTM 12 /* Disassociated due to BSS Transition Magmt 
*/ + /* 32-39 are QSTA specific reasons added in 11e */ #define DOT11_RC_UNSPECIFIED_QOS 32 /* unspecified QoS-related reason */ @@ -1032,7 +1136,24 @@ typedef struct ti_ie ti_ie_t; #define DOT11_RC_SETUP_NEEDED 38 /* mechanism needs a setup */ #define DOT11_RC_TIMEOUT 39 /* timeout */ -#define DOT11_RC_MAX 23 /* Reason codes > 23 are reserved */ +#define DOT11_RC_MESH_PEERING_CANCELLED 52 +#define DOT11_RC_MESH_MAX_PEERS 53 +#define DOT11_RC_MESH_CONFIG_POLICY_VIOLN 54 +#define DOT11_RC_MESH_CLOSE_RECVD 55 +#define DOT11_RC_MESH_MAX_RETRIES 56 +#define DOT11_RC_MESH_CONFIRM_TIMEOUT 57 +#define DOT11_RC_MESH_INVALID_GTK 58 +#define DOT11_RC_MESH_INCONSISTENT_PARAMS 59 + +#define DOT11_RC_MESH_INVALID_SEC_CAP 60 +#define DOT11_RC_MESH_PATHERR_NOPROXYINFO 61 +#define DOT11_RC_MESH_PATHERR_NOFWINFO 62 +#define DOT11_RC_MESH_PATHERR_DSTUNREACH 63 +#define DOT11_RC_MESH_MBSSMAC_EXISTS 64 +#define DOT11_RC_MESH_CHANSWITCH_REGREQ 65 +#define DOT11_RC_MESH_CHANSWITCH_UNSPEC 66 + +#define DOT11_RC_MAX 66 /* Reason codes > 66 are reserved */ #define DOT11_RC_TDLS_PEER_UNREACH 25 #define DOT11_RC_TDLS_DOWN_UNSPECIFIED 26 @@ -1142,6 +1263,10 @@ typedef struct ti_ie ti_ie_t; #define DOT11_SC_UNEXP_MSG 70 /* Unexpected message */ #define DOT11_SC_INVALID_SNONCE 71 /* Invalid SNonce */ #define DOT11_SC_INVALID_RSNIE 72 /* Invalid contents of RSNIE */ + +#define DOT11_SC_ANTICLOG_TOCKEN_REQUIRED 76 /* Anti-clogging tocken required */ +#define DOT11_SC_INVALID_FINITE_CYCLIC_GRP 77 /* Invalid contents of RSNIE */ + #define DOT11_SC_ASSOC_VHT_REQUIRED 104 /* Association denied because the requesting * station does not support VHT features. 
*/ @@ -1252,15 +1377,23 @@ typedef struct ti_ie ti_ie_t; #define DOT11_MNG_QOS_MAP_ID 110 /* 11u QoS map set */ #define DOT11_MNG_ROAM_CONSORT_ID 111 /* 11u roaming consortium */ #define DOT11_MNG_EMERGCY_ALERT_ID 112 /* 11u emergency alert identifier */ -#define DOT11_MNG_EXT_CAP_ID 127 /* d11 mgmt ext capability */ +#define DOT11_MNG_MESH_CONFIG 113 /* Mesh Configuration */ +#define DOT11_MNG_MESH_ID 114 /* Mesh ID */ +#define DOT11_MNG_MESH_PEER_MGMT_ID 117 /* Mesh PEER MGMT IE */ + +#define DOT11_MNG_EXT_CAP_ID 127 /* d11 mgmt ext capability */ +#define DOT11_MNG_EXT_PREQ_ID 130 /* Mesh PREQ IE */ +#define DOT11_MNG_EXT_PREP_ID 131 /* Mesh PREP IE */ +#define DOT11_MNG_EXT_PERR_ID 132 /* Mesh PERR IE */ #define DOT11_MNG_VHT_CAP_ID 191 /* d11 mgmt VHT cap id */ #define DOT11_MNG_VHT_OPERATION_ID 192 /* d11 mgmt VHT op id */ +#define DOT11_MNG_EXT_BSSLOAD_ID 193 /* d11 mgmt VHT extended bss load id */ #define DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID 194 /* Wide BW Channel Switch IE */ #define DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID 195 /* VHT transmit Power Envelope IE */ #define DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID 196 /* Channel Switch Wrapper IE */ #define DOT11_MNG_AID_ID 197 /* Association ID IE */ #define DOT11_MNG_OPER_MODE_NOTIF_ID 199 /* d11 mgmt VHT oper mode notif */ - +#define DOT11_MNG_FTM_PARAMS_ID 206 #define DOT11_MNG_WPA_ID 221 /* d11 management WPA id */ #define DOT11_MNG_PROPR_ID 221 @@ -1351,6 +1484,10 @@ typedef struct ti_ie ti_ie_t; #define DOT11_EXT_CAP_FMS 11 /* proxy ARP service support bit position */ #define DOT11_EXT_CAP_PROXY_ARP 12 +/* Civic Location */ +#define DOT11_EXT_CAP_CIVIC_LOC 14 +/* Geospatial Location */ +#define DOT11_EXT_CAP_LCI 15 /* Traffic Filter Service */ #define DOT11_EXT_CAP_TFS 16 /* WNM-Sleep Mode */ @@ -1372,6 +1509,24 @@ typedef struct ti_ie ti_ie_t; #define DOT11_EXT_CAP_WNM_NOTIF 46 /* Operating mode notification - VHT (11ac D3.0 - 8.4.2.29) */ #define DOT11_EXT_CAP_OPER_MODE_NOTIF 62 +/* Fine timing measurement 
- D3.0 */ +#define DOT11_EXT_CAP_FTM_RESPONDER 70 +#define DOT11_EXT_CAP_FTM_INITIATOR 71 /* tentative 11mcd3.0 */ +#ifdef WL_FTM +#define DOT11_EXT_CAP_MAX_BIT_IDX 95 /* !!!update this please!!! */ +#else +#define DOT11_EXT_CAP_MAX_BIT_IDX 62 /* !!!update this please!!! */ +#endif + +/* extended capability */ +#ifndef DOT11_EXTCAP_LEN_MAX +#define DOT11_EXTCAP_LEN_MAX ((DOT11_EXT_CAP_MAX_BIT_IDX + 8) >> 3) +#endif + +BWL_PRE_PACKED_STRUCT struct dot11_extcap { + uint8 extcap[DOT11_EXTCAP_LEN_MAX]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_extcap dot11_extcap_t; /* VHT Operating mode bit fields - (11ac D3.0 - 8.4.1.50) */ #define DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT 0 @@ -1448,6 +1603,8 @@ typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t; #define DOT11_ACTION_CAT_PDPA 9 /* protected dual of public action */ #define DOT11_ACTION_CAT_WNM 10 /* category for WNM */ #define DOT11_ACTION_CAT_UWNM 11 /* category for Unprotected WNM */ +#define DOT11_ACTION_CAT_MESH 13 /* category for Mesh */ +#define DOT11_ACTION_CAT_SELFPROT 15 /* category for Mesh, self protected */ #define DOT11_ACTION_NOTIFICATION 17 #define DOT11_ACTION_CAT_VHT 21 /* VHT action */ #define DOT11_ACTION_CAT_VSP 126 /* protected vendor specific */ @@ -1476,6 +1633,8 @@ typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t; #define DOT11_PUB_ACTION_BSS_COEX_MNG 0 /* 20/40 Coexistence Management action id */ #define DOT11_PUB_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */ #define DOT11_PUB_ACTION_GAS_CB_REQ 12 /* GAS Comeback Request */ +#define DOT11_PUB_ACTION_FTM_REQ 32 /* FTM request */ +#define DOT11_PUB_ACTION_FTM 33 /* FTM measurement */ /* Block Ack action types */ #define DOT11_BA_ACTION_ADDBA_REQ 0 /* ADDBA Req action frame type */ @@ -1587,6 +1746,29 @@ BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_query { typedef struct dot11_bsstrans_query dot11_bsstrans_query_t; #define DOT11_BSSTRANS_QUERY_LEN 4 /* Fixed length */ +/* BTM transition reason */ 
+#define DOT11_BSSTRANS_REASON_UNSPECIFIED 0 +#define DOT11_BSSTRANS_REASON_EXC_FRAME_LOSS 1 +#define DOT11_BSSTRANS_REASON_EXC_TRAFFIC_DELAY 2 +#define DOT11_BSSTRANS_REASON_INSUFF_QOS_CAPACITY 3 +#define DOT11_BSSTRANS_REASON_FIRST_ASSOC 4 +#define DOT11_BSSTRANS_REASON_LOAD_BALANCING 5 +#define DOT11_BSSTRANS_REASON_BETTER_AP_FOUND 6 +#define DOT11_BSSTRANS_REASON_DEAUTH_RX 7 +#define DOT11_BSSTRANS_REASON_8021X_EAP_AUTH_FAIL 8 +#define DOT11_BSSTRANS_REASON_4WAY_HANDSHK_FAIL 9 +#define DOT11_BSSTRANS_REASON_MANY_REPLAYCNT_FAIL 10 +#define DOT11_BSSTRANS_REASON_MANY_DATAMIC_FAIL 11 +#define DOT11_BSSTRANS_REASON_EXCEED_MAX_RETRANS 12 +#define DOT11_BSSTRANS_REASON_MANY_BCAST_DISASSOC_RX 13 +#define DOT11_BSSTRANS_REASON_MANY_BCAST_DEAUTH_RX 14 +#define DOT11_BSSTRANS_REASON_PREV_TRANSITION_FAIL 15 +#define DOT11_BSSTRANS_REASON_LOW_RSSI 16 +#define DOT11_BSSTRANS_REASON_ROAM_FROM_NON_80211 17 +#define DOT11_BSSTRANS_REASON_RX_BTM_REQ 18 +#define DOT11_BSSTRANS_REASON_PREF_LIST_INCLUDED 19 +#define DOT11_BSSTRANS_REASON_LEAVING_ESS 20 + /** BSS Management Transition Request frame header */ BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_req { uint8 category; /* category of action frame (10) */ @@ -2352,6 +2534,9 @@ typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t; #define DOT11_RRM_CAP_BSSAAD 31 #define DOT11_RRM_CAP_BSSAAC 32 #define DOT11_RRM_CAP_AI 33 +#define DOT11_RRM_CAP_FTM_RANGE 34 +#define DOT11_RRM_CAP_CIVIC_LOC 35 +#define DOT11_RRM_CAP_LAST 35 /* Operating Class (formerly "Regulatory Class") definitions */ #define DOT11_OP_CLASS_NONE 255 @@ -2462,13 +2647,24 @@ typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t; #define DOT11_RMREQ_BCN_REPDET_REQUEST 1 /* + requested information elems */ #define DOT11_RMREQ_BCN_REPDET_ALL 2 /* All fields */ +/* Reporting Information (reporting condition) element definition */ +#define DOT11_RMREQ_BCN_REPINFO_LEN 2 /* Beacon Reporting Information length */ +#define DOT11_RMREQ_BCN_REPCOND_DEFAULT 0 /* Report to be issued 
after each measurement */ + /* Sub-element IDs for Beacon Report */ #define DOT11_RMREP_BCN_FRM_BODY 1 +#define DOT11_RMREP_BCN_FRM_BODY_LEN_MAX 224 /* 802.11k-2008 7.3.2.22.6 */ /* Sub-element IDs for Frame Report */ #define DOT11_RMREP_FRAME_COUNT_REPORT 1 -/** Channel load request */ +/* Statistics Group Report: Group IDs */ +#define DOT11_RRM_STATS_GRP_ID_0 0 + +/* Statistics Group Report: Group Data length */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_0 28 + +/* Channel load request */ BWL_PRE_PACKED_STRUCT struct dot11_rmreq_chanload { uint8 id; uint8 len; @@ -2634,6 +2830,151 @@ BWL_PRE_PACKED_STRUCT struct dot11_rmrep_tx_stream { } BWL_POST_PACKED_STRUCT; typedef struct dot11_rmrep_tx_stream dot11_rmrep_tx_stream_t; +enum { + DOT11_FTM_LOCATION_SUBJ_LOCAL = 0, /* Where am I? */ + DOT11_FTM_LOCATION_SUBJ_REMOTE = 1, /* Where are you? */ + DOT11_FTM_LOCATION_SUBJ_THIRDPARTY = 2 /* Where is he/she? */ +}; + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_lci { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 subj; + + /* Following 3 fields are unused. Keep for ROM compatibility. 
*/ + uint8 lat_res; + uint8 lon_res; + uint8 alt_res; + + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_lci dot11_rmreq_ftm_lci_t; + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_lci { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 lci_sub_id; + uint8 lci_sub_len; + /* optional LCI field */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_lci dot11_rmrep_ftm_lci_t; + +#define DOT11_FTM_LCI_SUBELEM_ID 0 +#define DOT11_FTM_LCI_SUBELEM_LEN 2 +#define DOT11_FTM_LCI_FIELD_LEN 16 +#define DOT11_FTM_LCI_UNKNOWN_LEN 2 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_civic { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 subj; + uint8 civloc_type; + uint8 siu; /* service interval units */ + uint16 si; /* service interval */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_civic dot11_rmreq_ftm_civic_t; + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_civic { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 civloc_type; + uint8 civloc_sub_id; + uint8 civloc_sub_len; + /* optional location civic field */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_civic dot11_rmrep_ftm_civic_t; + +#define DOT11_FTM_CIVIC_LOC_TYPE_RFC4776 0 +#define DOT11_FTM_CIVIC_SUBELEM_ID 0 +#define DOT11_FTM_CIVIC_SUBELEM_LEN 2 +#define DOT11_FTM_CIVIC_LOC_SI_NONE 0 +#define DOT11_FTM_CIVIC_TYPE_LEN 1 +#define DOT11_FTM_CIVIC_UNKNOWN_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_subel { + uint8 id; + uint8 len; + uint16 max_age; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_subel dot11_ftm_range_subel_t; +#define DOT11_FTM_RANGE_SUBELEM_ID 4 +#define DOT11_FTM_RANGE_SUBELEM_LEN 2 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_range { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint16 max_init_delay; /* maximum 
random initial delay */ + uint8 min_ap_count; + uint8 data[1]; + /* neighbor report sub-elements */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_range dot11_rmreq_ftm_range_t; +#define DOT11_RMREQ_FTM_RANGE_LEN 8 + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_entry { + uint32 start_tsf; /* 4 lsb of tsf */ + struct ether_addr bssid; + uint16 range; + uint16 max_err; + uint8 rsvd; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_entry dot11_ftm_range_entry_t; +#define DOT11_FTM_RANGE_ENTRY_MAX_COUNT 15 + +enum { + DOT11_FTM_RANGE_ERROR_AP_INCAPABLE = 3, + DOT11_FTM_RANGE_ERROR_AP_FAILED = 4, + DOT11_FTM_RANGE_ERROR_TX_FAILED = 8, + DOT11_FTM_RANGE_ERROR_MAX +}; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_error_entry { + uint32 start_tsf; /* 4 lsb of tsf */ + struct ether_addr bssid; + uint8 code; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_error_entry dot11_ftm_range_error_entry_t; +#define DOT11_FTM_RANGE_ERROR_ENTRY_MAX_COUNT 11 + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_range { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 entry_count; + uint8 data[2]; /* includes pad */ + /* + dot11_ftm_range_entry_t entries[entry_count]; + uint8 error_count; + dot11_ftm_error_entry_t errors[error_count]; + */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_range dot11_rmrep_ftm_range_t; + +#define DOT11_FTM_RANGE_REP_MIN_LEN 6 /* No extra byte for error_count */ +#define DOT11_FTM_RANGE_ENTRY_CNT_MAX 15 +#define DOT11_FTM_RANGE_ERROR_CNT_MAX 11 +#define DOT11_FTM_RANGE_REP_FIXED_LEN 1 /* No extra byte for error_count */ /** Measurement pause request */ BWL_PRE_PACKED_STRUCT struct dot11_rmreq_pause_time { uint8 id; @@ -2686,6 +3027,8 @@ typedef struct dot11_ngbr_bss_term_dur_se dot11_ngbr_bss_term_dur_se_t; #define DOT11_NGBR_BI_CAP_IMM_BA 0x0200 #define DOT11_NGBR_BI_MOBILITY 0x0400 #define DOT11_NGBR_BI_HT 0x0800 +#define DOT11_NGBR_BI_VHT 0x1000 +#define 
DOT11_NGBR_BI_FTM 0x2000 /** Neighbor Report element (11k & 11v) */ BWL_PRE_PACKED_STRUCT struct dot11_neighbor_rep_ie { @@ -2706,6 +3049,7 @@ typedef struct dot11_neighbor_rep_ie dot11_neighbor_rep_ie_t; #define DOT11_BSSTYPE_INFRASTRUCTURE 0 /* d11 infrastructure */ #define DOT11_BSSTYPE_INDEPENDENT 1 /* d11 independent */ #define DOT11_BSSTYPE_ANY 2 /* d11 any BSS type */ +#define DOT11_BSSTYPE_MESH 3 /* d11 Mesh */ #define DOT11_SCANTYPE_ACTIVE 0 /* d11 scan active */ #define DOT11_SCANTYPE_PASSIVE 1 /* d11 scan passive */ @@ -2787,6 +3131,7 @@ typedef struct dot11_lmrep dot11_lmrep_t; #define APHY_SERVICE_NBITS 16 /* APHY service nbits */ #define APHY_TAIL_NBITS 6 /* APHY tail nbits */ #define APHY_CWMIN 15 /* APHY cwmin */ +#define APHY_PHYHDR_DUR 20 /* APHY PHY Header Duration */ /* 802.11 B PHY constants */ #define BPHY_SLOT_TIME 20 /* BPHY slot time */ @@ -2795,6 +3140,8 @@ typedef struct dot11_lmrep dot11_lmrep_t; #define BPHY_PLCP_TIME 192 /* BPHY PLCP time */ #define BPHY_PLCP_SHORT_TIME 96 /* BPHY PLCP short time */ #define BPHY_CWMIN 31 /* BPHY cwmin */ +#define BPHY_SHORT_PHYHDR_DUR 96 /* BPHY Short PHY Header Duration */ +#define BPHY_LONG_PHYHDR_DUR 192 /* BPHY Long PHY Header Duration */ /* 802.11 G constants */ #define DOT11_OFDM_SIGNAL_EXTENSION 6 /* d11 OFDM signal extension */ @@ -2827,6 +3174,7 @@ typedef int vht_group_id_t; #define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00 #define VHT_SIGA1_NSTS_SHIFT 10 +#define VHT_SIGA1_MAX_USERPOS 3 #define VHT_SIGA1_PARTIAL_AID_MASK 0x3fe000 #define VHT_SIGA1_PARTIAL_AID_SHIFT 13 @@ -2880,6 +3228,13 @@ typedef struct d11cnt { #define BRCM_PROP_OUI "\x00\x90\x4C" +/* Action frame type for FTM Initiator Report */ +#define BRCM_FTM_VS_AF_TYPE 14 +enum { + BRCM_FTM_VS_INITIATOR_RPT_SUBTYPE = 1, /* FTM Initiator Report */ + BRCM_FTM_VS_COLLECT_SUBTYPE = 2, /* FTM Collect debug protocol */ +}; + /* Action frame type for RWL */ #define RWL_WIFI_DEFAULT 0 #define RWL_WIFI_FIND_MY_PEER 9 /* Used while finding 
server */ @@ -2889,37 +3244,8 @@ typedef struct d11cnt { #define PROXD_AF_TYPE 11 /* Wifi proximity action frame type */ #define BRCM_RELMACST_AF_TYPE 12 /* RMC action frame type */ -#ifndef LINUX_POSTMOGRIFY_REMOVAL -/* - * This BRCM_PROP_OUI types is intended for use in events to embed additional - * data, and would not be expected to appear on the air -- but having an IE - * format allows IE frame data with extra data in events in that allows for - * more flexible parsing. - */ -#define BRCM_EVT_WL_BSS_INFO 64 - -/** - * Following is the generic structure for brcm_prop_ie (uses BRCM_PROP_OUI). - * DPT uses this format with type set to DPT_IE_TYPE - */ -BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s { - uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ - uint8 len; /* IE length */ - uint8 oui[3]; - uint8 type; /* type of this IE */ - uint16 cap; /* DPT capabilities */ -} BWL_POST_PACKED_STRUCT; -typedef struct brcm_prop_ie_s brcm_prop_ie_t; - -#define BRCM_PROP_IE_LEN 6 /* len of fixed part of brcm_prop ie */ - -#define DPT_IE_TYPE 2 -#define BRCM_SYSCAP_IE_TYPE 3 -#define WET_TUNNEL_IE_TYPE 3 -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ - /* brcm syscap_ie cap */ #define BRCM_SYSCAP_WET_TUNNEL 0x0100 /* Device with WET_TUNNEL support */ @@ -2948,12 +3274,14 @@ typedef struct brcm_ie brcm_ie_t; #define BRF_BLOCKACK 0x8 /* BlockACK capable */ #define BRF_ABCOUNTER_MASK 0xf0 /* afterburner is obsolete, defined for backward compat */ #define BRF_PROP_11N_MCS 0x10 /* re-use afterburner bit */ +#define BRF_MEDIA_CLIENT 0x20 /* re-use afterburner bit to indicate media client device */ #define GET_BRF_PROP_11N_MCS(brcm_ie) \ (!((brcm_ie)->flags & BRF_ABCAP) && ((brcm_ie)->flags & BRF_PROP_11N_MCS)) /* brcm_ie flags1 */ #define BRF1_AMSDU 0x1 /* A-MSDU capable */ +#define BRF1_WNM 0x2 /* WNM capable */ #define BRF1_WMEPS 0x4 /* AP is capable of handling WME + PS w/o APSD */ #define BRF1_PSOFIX 0x8 /* AP has fixed PS mode out-of-order packets */ #define BRF1_RX_LARGE_AGG 0x10 /* 
device can rx large aggregates */ @@ -3008,6 +3336,19 @@ typedef struct relmcast_brcm_prop_ie relmcast_brcm_prop_ie_t; #define RELMCAST_BRCM_PROP_IE_TYPE 55 +/* BRCM BTC IE */ +BWL_PRE_PACKED_STRUCT struct btc_brcm_prop_ie { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 type; /* type inidicates what follows */ + uint32 info; +} BWL_POST_PACKED_STRUCT; +typedef struct btc_brcm_prop_ie btc_brcm_prop_ie_t; + +#define BTC_INFO_BRCM_PROP_IE_TYPE 90 +#define BRCM_BTC_INFO_TYPE_LEN (sizeof(btc_brcm_prop_ie_t) - (2 * sizeof(uint8))) + /* ************* HT definitions. ************* */ #define MCSSET_LEN 16 /* 16-bits per 8-bit set to give 128-bits bitmap of MCS Index */ #define MAX_MCS_NUM (128) /* max mcs number = 128 */ @@ -3319,11 +3660,30 @@ typedef struct vht_cap_ie vht_cap_ie_t; #define VHT_CAP_MCS_MAP_M 0x3 /* mask for 1-stream */ /* assumes VHT_CAP_MCS_MAP_NONE is 3 and 2 bits are used for encoding */ #define VHT_CAP_MCS_MAP_NONE_ALL 0xffff + +/* VHT rates bitmap */ +#define VHT_CAP_MCS_0_7_RATEMAP 0x00ff +#define VHT_CAP_MCS_0_8_RATEMAP 0x01ff +#define VHT_CAP_MCS_0_9_RATEMAP 0x03ff +#define VHT_CAP_MCS_FULL_RATEMAP VHT_CAP_MCS_0_9_RATEMAP + +#define VHT_PROP_MCS_MAP_10_11 0 +#define VHT_PROP_MCS_MAP_UNUSED1 1 +#define VHT_PROP_MCS_MAP_UNUSED2 2 +#define VHT_PROP_MCS_MAP_NONE 3 +#define VHT_PROP_MCS_MAP_NONE_ALL 0xffff + +/* VHT prop rates bitmap */ +#define VHT_PROP_MCS_10_11_RATEMAP 0x0c00 +#define VHT_PROP_MCS_FULL_RATEMAP VHT_PROP_MCS_10_11_RATEMAP + +#if !defined(VHT_CAP_MCS_MAP_0_9_NSS3) /* mcsmap with MCS0-9 for Nss = 3 */ #define VHT_CAP_MCS_MAP_0_9_NSS3 \ ((VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(1)) | \ (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(2)) | \ (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(3))) +#endif /* !VHT_CAP_MCS_MAP_0_9_NSS3 */ #define VHT_CAP_MCS_MAP_NSS_MAX 8 @@ -3338,15 +3698,21 @@ typedef struct vht_cap_ie vht_cap_ie_t; /* Map the mcs code to mcs bit map */ #define VHT_MCS_CODE_TO_MCS_MAP(mcs_code) \ - ((mcs_code == 
VHT_CAP_MCS_MAP_0_7) ? 0xff : \ - (mcs_code == VHT_CAP_MCS_MAP_0_8) ? 0x1ff : \ - (mcs_code == VHT_CAP_MCS_MAP_0_9) ? 0x3ff : 0) + ((mcs_code == VHT_CAP_MCS_MAP_0_7) ? VHT_CAP_MCS_0_7_RATEMAP : \ + (mcs_code == VHT_CAP_MCS_MAP_0_8) ? VHT_CAP_MCS_0_8_RATEMAP : \ + (mcs_code == VHT_CAP_MCS_MAP_0_9) ? VHT_CAP_MCS_0_9_RATEMAP : 0) + +#define VHT_PROP_MCS_CODE_TO_PROP_MCS_MAP(mcs_code) \ + ((mcs_code == VHT_PROP_MCS_MAP_10_11) ? VHT_PROP_MCS_10_11_RATEMAP : 0) /* Map the mcs bit map to mcs code */ #define VHT_MCS_MAP_TO_MCS_CODE(mcs_map) \ - ((mcs_map == 0xff) ? VHT_CAP_MCS_MAP_0_7 : \ - (mcs_map == 0x1ff) ? VHT_CAP_MCS_MAP_0_8 : \ - (mcs_map == 0x3ff) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE) + ((mcs_map == VHT_CAP_MCS_0_7_RATEMAP) ? VHT_CAP_MCS_MAP_0_7 : \ + (mcs_map == VHT_CAP_MCS_0_8_RATEMAP) ? VHT_CAP_MCS_MAP_0_8 : \ + (mcs_map == VHT_CAP_MCS_0_9_RATEMAP) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE) + +#define VHT_PROP_MCS_MAP_TO_PROP_MCS_CODE(mcs_map) \ + (((mcs_map & 0xc00) == 0xc00) ? 
VHT_PROP_MCS_MAP_10_11 : VHT_PROP_MCS_MAP_NONE) /** VHT Capabilities Supported Channel Width */ typedef enum vht_cap_chan_width { @@ -3475,9 +3841,14 @@ typedef struct vht_features_ie_hdr vht_features_ie_hdr_t; #define RSN_AKM_PSK 2 /* Pre-shared Key */ #define RSN_AKM_FBT_1X 3 /* Fast Bss transition using 802.1X */ #define RSN_AKM_FBT_PSK 4 /* Fast Bss transition using Pre-shared Key */ +/* RSN_AKM_MFP_1X and RSN_AKM_MFP_PSK are not used any more + * Just kept here to avoid build issue in BISON/CARIBOU branch + */ #define RSN_AKM_MFP_1X 5 /* SHA256 key derivation, using 802.1X */ #define RSN_AKM_MFP_PSK 6 /* SHA256 key derivation, using Pre-shared Key */ -#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */ +#define RSN_AKM_SHA256_1X 5 /* SHA256 key derivation, using 802.1X */ +#define RSN_AKM_SHA256_PSK 6 /* SHA256 key derivation, using Pre-shared Key */ +#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */ /* OSEN authenticated key managment suite */ #define OSEN_AKM_UNSPECIFIED RSN_AKM_UNSPECIFIED /* Over 802.1x */ @@ -3518,10 +3889,6 @@ typedef struct vht_features_ie_hdr vht_features_ie_hdr_t; #define WCN_OUI "\x00\x50\xf2" /* WCN OUI */ #define WCN_TYPE 4 /* WCN type */ -#ifdef BCMWAPI_WPI -#define SMS4_KEY_LEN 16 -#define SMS4_WPI_CBC_MAC_LEN 16 -#endif /* 802.11r protocol definitions */ @@ -3581,16 +3948,25 @@ BWL_PRE_PACKED_STRUCT struct mmic_ie { } BWL_POST_PACKED_STRUCT; typedef struct mmic_ie mmic_ie_t; +/* 802.11r-2008, 11A.10.3 - RRB frame format */ +BWL_PRE_PACKED_STRUCT struct dot11_ft_rrb_frame { + uint8 frame_type; /* 1 for RRB */ + uint8 packet_type; /* 0 for Request 1 for Response */ + uint16 len; + uint8 cur_ap_addr[ETHER_ADDR_LEN]; + uint8 data[1]; /* IEs Received/Sent in FT Action Req/Resp Frame */ +} BWL_POST_PACKED_STRUCT; + +typedef struct dot11_ft_rrb_frame dot11_ft_rrb_frame_t; + +#define DOT11_FT_RRB_FIXED_LEN 10 +#define DOT11_FT_REMOTE_FRAME_TYPE 1 +#define DOT11_FT_PACKET_REQ 0 +#define DOT11_FT_PACKET_RESP 1 + #define 
BSSID_INVALID "\x00\x00\x00\x00\x00\x00" #define BSSID_BROADCAST "\xFF\xFF\xFF\xFF\xFF\xFF" -#ifdef BCMWAPI_WAI -#define WAPI_IE_MIN_LEN 20 /* WAPI IE min length */ -#define WAPI_VERSION 1 /* WAPI version */ -#define WAPI_VERSION_LEN 2 /* WAPI version length */ -#define WAPI_OUI "\x00\x14\x72" /* WAPI OUI */ -#define WAPI_OUI_LEN DOT11_OUI_LEN /* WAPI OUI length */ -#endif /* BCMWAPI_WAI */ /* ************* WMM Parameter definitions. ************* */ #define WMM_OUI "\x00\x50\xF2" /* WNN OUI */ @@ -3682,25 +4058,229 @@ typedef struct pu_buffer_status_ie pu_buffer_status_ie_t; #define GAS_COMEBACK_REQUEST_ACTION_FRAME 12 #define GAS_COMEBACK_RESPONSE_ACTION_FRAME 13 +/* FTM - fine timing measurement public action frames */ +BWL_PRE_PACKED_STRUCT struct dot11_ftm_req { + uint8 category; /* category of action frame (4) */ + uint8 action; /* public action (32) */ + uint8 trigger; /* trigger/continue? */ + /* optional lci, civic loc, ftm params */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_req dot11_ftm_req_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm { + uint8 category; /* category of action frame (4) */ + uint8 action; /* public action (33) */ + uint8 dialog; /* dialog token */ + uint8 follow_up; /* follow up dialog token */ + uint8 tod[6]; /* t1 - last depart timestamp */ + uint8 toa[6]; /* t4 - last ack arrival timestamp */ + uint8 tod_err[2]; /* t1 error */ + uint8 toa_err[2]; /* t4 error */ + /* optional lci report, civic loc report, ftm params */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm dot11_ftm_t; + +#define DOT11_FTM_ERR_NOT_CONT_OFFSET 0 +#define DOT11_FTM_ERR_NOT_CONT_MASK 0x0001 +#define DOT11_FTM_ERR_NOT_CONT_SHIFT 0 +#define DOT11_FTM_ERR_NOT_CONT(_err) (((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & \ + DOT11_FTM_ERR_NOT_CONT_MASK) >> DOT11_FTM_ERR_NOT_CONT_SHIFT) +#define DOT11_FTM_ERR_SET_NOT_CONT(_err, _val) do {\ + uint8 _err2 = (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET]; \ + _err2 &= ~DOT11_FTM_ERR_NOT_CONT_MASK; \ + _err2 |= ((_val) << 
DOT11_FTM_ERR_NOT_CONT_SHIFT) & DOT11_FTM_ERR_NOT_CONT_MASK; \ + (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \ +} while (0) + +#define DOT11_FTM_ERR_MAX_ERR_OFFSET 0 +#define DOT11_FTM_ERR_MAX_ERR_MASK 0xfff7 +#define DOT11_FTM_ERR_MAX_ERR_SHIFT 1 +#define DOT11_FTM_ERR_MAX_ERR(_err) ((((_err)[1] << 7) | (_err)[0]) >> 1) +#define DOT11_FTM_ERR_SET_MAX_ERR(_err, _val) do {\ + uint16 _val2; \ + _val2 = (((_val) << DOT11_FTM_ERR_MAX_ERR_SHIFT) |\ + ((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & DOT11_FTM_ERR_NOT_CONT_MASK)); \ + (_err)[0] = _val2 & 0xff; \ + (_err)[1] = _val2 >> 8 & 0xff; \ +} while (0) + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_params { + uint8 id; /* DOT11_MNG_FTM_PARAM_ID 8.4.2.166 11mcd2.6/2014 - revisit */ + uint8 len; + uint8 info[9]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_params dot11_ftm_params_t; +#define DOT11_FTM_PARAMS_IE_LEN (sizeof(dot11_ftm_params_t) - 2) + +#define FTM_PARAMS_FIELD(_p, _off, _mask, _shift) (((_p)->info[(_off)] & (_mask)) >> (_shift)) +#define FTM_PARAMS_SET_FIELD(_p, _off, _mask, _shift, _val) do {\ + uint8 _ptmp = (_p)->info[_off] & ~(_mask); \ + (_p)->info[(_off)] = _ptmp | (((_val) << (_shift)) & (_mask)); \ +} while (0) + +#define FTM_PARAMS_STATUS_OFFSET 0 +#define FTM_PARAMS_STATUS_MASK 0x03 +#define FTM_PARAMS_STATUS_SHIFT 0 +#define FTM_PARAMS_STATUS(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_STATUS_OFFSET, \ + FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT) +#define FTM_PARAMS_SET_STATUS(_p, _status) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_STATUS_OFFSET, FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT, _status) + +#define FTM_PARAMS_VALUE_OFFSET 0 +#define FTM_PARAMS_VALUE_MASK 0x7c +#define FTM_PARAMS_VALUE_SHIFT 2 +#define FTM_PARAMS_VALUE(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_VALUE_OFFSET, \ + FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT) +#define FTM_PARAMS_SET_VALUE(_p, _value) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_VALUE_OFFSET, FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT, _value) +#define 
FTM_PARAMS_MAX_VALUE 32 + +#define FTM_PARAMS_NBURSTEXP_OFFSET 1 +#define FTM_PARAMS_NBURSTEXP_MASK 0x0f +#define FTM_PARAMS_NBURSTEXP_SHIFT 0 +#define FTM_PARAMS_NBURSTEXP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_NBURSTEXP_OFFSET, \ + FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT) +#define FTM_PARAMS_SET_NBURSTEXP(_p, _bexp) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_NBURSTEXP_OFFSET, FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT, \ + _bexp) + +#define FTM_PARAMS_NBURST(_p) (1 << FTM_PARAMS_NBURSTEXP(_p)) + +enum { + FTM_PARAMS_BURSTTMO_NOPREF = 15 +}; + +#define FTM_PARAMS_BURSTTMO_OFFSET 1 +#define FTM_PARAMS_BURSTTMO_MASK 0xf0 +#define FTM_PARAMS_BURSTTMO_SHIFT 4 +#define FTM_PARAMS_BURSTTMO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_BURSTTMO_OFFSET, \ + FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT) +/* set timeout in params using _tmo where timeout = 2^(_tmo) * 250us */ +#define FTM_PARAMS_SET_BURSTTMO(_p, _tmo) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_BURSTTMO_OFFSET, FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT, (_tmo)+2) + +#define FTM_PARAMS_BURSTTMO_USEC(_val) ((1 << ((_val)-2)) * 250) +#define FTM_PARAMS_BURSTTMO_VALID(_val) ((((_val) < 12 && (_val) > 1)) || \ + (_val) == FTM_PARAMS_BURSTTMO_NOPREF) +#define FTM_PARAMS_BURSTTMO_MAX_MSEC 128 /* 2^9 * 250us */ +#define FTM_PARAMS_BURSTTMO_MAX_USEC 128000 /* 2^9 * 250us */ + +#define FTM_PARAMS_MINDELTA_OFFSET 2 +#define FTM_PARAMS_MINDELTA_USEC(_p) ((_p)->info[FTM_PARAMS_MINDELTA_OFFSET] * 100) +#define FTM_PARAMS_SET_MINDELTA_USEC(_p, _delta) do { \ + (_p)->info[FTM_PARAMS_MINDELTA_OFFSET] = (_delta) / 100; \ +} while (0) + +#define FTM_PARAMS_PARTIAL_TSF(_p) ((_p)->info[4] << 8 | (_p)->info[3]) +#define FTM_PARAMS_SET_PARTIAL_TSF(_p, _partial_tsf) do { \ + (_p)->info[3] = (_partial_tsf) & 0xff; \ + (_p)->info[4] = ((_partial_tsf) >> 8) & 0xff; \ +} while (0) + +#define FTM_PARAMS_PARTIAL_TSF_MASK 0x0000000003fffc00ULL +#define FTM_PARAMS_PARTIAL_TSF_SHIFT 10 +#define 
FTM_PARAMS_PARTIAL_TSF_BIT_LEN 16 +#define FTM_PARAMS_PARTIAL_TSF_MAX 0xffff + +#define FTM_PARAMS_ASAP_OFFSET 5 +#define FTM_PARAMS_ASAP_MASK 0x4 +#define FTM_PARAMS_ASAP_SHIFT 2 +#define FTM_PARAMS_ASAP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_ASAP_OFFSET, \ + FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT) +#define FTM_PARAMS_SET_ASAP(_p, _asap) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_ASAP_OFFSET, FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT, _asap) + +#define FTM_PARAMS_FTM1_OFFSET 5 +#define FTM_PARAMS_FTM1_MASK 0x02 +#define FTM_PARAMS_FTM1_SHIFT 1 +#define FTM_PARAMS_FTM1(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTM1_OFFSET, \ + FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT) +#define FTM_PARAMS_SET_FTM1(_p, _ftm1) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_FTM1_OFFSET, FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT, _ftm1) + +#define FTM_PARAMS_FTMS_PER_BURST_OFFSET 5 +#define FTM_PARAMS_FTMS_PER_BURST_MASK 0xf8 +#define FTM_PARAMS_FTMS_PER_BURST_SHIFT 3 +#define FTM_PARAMS_FTMS_PER_BURST(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTMS_PER_BURST_OFFSET, \ + FTM_PARAMS_FTMS_PER_BURST_MASK, FTM_PARAMS_FTMS_PER_BURST_SHIFT) +#define FTM_PARAMS_SET_FTMS_PER_BURST(_p, _nftms) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_FTMS_PER_BURST_OFFSET, FTM_PARAMS_FTMS_PER_BURST_MASK, \ + FTM_PARAMS_FTMS_PER_BURST_SHIFT, _nftms) + +#define FTM_PARAMS_CHAN_INFO_OFFSET 6 +#define FTM_PARAMS_CHAN_INFO_MASK 0xfc +#define FTM_PARAMS_CHAN_INFO_SHIFT 2 +#define FTM_PARAMS_CHAN_INFO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_CHAN_INFO_OFFSET, \ + FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT) +#define FTM_PARAMS_SET_CHAN_INFO(_p, _ci) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_CHAN_INFO_OFFSET, FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT, _ci) + +/* burst period - units of 100ms */ +#define FTM_PARAMS_BURST_PERIOD(_p) (((_p)->info[8] << 8) | (_p)->info[7]) +#define FTM_PARAMS_SET_BURST_PERIOD(_p, _bp) do {\ + (_p)->info[7] = (_bp) & 0xff; \ + (_p)->info[8] = ((_bp) >> 8) & 0xff; \ +} while (0) + +#define 
FTM_PARAMS_BURST_PERIOD_MS(_p) (FTM_PARAMS_BURST_PERIOD(_p) * 100) + +/* FTM status values - last updated from 11mcD4.0 */ +enum { + FTM_PARAMS_STATUS_RESERVED = 0, + FTM_PARAMS_STATUS_SUCCESSFUL = 1, + FTM_PARAMS_STATUS_INCAPABLE = 2, + FTM_PARAMS_STATUS_FAILED = 3, + /* Below are obsolte */ + FTM_PARAMS_STATUS_OVERRIDDEN = 4, + FTM_PARAMS_STATUS_ASAP_INCAPABLE = 5, + FTM_PARAMS_STATUS_ASAP_FAILED = 6, + /* rest are reserved */ +}; + +enum { + FTM_PARAMS_CHAN_INFO_NO_PREF = 0, + FTM_PARAMS_CHAN_INFO_RESERVE1 = 1, + FTM_PARAMS_CHAN_INFO_RESERVE2 = 2, + FTM_PARAMS_CHAN_INFO_RESERVE3 = 3, + FTM_PARAMS_CHAN_INFO_NON_HT_5 = 4, + FTM_PARAMS_CHAN_INFO_RESERVE5 = 5, + FTM_PARAMS_CHAN_INFO_NON_HT_10 = 6, + FTM_PARAMS_CHAN_INFO_RESERVE7 = 7, + FTM_PARAMS_CHAN_INFO_NON_HT_20 = 8, /* excludes 2.4G, and High rate DSSS */ + FTM_PARAMS_CHAN_INFO_HT_MF_20 = 9, + FTM_PARAMS_CHAN_INFO_VHT_20 = 10, + FTM_PARAMS_CHAN_INFO_HT_MF_40 = 11, + FTM_PARAMS_CHAN_INFO_VHT_40 = 12, + FTM_PARAMS_CHAN_INFO_VHT_80 = 13, + FTM_PARAMS_CHAN_INFO_VHT_80_80 = 14, + FTM_PARAMS_CHAN_INFO_VHT_160_2_RFLOS = 15, + FTM_PARAMS_CHAN_INFO_VHT_160 = 16, + /* Reserved from 17 - 30 */ + FTM_PARAMS_CHAN_INFO_DMG_2160 = 31, + /* Reserved from 32 - 63 */ + FTM_PARAMS_CHAN_INFO_MAX = 63 +}; + /* 802.11u interworking access network options */ -#define IW_ANT_MASK 0x0f -#define IW_INTERNET_MASK 0x10 -#define IW_ASRA_MASK 0x20 -#define IW_ESR_MASK 0x40 -#define IW_UESA_MASK 0x80 +#define IW_ANT_MASK 0x0f +#define IW_INTERNET_MASK 0x10 +#define IW_ASRA_MASK 0x20 +#define IW_ESR_MASK 0x40 +#define IW_UESA_MASK 0x80 /* 802.11u interworking access network type */ -#define IW_ANT_PRIVATE_NETWORK 0 +#define IW_ANT_PRIVATE_NETWORK 0 #define IW_ANT_PRIVATE_NETWORK_WITH_GUEST 1 #define IW_ANT_CHARGEABLE_PUBLIC_NETWORK 2 -#define IW_ANT_FREE_PUBLIC_NETWORK 3 +#define IW_ANT_FREE_PUBLIC_NETWORK 3 #define IW_ANT_PERSONAL_DEVICE_NETWORK 4 #define IW_ANT_EMERGENCY_SERVICES_NETWORK 5 -#define IW_ANT_TEST_NETWORK 14 -#define 
IW_ANT_WILDCARD_NETWORK 15 +#define IW_ANT_TEST_NETWORK 14 +#define IW_ANT_WILDCARD_NETWORK 15 /* 802.11u advertisement protocol */ -#define ADVP_ANQP_PROTOCOL_ID 0 +#define ADVP_ANQP_PROTOCOL_ID 0 +#define ADVP_MIH_PROTOCOL_ID 1 /* 802.11u advertisement protocol masks */ #define ADVP_QRL_MASK 0x7f @@ -3709,32 +4289,32 @@ typedef struct pu_buffer_status_ie pu_buffer_status_ie_t; /* 802.11u advertisement protocol values */ #define ADVP_QRL_REQUEST 0x00 #define ADVP_QRL_RESPONSE 0x7f -#define ADVP_PAME_BI_DEPENDENT 0x00 -#define ADVP_PAME_BI_INDEPENDENT ADVP_PAME_BI_MASK +#define ADVP_PAME_BI_DEPENDENT 0x00 +#define ADVP_PAME_BI_INDEPENDENT ADVP_PAME_BI_MASK /* 802.11u ANQP information ID */ -#define ANQP_ID_QUERY_LIST 256 -#define ANQP_ID_CAPABILITY_LIST 257 -#define ANQP_ID_VENUE_NAME_INFO 258 -#define ANQP_ID_EMERGENCY_CALL_NUMBER_INFO 259 +#define ANQP_ID_QUERY_LIST 256 +#define ANQP_ID_CAPABILITY_LIST 257 +#define ANQP_ID_VENUE_NAME_INFO 258 +#define ANQP_ID_EMERGENCY_CALL_NUMBER_INFO 259 #define ANQP_ID_NETWORK_AUTHENTICATION_TYPE_INFO 260 -#define ANQP_ID_ROAMING_CONSORTIUM_LIST 261 +#define ANQP_ID_ROAMING_CONSORTIUM_LIST 261 #define ANQP_ID_IP_ADDRESS_TYPE_AVAILABILITY_INFO 262 -#define ANQP_ID_NAI_REALM_LIST 263 -#define ANQP_ID_G3PP_CELLULAR_NETWORK_INFO 264 -#define ANQP_ID_AP_GEOSPATIAL_LOCATION 265 -#define ANQP_ID_AP_CIVIC_LOCATION 266 -#define ANQP_ID_AP_LOCATION_PUBLIC_ID_URI 267 -#define ANQP_ID_DOMAIN_NAME_LIST 268 -#define ANQP_ID_EMERGENCY_ALERT_ID_URI 269 -#define ANQP_ID_EMERGENCY_NAI 271 -#define ANQP_ID_VENDOR_SPECIFIC_LIST 56797 +#define ANQP_ID_NAI_REALM_LIST 263 +#define ANQP_ID_G3PP_CELLULAR_NETWORK_INFO 264 +#define ANQP_ID_AP_GEOSPATIAL_LOCATION 265 +#define ANQP_ID_AP_CIVIC_LOCATION 266 +#define ANQP_ID_AP_LOCATION_PUBLIC_ID_URI 267 +#define ANQP_ID_DOMAIN_NAME_LIST 268 +#define ANQP_ID_EMERGENCY_ALERT_ID_URI 269 +#define ANQP_ID_EMERGENCY_NAI 271 +#define ANQP_ID_VENDOR_SPECIFIC_LIST 56797 /* 802.11u ANQP OUI */ -#define 
ANQP_OUI_SUBTYPE 9 +#define ANQP_OUI_SUBTYPE 9 /* 802.11u venue name */ -#define VENUE_LANGUAGE_CODE_SIZE 3 +#define VENUE_LANGUAGE_CODE_SIZE 3 #define VENUE_NAME_SIZE 255 /* 802.11u venue groups */ @@ -3752,34 +4332,34 @@ typedef struct pu_buffer_status_ie pu_buffer_status_ie_t; #define VENUE_OUTDOOR 11 /* 802.11u network authentication type indicator */ -#define NATI_UNSPECIFIED -1 -#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS 0 -#define NATI_ONLINE_ENROLLMENT_SUPPORTED 1 -#define NATI_HTTP_HTTPS_REDIRECTION 2 -#define NATI_DNS_REDIRECTION 3 +#define NATI_UNSPECIFIED -1 +#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS 0 +#define NATI_ONLINE_ENROLLMENT_SUPPORTED 1 +#define NATI_HTTP_HTTPS_REDIRECTION 2 +#define NATI_DNS_REDIRECTION 3 /* 802.11u IP address type availability - IPv6 */ -#define IPA_IPV6_SHIFT 0 -#define IPA_IPV6_MASK (0x03 << IPA_IPV6_SHIFT) +#define IPA_IPV6_SHIFT 0 +#define IPA_IPV6_MASK (0x03 << IPA_IPV6_SHIFT) #define IPA_IPV6_NOT_AVAILABLE 0x00 -#define IPA_IPV6_AVAILABLE 0x01 -#define IPA_IPV6_UNKNOWN_AVAILABILITY 0x02 +#define IPA_IPV6_AVAILABLE 0x01 +#define IPA_IPV6_UNKNOWN_AVAILABILITY 0x02 /* 802.11u IP address type availability - IPv4 */ -#define IPA_IPV4_SHIFT 2 -#define IPA_IPV4_MASK (0x3f << IPA_IPV4_SHIFT) +#define IPA_IPV4_SHIFT 2 +#define IPA_IPV4_MASK (0x3f << IPA_IPV4_SHIFT) #define IPA_IPV4_NOT_AVAILABLE 0x00 -#define IPA_IPV4_PUBLIC 0x01 +#define IPA_IPV4_PUBLIC 0x01 #define IPA_IPV4_PORT_RESTRICT 0x02 -#define IPA_IPV4_SINGLE_NAT 0x03 -#define IPA_IPV4_DOUBLE_NAT 0x04 -#define IPA_IPV4_PORT_RESTRICT_SINGLE_NAT 0x05 -#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT 0x06 -#define IPA_IPV4_UNKNOWN_AVAILABILITY 0x07 +#define IPA_IPV4_SINGLE_NAT 0x03 +#define IPA_IPV4_DOUBLE_NAT 0x04 +#define IPA_IPV4_PORT_RESTRICT_SINGLE_NAT 0x05 +#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT 0x06 +#define IPA_IPV4_UNKNOWN_AVAILABILITY 0x07 /* 802.11u NAI realm encoding */ -#define REALM_ENCODING_RFC4282 0 -#define REALM_ENCODING_UTF8 1 +#define 
REALM_ENCODING_RFC4282 0 +#define REALM_ENCODING_UTF8 1 /* 802.11u IANA EAP method type numbers */ #define REALM_EAP_TLS 13 @@ -3794,36 +4374,36 @@ typedef struct pu_buffer_status_ie pu_buffer_status_ie_t; #define REALM_EAP_EXPANDED 254 /* 802.11u authentication ID */ -#define REALM_EXPANDED_EAP 1 +#define REALM_EXPANDED_EAP 1 #define REALM_NON_EAP_INNER_AUTHENTICATION 2 #define REALM_INNER_AUTHENTICATION_EAP 3 -#define REALM_EXPANDED_INNER_EAP 4 -#define REALM_CREDENTIAL 5 +#define REALM_EXPANDED_INNER_EAP 4 +#define REALM_CREDENTIAL 5 #define REALM_TUNNELED_EAP_CREDENTIAL 6 -#define REALM_VENDOR_SPECIFIC_EAP 221 +#define REALM_VENDOR_SPECIFIC_EAP 221 /* 802.11u non-EAP inner authentication type */ -#define REALM_RESERVED_AUTH 0 +#define REALM_RESERVED_AUTH 0 #define REALM_PAP 1 #define REALM_CHAP 2 -#define REALM_MSCHAP 3 -#define REALM_MSCHAPV2 4 +#define REALM_MSCHAP 3 +#define REALM_MSCHAPV2 4 /* 802.11u credential type */ #define REALM_SIM 1 #define REALM_USIM 2 #define REALM_NFC 3 -#define REALM_HARDWARE_TOKEN 4 -#define REALM_SOFTOKEN 5 -#define REALM_CERTIFICATE 6 -#define REALM_USERNAME_PASSWORD 7 -#define REALM_SERVER_SIDE 8 -#define REALM_RESERVED_CRED 9 -#define REALM_VENDOR_SPECIFIC_CRED 10 +#define REALM_HARDWARE_TOKEN 4 +#define REALM_SOFTOKEN 5 +#define REALM_CERTIFICATE 6 +#define REALM_USERNAME_PASSWORD 7 +#define REALM_SERVER_SIDE 8 +#define REALM_RESERVED_CRED 9 +#define REALM_VENDOR_SPECIFIC_CRED 10 /* 802.11u 3GPP PLMN */ -#define G3PP_GUD_VERSION 0 -#define G3PP_PLMN_LIST_IE 0 +#define G3PP_GUD_VERSION 0 +#define G3PP_PLMN_LIST_IE 0 /** hotspot2.0 indication element (vendor specific) */ BWL_PRE_PACKED_STRUCT struct hs20_ie { @@ -3836,22 +4416,22 @@ typedef struct hs20_ie hs20_ie_t; /** IEEE 802.11 Annex E */ typedef enum { - DOT11_2GHZ_20MHZ_CLASS_12 = 81, /* Ch 1-11 */ - DOT11_5GHZ_20MHZ_CLASS_1 = 115, /* Ch 36-48 */ - DOT11_5GHZ_20MHZ_CLASS_2_DFS = 118, /* Ch 52-64 */ - DOT11_5GHZ_20MHZ_CLASS_3 = 124, /* Ch 149-161 */ - 
DOT11_5GHZ_20MHZ_CLASS_4_DFS = 121, /* Ch 100-140 */ - DOT11_5GHZ_20MHZ_CLASS_5 = 125, /* Ch 149-165 */ - DOT11_5GHZ_40MHZ_CLASS_22 = 116, /* Ch 36-44, lower */ + DOT11_2GHZ_20MHZ_CLASS_12 = 81, /* Ch 1-11 */ + DOT11_5GHZ_20MHZ_CLASS_1 = 115, /* Ch 36-48 */ + DOT11_5GHZ_20MHZ_CLASS_2_DFS = 118, /* Ch 52-64 */ + DOT11_5GHZ_20MHZ_CLASS_3 = 124, /* Ch 149-161 */ + DOT11_5GHZ_20MHZ_CLASS_4_DFS = 121, /* Ch 100-140 */ + DOT11_5GHZ_20MHZ_CLASS_5 = 125, /* Ch 149-165 */ + DOT11_5GHZ_40MHZ_CLASS_22 = 116, /* Ch 36-44, lower */ DOT11_5GHZ_40MHZ_CLASS_23_DFS = 119, /* Ch 52-60, lower */ DOT11_5GHZ_40MHZ_CLASS_24_DFS = 122, /* Ch 100-132, lower */ - DOT11_5GHZ_40MHZ_CLASS_25 = 126, /* Ch 149-157, lower */ - DOT11_5GHZ_40MHZ_CLASS_27 = 117, /* Ch 40-48, upper */ + DOT11_5GHZ_40MHZ_CLASS_25 = 126, /* Ch 149-157, lower */ + DOT11_5GHZ_40MHZ_CLASS_27 = 117, /* Ch 40-48, upper */ DOT11_5GHZ_40MHZ_CLASS_28_DFS = 120, /* Ch 56-64, upper */ DOT11_5GHZ_40MHZ_CLASS_29_DFS = 123, /* Ch 104-136, upper */ - DOT11_5GHZ_40MHZ_CLASS_30 = 127, /* Ch 153-161, upper */ - DOT11_2GHZ_40MHZ_CLASS_32 = 83, /* Ch 1-7, lower */ - DOT11_2GHZ_40MHZ_CLASS_33 = 84, /* Ch 5-11, upper */ + DOT11_5GHZ_40MHZ_CLASS_30 = 127, /* Ch 153-161, upper */ + DOT11_2GHZ_40MHZ_CLASS_32 = 83, /* Ch 1-7, lower */ + DOT11_2GHZ_40MHZ_CLASS_33 = 84, /* Ch 5-11, upper */ } dot11_op_class_t; /* QoS map */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/proto/802.11_bta.h b/drivers/amlogic/wifi/bcmdhd/include/proto/802.11_bta.h new file mode 100644 index 0000000000000..f1da1c1744912 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/802.11_bta.h @@ -0,0 +1,48 @@ +/* + * BT-AMP (BlueTooth Alternate Mac and Phy) 802.11 PAL (Protocol Adaptation Layer) + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 
2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: 802.11_bta.h 518342 2014-12-01 23:21:41Z $ +*/ + +#ifndef _802_11_BTA_H_ +#define _802_11_BTA_H_ + +#define BT_SIG_SNAP_MPROT "\xAA\xAA\x03\x00\x19\x58" + +/* BT-AMP 802.11 PAL Protocols */ +#define BTA_PROT_L2CAP 1 +#define BTA_PROT_ACTIVITY_REPORT 2 +#define BTA_PROT_SECURITY 3 +#define BTA_PROT_LINK_SUPERVISION_REQUEST 4 +#define BTA_PROT_LINK_SUPERVISION_REPLY 5 + +/* BT-AMP 802.11 PAL AMP_ASSOC Type IDs */ +#define BTA_TYPE_ID_MAC_ADDRESS 1 +#define BTA_TYPE_ID_PREFERRED_CHANNELS 2 +#define BTA_TYPE_ID_CONNECTED_CHANNELS 3 +#define BTA_TYPE_ID_CAPABILITIES 4 +#define BTA_TYPE_ID_VERSION 5 +#endif /* _802_11_bta_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11e.h b/drivers/amlogic/wifi/bcmdhd/include/proto/802.11e.h similarity index 79% rename from drivers/net/wireless/bcmdhd/include/proto/802.11e.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/802.11e.h index 914d2781a4e36..ccfa9656b83ba 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/802.11e.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/802.11e.h @@ -1,9 +1,30 @@ /* * 802.11e protocol header 
file * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: 802.11e.h 382883 2013-02-04 23:26:09Z $ + * + * <> + * + * $Id: 802.11e.h 518342 2014-12-01 23:21:41Z $ */ #ifndef _802_11e_H_ @@ -100,12 +121,6 @@ typedef BWL_PRE_PACKED_STRUCT struct tspec { #define DOT11E_STATUS_ADDTS_INVALID_PARAM 1 /* TSPEC invalid parameter status */ #define DOT11E_STATUS_ADDTS_REFUSED_NSBW 3 /* ADDTS refused (non-sufficient BW) */ #define DOT11E_STATUS_ADDTS_REFUSED_AWHILE 47 /* ADDTS refused but could retry later */ -#ifdef BCMCCX -#define CCX_STATUS_ASSOC_DENIED_UNKNOWN 0xc8 /* unspecified QoS related failure */ -#define CCX_STATUS_ASSOC_DENIED_AP_POLICY 0xc9 /* TSPEC refused due to AP policy */ -#define CCX_STATUS_ASSOC_DENIED_NO_BW 0xca /* Assoc denied due to AP insufficient BW */ -#define CCX_STATUS_ASSOC_DENIED_BAD_PARAM 0xcb /* one or more TSPEC with invalid parameter */ -#endif /* BCMCCX */ /* 802.11e DELTS status code */ #define DOT11E_STATUS_QSTA_LEAVE_QBSS 36 /* STA leave QBSS */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/proto/802.1d.h b/drivers/amlogic/wifi/bcmdhd/include/proto/802.1d.h new file mode 100644 index 0000000000000..9610b550467a2 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/802.1d.h @@ -0,0 +1,53 @@ +/* + * Fundamental types and constants relating to 802.1D + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that 
module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: 802.1d.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _802_1_D_ +#define _802_1_D_ + +/* 802.1D priority defines */ +#define PRIO_8021D_NONE 2 /* None = - */ +#define PRIO_8021D_BK 1 /* BK - Background */ +#define PRIO_8021D_BE 0 /* BE - Best-effort */ +#define PRIO_8021D_EE 3 /* EE - Excellent-effort */ +#define PRIO_8021D_CL 4 /* CL - Controlled Load */ +#define PRIO_8021D_VI 5 /* Vi - Video */ +#define PRIO_8021D_VO 6 /* Vo - Voice */ +#define PRIO_8021D_NC 7 /* NC - Network Control */ +#define MAXPRIO 7 /* 0-7 */ +#define NUMPRIO (MAXPRIO + 1) + +#define ALLPRIO -1 /* All prioirty */ + +/* Converts prio to precedence since the numerical value of + * PRIO_8021D_BE and PRIO_8021D_NONE are swapped. + */ +#define PRIO2PREC(prio) \ + (((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? 
((prio^2)) : (prio)) + +#endif /* _802_1_D__ */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/proto/802.3.h b/drivers/amlogic/wifi/bcmdhd/include/proto/802.3.h new file mode 100644 index 0000000000000..9f108c888a2ea --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/802.3.h @@ -0,0 +1,55 @@ +/* + * Fundamental constants relating to 802.3 + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: 802.3.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _802_3_h_ +#define _802_3_h_ + +/* This marks the start of a packed structure section. 
*/ +#include <packed_section_start.h> + +#define SNAP_HDR_LEN 6 /* 802.3 SNAP header length */ +#define DOT3_OUI_LEN 3 /* 802.3 oui length */ + +BWL_PRE_PACKED_STRUCT struct dot3_mac_llc_snap_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; /* dest mac */ + uint8 ether_shost[ETHER_ADDR_LEN]; /* src mac */ + uint16 length; /* frame length incl header */ + uint8 dsap; /* always 0xAA */ + uint8 ssap; /* always 0xAA */ + uint8 ctl; /* always 0x03 */ + uint8 oui[DOT3_OUI_LEN]; /* RFC1042: 0x00 0x00 0x00 + * Bridge-Tunnel: 0x00 0x00 0xF8 + */ + uint16 type; /* ethertype */ +} BWL_POST_PACKED_STRUCT; + +/* This marks the end of a packed structure section. */ +#include <packed_section_end.h> + +#endif /* #ifndef _802_3_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmdhcp.h similarity index 95% rename from drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/bcmdhcp.h index 0efddd2246fe7..5e51979ce393f 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmdhcp.h @@ -1,15 +1,18 @@ /* - * Copyright (C) 2014, Broadcom Corporation - * All Rights Reserved. + * Fundamental constants relating to DHCP Protocol * + * Copyright (C) 2016, Broadcom Corporation + * All Rights Reserved. + * * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation; * the contents of this file may not be disclosed to third parties, copied * or duplicated in any form, in whole or in part, without the prior * written permission of Broadcom Corporation. 
* - * Fundamental constants relating to DHCP Protocol * - * $Id: bcmdhcp.h 382883 2013-02-04 23:26:09Z $ + * <> + * + * $Id: bcmdhcp.h 518342 2014-12-01 23:21:41Z $ */ #ifndef _bcmdhcp_h_ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmeth.h similarity index 65% rename from drivers/net/wireless/bcmdhd/include/proto/bcmeth.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/bcmeth.h index 165efabe90912..7ad453dbad0d5 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmeth.h @@ -1,9 +1,30 @@ /* * Broadcom Ethernettype protocol definitions * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: bcmeth.h 445746 2013-12-30 12:57:26Z $ + * + * <> + * + * $Id: bcmeth.h 518342 2014-12-01 23:21:41Z $ */ /* diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmevent.h similarity index 65% rename from drivers/net/wireless/bcmdhd/include/proto/bcmevent.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/bcmevent.h index ecdb9f5ceb9ba..6c30d57bfbbc8 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmevent.h @@ -1,11 +1,32 @@ /* * Broadcom Event protocol definitions * - * $Copyright Open Broadcom Corporation$ - * * Dependencies: proto/bcmeth.h * - * $Id: bcmevent.h 505096 2014-09-26 12:49:04Z $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmevent.h 555154 2015-05-07 20:46:07Z $ * */ @@ -121,10 +142,6 @@ typedef BWL_PRE_PACKED_STRUCT struct bcm_event { #define WLC_E_IBSS_ASSOC 39 #define WLC_E_RADIO 40 #define WLC_E_PSM_WATCHDOG 41 /* PSM microcode watchdog fired */ -#if defined(BCMCCX) && defined(CCX_SDK) -#define WLC_E_CCX_ASSOC_START 42 /* CCX association start */ -#define WLC_E_CCX_ASSOC_ABORT 43 /* CCX association abort */ -#endif /* BCMCCX && CCX_SDK */ #define WLC_E_PROBREQ_MSG 44 /* probe request received */ #define WLC_E_SCAN_CONFIRM_IND 45 #define WLC_E_PSK_SUP 46 /* WPA Handshake fail */ @@ -134,15 +151,10 @@ typedef BWL_PRE_PACKED_STRUCT struct bcm_event { #define WLC_E_UNICAST_DECODE_ERROR 50 /* Unsupported unicast encrypted frame */ #define WLC_E_MULTICAST_DECODE_ERROR 51 /* Unsupported multicast encrypted frame */ #define WLC_E_TRACE 52 -#ifdef WLBTAMP -#define WLC_E_BTA_HCI_EVENT 53 /* BT-AMP HCI event */ -#endif #define WLC_E_IF 54 /* I/F change (for dongle host notification) */ #define WLC_E_P2P_DISC_LISTEN_COMPLETE 55 /* listen state expires */ #define WLC_E_RSSI 56 /* indicate RSSI change based on configured levels */ -#define WLC_E_PFN_SCAN_COMPLETE 57 /* PFN completed scan of network list */ -/* PFN best network batching event, re-use obsolete WLC_E_PFN_SCAN_COMPLETE */ -#define WLC_E_PFN_BEST_BATCHING 57 +#define WLC_E_PFN_BEST_BATCHING 57 /* PFN best network batching event */ #define WLC_E_EXTLOG_MSG 58 #define WLC_E_ACTION_FRAME 59 /* Action frame Rx */ #define WLC_E_ACTION_FRAME_COMPLETE 60 /* Action frame Tx complete */ @@ -203,6 +215,7 @@ typedef BWL_PRE_PACKED_STRUCT struct bcm_event { #define WLC_E_IBSS_COALESCE 110 /* IBSS Coalescing */ #define WLC_E_AIBSS_TXFAIL 110 /* TXFAIL event for AIBSS, re using event 110 */ #define WLC_E_BSS_LOAD 114 /* Inform host of beacon bss load */ +#define WLC_E_MSCH 120 /* Multiple channel scheduler event */ #define WLC_E_CSA_START_IND 121 #define WLC_E_CSA_DONE_IND 122 #define WLC_E_CSA_FAILURE_IND 123 @@ 
-216,16 +229,27 @@ typedef BWL_PRE_PACKED_STRUCT struct bcm_event { #define WLC_E_RSSI_LQM 133 /* Enhancement addition for WLC_E_RSSI */ #define WLC_E_PFN_GSCAN_FULL_RESULT 134 /* Full probe/beacon (IEs etc) results */ #define WLC_E_PFN_SWC 135 /* Significant change in rssi of bssids being tracked */ -#define WLC_E_RMC_EVENT 139 /* RMC event */ -#define WLC_E_LAST 140 /* highest val + 1 for range checking */ - -#if (WLC_E_LAST > 140) -#error "WLC_E_LAST: Invalid value for last event; must be <= 140." +#define WLC_E_AUTHORIZED 136 /* a STA been authorized for traffic */ +#define WLC_E_PROBREQ_MSG_RX 137 /* probe req with wl_event_rx_frame_data_t header */ +#define WLC_E_PFN_SCAN_COMPLETE 138 /* PFN completed scan of network list */ +#define WLC_E_RMC_EVENT 139 /* RMC Event */ +#define WLC_E_DPSTA_INTF_IND 140 /* DPSTA interface indication */ +#define WLC_E_RRM 141 /* RRM Event */ +#define WLC_E_PFN_SSID_EXT 142 /* SSID EXT event */ +#define WLC_E_ROAM_EXP_EVENT 143 /* Expanded roam event */ +#define WLC_E_LAST 144 /* highest val + 1 for range checking */ +#if (WLC_E_LAST > 144) +#error "WLC_E_LAST: Invalid value for last event; must be <= 144." 
#endif /* WLC_E_LAST */ /* define an API for getting the string name of an event */ extern const char *bcmevent_get_name(uint event_type); +extern void wl_event_to_host_order(wl_event_msg_t * evt); +extern void wl_event_to_network_order(wl_event_msg_t * evt); +/* conversion between host and network order for events */ +void wl_event_to_host_order(wl_event_msg_t * evt); +void wl_event_to_network_order(wl_event_msg_t * evt); /* Event status codes */ @@ -243,14 +267,10 @@ extern const char *bcmevent_get_name(uint event_type); #define WLC_E_STATUS_11HQUIET 11 /* 802.11h quiet period started */ #define WLC_E_STATUS_SUPPRESS 12 /* user disabled scanning (WLC_SET_SCANSUPPRESS) */ #define WLC_E_STATUS_NOCHANS 13 /* no allowable channels to scan */ -#ifdef BCMCCX -#define WLC_E_STATUS_CCXFASTRM 14 /* scan aborted due to CCX fast roam */ -#endif /* BCMCCX */ #define WLC_E_STATUS_CS_ABORT 15 /* abort channel select */ #define WLC_E_STATUS_ERROR 16 /* request failed due to error */ #define WLC_E_STATUS_INVALID 0xff /* Invalid status code to init variables. 
*/ - /* roam reason codes */ #define WLC_E_REASON_INITIAL_ASSOC 0 /* initial assoc */ #define WLC_E_REASON_LOW_RSSI 1 /* roamed due to low RSSI */ @@ -280,21 +300,12 @@ extern const char *bcmevent_get_name(uint event_type); #define WLC_E_RSN_MISMATCH 8 /* STA does not support AP's RSN */ #define WLC_E_PRUNE_NO_COMMON_RATES 9 /* No rates in common with AP */ #define WLC_E_PRUNE_BASIC_RATES 10 /* STA does not support all basic rates of BSS */ -#ifdef BCMCCX -#define WLC_E_PRUNE_CCXFAST_PREVAP 11 /* CCX FAST ROAM: prune previous AP */ -#endif /* def BCMCCX */ #define WLC_E_PRUNE_CIPHER_NA 12 /* BSS's cipher not supported */ #define WLC_E_PRUNE_KNOWN_STA 13 /* AP is already known to us as a STA */ -#ifdef BCMCCX -#define WLC_E_PRUNE_CCXFAST_DROAM 14 /* CCX FAST ROAM: prune unqualified AP */ -#endif /* def BCMCCX */ #define WLC_E_PRUNE_WDS_PEER 15 /* AP is already known to us as a WDS peer */ #define WLC_E_PRUNE_QBSS_LOAD 16 /* QBSS LOAD - AAC is too low */ #define WLC_E_PRUNE_HOME_AP 17 /* prune home AP */ -#ifdef BCMCCX -#define WLC_E_PRUNE_AP_BLOCKED 18 /* prune blocked AP */ -#define WLC_E_PRUNE_NO_DIAG_SUPPORT 19 /* prune due to diagnostic mode not supported */ -#endif /* BCMCCX */ +#define WLC_E_PRUNE_AUTH_RESP_MAC 20 /* suppress auth resp by MAC filter */ /* WPA failure reason codes carried in the WLC_E_PSK_SUP event */ #define WLC_E_SUP_OTHER 0 /* Other reason */ @@ -349,10 +360,7 @@ typedef struct wl_event_data_if { #define WLC_E_IF_ROLE_WDS 2 /* WDS link */ #define WLC_E_IF_ROLE_P2P_GO 3 /* P2P Group Owner */ #define WLC_E_IF_ROLE_P2P_CLIENT 4 /* P2P Client */ -#ifdef WLBTAMP -#define WLC_E_IF_ROLE_BTA_CREATOR 5 /* BT-AMP Creator */ -#define WLC_E_IF_ROLE_BTA_ACCEPTOR 6 /* BT-AMP Acceptor */ -#endif +#define WLC_E_IF_ROLE_IBSS 8 /* IBSS */ /* WLC_E_RSSI event data */ typedef struct wl_event_data_rssi { @@ -365,10 +373,20 @@ typedef struct wl_event_data_rssi { #define WLC_E_IF_FLAGS_BSSCFG_NOIF 0x1 /* no host I/F creation needed */ /* Reason codes for LINK */ 
-#define WLC_E_LINK_BCN_LOSS 1 /* Link down because of beacon loss */ -#define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */ -#define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */ -#define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */ +#define WLC_E_LINK_BCN_LOSS 1 /* Link down because of beacon loss */ +#define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */ +#define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */ +#define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */ + + +/* WLC_E_NDIS_LINK event data */ +typedef BWL_PRE_PACKED_STRUCT struct ndis_link_parms { + struct ether_addr peer_mac; /* 6 bytes */ + uint16 chanspec; /* 2 bytes */ + uint32 link_speed; /* current datarate in units of 500 Kbit/s */ + uint32 max_link_speed; /* max possible datarate for link in units of 500 Kbit/s */ + int32 rssi; /* average rssi */ +} BWL_POST_PACKED_STRUCT ndis_link_parms_t; /* reason codes for WLC_E_OVERLAY_REQ event */ #define WLC_E_OVL_DOWNLOAD 0 /* overlay download request */ @@ -389,14 +407,16 @@ typedef struct wl_event_data_rssi { #define TDLS_AF_CATEGORY 12 /* Wi-Fi Display (WFD) Vendor Specific Category */ /* used for WFD Tunneled Probe Request and Response */ -#define TDLS_VENDOR_SPECIFIC 127 +#define TDLS_VENDOR_SPECIFIC 127 /* TDLS Action Field Values */ -#define TDLS_ACTION_SETUP_REQ 0 -#define TDLS_ACTION_SETUP_RESP 1 -#define TDLS_ACTION_SETUP_CONFIRM 2 -#define TDLS_ACTION_TEARDOWN 3 -#define WLAN_TDLS_SET_PROBE_WFD_IE 11 -#define WLAN_TDLS_SET_SETUP_WFD_IE 12 +#define TDLS_ACTION_SETUP_REQ 0 +#define TDLS_ACTION_SETUP_RESP 1 +#define TDLS_ACTION_SETUP_CONFIRM 2 +#define TDLS_ACTION_TEARDOWN 3 +#define WLAN_TDLS_SET_PROBE_WFD_IE 11 +#define WLAN_TDLS_SET_SETUP_WFD_IE 12 +#define WLAN_TDLS_SET_WFD_ENABLED 13 +#define WLAN_TDLS_SET_WFD_DISABLED 14 #endif @@ -426,6 +446,8 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_event_sd { wl_sd_tlv_t tlv[1]; /* service discovery TLV */ } 
BWL_POST_PACKED_STRUCT wl_event_sd_t; +/* Note: proxd has a new API (ver 3.0) deprecates the following */ + /* Reason codes for WLC_E_PROXD */ #define WLC_E_PROXD_FOUND 1 /* Found a proximity device */ #define WLC_E_PROXD_GONE 2 /* Lost a proximity device */ @@ -438,6 +460,7 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_event_sd { #define WLC_E_PROXD_COLLECT_COMPLETED 9 /* used by: initiator completed */ #define WLC_E_PROXD_COLLECT_ERROR 10 /* used by both initiator and target */ #define WLC_E_PROXD_NAN_EVENT 11 /* used by both initiator and target */ +#define WLC_E_PROXD_TS_RESULTS 12 /* used by: initiator completed */ /* proxd_event data */ typedef struct ftm_sample { @@ -445,6 +468,13 @@ typedef struct ftm_sample { int8 rssi; /* RSSI */ } ftm_sample_t; +typedef struct ts_sample { + uint32 t1; + uint32 t2; + uint32 t3; + uint32 t4; +} ts_sample_t; + typedef BWL_PRE_PACKED_STRUCT struct proxd_event_data { uint16 ver; /* version */ uint16 mode; /* mode: target/initiator */ @@ -459,11 +489,11 @@ typedef BWL_PRE_PACKED_STRUCT struct proxd_event_data { uint32 modertt; /* Mode delta */ uint32 medianrtt; /* median RTT */ uint32 sdrtt; /* Standard deviation of RTT */ - int gdcalcresult; /* Software or Hardware Kind of redundant, but if */ + int32 gdcalcresult; /* Software or Hardware Kind of redundant, but if */ /* frame type is VHT, then we should do it by hardware */ int16 avg_rssi; /* avg rssi accroos the ftm frames */ int16 validfrmcnt; /* Firmware's valid frame counts */ - char *peer_router_info; /* Peer router information if available in TLV, */ + int32 peer_router_info; /* Peer router information if available in TLV, */ /* We will add this field later */ int32 var1; /* average of group delay */ int32 var2; /* average of threshold crossing */ @@ -474,6 +504,16 @@ typedef BWL_PRE_PACKED_STRUCT struct proxd_event_data { ftm_sample_t ftm_buff[1]; /* 1 ... 
ftm_cnt */ } BWL_POST_PACKED_STRUCT wl_proxd_event_data_t; +typedef BWL_PRE_PACKED_STRUCT struct proxd_event_ts_results { + uint16 ver; /* version */ + uint16 mode; /* mode: target/initiator */ + uint16 method; /* method: rssi/TOF/AOA */ + uint8 err_code; /* error classification */ + uint8 TOF_type; /* one way or two way TOF */ + uint16 ts_cnt; /* number of timestamp measurements */ + ts_sample_t ts_buff[1]; /* Timestamps */ +} BWL_POST_PACKED_STRUCT wl_proxd_event_ts_results_t; + /* Video Traffic Interference Monitor Event */ #define INTFER_EVENT_VERSION 1 @@ -491,11 +531,20 @@ typedef struct wl_psta_primary_intf_event { struct ether_addr prim_ea; /* primary intf ether addr */ } wl_psta_primary_intf_event_t; +/* WLC_E_DPSTA_INTF_IND event data */ +typedef enum { + WL_INTF_PSTA = 1, + WL_INTF_DWDS = 2 +} wl_dpsta_intf_type; + +typedef struct wl_dpsta_intf_event { + wl_dpsta_intf_type intf_type; /* dwds/psta intf register */ +} wl_dpsta_intf_event_t; /* ********** NAN protocol events/subevents ********** */ #define NAN_EVENT_BUFFER_SIZE 512 /* max size */ /* nan application events to the host driver */ -enum nan_app_events { +typedef enum nan_app_events { WL_NAN_EVENT_START = 1, /* NAN cluster started */ WL_NAN_EVENT_JOIN = 2, /* Joined to a NAN cluster */ WL_NAN_EVENT_ROLE = 3, /* Role or State changed */ @@ -507,11 +556,235 @@ enum nan_app_events { WL_NAN_EVENT_STATUS_CHG = 9, /* generated on any change in nan_mac status */ WL_NAN_EVENT_MERGE = 10, /* Merged to a NAN cluster */ WL_NAN_EVENT_STOP = 11, /* NAN stopped */ - WL_NAN_EVENT_INVALID = 12, /* delimiter for max value */ -}; + WL_NAN_EVENT_P2P = 12, /* NAN P2P EVENT */ + WL_NAN_EVENT_WINDOW_BEGIN_P2P = 13, /* Event for begin of P2P further availability window */ + WL_NAN_EVENT_WINDOW_BEGIN_MESH = 14, + WL_NAN_EVENT_WINDOW_BEGIN_IBSS = 15, + WL_NAN_EVENT_WINDOW_BEGIN_RANGING = 16, + WL_NAN_EVENT_POST_DISC = 17, /* Event for post discovery data */ + WL_NAN_EVENT_INVALID /* delimiter for max value */ +} 
nan_app_events_e; + #define IS_NAN_EVT_ON(var, evt) ((var & (1 << (evt-1))) != 0) /* ******************* end of NAN section *************** */ +#define MSCH_EVENTS_BUFFER_SIZE 2048 + +/* Reason codes for WLC_E_MSCH */ +#define WLC_E_MSCH_START 0 /* start event check */ +#define WLC_E_MSCH_EXIT 1 /* exit event check */ +#define WLC_E_MSCH_REQ 2 /* request event */ +#define WLC_E_MSCH_CALLBACK 3 /* call back event */ +#define WLC_E_MSCH_MESSAGE 4 /* message event */ +#define WLC_E_MSCH_PROFILE_START 5 +#define WLC_E_MSCH_PROFILE_END 6 +#define WLC_E_MSCH_REQ_HANDLE 7 +#define WLC_E_MSCH_REQ_ENTITY 8 +#define WLC_E_MSCH_CHAN_CTXT 9 +#define WLC_E_MSCH_TIMESLOT 10 +#define WLC_E_MSCH_REQ_TIMING 11 + +typedef BWL_PRE_PACKED_STRUCT struct msch_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; +} BWL_POST_PACKED_STRUCT msch_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_start_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint32 status; +} BWL_POST_PACKED_STRUCT msch_start_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_message_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + char message[1]; /* message */ +} BWL_POST_PACKED_STRUCT msch_message_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_param_event_data { + uint16 flags; /* Describe various request properties */ + uint8 req_type; /* Describe start and end time flexiblilty */ + uint8 priority; /* Define the request priority */ + uint32 start_time_l; /* Requested start time offset in us unit */ + uint32 start_time_h; + uint32 duration; /* Requested duration in us unit */ + uint32 interval; /* Requested periodic interval in us unit, + * 0 means non-periodic + */ + union { + uint32 dur_flex; /* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */ + struct { + uint32 min_dur; /* min duration for traffic, maps to home_time */ + uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time*/ + uint32 lo_prio_time_l; 
+ uint32 lo_prio_time_h; + uint32 lo_prio_interval; /* repeated low priority interval */ + uint32 hi_prio_time_l; + uint32 hi_prio_time_h; + uint32 hi_prio_interval; /* repeated high priority interval */ + } bf; + } flex; +} BWL_POST_PACKED_STRUCT msch_req_param_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_timeslot_event_data { + uint32 p_timeslot; + uint32 p_prev; + uint32 p_next; + uint32 timeslot_id; + uint32 pre_start_time_l; + uint32 pre_start_time_h; + uint32 end_time_l; + uint32 end_time_h; + uint32 sch_dur_l; + uint32 sch_dur_h; + uint32 p_chan_ctxt; + uint32 fire_time_l; + uint32 fire_time_h; + uint32 state; +} BWL_POST_PACKED_STRUCT msch_timeslot_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_timing_event_data { + uint32 p_req_timing; + uint32 p_prev; + uint32 p_next; + uint16 flags; + uint16 timeslot_ptr; + uint32 fire_time_l; + uint32 fire_time_h; + uint32 pre_start_time_l; + uint32 pre_start_time_h; + uint32 start_time_l; + uint32 start_time_h; + uint32 end_time_l; + uint32 end_time_h; + uint32 p_timeslot; +} BWL_POST_PACKED_STRUCT msch_req_timing_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_chan_ctxt_event_data { + uint32 p_chan_ctxt; + uint32 p_prev; + uint32 p_next; + uint16 chanspec; + uint16 bf_sch_pending; + uint32 bf_link_prev; + uint32 bf_link_next; + uint32 onchan_time_l; + uint32 onchan_time_h; + uint32 actual_onchan_dur_l; + uint32 actual_onchan_dur_h; + uint32 pend_onchan_dur_l; + uint32 pend_onchan_dur_h; + uint16 req_entity_list_cnt; + uint16 req_entity_list_ptr; + uint16 bf_entity_list_cnt; + uint16 bf_entity_list_ptr; +} BWL_POST_PACKED_STRUCT msch_chan_ctxt_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_prio_event_data { + uint32 is_lo; + uint32 time_l; + uint32 time_h; + uint32 p_entity; +} BWL_POST_PACKED_STRUCT msch_prio_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_entity_event_data { + uint32 p_req_entity; + uint32 req_hdl_link_prev; + uint32 
req_hdl_link_next; + uint32 chan_ctxt_link_prev; + uint32 chan_ctxt_link_next; + uint32 rt_specific_link_prev; + uint32 rt_specific_link_next; + uint16 chanspec; + uint16 req_param_ptr; + uint16 cur_slot_ptr; + uint16 pend_slot_ptr; + msch_prio_event_data_t lo_event; + msch_prio_event_data_t hi_event; + uint32 ts_change_dur_flex; + uint16 ts_change_flags; + uint16 chan_ctxt_ptr; + uint32 p_chan_ctxt; + uint32 p_req_hdl; + uint32 hi_cnt_l; + uint32 hi_cnt_h; + uint32 bf_last_serv_time_l; + uint32 bf_last_serv_time_h; +} BWL_POST_PACKED_STRUCT msch_req_entity_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_handle_event_data { + uint32 p_req_handle; + uint32 p_prev; + uint32 p_next; + uint32 cb_func; + uint32 cb_ctxt; + uint16 req_param_ptr; + uint16 req_entity_list_cnt; + uint16 req_entity_list_ptr; + uint16 chan_cnt; + uint16 schd_chan_cnt; + uint16 chanspec_list_cnt; + uint16 chanspec_list_ptr; + uint16 pad; +} BWL_POST_PACKED_STRUCT msch_req_handle_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_profile_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint32 free_req_hdl_list; + uint32 free_req_entity_list; + uint32 free_chan_ctxt_list; + uint32 free_timeslot_list; + uint32 free_chanspec_list; + uint16 cur_msch_timeslot_ptr; + uint16 pad; + uint32 p_cur_msch_timeslot; + uint32 cur_armed_timeslot; + uint32 cur_armed_req_timing; + uint32 ts_id; + uint32 service_interval; + uint32 max_lo_prio_interval; + uint16 flex_list_cnt; + uint16 msch_chanspec_alloc_cnt; + uint16 msch_req_entity_alloc_cnt; + uint16 msch_req_hdl_alloc_cnt; + uint16 msch_chan_ctxt_alloc_cnt; + uint16 msch_timeslot_alloc_cnt; + uint16 msch_req_hdl_list_cnt; + uint16 msch_req_hdl_list_ptr; + uint16 msch_chan_ctxt_list_cnt; + uint16 msch_chan_ctxt_list_ptr; + uint16 msch_timeslot_list_cnt; + uint16 msch_timeslot_list_ptr; + uint16 msch_req_timing_list_cnt; + uint16 msch_req_timing_list_ptr; + uint16 msch_start_flex_list_cnt; + uint16 
msch_start_flex_list_ptr; + uint16 msch_both_flex_list_cnt; + uint16 msch_both_flex_list_ptr; +} BWL_POST_PACKED_STRUCT msch_profile_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint16 chanspec_cnt; + uint16 chanspec_ptr; + uint16 req_param_ptr; + uint16 pad; +} BWL_POST_PACKED_STRUCT msch_req_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_callback_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint16 type; /* callback type */ + uint16 chanspec; /* actual chanspec, may different with requested one */ + uint32 pre_start_time_l; /* time slot prestart time low 32bit */ + uint32 pre_start_time_h; /* time slot prestart time high 32bit */ + uint32 end_time_l; /* time slot end time low 32 bit */ + uint32 end_time_h; /* time slot end time high 32 bit */ + uint32 timeslot_id; /* unique time slot id */ +} BWL_POST_PACKED_STRUCT msch_callback_event_data_t; + /* This marks the end of a packed structure section. 
*/ #include diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmip.h b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmip.h similarity index 85% rename from drivers/net/wireless/bcmdhd/include/proto/bcmip.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/bcmip.h index e427bdc3b9133..eaa679c38948f 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/bcmip.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmip.h @@ -1,9 +1,30 @@ /* - * $Copyright Open Broadcom Corporation$ - * * Fundamental constants relating to IP Protocol * - * $Id: bcmip.h 458522 2014-02-27 02:26:15Z $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmip.h 518342 2014-12-01 23:21:41Z $ */ #ifndef _bcmip_h_ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmipv6.h similarity index 70% rename from drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/bcmipv6.h index fff1485256324..fbab037b2f325 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmipv6.h @@ -1,9 +1,30 @@ /* - * $Copyright Open Broadcom Corporation$ - * * Fundamental constants relating to Neighbor Discovery Protocol * - * $Id: bcmipv6.h 439574 2013-11-27 06:37:37Z $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmipv6.h 518342 2014-12-01 23:21:41Z $ */ #ifndef _bcmipv6_h_ @@ -96,9 +117,9 @@ BWL_PRE_PACKED_STRUCT struct ipv6_hdr { } BWL_POST_PACKED_STRUCT; /* Neighbor Advertisement/Solicitation Packet Structure */ -BWL_PRE_PACKED_STRUCT struct nd_msg { - struct icmp6_hdr icmph; - struct ipv6_addr target; +BWL_PRE_PACKED_STRUCT struct bcm_nd_msg { + struct icmp6_hdr icmph; + struct ipv6_addr target; } BWL_POST_PACKED_STRUCT; diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmtcp.h similarity index 64% rename from drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/bcmtcp.h index 09cf24044e3ce..661e1f84d2aef 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmtcp.h @@ -1,9 +1,30 @@ /* * Fundamental constants relating to TCP Protocol * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: bcmtcp.h 458522 2014-02-27 02:26:15Z $ + * + * <> + * + * $Id: bcmtcp.h 518342 2014-12-01 23:21:41Z $ */ #ifndef _bcmtcp_h_ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmudp.h similarity index 90% rename from drivers/net/wireless/bcmdhd/include/proto/bcmudp.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/bcmudp.h index c1d7c389c8c1d..97cf815db76cf 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/bcmudp.h @@ -1,15 +1,18 @@ /* - * Copyright (C) 2014, Broadcom Corporation - * All Rights Reserved. + * Fundamental constants relating to UDP Protocol * + * Copyright (C) 2016, Broadcom Corporation + * All Rights Reserved. + * * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation; * the contents of this file may not be disclosed to third parties, copied * or duplicated in any form, in whole or in part, without the prior * written permission of Broadcom Corporation. 
* - * Fundamental constants relating to UDP Protocol * - * $Id: bcmudp.h 382882 2013-02-04 23:24:31Z $ + * <> + * + * $Id: bcmudp.h 518342 2014-12-01 23:21:41Z $ */ #ifndef _bcmudp_h_ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h b/drivers/amlogic/wifi/bcmdhd/include/proto/bt_amp_hci.h similarity index 91% rename from drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/bt_amp_hci.h index bc91f8421cc36..4e948d24dfc1c 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/bt_amp_hci.h @@ -1,9 +1,30 @@ /* * BT-AMP (BlueTooth Alternate Mac and Phy) HCI (Host/Controller Interface) * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: bt_amp_hci.h 382882 2013-02-04 23:24:31Z $ + * + * <> + * + * $Id: bt_amp_hci.h 518342 2014-12-01 23:21:41Z $ */ #ifndef _bt_amp_hci_h diff --git a/drivers/net/wireless/bcmdhd/include/proto/eapol.h b/drivers/amlogic/wifi/bcmdhd/include/proto/eapol.h similarity index 81% rename from drivers/net/wireless/bcmdhd/include/proto/eapol.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/eapol.h index d3bff33ab8b7b..be4ef5358fa59 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/eapol.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/eapol.h @@ -5,9 +5,30 @@ * IEEE Std 802.1X-2001 * IEEE 802.1X RADIUS Usage Guidelines * - * Copyright Open Broadcom Corporation + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * * - * $Id: eapol.h 452703 2014-01-31 20:33:06Z $ + * <> + * + * $Id: eapol.h 518342 2014-12-01 23:21:41Z $ */ #ifndef _eapol_h_ diff --git a/drivers/net/wireless/bcmdhd/include/proto/ethernet.h b/drivers/amlogic/wifi/bcmdhd/include/proto/ethernet.h similarity index 82% rename from drivers/net/wireless/bcmdhd/include/proto/ethernet.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/ethernet.h index a166c5f84c0e8..022fee41a1965 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/ethernet.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/ethernet.h @@ -1,9 +1,30 @@ /* * From FreeBSD 2.2.7: Fundamental constants relating to ethernet. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: ethernet.h 473238 2014-04-28 19:14:56Z $ + * + * <> + * + * $Id: ethernet.h 518342 2014-12-01 23:21:41Z $ */ #ifndef _NET_ETHERNET_H_ /* use native BSD ethernet.h when available */ @@ -65,14 +86,10 @@ #define ETHER_TYPE_IPV6 0x86dd /* IPv6 */ #define ETHER_TYPE_BRCM 0x886c /* Broadcom Corp. */ #define ETHER_TYPE_802_1X 0x888e /* 802.1x */ -#ifdef PLC -#define ETHER_TYPE_88E1 0x88e1 /* GIGLE */ -#define ETHER_TYPE_8912 0x8912 /* GIGLE */ -#define ETHER_TYPE_GIGLED 0xffff /* GIGLE */ -#endif /* PLC */ #define ETHER_TYPE_802_1X_PREAUTH 0x88c7 /* 802.1x preauthentication */ #define ETHER_TYPE_WAI 0x88b4 /* WAI */ #define ETHER_TYPE_89_0D 0x890d /* 89-0d frame for TDLS */ +#define ETHER_TYPE_RRB ETHER_TYPE_89_0D /* RRB 802.11r 2008 */ #define ETHER_TYPE_PPP_SES 0x8864 /* PPPoE Session */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/proto/event_log_set.h b/drivers/amlogic/wifi/bcmdhd/include/proto/event_log_set.h new file mode 100644 index 0000000000000..910cbcf169af1 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/event_log_set.h @@ -0,0 +1,45 @@ +/* + * EVENT_LOG system definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: event_log.h 241182 2011-02-17 21:50:03Z $ + */ + +#ifndef _EVENT_LOG_SET_H_ +#define _EVENT_LOG_SET_H_ + +/* Set a maximum number of sets here. It is not dynamic for + * efficiency of the EVENT_LOG calls. + */ +#define NUM_EVENT_LOG_SETS 8 + +/* Define new event log sets here */ +#define EVENT_LOG_SET_BUS 0 +#define EVENT_LOG_SET_WL 1 +#define EVENT_LOG_SET_PSM 2 +#define EVENT_LOG_SET_ERROR 3 +#define EVENT_LOG_SET_MEM_API 4 + +#endif /* _EVENT_LOG_SET_H_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/proto/event_log_tag.h b/drivers/amlogic/wifi/bcmdhd/include/proto/event_log_tag.h new file mode 100644 index 0000000000000..25acbc7420e18 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/event_log_tag.h @@ -0,0 +1,157 @@ +/* + * EVENT_LOG system definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: event_log.h 241182 2011-02-17 21:50:03Z $ + */ + +#ifndef _EVENT_LOG_TAG_H_ +#define _EVENT_LOG_TAG_H_ + +#include + +/* Define new event log tags here */ +#define EVENT_LOG_TAG_NULL 0 /* Special null tag */ +#define EVENT_LOG_TAG_TS 1 /* Special timestamp tag */ +#define EVENT_LOG_TAG_BUS_OOB 2 +#define EVENT_LOG_TAG_BUS_STATE 3 +#define EVENT_LOG_TAG_BUS_PROTO 4 +#define EVENT_LOG_TAG_BUS_CTL 5 +#define EVENT_LOG_TAG_BUS_EVENT 6 +#define EVENT_LOG_TAG_BUS_PKT 7 +#define EVENT_LOG_TAG_BUS_FRAME 8 +#define EVENT_LOG_TAG_BUS_DESC 9 +#define EVENT_LOG_TAG_BUS_SETUP 10 +#define EVENT_LOG_TAG_BUS_MISC 11 +#define EVENT_LOG_TAG_SRSCAN 22 +#define EVENT_LOG_TAG_PWRSTATS_INFO 23 +#define EVENT_LOG_TAG_UCODE_WATCHDOG 26 +#define EVENT_LOG_TAG_UCODE_FIFO 27 +#define EVENT_LOG_TAG_SCAN_TRACE_LOW 28 +#define EVENT_LOG_TAG_SCAN_TRACE_HIGH 29 +#define EVENT_LOG_TAG_SCAN_ERROR 30 +#define EVENT_LOG_TAG_SCAN_WARN 31 +#define EVENT_LOG_TAG_MPF_ERR 32 +#define EVENT_LOG_TAG_MPF_WARN 33 +#define EVENT_LOG_TAG_MPF_INFO 34 +#define EVENT_LOG_TAG_MPF_DEBUG 35 +#define EVENT_LOG_TAG_EVENT_INFO 36 +#define EVENT_LOG_TAG_EVENT_ERR 37 +#define EVENT_LOG_TAG_PWRSTATS_ERROR 38 +#define EVENT_LOG_TAG_EXCESS_PM_ERROR 39 +#define EVENT_LOG_TAG_IOCTL_LOG 40 +#define EVENT_LOG_TAG_PFN_ERR 41 +#define EVENT_LOG_TAG_PFN_WARN 42 +#define EVENT_LOG_TAG_PFN_INFO 43 +#define EVENT_LOG_TAG_PFN_DEBUG 44 +#define EVENT_LOG_TAG_BEACON_LOG 45 +#define EVENT_LOG_TAG_WNM_BSSTRANS_INFO 46 +#define EVENT_LOG_TAG_TRACE_CHANSW 47 +#define EVENT_LOG_TAG_PCI_ERROR 48 +#define EVENT_LOG_TAG_PCI_TRACE 49 +#define EVENT_LOG_TAG_PCI_WARN 50 +#define EVENT_LOG_TAG_PCI_INFO 51 +#define 
EVENT_LOG_TAG_PCI_DBG 52 +#define EVENT_LOG_TAG_PCI_DATA 53 +#define EVENT_LOG_TAG_PCI_RING 54 +#define EVENT_LOG_TAG_AWDL_TRACE_RANGING 55 +#define EVENT_LOG_TAG_WL_ERROR 56 +#define EVENT_LOG_TAG_PHY_ERROR 57 +#define EVENT_LOG_TAG_OTP_ERROR 58 +#define EVENT_LOG_TAG_NOTIF_ERROR 59 +#define EVENT_LOG_TAG_MPOOL_ERROR 60 +#define EVENT_LOG_TAG_OBJR_ERROR 61 +#define EVENT_LOG_TAG_DMA_ERROR 62 +#define EVENT_LOG_TAG_PMU_ERROR 63 +#define EVENT_LOG_TAG_BSROM_ERROR 64 +#define EVENT_LOG_TAG_SI_ERROR 65 +#define EVENT_LOG_TAG_ROM_PRINTF 66 +#define EVENT_LOG_TAG_RATE_CNT 67 +#define EVENT_LOG_TAG_CTL_MGT_CNT 68 +#define EVENT_LOG_TAG_AMPDU_DUMP 69 +#define EVENT_LOG_TAG_MEM_ALLOC_SUCC 70 +#define EVENT_LOG_TAG_MEM_ALLOC_FAIL 71 +#define EVENT_LOG_TAG_MEM_FREE 72 +#define EVENT_LOG_TAG_WL_ASSOC_LOG 73 +#define EVENT_LOG_TAG_WL_PS_LOG 74 +#define EVENT_LOG_TAG_WL_ROAM_LOG 75 +#define EVENT_LOG_TAG_WL_MPC_LOG 76 +#define EVENT_LOG_TAG_WL_WSEC_LOG 77 +#define EVENT_LOG_TAG_WL_WSEC_DUMP 78 +#define EVENT_LOG_TAG_WL_MCNX_LOG 79 +#define EVENT_LOG_TAG_HEALTH_CHECK_ERROR 80 +#define EVENT_LOG_TAG_HNDRTE_EVENT_ERROR 81 +#define EVENT_LOG_TAG_ECOUNTERS_ERROR 82 +#define EVENT_LOG_TAG_WL_COUNTERS 83 +#define EVENT_LOG_TAG_ECOUNTERS_IPCSTATS 84 +#define EVENT_LOG_TAG_WL_P2P_LOG 85 +#define EVENT_LOG_TAG_SDIO_ERROR 86 +#define EVENT_LOG_TAG_SDIO_TRACE 87 +#define EVENT_LOG_TAG_SDIO_DBG 88 +#define EVENT_LOG_TAG_SDIO_PRHDRS 89 +#define EVENT_LOG_TAG_SDIO_PRPKT 90 +#define EVENT_LOG_TAG_SDIO_INFORM 91 +#define EVENT_LOG_TAG_MIMO_PS_ERROR 92 +#define EVENT_LOG_TAG_MIMO_PS_TRACE 93 +#define EVENT_LOG_TAG_MIMO_PS_INFO 94 +#define EVENT_LOG_TAG_BTCX_STATS 95 +#define EVENT_LOG_TAG_LEAKY_AP_STATS 96 +#define EVENT_LOG_TAG_AWDL_TRACE_ELECTION 97 +#define EVENT_LOG_TAG_MIMO_PS_STATS 98 +#define EVENT_LOG_TAG_PWRSTATS_PHY 99 +#define EVENT_LOG_TAG_PWRSTATS_SCAN 100 +#define EVENT_LOG_TAG_PWRSTATS_AWDL 101 +#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2 102 +#define EVENT_LOG_TAG_LQM 103 +#define 
EVENT_LOG_TAG_TRACE_WL_INFO 104 +#define EVENT_LOG_TAG_TRACE_BTCOEX_INFO 105 +#define EVENT_LOG_TAG_MAX 105 /* Set to the same value of last tag, not last tag + 1 */ +/* Note: New event should be added/reserved in trunk before adding it to branches */ + + +#define SD_PRHDRS(i, s, h, p, n, l) +#define SD_PRPKT(m, b, n) +#define SD_INFORM(args) + +/* Flags for tag control */ +#define EVENT_LOG_TAG_FLAG_NONE 0 +#define EVENT_LOG_TAG_FLAG_LOG 0x80 +#define EVENT_LOG_TAG_FLAG_PRINT 0x40 +#define EVENT_LOG_TAG_FLAG_SET_MASK 0x3f + +/* Each event log entry has a type. The type is the LAST word of the + * event log. The printing code walks the event entries in reverse + * order to find the first entry. + */ +typedef union event_log_hdr { + struct { + uint8 tag; /* Event_log entry tag */ + uint8 count; /* Count of 4-byte entries */ + uint16 fmt_num; /* Format number */ + }; + uint32 t; /* Type cheat */ +} event_log_hdr_t; + +#endif /* _EVENT_LOG_TAG_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/p2p.h b/drivers/amlogic/wifi/bcmdhd/include/proto/p2p.h similarity index 95% rename from drivers/net/wireless/bcmdhd/include/proto/p2p.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/p2p.h index f50d5ed7eaa58..91f5147f54d01 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/p2p.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/p2p.h @@ -1,9 +1,30 @@ /* - * $Copyright Open Broadcom Corporation$ - * * Fundamental types and constants relating to WFA P2P (aka WiFi Direct) * - * $Id: p2p.h 457033 2014-02-20 19:39:45Z $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this 
software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: p2p.h 536785 2015-02-24 08:35:00Z $ */ #ifndef _P2P_H_ @@ -542,9 +563,9 @@ typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t; #define P2PSD_ACTION_ID_GAS_IRESP 0x0b /* Action value for GAS Initial Response AF */ #define P2PSD_ACTION_ID_GAS_CREQ 0x0c - /* Action value for GAS Comback Request AF */ + /* Action value for GAS Comeback Request AF */ #define P2PSD_ACTION_ID_GAS_CRESP 0x0d - /* Action value for GAS Comback Response AF */ + /* Action value for GAS Comeback Response AF */ #define P2PSD_AD_EID 0x6c /* Advertisement Protocol IE ID */ #define P2PSD_ADP_TUPLE_QLMT_PAMEBI 0x00 diff --git a/drivers/net/wireless/bcmdhd/include/proto/sdspi.h b/drivers/amlogic/wifi/bcmdhd/include/proto/sdspi.h similarity index 64% rename from drivers/net/wireless/bcmdhd/include/proto/sdspi.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/sdspi.h index 647a217bbbcd8..a1d7ac937cf3c 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/sdspi.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/sdspi.h @@ -1,9 +1,30 @@ /* * SD-SPI Protocol Standard * - * $ Copyright Open Broadcom Corporation $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is 
licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: sdspi.h 382882 2013-02-04 23:24:31Z $ + * + * <> + * + * $Id: sdspi.h 518342 2014-12-01 23:21:41Z $ */ #ifndef _SD_SPI_H #define _SD_SPI_H diff --git a/drivers/net/wireless/bcmdhd/include/proto/vlan.h b/drivers/amlogic/wifi/bcmdhd/include/proto/vlan.h similarity index 61% rename from drivers/net/wireless/bcmdhd/include/proto/vlan.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/vlan.h index ca1f461f5b91e..77b1458b36838 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/vlan.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/vlan.h @@ -1,9 +1,30 @@ /* * 802.1Q VLAN protocol definitions * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + 
* As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: vlan.h 382883 2013-02-04 23:26:09Z $ + * + * <> + * + * $Id: vlan.h 518342 2014-12-01 23:21:41Z $ */ #ifndef _vlan_h_ diff --git a/drivers/net/wireless/bcmdhd/include/proto/wpa.h b/drivers/amlogic/wifi/bcmdhd/include/proto/wpa.h similarity index 77% rename from drivers/net/wireless/bcmdhd/include/proto/wpa.h rename to drivers/amlogic/wifi/bcmdhd/include/proto/wpa.h index 1e079c9482b32..ef5d664dabee2 100644 --- a/drivers/net/wireless/bcmdhd/include/proto/wpa.h +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/wpa.h @@ -1,9 +1,30 @@ /* * Fundamental types and constants relating to WPA * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your 
choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: wpa.h 492853 2014-07-23 17:20:34Z $ + * + * <> + * + * $Id: wpa.h 518342 2014-12-01 23:21:41Z $ */ #ifndef _proto_wpa_h_ @@ -51,6 +72,8 @@ typedef BWL_PRE_PACKED_STRUCT struct #define WPA_IE_FIXED_LEN 8 #define WPA_IE_TAG_FIXED_LEN 6 +#define BIP_OUI_TYPE WPA2_OUI "\x06" + typedef BWL_PRE_PACKED_STRUCT struct { uint8 tag; /* TAG */ uint8 length; /* TAG length */ @@ -101,22 +124,7 @@ typedef BWL_PRE_PACKED_STRUCT struct #define WPA_CIPHER_WEP_104 5 /* WEP (104-bit) */ #define WPA_CIPHER_BIP 6 /* WEP (104-bit) */ #define WPA_CIPHER_TPK 7 /* Group addressed traffic not allowed */ -#ifdef BCMCCX -#define WPA_CIPHER_CKIP 8 /* KP with no MIC */ -#define WPA_CIPHER_CKIP_MMH 9 /* KP with MIC ("CKIP/MMH", "CKIP+CMIC") */ -#define WPA_CIPHER_WEP_MMH 10 /* MIC with no KP ("WEP/MMH", "CMIC") */ - -#define IS_CCX_CIPHER(cipher) ((cipher) == WPA_CIPHER_CKIP || \ - (cipher) == WPA_CIPHER_CKIP_MMH || \ - (cipher) == WPA_CIPHER_WEP_MMH) -#endif - -#ifdef BCMWAPI_WAI -#define WAPI_CIPHER_NONE WPA_CIPHER_NONE -#define WAPI_CIPHER_SMS4 11 -#define WAPI_CSE_WPI_SMS4 1 -#endif /* BCMWAPI_WAI */ #define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \ (cipher) == WPA_CIPHER_WEP_40 || \ @@ -126,17 +134,6 @@ typedef BWL_PRE_PACKED_STRUCT struct (cipher) == WPA_CIPHER_AES_CCM || \ (cipher) == WPA_CIPHER_TPK) -#ifdef BCMWAPI_WAI -#define IS_WAPI_CIPHER(cipher) ((cipher) == WAPI_CIPHER_NONE || \ - (cipher) == WAPI_CSE_WPI_SMS4) - -/* convert 
WAPI_CSE_WPI_XXX to WAPI_CIPHER_XXX */ -#define WAPI_CSE_WPI_2_CIPHER(cse) ((cse) == WAPI_CSE_WPI_SMS4 ? \ - WAPI_CIPHER_SMS4 : WAPI_CIPHER_NONE) - -#define WAPI_CIPHER_2_CSE_WPI(cipher) ((cipher) == WAPI_CIPHER_SMS4 ? \ - WAPI_CSE_WPI_SMS4 : WAPI_CIPHER_NONE) -#endif /* BCMWAPI_WAI */ /* WPA TKIP countermeasures parameters */ #define WPA_TKIP_CM_DETECT 60 /* multiple MIC failure window (seconds) */ @@ -177,21 +174,7 @@ typedef BWL_PRE_PACKED_STRUCT struct #define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH #define WPA2_PMKID_COUNT_LEN 2 -#define RSN_GROUPMANAGE_CIPHER_LEN 4 - -#ifdef BCMWAPI_WAI -#define WAPI_CAP_PREAUTH RSN_CAP_PREAUTH - -/* Other WAI definition */ -#define WAPI_WAI_REQUEST 0x00F1 -#define WAPI_UNICAST_REKEY 0x00F2 -#define WAPI_STA_AGING 0x00F3 -#define WAPI_MUTIL_REKEY 0x00F4 -#define WAPI_STA_STATS 0x00F5 -#define WAPI_USK_REKEY_COUNT 0x4000000 /* 0xA00000 */ -#define WAPI_MSK_REKEY_COUNT 0x4000000 /* 0xA00000 */ -#endif /* BCMWAPI_WAI */ /* This marks the end of a packed structure section. 
*/ #include diff --git a/drivers/amlogic/wifi/bcmdhd/include/proto/wps.h b/drivers/amlogic/wifi/bcmdhd/include/proto/wps.h new file mode 100644 index 0000000000000..495d7f181fd3d --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/proto/wps.h @@ -0,0 +1,389 @@ +/* + * WPS IE definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id$ + */ + +#ifndef _WPS_ +#define _WPS_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Data Element Definitions */ +#define WPS_ID_AP_CHANNEL 0x1001 +#define WPS_ID_ASSOC_STATE 0x1002 +#define WPS_ID_AUTH_TYPE 0x1003 +#define WPS_ID_AUTH_TYPE_FLAGS 0x1004 +#define WPS_ID_AUTHENTICATOR 0x1005 +#define WPS_ID_CONFIG_METHODS 0x1008 +#define WPS_ID_CONFIG_ERROR 0x1009 +#define WPS_ID_CONF_URL4 0x100A +#define WPS_ID_CONF_URL6 0x100B +#define WPS_ID_CONN_TYPE 0x100C +#define WPS_ID_CONN_TYPE_FLAGS 0x100D +#define WPS_ID_CREDENTIAL 0x100E +#define WPS_ID_DEVICE_NAME 0x1011 +#define WPS_ID_DEVICE_PWD_ID 0x1012 +#define WPS_ID_E_HASH1 0x1014 +#define WPS_ID_E_HASH2 0x1015 +#define WPS_ID_E_SNONCE1 0x1016 +#define WPS_ID_E_SNONCE2 0x1017 +#define WPS_ID_ENCR_SETTINGS 0x1018 +#define WPS_ID_ENCR_TYPE 0x100F +#define WPS_ID_ENCR_TYPE_FLAGS 0x1010 +#define WPS_ID_ENROLLEE_NONCE 0x101A +#define WPS_ID_FEATURE_ID 0x101B +#define WPS_ID_IDENTITY 0x101C +#define WPS_ID_IDENTITY_PROOF 0x101D +#define WPS_ID_KEY_WRAP_AUTH 0x101E +#define WPS_ID_KEY_IDENTIFIER 0x101F +#define WPS_ID_MAC_ADDR 0x1020 +#define WPS_ID_MANUFACTURER 0x1021 +#define WPS_ID_MSG_TYPE 0x1022 +#define WPS_ID_MODEL_NAME 0x1023 +#define WPS_ID_MODEL_NUMBER 0x1024 +#define WPS_ID_NW_INDEX 0x1026 +#define WPS_ID_NW_KEY 0x1027 +#define WPS_ID_NW_KEY_INDEX 0x1028 +#define WPS_ID_NEW_DEVICE_NAME 0x1029 +#define WPS_ID_NEW_PWD 0x102A +#define WPS_ID_OOB_DEV_PWD 0x102C +#define WPS_ID_OS_VERSION 0x102D +#define WPS_ID_POWER_LEVEL 0x102F +#define WPS_ID_PSK_CURRENT 0x1030 +#define WPS_ID_PSK_MAX 0x1031 +#define WPS_ID_PUBLIC_KEY 0x1032 +#define WPS_ID_RADIO_ENABLED 0x1033 +#define WPS_ID_REBOOT 0x1034 +#define WPS_ID_REGISTRAR_CURRENT 0x1035 +#define WPS_ID_REGISTRAR_ESTBLSHD 0x1036 +#define WPS_ID_REGISTRAR_LIST 0x1037 +#define WPS_ID_REGISTRAR_MAX 0x1038 +#define WPS_ID_REGISTRAR_NONCE 0x1039 +#define WPS_ID_REQ_TYPE 0x103A +#define WPS_ID_RESP_TYPE 0x103B +#define WPS_ID_RF_BAND 0x103C 
+#define WPS_ID_R_HASH1 0x103D +#define WPS_ID_R_HASH2 0x103E +#define WPS_ID_R_SNONCE1 0x103F +#define WPS_ID_R_SNONCE2 0x1040 +#define WPS_ID_SEL_REGISTRAR 0x1041 +#define WPS_ID_SERIAL_NUM 0x1042 +#define WPS_ID_SC_STATE 0x1044 +#define WPS_ID_SSID 0x1045 +#define WPS_ID_TOT_NETWORKS 0x1046 +#define WPS_ID_UUID_E 0x1047 +#define WPS_ID_UUID_R 0x1048 +#define WPS_ID_VENDOR_EXT 0x1049 +#define WPS_ID_VERSION 0x104A +#define WPS_ID_X509_CERT_REQ 0x104B +#define WPS_ID_X509_CERT 0x104C +#define WPS_ID_EAP_IDENTITY 0x104D +#define WPS_ID_MSG_COUNTER 0x104E +#define WPS_ID_PUBKEY_HASH 0x104F +#define WPS_ID_REKEY_KEY 0x1050 +#define WPS_ID_KEY_LIFETIME 0x1051 +#define WPS_ID_PERM_CFG_METHODS 0x1052 +#define WPS_ID_SEL_REG_CFG_METHODS 0x1053 +#define WPS_ID_PRIM_DEV_TYPE 0x1054 +#define WPS_ID_SEC_DEV_TYPE_LIST 0x1055 +#define WPS_ID_PORTABLE_DEVICE 0x1056 +#define WPS_ID_AP_SETUP_LOCKED 0x1057 +#define WPS_ID_APP_LIST 0x1058 +#define WPS_ID_EAP_TYPE 0x1059 +#define WPS_ID_INIT_VECTOR 0x1060 +#define WPS_ID_KEY_PROVIDED_AUTO 0x1061 +#define WPS_ID_8021X_ENABLED 0x1062 +#define WPS_ID_WEP_TRANSMIT_KEY 0x1064 +#define WPS_ID_REQ_DEV_TYPE 0x106A + +/* WSC 2.0, WFA Vendor Extension Subelements */ +#define WFA_VENDOR_EXT_ID "\x00\x37\x2A" +#define WPS_WFA_SUBID_VERSION2 0x00 +#define WPS_WFA_SUBID_AUTHORIZED_MACS 0x01 +#define WPS_WFA_SUBID_NW_KEY_SHAREABLE 0x02 +#define WPS_WFA_SUBID_REQ_TO_ENROLL 0x03 +#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME 0x04 +#define WPS_WFA_SUBID_REG_CFG_METHODS 0x05 + + +/* WCN-NET Windows Rally Vertical Pairing Vendor Extensions */ +#define MS_VENDOR_EXT_ID "\x00\x01\x37" +#define WPS_MS_ID_VPI 0x1001 /* Vertical Pairing Identifier TLV */ +#define WPS_MS_ID_TRANSPORT_UUID 0x1002 /* Transport UUID TLV */ + +/* Vertical Pairing Identifier TLV Definitions */ +#define WPS_MS_VPI_TRANSPORT_NONE 0x00 /* None */ +#define WPS_MS_VPI_TRANSPORT_DPWS 0x01 /* Devices Profile for Web Services */ +#define WPS_MS_VPI_TRANSPORT_UPNP 0x02 /* uPnP */ +#define 
WPS_MS_VPI_TRANSPORT_SDNWS 0x03 /* Secure Devices Profile for Web Services */ +#define WPS_MS_VPI_NO_PROFILE_REQ 0x00 /* Wi-Fi profile not requested. + * Not supported in Windows 7 + */ +#define WPS_MS_VPI_PROFILE_REQ 0x01 /* Wi-Fi profile requested. */ + +/* sizes of the fixed size elements */ +#define WPS_ID_AP_CHANNEL_S 2 +#define WPS_ID_ASSOC_STATE_S 2 +#define WPS_ID_AUTH_TYPE_S 2 +#define WPS_ID_AUTH_TYPE_FLAGS_S 2 +#define WPS_ID_AUTHENTICATOR_S 8 +#define WPS_ID_CONFIG_METHODS_S 2 +#define WPS_ID_CONFIG_ERROR_S 2 +#define WPS_ID_CONN_TYPE_S 1 +#define WPS_ID_CONN_TYPE_FLAGS_S 1 +#define WPS_ID_DEVICE_PWD_ID_S 2 +#define WPS_ID_ENCR_TYPE_S 2 +#define WPS_ID_ENCR_TYPE_FLAGS_S 2 +#define WPS_ID_FEATURE_ID_S 4 +#define WPS_ID_MAC_ADDR_S 6 +#define WPS_ID_MSG_TYPE_S 1 +#define WPS_ID_SC_STATE_S 1 +#define WPS_ID_RF_BAND_S 1 +#define WPS_ID_OS_VERSION_S 4 +#define WPS_ID_VERSION_S 1 +#define WPS_ID_SEL_REGISTRAR_S 1 +#define WPS_ID_SEL_REG_CFG_METHODS_S 2 +#define WPS_ID_REQ_TYPE_S 1 +#define WPS_ID_RESP_TYPE_S 1 +#define WPS_ID_AP_SETUP_LOCKED_S 1 + +/* WSC 2.0, WFA Vendor Extension Subelements */ +#define WPS_WFA_SUBID_VERSION2_S 1 +#define WPS_WFA_SUBID_NW_KEY_SHAREABLE_S 1 +#define WPS_WFA_SUBID_REQ_TO_ENROLL_S 1 +#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME_S 1 +#define WPS_WFA_SUBID_REG_CFG_METHODS_S 2 + +/* Association states */ +#define WPS_ASSOC_NOT_ASSOCIATED 0 +#define WPS_ASSOC_CONN_SUCCESS 1 +#define WPS_ASSOC_CONFIG_FAIL 2 +#define WPS_ASSOC_ASSOC_FAIL 3 +#define WPS_ASSOC_IP_FAIL 4 + +/* Authentication types */ +#define WPS_AUTHTYPE_OPEN 0x0001 +#define WPS_AUTHTYPE_WPAPSK 0x0002 /* Deprecated in WSC 2.0 */ +#define WPS_AUTHTYPE_SHARED 0x0004 /* Deprecated in WSC 2.0 */ +#define WPS_AUTHTYPE_WPA 0x0008 /* Deprecated in WSC 2.0 */ +#define WPS_AUTHTYPE_WPA2 0x0010 +#define WPS_AUTHTYPE_WPA2PSK 0x0020 + +/* Config methods */ +#define WPS_CONFMET_USBA 0x0001 /* Deprecated in WSC 2.0 */ +#define WPS_CONFMET_ETHERNET 0x0002 /* Deprecated in WSC 2.0 */ 
+#define WPS_CONFMET_LABEL 0x0004 +#define WPS_CONFMET_DISPLAY 0x0008 +#define WPS_CONFMET_EXT_NFC_TOK 0x0010 +#define WPS_CONFMET_INT_NFC_TOK 0x0020 +#define WPS_CONFMET_NFC_INTF 0x0040 +#define WPS_CONFMET_PBC 0x0080 +#define WPS_CONFMET_KEYPAD 0x0100 +/* WSC 2.0 */ +#define WPS_CONFMET_VIRT_PBC 0x0280 +#define WPS_CONFMET_PHY_PBC 0x0480 +#define WPS_CONFMET_VIRT_DISPLAY 0x2008 +#define WPS_CONFMET_PHY_DISPLAY 0x4008 + +/* WPS error messages */ +#define WPS_ERROR_NO_ERROR 0 +#define WPS_ERROR_OOB_INT_READ_ERR 1 +#define WPS_ERROR_DECRYPT_CRC_FAIL 2 +#define WPS_ERROR_CHAN24_NOT_SUPP 3 +#define WPS_ERROR_CHAN50_NOT_SUPP 4 +#define WPS_ERROR_SIGNAL_WEAK 5 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_NW_AUTH_FAIL 6 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_NW_ASSOC_FAIL 7 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_NO_DHCP_RESP 8 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_FAILED_DHCP_CONF 9 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_IP_ADDR_CONFLICT 10 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_FAIL_CONN_REGISTRAR 11 +#define WPS_ERROR_MULTI_PBC_DETECTED 12 +#define WPS_ERROR_ROGUE_SUSPECTED 13 +#define WPS_ERROR_DEVICE_BUSY 14 +#define WPS_ERROR_SETUP_LOCKED 15 +#define WPS_ERROR_MSG_TIMEOUT 16 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_REG_SESSION_TIMEOUT 17 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_DEV_PWD_AUTH_FAIL 18 +#define WPS_ERROR_60GHZ_NOT_SUPPORT 19 +#define WPS_ERROR_PKH_MISMATCH 20 /* Public Key Hash Mismatch */ + +/* Connection types */ +#define WPS_CONNTYPE_ESS 0x01 +#define WPS_CONNTYPE_IBSS 0x02 + +/* Device password ID */ +#define WPS_DEVICEPWDID_DEFAULT 0x0000 +#define WPS_DEVICEPWDID_USER_SPEC 0x0001 +#define WPS_DEVICEPWDID_MACHINE_SPEC 0x0002 +#define WPS_DEVICEPWDID_REKEY 0x0003 +#define WPS_DEVICEPWDID_PUSH_BTN 0x0004 +#define WPS_DEVICEPWDID_REG_SPEC 0x0005 +#define WPS_DEVICEPWDID_IBSS 0x0006 +#define WPS_DEVICEPWDID_NFC_CHO 0x0007 /* NFC-Connection-Handover */ +#define WPS_DEVICEPWDID_WFDS 0x0008 /* Wi-Fi 
Direct Services Specification */ + +/* Encryption type */ +#define WPS_ENCRTYPE_NONE 0x0001 +#define WPS_ENCRTYPE_WEP 0x0002 /* Deprecated in WSC 2.0 */ +#define WPS_ENCRTYPE_TKIP 0x0004 /* Deprecated in version 2.0. TKIP can only + * be advertised on the AP when Mixed Mode + * is enabled (Encryption Type is 0x000c). + */ +#define WPS_ENCRTYPE_AES 0x0008 + + +/* WPS Message Types */ +#define WPS_ID_BEACON 0x01 +#define WPS_ID_PROBE_REQ 0x02 +#define WPS_ID_PROBE_RESP 0x03 +#define WPS_ID_MESSAGE_M1 0x04 +#define WPS_ID_MESSAGE_M2 0x05 +#define WPS_ID_MESSAGE_M2D 0x06 +#define WPS_ID_MESSAGE_M3 0x07 +#define WPS_ID_MESSAGE_M4 0x08 +#define WPS_ID_MESSAGE_M5 0x09 +#define WPS_ID_MESSAGE_M6 0x0A +#define WPS_ID_MESSAGE_M7 0x0B +#define WPS_ID_MESSAGE_M8 0x0C +#define WPS_ID_MESSAGE_ACK 0x0D +#define WPS_ID_MESSAGE_NACK 0x0E +#define WPS_ID_MESSAGE_DONE 0x0F + +/* WSP private ID for local use */ +#define WPS_PRIVATE_ID_IDENTITY (WPS_ID_MESSAGE_DONE + 1) +#define WPS_PRIVATE_ID_WPS_START (WPS_ID_MESSAGE_DONE + 2) +#define WPS_PRIVATE_ID_FAILURE (WPS_ID_MESSAGE_DONE + 3) +#define WPS_PRIVATE_ID_FRAG (WPS_ID_MESSAGE_DONE + 4) +#define WPS_PRIVATE_ID_FRAG_ACK (WPS_ID_MESSAGE_DONE + 5) +#define WPS_PRIVATE_ID_EAPOL_START (WPS_ID_MESSAGE_DONE + 6) + + +/* Device Type categories for primary and secondary device types */ +#define WPS_DEVICE_TYPE_CAT_COMPUTER 1 +#define WPS_DEVICE_TYPE_CAT_INPUT_DEVICE 2 +#define WPS_DEVICE_TYPE_CAT_PRINTER 3 +#define WPS_DEVICE_TYPE_CAT_CAMERA 4 +#define WPS_DEVICE_TYPE_CAT_STORAGE 5 +#define WPS_DEVICE_TYPE_CAT_NW_INFRA 6 +#define WPS_DEVICE_TYPE_CAT_DISPLAYS 7 +#define WPS_DEVICE_TYPE_CAT_MM_DEVICES 8 +#define WPS_DEVICE_TYPE_CAT_GAME_DEVICES 9 +#define WPS_DEVICE_TYPE_CAT_TELEPHONE 10 +#define WPS_DEVICE_TYPE_CAT_AUDIO_DEVICES 11 /* WSC 2.0 */ + +/* Device Type sub categories for primary and secondary device types */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_PC 1 +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_SERVER 2 +#define 
WPS_DEVICE_TYPE_SUB_CAT_COMP_MEDIA_CTR 3 +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_UM_PC 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NOTEBOOK 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_DESKTOP 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MID 7 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NETBOOK 8 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_Keyboard 1 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_MOUSE 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_JOYSTICK 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_TRACKBALL 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_GAM_CTRL 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_REMOTE 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_TOUCHSCREEN 7 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_BIO_READER 8 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_BAR_READER 9 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_PRINTER 1 +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_SCANNER 2 +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_FAX 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_COPIER 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_ALLINONE 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_DGTL_STILL 1 +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_VIDEO_CAM 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_WEB_CAM 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_SECU_CAM 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_STOR_NAS 1 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_AP 1 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_ROUTER 2 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_SWITCH 3 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_GATEWAY 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_NW_BRIDGE 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_TV 1 +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PIC_FRAME 2 +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PROJECTOR 3 +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_MONITOR 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_MM_DAR 1 +#define 
WPS_DEVICE_TYPE_SUB_CAT_MM_PVR 2 +#define WPS_DEVICE_TYPE_SUB_CAT_MM_MCX 3 +#define WPS_DEVICE_TYPE_SUB_CAT_MM_STB 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_MM_MS_ME 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVP 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX 1 +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX_360 2 +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PS 3 +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_GC 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PGD 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_WM 1 +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PSM 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PDM 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SSM 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SDM 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_TUNER 1 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_SPEAKERS 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_PMP 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HEADSET 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HPHONE 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_MPHONE 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HTS 7 /* WSC 2.0 */ + + +/* Device request/response type */ +#define WPS_MSGTYPE_ENROLLEE_INFO_ONLY 0x00 +#define WPS_MSGTYPE_ENROLLEE_OPEN_8021X 0x01 +#define WPS_MSGTYPE_REGISTRAR 0x02 +#define WPS_MSGTYPE_AP_WLAN_MGR 0x03 + +/* RF Band */ +#define WPS_RFBAND_24GHZ 0x01 +#define WPS_RFBAND_50GHZ 0x02 + +/* Simple Config state */ +#define WPS_SCSTATE_UNCONFIGURED 0x01 +#define WPS_SCSTATE_CONFIGURED 0x02 +#define WPS_SCSTATE_OFF 11 + +/* WPS Vendor extension key */ +#define WPS_OUI_HEADER_LEN 2 +#define WPS_OUI_HEADER_SIZE 4 +#define WPS_OUI_FIXED_HEADER_OFF 16 +#define WPS_WFA_SUBID_V2_OFF 3 +#define WPS_WFA_V2_OFF 5 + +#ifdef __cplusplus +} +#endif + +#endif /* _WPS_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/rte_ioctl.h b/drivers/amlogic/wifi/bcmdhd/include/rte_ioctl.h new file mode 100644 
index 0000000000000..9c214ae704ac1 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/rte_ioctl.h @@ -0,0 +1,85 @@ +/* + * HND Run Time Environment ioctl. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: rte_ioctl.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _rte_ioctl_h_ +#define _rte_ioctl_h_ + +/* RTE IOCTL definitions for generic ether devices */ +#define RTEGHWADDR 0x8901 +#define RTESHWADDR 0x8902 +#define RTEGMTU 0x8903 +#define RTEGSTATS 0x8904 +#define RTEGALLMULTI 0x8905 +#define RTESALLMULTI 0x8906 +#define RTEGPROMISC 0x8907 +#define RTESPROMISC 0x8908 +#define RTESMULTILIST 0x8909 +#define RTEGUP 0x890A +#define RTEGPERMADDR 0x890B +#define RTEDEVPWRSTCHG 0x890C /* Device pwr state change for PCIedev */ +#define RTEDEVPMETOGGLE 0x890D /* Toggle PME# to wake up the host */ + +#define RTE_IOCTL_QUERY 0x00 +#define RTE_IOCTL_SET 0x01 +#define RTE_IOCTL_OVL_IDX_MASK 0x1e +#define RTE_IOCTL_OVL_RSV 0x20 +#define RTE_IOCTL_OVL 0x40 +#define RTE_IOCTL_OVL_IDX_SHIFT 1 + +enum hnd_ioctl_cmd { + HND_RTE_DNGL_IS_SS = 1, /* true if device connected at super speed */ + + /* PCIEDEV specific wl <--> bus ioctls */ + BUS_GET_VAR = 2, + BUS_SET_VAR = 3, + BUS_FLUSH_RXREORDER_Q = 4, + BUS_SET_LTR_STATE = 5, + BUS_FLUSH_CHAINED_PKTS = 6, + BUS_SET_COPY_COUNT = 7 +}; + +#define SDPCMDEV_SET_MAXTXPKTGLOM 1 + +typedef struct memuse_info { + uint16 ver; /* version of this struct */ + uint16 len; /* length in bytes of this structure */ + uint32 tot; /* Total memory */ + uint32 text_len; /* Size of Text segment memory */ + uint32 data_len; /* Size of Data segment memory */ + uint32 bss_len; /* Size of BSS segment memory */ + + uint32 arena_size; /* Total Heap size */ + uint32 arena_free; /* Heap memory available or free */ + uint32 inuse_size; /* Heap memory currently in use */ + uint32 inuse_hwm; /* High watermark of memory - reclaimed memory */ + uint32 inuse_overhead; /* tally of allocated mem_t blocks */ + uint32 inuse_total; /* Heap in-use + Heap overhead memory */ +} memuse_info_t; + +#endif /* _rte_ioctl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/sbchipc.h b/drivers/amlogic/wifi/bcmdhd/include/sbchipc.h similarity index 72% 
rename from drivers/net/wireless/bcmdhd/include/sbchipc.h rename to drivers/amlogic/wifi/bcmdhd/include/sbchipc.h index f08e98ad8e27d..6d2389d17d67b 100644 --- a/drivers/net/wireless/bcmdhd/include/sbchipc.h +++ b/drivers/amlogic/wifi/bcmdhd/include/sbchipc.h @@ -5,9 +5,30 @@ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer, * GPIO interface, extbus, and support for serial and parallel flashes. * - * $Id: sbchipc.h 474281 2014-04-30 18:24:55Z $ + * $Id: sbchipc.h 574579 2015-07-27 15:36:37Z $ * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> */ #ifndef _SBCHIPC_H @@ -71,6 +92,19 @@ typedef volatile struct { uint32 pmuintmask1; /* 0x704 */ uint32 PAD[14]; uint32 pmuintstatus; /* 0x740 */ + uint32 extwakeupstatus; /* 0x744 */ + uint32 watchdog_res_mask; /* 0x748 */ + uint32 PAD[1]; /* 0x74C */ + uint32 swscratch; /* 0x750 */ + uint32 PAD[3]; /* 0x754-0x75C */ + uint32 extwakemask[2]; /* 0x760-0x764 */ + uint32 PAD[2]; /* 0x768-0x76C */ + uint32 extwakereqmask[2]; /* 0x770-0x774 */ + uint32 PAD[2]; /* 0x778-0x77C */ + uint32 pmuintctrl0; /* 0x780 */ + uint32 pmuintctrl1; /* 0x784 */ + uint32 PAD[2]; + uint32 extwakectrl[2] ; /* 0x790 */ } pmuregs_t; typedef struct eci_prerev35 { @@ -322,7 +356,7 @@ typedef volatile struct { uint32 res_req_timer_sel; uint32 res_req_timer; uint32 res_req_mask; - uint32 PAD; + uint32 core_cap_ext; /* 0x64c */ uint32 chipcontrol_addr; /* 0x650 */ uint32 chipcontrol_data; /* 0x654 */ uint32 regcontrol_addr; @@ -343,9 +377,11 @@ typedef volatile struct { uint32 pmuintmask1; /* 0x704 */ uint32 PAD[14]; uint32 pmuintstatus; /* 0x740 */ - uint32 PAD[47]; + uint32 PAD[15]; + uint32 pmuintctrl0; /* 0x780 */ + uint32 PAD[31]; uint16 sromotp[512]; /* 0x800 */ -#ifdef NFLASH_SUPPORT +#ifdef CCNFLASH_SUPPORT /* Nand flash MLC controller registers (corerev >= 38) */ uint32 nand_revision; /* 0xC00 */ uint32 nand_cmd_start; @@ -408,7 +444,7 @@ typedef volatile struct { uint32 nand_cache_data; uint32 nand_ctrl_config; uint32 nand_ctrl_status; -#endif /* NFLASH_SUPPORT */ +#endif /* CCNFLASH_SUPPORT */ uint32 gci_corecaps0; /* GCI starting at 0xC00 */ uint32 gci_corecaps1; uint32 gci_corecaps2; @@ -514,7 +550,9 @@ typedef volatile struct { #define CC_CLKC_M2 0x9c #define CC_CLKC_M3 0xa0 #define CC_CLKDIV 0xa4 +#define CC_CAP_EXT 0xac #define CC_SYS_CLK_CTL 0xc0 +#define CC_CLKDIV2 0xf0 #define CC_CLK_CTL_ST SI_CLK_CTL_ST #define PMU_CTL 0x600 #define PMU_CAP 0x604 @@ -559,6 +597,9 @@ typedef volatile struct { #define PMU_RES_DEP_MASK 0x624 #define RSRCUPDWNTIME 0x628 #define 
PMUREG_RESREQ_MASK 0x68c +#define PMUREG_RESREQ_TIMER 0x688 +#define PMUREG_RESREQ_MASK1 0x6f4 +#define PMUREG_RESREQ_TIMER1 0x6f0 #define EXT_LPO_AVAIL 0x100 #define LPO_SEL (1 << 0) #define CC_EXT_LPO_PU 0x200000 @@ -572,6 +613,12 @@ typedef volatile struct { #define REGCTRL5_PWM_AUTO_CTRL_SHIFT 17 #define REGCTRL6_PWM_AUTO_CTRL_MASK 0x3fff0000 #define REGCTRL6_PWM_AUTO_CTRL_SHIFT 16 +#define CC_BP_IND_ACCESS_START_SHIFT 9 +#define CC_BP_IND_ACCESS_START_MASK (1 << CC_BP_IND_ACCESS_START_SHIFT) +#define CC_BP_IND_ACCESS_RDWR_SHIFT 8 +#define CC_BP_IND_ACCESS_RDWR_MASK (1 << CC_BP_IND_ACCESS_RDWR_SHIFT) +#define CC_BP_IND_ACCESS_ERROR_SHIFT 10 +#define CC_BP_IND_ACCESS_ERROR_MASK (1 << CC_BP_IND_ACCESS_ERROR_SHIFT) #ifdef SR_DEBUG #define SUBCORE_POWER_ON 0x0001 @@ -584,7 +631,7 @@ typedef volatile struct { #define MEMLPLDO_POWER_ON_CHK 0x00200000 #endif /* SR_DEBUG */ -#ifdef NFLASH_SUPPORT +#ifdef CCNFLASH_SUPPORT /* NAND flash support */ #define CC_NAND_REVISION 0xC00 #define CC_NAND_CMD_START 0xC04 @@ -597,63 +644,70 @@ typedef volatile struct { #define CC_NAND_DEVID 0xC60 #define CC_NAND_DEVID_EXT 0xC64 #define CC_NAND_INTFC_STATUS 0xC6C -#endif /* NFLASH_SUPPORT */ +#endif /* CCNFLASH_SUPPORT */ /* chipid */ -#define CID_ID_MASK 0x0000ffff /* Chip Id mask */ -#define CID_REV_MASK 0x000f0000 /* Chip Revision mask */ -#define CID_REV_SHIFT 16 /* Chip Revision shift */ -#define CID_PKG_MASK 0x00f00000 /* Package Option mask */ -#define CID_PKG_SHIFT 20 /* Package Option shift */ -#define CID_CC_MASK 0x0f000000 /* CoreCount (corerev >= 4) */ +#define CID_ID_MASK 0x0000ffff /**< Chip Id mask */ +#define CID_REV_MASK 0x000f0000 /**< Chip Revision mask */ +#define CID_REV_SHIFT 16 /**< Chip Revision shift */ +#define CID_PKG_MASK 0x00f00000 /**< Package Option mask */ +#define CID_PKG_SHIFT 20 /**< Package Option shift */ +#define CID_CC_MASK 0x0f000000 /**< CoreCount (corerev >= 4) */ #define CID_CC_SHIFT 24 -#define CID_TYPE_MASK 0xf0000000 /* Chip Type */ 
+#define CID_TYPE_MASK 0xf0000000 /**< Chip Type */ #define CID_TYPE_SHIFT 28 /* capabilities */ -#define CC_CAP_UARTS_MASK 0x00000003 /* Number of UARTs */ -#define CC_CAP_MIPSEB 0x00000004 /* MIPS is in big-endian mode */ -#define CC_CAP_UCLKSEL 0x00000018 /* UARTs clock select */ -#define CC_CAP_UINTCLK 0x00000008 /* UARTs are driven by internal divided clock */ -#define CC_CAP_UARTGPIO 0x00000020 /* UARTs own GPIOs 15:12 */ -#define CC_CAP_EXTBUS_MASK 0x000000c0 /* External bus mask */ -#define CC_CAP_EXTBUS_NONE 0x00000000 /* No ExtBus present */ -#define CC_CAP_EXTBUS_FULL 0x00000040 /* ExtBus: PCMCIA, IDE & Prog */ -#define CC_CAP_EXTBUS_PROG 0x00000080 /* ExtBus: ProgIf only */ -#define CC_CAP_FLASH_MASK 0x00000700 /* Type of flash */ -#define CC_CAP_PLL_MASK 0x00038000 /* Type of PLL */ -#define CC_CAP_PWR_CTL 0x00040000 /* Power control */ -#define CC_CAP_OTPSIZE 0x00380000 /* OTP Size (0 = none) */ -#define CC_CAP_OTPSIZE_SHIFT 19 /* OTP Size shift */ -#define CC_CAP_OTPSIZE_BASE 5 /* OTP Size base */ -#define CC_CAP_JTAGP 0x00400000 /* JTAG Master Present */ -#define CC_CAP_ROM 0x00800000 /* Internal boot rom active */ -#define CC_CAP_BKPLN64 0x08000000 /* 64-bit backplane */ -#define CC_CAP_PMU 0x10000000 /* PMU Present, rev >= 20 */ -#define CC_CAP_ECI 0x20000000 /* ECI Present, rev >= 21 */ -#define CC_CAP_SROM 0x40000000 /* Srom Present, rev >= 32 */ -#define CC_CAP_NFLASH 0x80000000 /* Nand flash present, rev >= 35 */ - -#define CC_CAP2_SECI 0x00000001 /* SECI Present, rev >= 36 */ -#define CC_CAP2_GSIO 0x00000002 /* GSIO (spi/i2c) present, rev >= 37 */ +#define CC_CAP_UARTS_MASK 0x00000003 /**< Number of UARTs */ +#define CC_CAP_MIPSEB 0x00000004 /**< MIPS is in big-endian mode */ +#define CC_CAP_UCLKSEL 0x00000018 /**< UARTs clock select */ +#define CC_CAP_UINTCLK 0x00000008 /**< UARTs are driven by internal divided clock */ +#define CC_CAP_UARTGPIO 0x00000020 /**< UARTs own GPIOs 15:12 */ +#define CC_CAP_EXTBUS_MASK 0x000000c0 /**< External bus 
mask */ +#define CC_CAP_EXTBUS_NONE 0x00000000 /**< No ExtBus present */ +#define CC_CAP_EXTBUS_FULL 0x00000040 /**< ExtBus: PCMCIA, IDE & Prog */ +#define CC_CAP_EXTBUS_PROG 0x00000080 /**< ExtBus: ProgIf only */ +#define CC_CAP_FLASH_MASK 0x00000700 /**< Type of flash */ +#define CC_CAP_PLL_MASK 0x00038000 /**< Type of PLL */ +#define CC_CAP_PWR_CTL 0x00040000 /**< Power control */ +#define CC_CAP_OTPSIZE 0x00380000 /**< OTP Size (0 = none) */ +#define CC_CAP_OTPSIZE_SHIFT 19 /**< OTP Size shift */ +#define CC_CAP_OTPSIZE_BASE 5 /**< OTP Size base */ +#define CC_CAP_JTAGP 0x00400000 /**< JTAG Master Present */ +#define CC_CAP_ROM 0x00800000 /**< Internal boot rom active */ +#define CC_CAP_BKPLN64 0x08000000 /**< 64-bit backplane */ +#define CC_CAP_PMU 0x10000000 /**< PMU Present, rev >= 20 */ +#define CC_CAP_ECI 0x20000000 /**< ECI Present, rev >= 21 */ +#define CC_CAP_SROM 0x40000000 /**< Srom Present, rev >= 32 */ +#define CC_CAP_NFLASH 0x80000000 /**< Nand flash present, rev >= 35 */ + +#define CC_CAP2_SECI 0x00000001 /**< SECI Present, rev >= 36 */ +#define CC_CAP2_GSIO 0x00000002 /**< GSIO (spi/i2c) present, rev >= 37 */ /* capabilities extension */ -#define CC_CAP_EXT_SECI_PRESENT 0x00000001 /* SECI present */ -#define CC_CAP_EXT_GSIO_PRESENT 0x00000002 /* GSIO present */ -#define CC_CAP_EXT_GCI_PRESENT 0x00000004 /* GCI present */ -#define CC_CAP_EXT_AOB_PRESENT 0x00000040 /* AOB present */ +#define CC_CAP_EXT_SECI_PRESENT 0x00000001 /**< SECI present */ +#define CC_CAP_EXT_GSIO_PRESENT 0x00000002 /**< GSIO present */ +#define CC_CAP_EXT_GCI_PRESENT 0x00000004 /**< GCI present */ +#define CC_CAP_EXT_AOB_PRESENT 0x00000040 /**< AOB present */ +#define CC_CAP_EXT_SWD_PRESENT 0x00000400 /**< SWD present */ /* WL Channel Info to BT via GCI - bits 40 - 47 */ -#define GCI_WL_CHN_INFO_MASK (0xFF00) +#define GCI_WL_CHN_INFO_MASK (0xFF00) +/* bits [51:48] - reserved for wlan TX pwr index */ +/* bits [55:52] btc mode indication */ +#define GCI_WL_BTC_MODE_SHIFT (20) 
+#define GCI_WL_BTC_MODE_MASK (0xF << GCI_WL_BTC_MODE_SHIFT) +#define GCI_WL_ANT_BIT_MASK (0x00c0) +#define GCI_WL_ANT_SHIFT_BITS (6) /* PLL type */ #define PLL_NONE 0x00000000 -#define PLL_TYPE1 0x00010000 /* 48MHz base, 3 dividers */ -#define PLL_TYPE2 0x00020000 /* 48MHz, 4 dividers */ -#define PLL_TYPE3 0x00030000 /* 25MHz, 2 dividers */ -#define PLL_TYPE4 0x00008000 /* 48MHz, 4 dividers */ -#define PLL_TYPE5 0x00018000 /* 25MHz, 4 dividers */ -#define PLL_TYPE6 0x00028000 /* 100/200 or 120/240 only */ -#define PLL_TYPE7 0x00038000 /* 25MHz, 4 dividers */ +#define PLL_TYPE1 0x00010000 /**< 48MHz base, 3 dividers */ +#define PLL_TYPE2 0x00020000 /**< 48MHz, 4 dividers */ +#define PLL_TYPE3 0x00030000 /**< 25MHz, 2 dividers */ +#define PLL_TYPE4 0x00008000 /**< 48MHz, 4 dividers */ +#define PLL_TYPE5 0x00018000 /**< 25MHz, 4 dividers */ +#define PLL_TYPE6 0x00028000 /**< 100/200 or 120/240 only */ +#define PLL_TYPE7 0x00038000 /**< 25MHz, 4 dividers */ /* ILP clock */ #define ILP_CLOCK 32000 @@ -687,30 +741,34 @@ typedef volatile struct { #define HT_CLOCK 80000000 /* corecontrol */ -#define CC_UARTCLKO 0x00000001 /* Drive UART with internal clock */ -#define CC_SE 0x00000002 /* sync clk out enable (corerev >= 3) */ -#define CC_ASYNCGPIO 0x00000004 /* 1=generate GPIO interrupt without backplane clock */ -#define CC_UARTCLKEN 0x00000008 /* enable UART Clock (corerev > = 21 */ +#define CC_UARTCLKO 0x00000001 /**< Drive UART with internal clock */ +#define CC_SE 0x00000002 /**< sync clk out enable (corerev >= 3) */ +#define CC_ASYNCGPIO 0x00000004 /**< 1=generate GPIO interrupt without backplane clock */ +#define CC_UARTCLKEN 0x00000008 /**< enable UART Clock (corerev > = 21 */ + +/* retention_ctl */ +#define RCTL_MEM_RET_SLEEP_LOG_SHIFT 29 +#define RCTL_MEM_RET_SLEEP_LOG_MASK (1 << RCTL_MEM_RET_SLEEP_LOG_SHIFT) /* 4321 chipcontrol */ #define CHIPCTRL_4321A0_DEFAULT 0x3a4 #define CHIPCTRL_4321A1_DEFAULT 0x0a4 -#define CHIPCTRL_4321_PLL_DOWN 0x800000 /* serdes PLL 
down override */ +#define CHIPCTRL_4321_PLL_DOWN 0x800000 /**< serdes PLL down override */ /* Fields in the otpstatus register in rev >= 21 */ #define OTPS_OL_MASK 0x000000ff -#define OTPS_OL_MFG 0x00000001 /* manuf row is locked */ -#define OTPS_OL_OR1 0x00000002 /* otp redundancy row 1 is locked */ -#define OTPS_OL_OR2 0x00000004 /* otp redundancy row 2 is locked */ -#define OTPS_OL_GU 0x00000008 /* general use region is locked */ +#define OTPS_OL_MFG 0x00000001 /**< manuf row is locked */ +#define OTPS_OL_OR1 0x00000002 /**< otp redundancy row 1 is locked */ +#define OTPS_OL_OR2 0x00000004 /**< otp redundancy row 2 is locked */ +#define OTPS_OL_GU 0x00000008 /**< general use region is locked */ #define OTPS_GUP_MASK 0x00000f00 #define OTPS_GUP_SHIFT 8 -#define OTPS_GUP_HW 0x00000100 /* h/w subregion is programmed */ -#define OTPS_GUP_SW 0x00000200 /* s/w subregion is programmed */ -#define OTPS_GUP_CI 0x00000400 /* chipid/pkgopt subregion is programmed */ -#define OTPS_GUP_FUSE 0x00000800 /* fuse subregion is programmed */ +#define OTPS_GUP_HW 0x00000100 /**< h/w subregion is programmed */ +#define OTPS_GUP_SW 0x00000200 /**< s/w subregion is programmed */ +#define OTPS_GUP_CI 0x00000400 /**< chipid/pkgopt subregion is programmed */ +#define OTPS_GUP_FUSE 0x00000800 /**< fuse subregion is programmed */ #define OTPS_READY 0x00001000 -#define OTPS_RV(x) (1 << (16 + (x))) /* redundancy entry valid */ +#define OTPS_RV(x) (1 << (16 + (x))) /**< redundancy entry valid */ #define OTPS_RV_MASK 0x0fff0000 #define OTPS_PROGOK 0x40000000 @@ -814,18 +872,18 @@ typedef volatile struct { /* Jtagm characteristics that appeared at a given corerev */ -#define JTAGM_CREV_OLD 10 /* Old command set, 16bit max IR */ -#define JTAGM_CREV_IRP 22 /* Able to do pause-ir */ -#define JTAGM_CREV_RTI 28 /* Able to do return-to-idle */ +#define JTAGM_CREV_OLD 10 /**< Old command set, 16bit max IR */ +#define JTAGM_CREV_IRP 22 /**< Able to do pause-ir */ +#define JTAGM_CREV_RTI 28 /**< Able to 
do return-to-idle */ /* jtagcmd */ #define JCMD_START 0x80000000 #define JCMD_BUSY 0x80000000 #define JCMD_STATE_MASK 0x60000000 -#define JCMD_STATE_TLR 0x00000000 /* Test-logic-reset */ -#define JCMD_STATE_PIR 0x20000000 /* Pause IR */ -#define JCMD_STATE_PDR 0x40000000 /* Pause DR */ -#define JCMD_STATE_RTI 0x60000000 /* Run-test-idle */ +#define JCMD_STATE_TLR 0x00000000 /**< Test-logic-reset */ +#define JCMD_STATE_PIR 0x20000000 /**< Pause IR */ +#define JCMD_STATE_PDR 0x40000000 /**< Pause DR */ +#define JCMD_STATE_RTI 0x60000000 /**< Run-test-idle */ #define JCMD0_ACC_MASK 0x0000f000 #define JCMD0_ACC_IRDR 0x00000000 #define JCMD0_ACC_DR 0x00001000 @@ -834,7 +892,7 @@ typedef volatile struct { #define JCMD0_ACC_IRPDR 0x00004000 #define JCMD0_ACC_PDR 0x00005000 #define JCMD0_IRW_MASK 0x00000f00 -#define JCMD_ACC_MASK 0x000f0000 /* Changes for corerev 11 */ +#define JCMD_ACC_MASK 0x000f0000 /**< Changes for corerev 11 */ #define JCMD_ACC_IRDR 0x00000000 #define JCMD_ACC_DR 0x00010000 #define JCMD_ACC_IR 0x00020000 @@ -842,21 +900,26 @@ typedef volatile struct { #define JCMD_ACC_IRPDR 0x00040000 #define JCMD_ACC_PDR 0x00050000 #define JCMD_ACC_PIR 0x00060000 -#define JCMD_ACC_IRDR_I 0x00070000 /* rev 28: return to run-test-idle */ -#define JCMD_ACC_DR_I 0x00080000 /* rev 28: return to run-test-idle */ +#define JCMD_ACC_IRDR_I 0x00070000 /**< rev 28: return to run-test-idle */ +#define JCMD_ACC_DR_I 0x00080000 /**< rev 28: return to run-test-idle */ #define JCMD_IRW_MASK 0x00001f00 #define JCMD_IRW_SHIFT 8 #define JCMD_DRW_MASK 0x0000003f /* jtagctrl */ -#define JCTRL_FORCE_CLK 4 /* Force clock */ -#define JCTRL_EXT_EN 2 /* Enable external targets */ -#define JCTRL_EN 1 /* Enable Jtag master */ +#define JCTRL_FORCE_CLK 4 /**< Force clock */ +#define JCTRL_EXT_EN 2 /**< Enable external targets */ +#define JCTRL_EN 1 /**< Enable Jtag master */ +#define JCTRL_TAPSEL_BIT 0x00000008 /**< JtagMasterCtrl tap_sel bit */ -#define JCTRL_TAPSEL_BIT 0x00000008 /* 
JtagMasterCtrl tap_sel bit */ +/* swdmasterctrl */ +#define SWDCTRL_INT_EN 8 /**< Enable internal targets */ +#define SWDCTRL_FORCE_CLK 4 /**< Force clock */ +#define SWDCTRL_OVJTAG 2 /**< Enable shared SWD/JTAG pins */ +#define SWDCTRL_EN 1 /**< Enable Jtag master */ /* Fields in clkdiv */ -#define CLKD_SFLASH 0x0f000000 +#define CLKD_SFLASH 0x1f000000 #define CLKD_SFLASH_SHIFT 24 #define CLKD_OTP 0x000f0000 #define CLKD_OTP_SHIFT 16 @@ -865,46 +928,48 @@ typedef volatile struct { #define CLKD_UART 0x000000ff #define CLKD2_SROM 0x00000003 +#define CLKD2_SWD 0xf8000000 +#define CLKD2_SWD_SHIFT 27 /* intstatus/intmask */ -#define CI_GPIO 0x00000001 /* gpio intr */ -#define CI_EI 0x00000002 /* extif intr (corerev >= 3) */ -#define CI_TEMP 0x00000004 /* temp. ctrl intr (corerev >= 15) */ -#define CI_SIRQ 0x00000008 /* serial IRQ intr (corerev >= 15) */ -#define CI_ECI 0x00000010 /* eci intr (corerev >= 21) */ -#define CI_PMU 0x00000020 /* pmu intr (corerev >= 21) */ -#define CI_UART 0x00000040 /* uart intr (corerev >= 21) */ -#define CI_WDRESET 0x80000000 /* watchdog reset occurred */ +#define CI_GPIO 0x00000001 /**< gpio intr */ +#define CI_EI 0x00000002 /**< extif intr (corerev >= 3) */ +#define CI_TEMP 0x00000004 /**< temp. 
ctrl intr (corerev >= 15) */ +#define CI_SIRQ 0x00000008 /**< serial IRQ intr (corerev >= 15) */ +#define CI_ECI 0x00000010 /**< eci intr (corerev >= 21) */ +#define CI_PMU 0x00000020 /**< pmu intr (corerev >= 21) */ +#define CI_UART 0x00000040 /**< uart intr (corerev >= 21) */ +#define CI_WDRESET 0x80000000 /**< watchdog reset occurred */ /* slow_clk_ctl */ -#define SCC_SS_MASK 0x00000007 /* slow clock source mask */ -#define SCC_SS_LPO 0x00000000 /* source of slow clock is LPO */ -#define SCC_SS_XTAL 0x00000001 /* source of slow clock is crystal */ -#define SCC_SS_PCI 0x00000002 /* source of slow clock is PCI */ -#define SCC_LF 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */ -#define SCC_LP 0x00000400 /* LPOPowerDown, 1: LPO is disabled, +#define SCC_SS_MASK 0x00000007 /**< slow clock source mask */ +#define SCC_SS_LPO 0x00000000 /**< source of slow clock is LPO */ +#define SCC_SS_XTAL 0x00000001 /**< source of slow clock is crystal */ +#define SCC_SS_PCI 0x00000002 /**< source of slow clock is PCI */ +#define SCC_LF 0x00000200 /**< LPOFreqSel, 1: 160Khz, 0: 32KHz */ +#define SCC_LP 0x00000400 /**< LPOPowerDown, 1: LPO is disabled, * 0: LPO is enabled */ -#define SCC_FS 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock, +#define SCC_FS 0x00000800 /**< ForceSlowClk, 1: sb/cores running on slow clock, * 0: power logic control */ -#define SCC_IP 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors +#define SCC_IP 0x00001000 /**< IgnorePllOffReq, 1/0: power logic ignores/honors * PLL clock disable requests from core */ -#define SCC_XC 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't +#define SCC_XC 0x00002000 /**< XtalControlEn, 1/0: power logic does/doesn't * disable crystal when appropriate */ -#define SCC_XP 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */ -#define SCC_CD_MASK 0xffff0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */ +#define SCC_XP 0x00004000 /**< XtalPU (RO), 1/0: crystal running/disabled */ +#define 
SCC_CD_MASK 0xffff0000 /**< ClockDivider (SlowClk = 1/(4+divisor)) */ #define SCC_CD_SHIFT 16 /* system_clk_ctl */ -#define SYCC_IE 0x00000001 /* ILPen: Enable Idle Low Power */ -#define SYCC_AE 0x00000002 /* ALPen: Enable Active Low Power */ -#define SYCC_FP 0x00000004 /* ForcePLLOn */ -#define SYCC_AR 0x00000008 /* Force ALP (or HT if ALPen is not set */ -#define SYCC_HR 0x00000010 /* Force HT */ -#define SYCC_CD_MASK 0xffff0000 /* ClkDiv (ILP = 1/(4 * (divisor + 1)) */ +#define SYCC_IE 0x00000001 /**< ILPen: Enable Idle Low Power */ +#define SYCC_AE 0x00000002 /**< ALPen: Enable Active Low Power */ +#define SYCC_FP 0x00000004 /**< ForcePLLOn */ +#define SYCC_AR 0x00000008 /**< Force ALP (or HT if ALPen is not set */ +#define SYCC_HR 0x00000010 /**< Force HT */ +#define SYCC_CD_MASK 0xffff0000 /**< ClkDiv (ILP = 1/(4 * (divisor + 1)) */ #define SYCC_CD_SHIFT 16 /* Indirect backplane access */ @@ -919,56 +984,56 @@ typedef volatile struct { #define BPIA_ERROR 0x00000400 /* pcmcia/prog/flash_config */ -#define CF_EN 0x00000001 /* enable */ -#define CF_EM_MASK 0x0000000e /* mode */ +#define CF_EN 0x00000001 /**< enable */ +#define CF_EM_MASK 0x0000000e /**< mode */ #define CF_EM_SHIFT 1 -#define CF_EM_FLASH 0 /* flash/asynchronous mode */ -#define CF_EM_SYNC 2 /* synchronous mode */ -#define CF_EM_PCMCIA 4 /* pcmcia mode */ -#define CF_DS 0x00000010 /* destsize: 0=8bit, 1=16bit */ -#define CF_BS 0x00000020 /* byteswap */ -#define CF_CD_MASK 0x000000c0 /* clock divider */ +#define CF_EM_FLASH 0 /**< flash/asynchronous mode */ +#define CF_EM_SYNC 2 /**< synchronous mode */ +#define CF_EM_PCMCIA 4 /**< pcmcia mode */ +#define CF_DS 0x00000010 /**< destsize: 0=8bit, 1=16bit */ +#define CF_BS 0x00000020 /**< byteswap */ +#define CF_CD_MASK 0x000000c0 /**< clock divider */ #define CF_CD_SHIFT 6 -#define CF_CD_DIV2 0x00000000 /* backplane/2 */ -#define CF_CD_DIV3 0x00000040 /* backplane/3 */ -#define CF_CD_DIV4 0x00000080 /* backplane/4 */ -#define CF_CE 0x00000100 /* 
clock enable */ -#define CF_SB 0x00000200 /* size/bytestrobe (synch only) */ +#define CF_CD_DIV2 0x00000000 /**< backplane/2 */ +#define CF_CD_DIV3 0x00000040 /**< backplane/3 */ +#define CF_CD_DIV4 0x00000080 /**< backplane/4 */ +#define CF_CE 0x00000100 /**< clock enable */ +#define CF_SB 0x00000200 /**< size/bytestrobe (synch only) */ /* pcmcia_memwait */ -#define PM_W0_MASK 0x0000003f /* waitcount0 */ -#define PM_W1_MASK 0x00001f00 /* waitcount1 */ +#define PM_W0_MASK 0x0000003f /**< waitcount0 */ +#define PM_W1_MASK 0x00001f00 /**< waitcount1 */ #define PM_W1_SHIFT 8 -#define PM_W2_MASK 0x001f0000 /* waitcount2 */ +#define PM_W2_MASK 0x001f0000 /**< waitcount2 */ #define PM_W2_SHIFT 16 -#define PM_W3_MASK 0x1f000000 /* waitcount3 */ +#define PM_W3_MASK 0x1f000000 /**< waitcount3 */ #define PM_W3_SHIFT 24 /* pcmcia_attrwait */ -#define PA_W0_MASK 0x0000003f /* waitcount0 */ -#define PA_W1_MASK 0x00001f00 /* waitcount1 */ +#define PA_W0_MASK 0x0000003f /**< waitcount0 */ +#define PA_W1_MASK 0x00001f00 /**< waitcount1 */ #define PA_W1_SHIFT 8 -#define PA_W2_MASK 0x001f0000 /* waitcount2 */ +#define PA_W2_MASK 0x001f0000 /**< waitcount2 */ #define PA_W2_SHIFT 16 -#define PA_W3_MASK 0x1f000000 /* waitcount3 */ +#define PA_W3_MASK 0x1f000000 /**< waitcount3 */ #define PA_W3_SHIFT 24 /* pcmcia_iowait */ -#define PI_W0_MASK 0x0000003f /* waitcount0 */ -#define PI_W1_MASK 0x00001f00 /* waitcount1 */ +#define PI_W0_MASK 0x0000003f /**< waitcount0 */ +#define PI_W1_MASK 0x00001f00 /**< waitcount1 */ #define PI_W1_SHIFT 8 -#define PI_W2_MASK 0x001f0000 /* waitcount2 */ +#define PI_W2_MASK 0x001f0000 /**< waitcount2 */ #define PI_W2_SHIFT 16 -#define PI_W3_MASK 0x1f000000 /* waitcount3 */ +#define PI_W3_MASK 0x1f000000 /**< waitcount3 */ #define PI_W3_SHIFT 24 /* prog_waitcount */ -#define PW_W0_MASK 0x0000001f /* waitcount0 */ -#define PW_W1_MASK 0x00001f00 /* waitcount1 */ +#define PW_W0_MASK 0x0000001f /**< waitcount0 */ +#define PW_W1_MASK 0x00001f00 /**< waitcount1 */ 
#define PW_W1_SHIFT 8 -#define PW_W2_MASK 0x001f0000 /* waitcount2 */ +#define PW_W2_MASK 0x001f0000 /**< waitcount2 */ #define PW_W2_SHIFT 16 -#define PW_W3_MASK 0x1f000000 /* waitcount3 */ +#define PW_W3_MASK 0x1f000000 /**< waitcount3 */ #define PW_W3_SHIFT 24 #define PW_W0 0x0000000c @@ -977,12 +1042,12 @@ typedef volatile struct { #define PW_W3 0x01000000 /* flash_waitcount */ -#define FW_W0_MASK 0x0000003f /* waitcount0 */ -#define FW_W1_MASK 0x00001f00 /* waitcount1 */ +#define FW_W0_MASK 0x0000003f /**< waitcount0 */ +#define FW_W1_MASK 0x00001f00 /**< waitcount1 */ #define FW_W1_SHIFT 8 -#define FW_W2_MASK 0x001f0000 /* waitcount2 */ +#define FW_W2_MASK 0x001f0000 /**< waitcount2 */ #define FW_W2_SHIFT 16 -#define FW_W3_MASK 0x1f000000 /* waitcount3 */ +#define FW_W3_MASK 0x1f000000 /**< waitcount3 */ #define FW_W3_SHIFT 24 /* When Srom support present, fields in sromcontrol */ @@ -1007,14 +1072,17 @@ typedef volatile struct { #define PCTL_ILP_DIV_MASK 0xffff0000 #define PCTL_ILP_DIV_SHIFT 16 #define PCTL_LQ_REQ_EN 0x00008000 -#define PCTL_PLL_PLLCTL_UPD 0x00000400 /* rev 2 */ -#define PCTL_NOILP_ON_WAIT 0x00000200 /* rev 1 */ +#define PCTL_PLL_PLLCTL_UPD 0x00000400 /**< rev 2 */ +#define PCTL_NOILP_ON_WAIT 0x00000200 /**< rev 1 */ #define PCTL_HT_REQ_EN 0x00000100 #define PCTL_ALP_REQ_EN 0x00000080 #define PCTL_XTALFREQ_MASK 0x0000007c #define PCTL_XTALFREQ_SHIFT 2 #define PCTL_ILP_DIV_EN 0x00000002 #define PCTL_LPO_SEL 0x00000001 +#define PCTL_EXT_FASTLPO_SWENAB 0x00000200 + +#define DEFAULT_43012_MIN_RES_MASK 0x0f8bfe77 /* Retention Control */ #define PMU_RCTL_CLK_DIV_SHIFT 0 @@ -1051,30 +1119,30 @@ typedef volatile struct { #define GPIO_ONTIME_SHIFT 16 /* clockcontrol_n */ -#define CN_N1_MASK 0x3f /* n1 control */ -#define CN_N2_MASK 0x3f00 /* n2 control */ +#define CN_N1_MASK 0x3f /**< n1 control */ +#define CN_N2_MASK 0x3f00 /**< n2 control */ #define CN_N2_SHIFT 8 -#define CN_PLLC_MASK 0xf0000 /* pll control */ +#define CN_PLLC_MASK 0xf0000 /**< pll 
control */ #define CN_PLLC_SHIFT 16 /* clockcontrol_sb/pci/uart */ -#define CC_M1_MASK 0x3f /* m1 control */ -#define CC_M2_MASK 0x3f00 /* m2 control */ +#define CC_M1_MASK 0x3f /**< m1 control */ +#define CC_M2_MASK 0x3f00 /**< m2 control */ #define CC_M2_SHIFT 8 -#define CC_M3_MASK 0x3f0000 /* m3 control */ +#define CC_M3_MASK 0x3f0000 /**< m3 control */ #define CC_M3_SHIFT 16 -#define CC_MC_MASK 0x1f000000 /* mux control */ +#define CC_MC_MASK 0x1f000000 /**< mux control */ #define CC_MC_SHIFT 24 /* N3M Clock control magic field values */ -#define CC_F6_2 0x02 /* A factor of 2 in */ -#define CC_F6_3 0x03 /* 6-bit fields like */ -#define CC_F6_4 0x05 /* N1, M1 or M3 */ +#define CC_F6_2 0x02 /**< A factor of 2 in */ +#define CC_F6_3 0x03 /**< 6-bit fields like */ +#define CC_F6_4 0x05 /**< N1, M1 or M3 */ #define CC_F6_5 0x09 #define CC_F6_6 0x11 #define CC_F6_7 0x21 -#define CC_F5_BIAS 5 /* 5-bit fields get this added */ +#define CC_F5_BIAS 5 /**< 5-bit fields get this added */ #define CC_MC_BYPASS 0x08 #define CC_MC_M1 0x04 @@ -1083,100 +1151,100 @@ typedef volatile struct { #define CC_MC_M1M3 0x11 /* Type 2 Clock control magic field values */ -#define CC_T2_BIAS 2 /* n1, n2, m1 & m3 bias */ -#define CC_T2M2_BIAS 3 /* m2 bias */ +#define CC_T2_BIAS 2 /**< n1, n2, m1 & m3 bias */ +#define CC_T2M2_BIAS 3 /**< m2 bias */ #define CC_T2MC_M1BYP 1 #define CC_T2MC_M2BYP 2 #define CC_T2MC_M3BYP 4 /* Type 6 Clock control magic field values */ -#define CC_T6_MMASK 1 /* bits of interest in m */ -#define CC_T6_M0 120000000 /* sb clock for m = 0 */ -#define CC_T6_M1 100000000 /* sb clock for m = 1 */ +#define CC_T6_MMASK 1 /**< bits of interest in m */ +#define CC_T6_M0 120000000 /**< sb clock for m = 0 */ +#define CC_T6_M1 100000000 /**< sb clock for m = 1 */ #define SB2MIPS_T6(sb) (2 * (sb)) /* Common clock base */ -#define CC_CLOCK_BASE1 24000000 /* Half the clock freq */ -#define CC_CLOCK_BASE2 12500000 /* Alternate crystal on some PLLs */ +#define CC_CLOCK_BASE1 
24000000 /**< Half the clock freq */ +#define CC_CLOCK_BASE2 12500000 /**< Alternate crystal on some PLLs */ /* Clock control values for 200MHz in 5350 */ #define CLKC_5350_N 0x0311 #define CLKC_5350_M 0x04020009 /* Flash types in the chipcommon capabilities register */ -#define FLASH_NONE 0x000 /* No flash */ -#define SFLASH_ST 0x100 /* ST serial flash */ -#define SFLASH_AT 0x200 /* Atmel serial flash */ +#define FLASH_NONE 0x000 /**< No flash */ +#define SFLASH_ST 0x100 /**< ST serial flash */ +#define SFLASH_AT 0x200 /**< Atmel serial flash */ #define NFLASH 0x300 -#define PFLASH 0x700 /* Parallel flash */ +#define PFLASH 0x700 /**< Parallel flash */ #define QSPIFLASH_ST 0x800 #define QSPIFLASH_AT 0x900 /* Bits in the ExtBus config registers */ -#define CC_CFG_EN 0x0001 /* Enable */ -#define CC_CFG_EM_MASK 0x000e /* Extif Mode */ -#define CC_CFG_EM_ASYNC 0x0000 /* Async/Parallel flash */ -#define CC_CFG_EM_SYNC 0x0002 /* Synchronous */ -#define CC_CFG_EM_PCMCIA 0x0004 /* PCMCIA */ -#define CC_CFG_EM_IDE 0x0006 /* IDE */ -#define CC_CFG_DS 0x0010 /* Data size, 0=8bit, 1=16bit */ -#define CC_CFG_CD_MASK 0x00e0 /* Sync: Clock divisor, rev >= 20 */ -#define CC_CFG_CE 0x0100 /* Sync: Clock enable, rev >= 20 */ -#define CC_CFG_SB 0x0200 /* Sync: Size/Bytestrobe, rev >= 20 */ -#define CC_CFG_IS 0x0400 /* Extif Sync Clk Select, rev >= 20 */ +#define CC_CFG_EN 0x0001 /**< Enable */ +#define CC_CFG_EM_MASK 0x000e /**< Extif Mode */ +#define CC_CFG_EM_ASYNC 0x0000 /**< Async/Parallel flash */ +#define CC_CFG_EM_SYNC 0x0002 /**< Synchronous */ +#define CC_CFG_EM_PCMCIA 0x0004 /**< PCMCIA */ +#define CC_CFG_EM_IDE 0x0006 /**< IDE */ +#define CC_CFG_DS 0x0010 /**< Data size, 0=8bit, 1=16bit */ +#define CC_CFG_CD_MASK 0x00e0 /**< Sync: Clock divisor, rev >= 20 */ +#define CC_CFG_CE 0x0100 /**< Sync: Clock enable, rev >= 20 */ +#define CC_CFG_SB 0x0200 /**< Sync: Size/Bytestrobe, rev >= 20 */ +#define CC_CFG_IS 0x0400 /**< Extif Sync Clk Select, rev >= 20 */ /* ExtBus address 
space */ -#define CC_EB_BASE 0x1a000000 /* Chipc ExtBus base address */ -#define CC_EB_PCMCIA_MEM 0x1a000000 /* PCMCIA 0 memory base address */ -#define CC_EB_PCMCIA_IO 0x1a200000 /* PCMCIA 0 I/O base address */ -#define CC_EB_PCMCIA_CFG 0x1a400000 /* PCMCIA 0 config base address */ -#define CC_EB_IDE 0x1a800000 /* IDE memory base */ -#define CC_EB_PCMCIA1_MEM 0x1a800000 /* PCMCIA 1 memory base address */ -#define CC_EB_PCMCIA1_IO 0x1aa00000 /* PCMCIA 1 I/O base address */ -#define CC_EB_PCMCIA1_CFG 0x1ac00000 /* PCMCIA 1 config base address */ -#define CC_EB_PROGIF 0x1b000000 /* ProgIF Async/Sync base address */ +#define CC_EB_BASE 0x1a000000 /**< Chipc ExtBus base address */ +#define CC_EB_PCMCIA_MEM 0x1a000000 /**< PCMCIA 0 memory base address */ +#define CC_EB_PCMCIA_IO 0x1a200000 /**< PCMCIA 0 I/O base address */ +#define CC_EB_PCMCIA_CFG 0x1a400000 /**< PCMCIA 0 config base address */ +#define CC_EB_IDE 0x1a800000 /**< IDE memory base */ +#define CC_EB_PCMCIA1_MEM 0x1a800000 /**< PCMCIA 1 memory base address */ +#define CC_EB_PCMCIA1_IO 0x1aa00000 /**< PCMCIA 1 I/O base address */ +#define CC_EB_PCMCIA1_CFG 0x1ac00000 /**< PCMCIA 1 config base address */ +#define CC_EB_PROGIF 0x1b000000 /**< ProgIF Async/Sync base address */ /* Start/busy bit in flashcontrol */ #define SFLASH_OPCODE 0x000000ff #define SFLASH_ACTION 0x00000700 -#define SFLASH_CS_ACTIVE 0x00001000 /* Chip Select Active, rev >= 20 */ +#define SFLASH_CS_ACTIVE 0x00001000 /**< Chip Select Active, rev >= 20 */ #define SFLASH_START 0x80000000 #define SFLASH_BUSY SFLASH_START /* flashcontrol action codes */ -#define SFLASH_ACT_OPONLY 0x0000 /* Issue opcode only */ -#define SFLASH_ACT_OP1D 0x0100 /* opcode + 1 data byte */ -#define SFLASH_ACT_OP3A 0x0200 /* opcode + 3 addr bytes */ -#define SFLASH_ACT_OP3A1D 0x0300 /* opcode + 3 addr & 1 data bytes */ -#define SFLASH_ACT_OP3A4D 0x0400 /* opcode + 3 addr & 4 data bytes */ -#define SFLASH_ACT_OP3A4X4D 0x0500 /* opcode + 3 addr, 4 don't care & 4 data 
bytes */ -#define SFLASH_ACT_OP3A1X4D 0x0700 /* opcode + 3 addr, 1 don't care & 4 data bytes */ +#define SFLASH_ACT_OPONLY 0x0000 /**< Issue opcode only */ +#define SFLASH_ACT_OP1D 0x0100 /**< opcode + 1 data byte */ +#define SFLASH_ACT_OP3A 0x0200 /**< opcode + 3 addr bytes */ +#define SFLASH_ACT_OP3A1D 0x0300 /**< opcode + 3 addr & 1 data bytes */ +#define SFLASH_ACT_OP3A4D 0x0400 /**< opcode + 3 addr & 4 data bytes */ +#define SFLASH_ACT_OP3A4X4D 0x0500 /**< opcode + 3 addr, 4 don't care & 4 data bytes */ +#define SFLASH_ACT_OP3A1X4D 0x0700 /**< opcode + 3 addr, 1 don't care & 4 data bytes */ /* flashcontrol action+opcodes for ST flashes */ -#define SFLASH_ST_WREN 0x0006 /* Write Enable */ -#define SFLASH_ST_WRDIS 0x0004 /* Write Disable */ -#define SFLASH_ST_RDSR 0x0105 /* Read Status Register */ -#define SFLASH_ST_WRSR 0x0101 /* Write Status Register */ -#define SFLASH_ST_READ 0x0303 /* Read Data Bytes */ -#define SFLASH_ST_PP 0x0302 /* Page Program */ -#define SFLASH_ST_SE 0x02d8 /* Sector Erase */ -#define SFLASH_ST_BE 0x00c7 /* Bulk Erase */ -#define SFLASH_ST_DP 0x00b9 /* Deep Power-down */ -#define SFLASH_ST_RES 0x03ab /* Read Electronic Signature */ -#define SFLASH_ST_CSA 0x1000 /* Keep chip select asserted */ -#define SFLASH_ST_SSE 0x0220 /* Sub-sector Erase */ - -#define SFLASH_MXIC_RDID 0x0390 /* Read Manufacture ID */ -#define SFLASH_MXIC_MFID 0xc2 /* MXIC Manufacture ID */ +#define SFLASH_ST_WREN 0x0006 /**< Write Enable */ +#define SFLASH_ST_WRDIS 0x0004 /**< Write Disable */ +#define SFLASH_ST_RDSR 0x0105 /**< Read Status Register */ +#define SFLASH_ST_WRSR 0x0101 /**< Write Status Register */ +#define SFLASH_ST_READ 0x0303 /**< Read Data Bytes */ +#define SFLASH_ST_PP 0x0302 /**< Page Program */ +#define SFLASH_ST_SE 0x02d8 /**< Sector Erase */ +#define SFLASH_ST_BE 0x00c7 /**< Bulk Erase */ +#define SFLASH_ST_DP 0x00b9 /**< Deep Power-down */ +#define SFLASH_ST_RES 0x03ab /**< Read Electronic Signature */ +#define SFLASH_ST_CSA 0x1000 /**< Keep 
chip select asserted */ +#define SFLASH_ST_SSE 0x0220 /**< Sub-sector Erase */ + +#define SFLASH_MXIC_RDID 0x0390 /**< Read Manufacture ID */ +#define SFLASH_MXIC_MFID 0xc2 /**< MXIC Manufacture ID */ /* Status register bits for ST flashes */ -#define SFLASH_ST_WIP 0x01 /* Write In Progress */ -#define SFLASH_ST_WEL 0x02 /* Write Enable Latch */ -#define SFLASH_ST_BP_MASK 0x1c /* Block Protect */ +#define SFLASH_ST_WIP 0x01 /**< Write In Progress */ +#define SFLASH_ST_WEL 0x02 /**< Write Enable Latch */ +#define SFLASH_ST_BP_MASK 0x1c /**< Block Protect */ #define SFLASH_ST_BP_SHIFT 2 -#define SFLASH_ST_SRWD 0x80 /* Status Register Write Disable */ +#define SFLASH_ST_SRWD 0x80 /**< Status Register Write Disable */ /* flashcontrol action+opcodes for Atmel flashes */ #define SFLASH_AT_READ 0x07e8 @@ -1217,48 +1285,48 @@ typedef volatile struct { * a 8250, 16450, or 16550(A). */ -#define UART_RX 0 /* In: Receive buffer (DLAB=0) */ -#define UART_TX 0 /* Out: Transmit buffer (DLAB=0) */ -#define UART_DLL 0 /* Out: Divisor Latch Low (DLAB=1) */ -#define UART_IER 1 /* In/Out: Interrupt Enable Register (DLAB=0) */ -#define UART_DLM 1 /* Out: Divisor Latch High (DLAB=1) */ -#define UART_IIR 2 /* In: Interrupt Identity Register */ -#define UART_FCR 2 /* Out: FIFO Control Register */ -#define UART_LCR 3 /* Out: Line Control Register */ -#define UART_MCR 4 /* Out: Modem Control Register */ -#define UART_LSR 5 /* In: Line Status Register */ -#define UART_MSR 6 /* In: Modem Status Register */ -#define UART_SCR 7 /* I/O: Scratch Register */ -#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */ -#define UART_LCR_WLEN8 0x03 /* Word length: 8 bits */ -#define UART_MCR_OUT2 0x08 /* MCR GPIO out 2 */ -#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */ -#define UART_LSR_RX_FIFO 0x80 /* Receive FIFO error */ -#define UART_LSR_TDHR 0x40 /* Data-hold-register empty */ -#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */ -#define UART_LSR_BREAK 0x10 /* Break 
interrupt */ -#define UART_LSR_FRAMING 0x08 /* Framing error */ -#define UART_LSR_PARITY 0x04 /* Parity error */ -#define UART_LSR_OVERRUN 0x02 /* Overrun error */ -#define UART_LSR_RXRDY 0x01 /* Receiver ready */ -#define UART_FCR_FIFO_ENABLE 1 /* FIFO control register bit controlling FIFO enable/disable */ +#define UART_RX 0 /**< In: Receive buffer (DLAB=0) */ +#define UART_TX 0 /**< Out: Transmit buffer (DLAB=0) */ +#define UART_DLL 0 /**< Out: Divisor Latch Low (DLAB=1) */ +#define UART_IER 1 /**< In/Out: Interrupt Enable Register (DLAB=0) */ +#define UART_DLM 1 /**< Out: Divisor Latch High (DLAB=1) */ +#define UART_IIR 2 /**< In: Interrupt Identity Register */ +#define UART_FCR 2 /**< Out: FIFO Control Register */ +#define UART_LCR 3 /**< Out: Line Control Register */ +#define UART_MCR 4 /**< Out: Modem Control Register */ +#define UART_LSR 5 /**< In: Line Status Register */ +#define UART_MSR 6 /**< In: Modem Status Register */ +#define UART_SCR 7 /**< I/O: Scratch Register */ +#define UART_LCR_DLAB 0x80 /**< Divisor latch access bit */ +#define UART_LCR_WLEN8 0x03 /**< Word length: 8 bits */ +#define UART_MCR_OUT2 0x08 /**< MCR GPIO out 2 */ +#define UART_MCR_LOOP 0x10 /**< Enable loopback test mode */ +#define UART_LSR_RX_FIFO 0x80 /**< Receive FIFO error */ +#define UART_LSR_TDHR 0x40 /**< Data-hold-register empty */ +#define UART_LSR_THRE 0x20 /**< Transmit-hold-register empty */ +#define UART_LSR_BREAK 0x10 /**< Break interrupt */ +#define UART_LSR_FRAMING 0x08 /**< Framing error */ +#define UART_LSR_PARITY 0x04 /**< Parity error */ +#define UART_LSR_OVERRUN 0x02 /**< Overrun error */ +#define UART_LSR_RXRDY 0x01 /**< Receiver ready */ +#define UART_FCR_FIFO_ENABLE 1 /**< FIFO control register bit controlling FIFO enable/disable */ /* Interrupt Identity Register (IIR) bits */ -#define UART_IIR_FIFO_MASK 0xc0 /* IIR FIFO disable/enabled mask */ -#define UART_IIR_INT_MASK 0xf /* IIR interrupt ID source */ -#define UART_IIR_MDM_CHG 0x0 /* Modem status 
changed */ -#define UART_IIR_NOINT 0x1 /* No interrupt pending */ -#define UART_IIR_THRE 0x2 /* THR empty */ -#define UART_IIR_RCVD_DATA 0x4 /* Received data available */ -#define UART_IIR_RCVR_STATUS 0x6 /* Receiver status */ -#define UART_IIR_CHAR_TIME 0xc /* Character time */ +#define UART_IIR_FIFO_MASK 0xc0 /**< IIR FIFO disable/enabled mask */ +#define UART_IIR_INT_MASK 0xf /**< IIR interrupt ID source */ +#define UART_IIR_MDM_CHG 0x0 /**< Modem status changed */ +#define UART_IIR_NOINT 0x1 /**< No interrupt pending */ +#define UART_IIR_THRE 0x2 /**< THR empty */ +#define UART_IIR_RCVD_DATA 0x4 /**< Received data available */ +#define UART_IIR_RCVR_STATUS 0x6 /**< Receiver status */ +#define UART_IIR_CHAR_TIME 0xc /**< Character time */ /* Interrupt Enable Register (IER) bits */ -#define UART_IER_PTIME 128 /* Programmable THRE Interrupt Mode Enable */ -#define UART_IER_EDSSI 8 /* enable modem status interrupt */ -#define UART_IER_ELSI 4 /* enable receiver line status interrupt */ -#define UART_IER_ETBEI 2 /* enable transmitter holding register empty interrupt */ -#define UART_IER_ERBFI 1 /* enable data available interrupt */ +#define UART_IER_PTIME 128 /**< Programmable THRE Interrupt Mode Enable */ +#define UART_IER_EDSSI 8 /**< enable modem status interrupt */ +#define UART_IER_ELSI 4 /**< enable receiver line status interrupt */ +#define UART_IER_ETBEI 2 /**< enable transmitter holding register empty interrupt */ +#define UART_IER_ERBFI 1 /**< enable data available interrupt */ /* pmustatus */ #define PST_SLOW_WR_PENDING 0x0400 @@ -1272,6 +1340,7 @@ typedef volatile struct { #define PST_ALPAVAIL 0x0008 #define PST_HTAVAIL 0x0004 #define PST_RESINIT 0x0003 +#define PST_ILPFASTLPO 0x00010000 /* pmucapabilities */ #define PCAP_REV_MASK 0x000000ff @@ -1285,7 +1354,7 @@ typedef volatile struct { #define PCAP_VC_SHIFT 21 #define PCAP_CC_MASK 0x1e000000 #define PCAP_CC_SHIFT 25 -#define PCAP5_PC_MASK 0x003e0000 /* PMU corerev >= 5 */ +#define PCAP5_PC_MASK 
0x003e0000 /**< PMU corerev >= 5 */ #define PCAP5_PC_SHIFT 17 #define PCAP5_VC_MASK 0x07c00000 #define PCAP5_VC_SHIFT 22 @@ -1301,6 +1370,11 @@ typedef volatile struct { #define PRRT_HT_REQ 0x2000 #define PRRT_HQ_REQ 0x4000 +/* PMU Int Control register bits */ +#define PMU_INTC_ALP_REQ 0x1 +#define PMU_INTC_HT_REQ 0x2 +#define PMU_INTC_HQ_REQ 0x4 + /* bit 0 of the PMU interrupt vector is asserted if this mask is enabled */ #define RSRC_INTR_MASK_TIMER_INT_0 1 @@ -1337,12 +1411,18 @@ typedef volatile struct { #define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080 #define PMU_CC1_SW_TYPE_RGMII 0x000000c0 +#define PMU_CC1_ENABLE_CLOSED_LOOP_MASK 0x00000080 +#define PMU_CC1_ENABLE_CLOSED_LOOP 0x00000000 + /* PMU chip control2 register */ #define PMU_CHIPCTL2 2 #define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON (1 << 18) #define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON (1 << 19) #define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON (1 << 20) #define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON (1 << 21) +#define PMU_CC2_MASK_WL_DEV_WAKE (1 << 22) +#define PMU_CC2_INV_GPIO_POLARITY_PMU_WAKE (1 << 25) + /* PMU chip control3 register */ #define PMU_CHIPCTL3 3 @@ -1350,6 +1430,21 @@ typedef volatile struct { #define PMU_CC3_ENABLE_RF_SHIFT 22 #define PMU_CC3_RF_DISABLE_IVALUE_SHIFT 23 +/* PMU chip control4 register */ +#define PMU_CHIPCTL4 4 + +/* 53537 series moved switch_type and gmac_if_type to CC4 [15:14] and [13:12] */ +#define PMU_CC4_IF_TYPE_MASK 0x00003000 +#define PMU_CC4_IF_TYPE_RMII 0x00000000 +#define PMU_CC4_IF_TYPE_MII 0x00001000 +#define PMU_CC4_IF_TYPE_RGMII 0x00002000 + +#define PMU_CC4_SW_TYPE_MASK 0x0000c000 +#define PMU_CC4_SW_TYPE_EPHY 0x00000000 +#define PMU_CC4_SW_TYPE_EPHYMII 0x00004000 +#define PMU_CC4_SW_TYPE_EPHYRMII 0x00008000 +#define PMU_CC4_SW_TYPE_RGMII 0x0000c000 + /* PMU chip control5 register */ #define PMU_CHIPCTL5 5 @@ -1362,6 +1457,11 @@ typedef volatile struct { #define PMU_CHIPCTL7 7 #define PMU_CC7_ENABLE_L2REFCLKPAD_PWRDWN (1 << 25) #define PMU_CC7_ENABLE_MDIO_RESET_WAR (1 << 27) 
+/* 53537 series have gmca1 gmac_if_type in cc7 [7:6](defalut 0b01) */ +#define PMU_CC7_IF_TYPE_MASK 0x000000c0 +#define PMU_CC7_IF_TYPE_RMII 0x00000000 +#define PMU_CC7_IF_TYPE_MII 0x00000040 +#define PMU_CC7_IF_TYPE_RGMII 0x00000080 /* PMU corerev and chip specific PLL controls. @@ -1441,7 +1541,7 @@ typedef volatile struct { #define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000 #define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17 #define PMU1_PLL0_PC2_NDIV_MODE_MASH 1 -#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2 /* recommended for 4319 */ +#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2 /**< recommended for 4319 */ #define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000 #define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20 @@ -1460,9 +1560,10 @@ typedef volatile struct { #define PMU1_PLL0_PLLCTL6 6 #define PMU1_PLL0_PLLCTL7 7 - #define PMU1_PLL0_PLLCTL8 8 -#define PMU1_PLLCTL8_OPENLOOP_MASK 0x2 + +#define PMU1_PLLCTL8_OPENLOOP_MASK (1 << 1) +#define PMU_PLL4350_OPENLOOP_MASK (1 << 7) /* PMU rev 2 control words */ #define PMU2_PHY_PLL_PLLCTL 4 @@ -1558,7 +1659,7 @@ typedef volatile struct { /* 4706 PMU */ #define PMU4706_MAINPLL_PLL0 0 -#define PMU6_4706_PROCPLL_OFF 4 /* The CPU PLL */ +#define PMU6_4706_PROCPLL_OFF 4 /**< The CPU PLL */ #define PMU6_4706_PROC_P2DIV_MASK 0x000f0000 #define PMU6_4706_PROC_P2DIV_SHIFT 16 #define PMU6_4706_PROC_P1DIV_MASK 0x0000f000 @@ -1691,9 +1792,9 @@ typedef volatile struct { #define PMU15_FREQTGT_480_DEFAULT 0x19AB1 #define PMU15_FREQTGT_492_DEFAULT 0x1A4F5 -#define PMU15_ARM_96MHZ 96000000 /* 96 Mhz */ -#define PMU15_ARM_98MHZ 98400000 /* 98.4 Mhz */ -#define PMU15_ARM_97MHZ 97000000 /* 97 Mhz */ +#define PMU15_ARM_96MHZ 96000000 /**< 96 Mhz */ +#define PMU15_ARM_98MHZ 98400000 /**< 98.4 Mhz */ +#define PMU15_ARM_97MHZ 97000000 /**< 97 Mhz */ #define PMU17_PLLCTL2_NDIVTYPE_MASK 0x00000070 @@ -1737,26 +1838,26 @@ typedef volatile struct { #define CCTRL_5357_I2CSPI_PINS_ENABLE 0x00080000 /* I2C/SPI pins enable */ /* 5354 resources */ -#define RES5354_EXT_SWITCHER_PWM 0 /* 0x00001 */ 
-#define RES5354_BB_SWITCHER_PWM 1 /* 0x00002 */ -#define RES5354_BB_SWITCHER_BURST 2 /* 0x00004 */ -#define RES5354_BB_EXT_SWITCHER_BURST 3 /* 0x00008 */ -#define RES5354_ILP_REQUEST 4 /* 0x00010 */ -#define RES5354_RADIO_SWITCHER_PWM 5 /* 0x00020 */ -#define RES5354_RADIO_SWITCHER_BURST 6 /* 0x00040 */ -#define RES5354_ROM_SWITCH 7 /* 0x00080 */ -#define RES5354_PA_REF_LDO 8 /* 0x00100 */ -#define RES5354_RADIO_LDO 9 /* 0x00200 */ -#define RES5354_AFE_LDO 10 /* 0x00400 */ -#define RES5354_PLL_LDO 11 /* 0x00800 */ -#define RES5354_BG_FILTBYP 12 /* 0x01000 */ -#define RES5354_TX_FILTBYP 13 /* 0x02000 */ -#define RES5354_RX_FILTBYP 14 /* 0x04000 */ -#define RES5354_XTAL_PU 15 /* 0x08000 */ -#define RES5354_XTAL_EN 16 /* 0x10000 */ -#define RES5354_BB_PLL_FILTBYP 17 /* 0x20000 */ -#define RES5354_RF_PLL_FILTBYP 18 /* 0x40000 */ -#define RES5354_BB_PLL_PU 19 /* 0x80000 */ +#define RES5354_EXT_SWITCHER_PWM 0 /**< 0x00001 */ +#define RES5354_BB_SWITCHER_PWM 1 /**< 0x00002 */ +#define RES5354_BB_SWITCHER_BURST 2 /**< 0x00004 */ +#define RES5354_BB_EXT_SWITCHER_BURST 3 /**< 0x00008 */ +#define RES5354_ILP_REQUEST 4 /**< 0x00010 */ +#define RES5354_RADIO_SWITCHER_PWM 5 /**< 0x00020 */ +#define RES5354_RADIO_SWITCHER_BURST 6 /**< 0x00040 */ +#define RES5354_ROM_SWITCH 7 /**< 0x00080 */ +#define RES5354_PA_REF_LDO 8 /**< 0x00100 */ +#define RES5354_RADIO_LDO 9 /**< 0x00200 */ +#define RES5354_AFE_LDO 10 /**< 0x00400 */ +#define RES5354_PLL_LDO 11 /**< 0x00800 */ +#define RES5354_BG_FILTBYP 12 /**< 0x01000 */ +#define RES5354_TX_FILTBYP 13 /**< 0x02000 */ +#define RES5354_RX_FILTBYP 14 /**< 0x04000 */ +#define RES5354_XTAL_PU 15 /**< 0x08000 */ +#define RES5354_XTAL_EN 16 /**< 0x10000 */ +#define RES5354_BB_PLL_FILTBYP 17 /**< 0x20000 */ +#define RES5354_RF_PLL_FILTBYP 18 /**< 0x40000 */ +#define RES5354_BB_PLL_PU 19 /**< 0x80000 */ /* 5357 Chip specific ChipControl register bits */ #define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */ @@ -1772,129 +1873,129 @@ 
typedef volatile struct { #define CCTRL43228_EXTPA_C1 (1<<9) /* core0 extPA in ChipControl 1, bit 1 */ /* 4328 resources */ -#define RES4328_EXT_SWITCHER_PWM 0 /* 0x00001 */ -#define RES4328_BB_SWITCHER_PWM 1 /* 0x00002 */ -#define RES4328_BB_SWITCHER_BURST 2 /* 0x00004 */ -#define RES4328_BB_EXT_SWITCHER_BURST 3 /* 0x00008 */ -#define RES4328_ILP_REQUEST 4 /* 0x00010 */ -#define RES4328_RADIO_SWITCHER_PWM 5 /* 0x00020 */ -#define RES4328_RADIO_SWITCHER_BURST 6 /* 0x00040 */ -#define RES4328_ROM_SWITCH 7 /* 0x00080 */ -#define RES4328_PA_REF_LDO 8 /* 0x00100 */ -#define RES4328_RADIO_LDO 9 /* 0x00200 */ -#define RES4328_AFE_LDO 10 /* 0x00400 */ -#define RES4328_PLL_LDO 11 /* 0x00800 */ -#define RES4328_BG_FILTBYP 12 /* 0x01000 */ -#define RES4328_TX_FILTBYP 13 /* 0x02000 */ -#define RES4328_RX_FILTBYP 14 /* 0x04000 */ -#define RES4328_XTAL_PU 15 /* 0x08000 */ -#define RES4328_XTAL_EN 16 /* 0x10000 */ -#define RES4328_BB_PLL_FILTBYP 17 /* 0x20000 */ -#define RES4328_RF_PLL_FILTBYP 18 /* 0x40000 */ -#define RES4328_BB_PLL_PU 19 /* 0x80000 */ +#define RES4328_EXT_SWITCHER_PWM 0 /**< 0x00001 */ +#define RES4328_BB_SWITCHER_PWM 1 /**< 0x00002 */ +#define RES4328_BB_SWITCHER_BURST 2 /**< 0x00004 */ +#define RES4328_BB_EXT_SWITCHER_BURST 3 /**< 0x00008 */ +#define RES4328_ILP_REQUEST 4 /**< 0x00010 */ +#define RES4328_RADIO_SWITCHER_PWM 5 /**< 0x00020 */ +#define RES4328_RADIO_SWITCHER_BURST 6 /**< 0x00040 */ +#define RES4328_ROM_SWITCH 7 /**< 0x00080 */ +#define RES4328_PA_REF_LDO 8 /**< 0x00100 */ +#define RES4328_RADIO_LDO 9 /**< 0x00200 */ +#define RES4328_AFE_LDO 10 /**< 0x00400 */ +#define RES4328_PLL_LDO 11 /**< 0x00800 */ +#define RES4328_BG_FILTBYP 12 /**< 0x01000 */ +#define RES4328_TX_FILTBYP 13 /**< 0x02000 */ +#define RES4328_RX_FILTBYP 14 /**< 0x04000 */ +#define RES4328_XTAL_PU 15 /**< 0x08000 */ +#define RES4328_XTAL_EN 16 /**< 0x10000 */ +#define RES4328_BB_PLL_FILTBYP 17 /**< 0x20000 */ +#define RES4328_RF_PLL_FILTBYP 18 /**< 0x40000 */ +#define 
RES4328_BB_PLL_PU 19 /**< 0x80000 */ /* 4325 A0/A1 resources */ -#define RES4325_BUCK_BOOST_BURST 0 /* 0x00000001 */ -#define RES4325_CBUCK_BURST 1 /* 0x00000002 */ -#define RES4325_CBUCK_PWM 2 /* 0x00000004 */ -#define RES4325_CLDO_CBUCK_BURST 3 /* 0x00000008 */ -#define RES4325_CLDO_CBUCK_PWM 4 /* 0x00000010 */ -#define RES4325_BUCK_BOOST_PWM 5 /* 0x00000020 */ -#define RES4325_ILP_REQUEST 6 /* 0x00000040 */ -#define RES4325_ABUCK_BURST 7 /* 0x00000080 */ -#define RES4325_ABUCK_PWM 8 /* 0x00000100 */ -#define RES4325_LNLDO1_PU 9 /* 0x00000200 */ -#define RES4325_OTP_PU 10 /* 0x00000400 */ -#define RES4325_LNLDO3_PU 11 /* 0x00000800 */ -#define RES4325_LNLDO4_PU 12 /* 0x00001000 */ -#define RES4325_XTAL_PU 13 /* 0x00002000 */ -#define RES4325_ALP_AVAIL 14 /* 0x00004000 */ -#define RES4325_RX_PWRSW_PU 15 /* 0x00008000 */ -#define RES4325_TX_PWRSW_PU 16 /* 0x00010000 */ -#define RES4325_RFPLL_PWRSW_PU 17 /* 0x00020000 */ -#define RES4325_LOGEN_PWRSW_PU 18 /* 0x00040000 */ -#define RES4325_AFE_PWRSW_PU 19 /* 0x00080000 */ -#define RES4325_BBPLL_PWRSW_PU 20 /* 0x00100000 */ -#define RES4325_HT_AVAIL 21 /* 0x00200000 */ +#define RES4325_BUCK_BOOST_BURST 0 /**< 0x00000001 */ +#define RES4325_CBUCK_BURST 1 /**< 0x00000002 */ +#define RES4325_CBUCK_PWM 2 /**< 0x00000004 */ +#define RES4325_CLDO_CBUCK_BURST 3 /**< 0x00000008 */ +#define RES4325_CLDO_CBUCK_PWM 4 /**< 0x00000010 */ +#define RES4325_BUCK_BOOST_PWM 5 /**< 0x00000020 */ +#define RES4325_ILP_REQUEST 6 /**< 0x00000040 */ +#define RES4325_ABUCK_BURST 7 /**< 0x00000080 */ +#define RES4325_ABUCK_PWM 8 /**< 0x00000100 */ +#define RES4325_LNLDO1_PU 9 /**< 0x00000200 */ +#define RES4325_OTP_PU 10 /**< 0x00000400 */ +#define RES4325_LNLDO3_PU 11 /**< 0x00000800 */ +#define RES4325_LNLDO4_PU 12 /**< 0x00001000 */ +#define RES4325_XTAL_PU 13 /**< 0x00002000 */ +#define RES4325_ALP_AVAIL 14 /**< 0x00004000 */ +#define RES4325_RX_PWRSW_PU 15 /**< 0x00008000 */ +#define RES4325_TX_PWRSW_PU 16 /**< 0x00010000 */ +#define 
RES4325_RFPLL_PWRSW_PU 17 /**< 0x00020000 */ +#define RES4325_LOGEN_PWRSW_PU 18 /**< 0x00040000 */ +#define RES4325_AFE_PWRSW_PU 19 /**< 0x00080000 */ +#define RES4325_BBPLL_PWRSW_PU 20 /**< 0x00100000 */ +#define RES4325_HT_AVAIL 21 /**< 0x00200000 */ /* 4325 B0/C0 resources */ -#define RES4325B0_CBUCK_LPOM 1 /* 0x00000002 */ -#define RES4325B0_CBUCK_BURST 2 /* 0x00000004 */ -#define RES4325B0_CBUCK_PWM 3 /* 0x00000008 */ -#define RES4325B0_CLDO_PU 4 /* 0x00000010 */ +#define RES4325B0_CBUCK_LPOM 1 /**< 0x00000002 */ +#define RES4325B0_CBUCK_BURST 2 /**< 0x00000004 */ +#define RES4325B0_CBUCK_PWM 3 /**< 0x00000008 */ +#define RES4325B0_CLDO_PU 4 /**< 0x00000010 */ /* 4325 C1 resources */ -#define RES4325C1_LNLDO2_PU 12 /* 0x00001000 */ +#define RES4325C1_LNLDO2_PU 12 /**< 0x00001000 */ /* 4325 chip-specific ChipStatus register bits */ #define CST4325_SPROM_OTP_SEL_MASK 0x00000003 -#define CST4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */ -#define CST4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */ -#define CST4325_OTP_SEL 2 /* OTP is powered up, no SPROM */ -#define CST4325_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */ +#define CST4325_DEFCIS_SEL 0 /**< OTP is powered up, use def. 
CIS, no SPROM */ +#define CST4325_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */ +#define CST4325_OTP_SEL 2 /**< OTP is powered up, no SPROM */ +#define CST4325_OTP_PWRDN 3 /**< OTP is powered down, SPROM is present */ #define CST4325_SDIO_USB_MODE_MASK 0x00000004 #define CST4325_SDIO_USB_MODE_SHIFT 2 #define CST4325_RCAL_VALID_MASK 0x00000008 #define CST4325_RCAL_VALID_SHIFT 3 #define CST4325_RCAL_VALUE_MASK 0x000001f0 #define CST4325_RCAL_VALUE_SHIFT 4 -#define CST4325_PMUTOP_2B_MASK 0x00000200 /* 1 for 2b, 0 for to 2a */ +#define CST4325_PMUTOP_2B_MASK 0x00000200 /**< 1 for 2b, 0 for to 2a */ #define CST4325_PMUTOP_2B_SHIFT 9 -#define RES4329_RESERVED0 0 /* 0x00000001 */ -#define RES4329_CBUCK_LPOM 1 /* 0x00000002 */ -#define RES4329_CBUCK_BURST 2 /* 0x00000004 */ -#define RES4329_CBUCK_PWM 3 /* 0x00000008 */ -#define RES4329_CLDO_PU 4 /* 0x00000010 */ -#define RES4329_PALDO_PU 5 /* 0x00000020 */ -#define RES4329_ILP_REQUEST 6 /* 0x00000040 */ -#define RES4329_RESERVED7 7 /* 0x00000080 */ -#define RES4329_RESERVED8 8 /* 0x00000100 */ -#define RES4329_LNLDO1_PU 9 /* 0x00000200 */ -#define RES4329_OTP_PU 10 /* 0x00000400 */ -#define RES4329_RESERVED11 11 /* 0x00000800 */ -#define RES4329_LNLDO2_PU 12 /* 0x00001000 */ -#define RES4329_XTAL_PU 13 /* 0x00002000 */ -#define RES4329_ALP_AVAIL 14 /* 0x00004000 */ -#define RES4329_RX_PWRSW_PU 15 /* 0x00008000 */ -#define RES4329_TX_PWRSW_PU 16 /* 0x00010000 */ -#define RES4329_RFPLL_PWRSW_PU 17 /* 0x00020000 */ -#define RES4329_LOGEN_PWRSW_PU 18 /* 0x00040000 */ -#define RES4329_AFE_PWRSW_PU 19 /* 0x00080000 */ -#define RES4329_BBPLL_PWRSW_PU 20 /* 0x00100000 */ -#define RES4329_HT_AVAIL 21 /* 0x00200000 */ +#define RES4329_RESERVED0 0 /**< 0x00000001 */ +#define RES4329_CBUCK_LPOM 1 /**< 0x00000002 */ +#define RES4329_CBUCK_BURST 2 /**< 0x00000004 */ +#define RES4329_CBUCK_PWM 3 /**< 0x00000008 */ +#define RES4329_CLDO_PU 4 /**< 0x00000010 */ +#define RES4329_PALDO_PU 5 /**< 0x00000020 */ +#define 
RES4329_ILP_REQUEST 6 /**< 0x00000040 */ +#define RES4329_RESERVED7 7 /**< 0x00000080 */ +#define RES4329_RESERVED8 8 /**< 0x00000100 */ +#define RES4329_LNLDO1_PU 9 /**< 0x00000200 */ +#define RES4329_OTP_PU 10 /**< 0x00000400 */ +#define RES4329_RESERVED11 11 /**< 0x00000800 */ +#define RES4329_LNLDO2_PU 12 /**< 0x00001000 */ +#define RES4329_XTAL_PU 13 /**< 0x00002000 */ +#define RES4329_ALP_AVAIL 14 /**< 0x00004000 */ +#define RES4329_RX_PWRSW_PU 15 /**< 0x00008000 */ +#define RES4329_TX_PWRSW_PU 16 /**< 0x00010000 */ +#define RES4329_RFPLL_PWRSW_PU 17 /**< 0x00020000 */ +#define RES4329_LOGEN_PWRSW_PU 18 /**< 0x00040000 */ +#define RES4329_AFE_PWRSW_PU 19 /**< 0x00080000 */ +#define RES4329_BBPLL_PWRSW_PU 20 /**< 0x00100000 */ +#define RES4329_HT_AVAIL 21 /**< 0x00200000 */ #define CST4329_SPROM_OTP_SEL_MASK 0x00000003 -#define CST4329_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */ -#define CST4329_SPROM_SEL 1 /* OTP is powered up, SPROM is present */ -#define CST4329_OTP_SEL 2 /* OTP is powered up, no SPROM */ -#define CST4329_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */ +#define CST4329_DEFCIS_SEL 0 /**< OTP is powered up, use def. CIS, no SPROM */ +#define CST4329_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */ +#define CST4329_OTP_SEL 2 /**< OTP is powered up, no SPROM */ +#define CST4329_OTP_PWRDN 3 /**< OTP is powered down, SPROM is present */ #define CST4329_SPI_SDIO_MODE_MASK 0x00000004 #define CST4329_SPI_SDIO_MODE_SHIFT 2 /* 4312 chip-specific ChipStatus register bits */ #define CST4312_SPROM_OTP_SEL_MASK 0x00000003 -#define CST4312_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */ -#define CST4312_SPROM_SEL 1 /* OTP is powered up, SPROM is present */ -#define CST4312_OTP_SEL 2 /* OTP is powered up, no SPROM */ -#define CST4312_OTP_BAD 3 /* OTP is broken, SPROM is present */ +#define CST4312_DEFCIS_SEL 0 /**< OTP is powered up, use def. 
CIS, no SPROM */ +#define CST4312_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */ +#define CST4312_OTP_SEL 2 /**< OTP is powered up, no SPROM */ +#define CST4312_OTP_BAD 3 /**< OTP is broken, SPROM is present */ /* 4312 resources (all PMU chips with little memory constraint) */ -#define RES4312_SWITCHER_BURST 0 /* 0x00000001 */ -#define RES4312_SWITCHER_PWM 1 /* 0x00000002 */ -#define RES4312_PA_REF_LDO 2 /* 0x00000004 */ -#define RES4312_CORE_LDO_BURST 3 /* 0x00000008 */ -#define RES4312_CORE_LDO_PWM 4 /* 0x00000010 */ -#define RES4312_RADIO_LDO 5 /* 0x00000020 */ -#define RES4312_ILP_REQUEST 6 /* 0x00000040 */ -#define RES4312_BG_FILTBYP 7 /* 0x00000080 */ -#define RES4312_TX_FILTBYP 8 /* 0x00000100 */ -#define RES4312_RX_FILTBYP 9 /* 0x00000200 */ -#define RES4312_XTAL_PU 10 /* 0x00000400 */ -#define RES4312_ALP_AVAIL 11 /* 0x00000800 */ -#define RES4312_BB_PLL_FILTBYP 12 /* 0x00001000 */ -#define RES4312_RF_PLL_FILTBYP 13 /* 0x00002000 */ -#define RES4312_HT_AVAIL 14 /* 0x00004000 */ +#define RES4312_SWITCHER_BURST 0 /**< 0x00000001 */ +#define RES4312_SWITCHER_PWM 1 /**< 0x00000002 */ +#define RES4312_PA_REF_LDO 2 /**< 0x00000004 */ +#define RES4312_CORE_LDO_BURST 3 /**< 0x00000008 */ +#define RES4312_CORE_LDO_PWM 4 /**< 0x00000010 */ +#define RES4312_RADIO_LDO 5 /**< 0x00000020 */ +#define RES4312_ILP_REQUEST 6 /**< 0x00000040 */ +#define RES4312_BG_FILTBYP 7 /**< 0x00000080 */ +#define RES4312_TX_FILTBYP 8 /**< 0x00000100 */ +#define RES4312_RX_FILTBYP 9 /**< 0x00000200 */ +#define RES4312_XTAL_PU 10 /**< 0x00000400 */ +#define RES4312_ALP_AVAIL 11 /**< 0x00000800 */ +#define RES4312_BB_PLL_FILTBYP 12 /**< 0x00001000 */ +#define RES4312_RF_PLL_FILTBYP 13 /**< 0x00002000 */ +#define RES4312_HT_AVAIL 14 /**< 0x00004000 */ /* 4322 resources */ #define RES4322_RF_LDO 0 @@ -1911,28 +2012,28 @@ typedef volatile struct { #define CST4322_XTAL_FREQ_20_40MHZ 0x00000020 #define CST4322_SPROM_OTP_SEL_MASK 0x000000c0 #define CST4322_SPROM_OTP_SEL_SHIFT 6 -#define 
CST4322_NO_SPROM_OTP 0 /* no OTP, no SPROM */ -#define CST4322_SPROM_PRESENT 1 /* SPROM is present */ -#define CST4322_OTP_PRESENT 2 /* OTP is present */ +#define CST4322_NO_SPROM_OTP 0 /**< no OTP, no SPROM */ +#define CST4322_SPROM_PRESENT 1 /**< SPROM is present */ +#define CST4322_OTP_PRESENT 2 /**< OTP is present */ #define CST4322_PCI_OR_USB 0x00000100 #define CST4322_BOOT_MASK 0x00000600 #define CST4322_BOOT_SHIFT 9 -#define CST4322_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */ -#define CST4322_BOOT_FROM_ROM 1 /* boot from ROM */ -#define CST4322_BOOT_FROM_FLASH 2 /* boot from FLASH */ +#define CST4322_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */ +#define CST4322_BOOT_FROM_ROM 1 /**< boot from ROM */ +#define CST4322_BOOT_FROM_FLASH 2 /**< boot from FLASH */ #define CST4322_BOOT_FROM_INVALID 3 #define CST4322_ILP_DIV_EN 0x00000800 #define CST4322_FLASH_TYPE_MASK 0x00001000 #define CST4322_FLASH_TYPE_SHIFT 12 -#define CST4322_FLASH_TYPE_SHIFT_ST 0 /* ST serial FLASH */ -#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1 /* ATMEL flash */ +#define CST4322_FLASH_TYPE_SHIFT_ST 0 /**< ST serial FLASH */ +#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1 /**< ATMEL flash */ #define CST4322_ARM_TAP_SEL 0x00002000 #define CST4322_RES_INIT_MODE_MASK 0x0000c000 #define CST4322_RES_INIT_MODE_SHIFT 14 -#define CST4322_RES_INIT_MODE_ILPAVAIL 0 /* resinitmode: ILP available */ -#define CST4322_RES_INIT_MODE_ILPREQ 1 /* resinitmode: ILP request */ -#define CST4322_RES_INIT_MODE_ALPAVAIL 2 /* resinitmode: ALP available */ -#define CST4322_RES_INIT_MODE_HTAVAIL 3 /* resinitmode: HT available */ +#define CST4322_RES_INIT_MODE_ILPAVAIL 0 /**< resinitmode: ILP available */ +#define CST4322_RES_INIT_MODE_ILPREQ 1 /**< resinitmode: ILP request */ +#define CST4322_RES_INIT_MODE_ALPAVAIL 2 /**< resinitmode: ALP available */ +#define CST4322_RES_INIT_MODE_HTAVAIL 3 /**< resinitmode: HT available */ #define CST4322_PCIPLLCLK_GATING 0x00010000 #define CST4322_CLK_SWITCH_PCI_TO_ALP 
0x00020000 #define CST4322_PCI_CARDBUS_MODE 0x00040000 @@ -1951,23 +2052,23 @@ typedef volatile struct { #define RES43236_HT_SI_AVAIL 5 /* 43236 chip-specific ChipControl register bits */ -#define CCTRL43236_BT_COEXIST (1<<0) /* 0 disable */ -#define CCTRL43236_SECI (1<<1) /* 0 SECI is disabled (JATG functional) */ -#define CCTRL43236_EXT_LNA (1<<2) /* 0 disable */ -#define CCTRL43236_ANT_MUX_2o3 (1<<3) /* 2o3 mux, chipcontrol bit 3 */ -#define CCTRL43236_GSIO (1<<4) /* 0 disable */ +#define CCTRL43236_BT_COEXIST (1<<0) /**< 0 disable */ +#define CCTRL43236_SECI (1<<1) /**< 0 SECI is disabled (JATG functional) */ +#define CCTRL43236_EXT_LNA (1<<2) /**< 0 disable */ +#define CCTRL43236_ANT_MUX_2o3 (1<<3) /**< 2o3 mux, chipcontrol bit 3 */ +#define CCTRL43236_GSIO (1<<4) /**< 0 disable */ /* 43236 Chip specific ChipStatus register bits */ #define CST43236_SFLASH_MASK 0x00000040 #define CST43236_OTP_SEL_MASK 0x00000080 #define CST43236_OTP_SEL_SHIFT 7 -#define CST43236_HSIC_MASK 0x00000100 /* USB/HSIC */ -#define CST43236_BP_CLK 0x00000200 /* 120/96Mbps */ +#define CST43236_HSIC_MASK 0x00000100 /**< USB/HSIC */ +#define CST43236_BP_CLK 0x00000200 /**< 120/96Mbps */ #define CST43236_BOOT_MASK 0x00001800 #define CST43236_BOOT_SHIFT 11 -#define CST43236_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */ -#define CST43236_BOOT_FROM_ROM 1 /* boot from ROM */ -#define CST43236_BOOT_FROM_FLASH 2 /* boot from FLASH */ +#define CST43236_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */ +#define CST43236_BOOT_FROM_ROM 1 /**< boot from ROM */ +#define CST43236_BOOT_FROM_FLASH 2 /**< boot from FLASH */ #define CST43236_BOOT_FROM_INVALID 3 /* 43237 resources */ @@ -1979,23 +2080,23 @@ typedef volatile struct { #define RES43237_HT_SI_AVAIL 5 /* 43237 chip-specific ChipControl register bits */ -#define CCTRL43237_BT_COEXIST (1<<0) /* 0 disable */ -#define CCTRL43237_SECI (1<<1) /* 0 SECI is disabled (JATG functional) */ -#define CCTRL43237_EXT_LNA (1<<2) /* 0 disable */ 
-#define CCTRL43237_ANT_MUX_2o3 (1<<3) /* 2o3 mux, chipcontrol bit 3 */ -#define CCTRL43237_GSIO (1<<4) /* 0 disable */ +#define CCTRL43237_BT_COEXIST (1<<0) /**< 0 disable */ +#define CCTRL43237_SECI (1<<1) /**< 0 SECI is disabled (JATG functional) */ +#define CCTRL43237_EXT_LNA (1<<2) /**< 0 disable */ +#define CCTRL43237_ANT_MUX_2o3 (1<<3) /**< 2o3 mux, chipcontrol bit 3 */ +#define CCTRL43237_GSIO (1<<4) /**< 0 disable */ /* 43237 Chip specific ChipStatus register bits */ #define CST43237_SFLASH_MASK 0x00000040 #define CST43237_OTP_SEL_MASK 0x00000080 #define CST43237_OTP_SEL_SHIFT 7 -#define CST43237_HSIC_MASK 0x00000100 /* USB/HSIC */ -#define CST43237_BP_CLK 0x00000200 /* 120/96Mbps */ +#define CST43237_HSIC_MASK 0x00000100 /**< USB/HSIC */ +#define CST43237_BP_CLK 0x00000200 /**< 120/96Mbps */ #define CST43237_BOOT_MASK 0x00001800 #define CST43237_BOOT_SHIFT 11 -#define CST43237_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */ -#define CST43237_BOOT_FROM_ROM 1 /* boot from ROM */ -#define CST43237_BOOT_FROM_FLASH 2 /* boot from FLASH */ +#define CST43237_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */ +#define CST43237_BOOT_FROM_ROM 1 /**< boot from ROM */ +#define CST43237_BOOT_FROM_FLASH 2 /**< boot from FLASH */ #define CST43237_BOOT_FROM_INVALID 3 /* 43239 resources */ @@ -2008,10 +2109,10 @@ typedef volatile struct { #define CST43239_SFLASH_MASK 0x00000004 #define CST43239_RES_INIT_MODE_SHIFT 7 #define CST43239_RES_INIT_MODE_MASK 0x000001f0 -#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15)) /* SDIO || gSPI */ -#define CST43239_CHIPMODE_USB20D(cs) (~(cs) & (1 << 15)) /* USB || USBDA */ -#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 0)) == 0) /* SDIO */ -#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0)) /* gSPI */ +#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15)) /**< SDIO || gSPI */ +#define CST43239_CHIPMODE_USB20D(cs) (~(cs) & (1 << 15)) /**< USB || USBDA */ +#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 
0)) == 0) /**< SDIO */ +#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0)) /**< gSPI */ /* 4324 resources */ /* 43242 use same PMU as 4324 */ @@ -2051,8 +2152,8 @@ typedef volatile struct { #define CST4324_RES_INIT_MODE_SHIFT 10 #define CST4324_RES_INIT_MODE_MASK 0x00000c00 #define CST4324_CHIPMODE_MASK 0x7 -#define CST4324_CHIPMODE_SDIOD(cs) ((~(cs)) & (1 << 2)) /* SDIO || gSPI */ -#define CST4324_CHIPMODE_USB20D(cs) (((cs) & CST4324_CHIPMODE_MASK) == 0x6) /* USB || USBDA */ +#define CST4324_CHIPMODE_SDIOD(cs) ((~(cs)) & (1 << 2)) /**< SDIO || gSPI */ +#define CST4324_CHIPMODE_USB20D(cs) (((cs) & CST4324_CHIPMODE_MASK) == 0x6) /**< USB || USBDA */ /* 43242 Chip specific ChipStatus register bits */ #define CST43242_SFLASH_MASK 0x00000008 @@ -2068,26 +2169,26 @@ typedef volatile struct { #define RES4331_HT_SI_AVAIL 5 /* 4331 chip-specific ChipControl register bits */ -#define CCTRL4331_BT_COEXIST (1<<0) /* 0 disable */ -#define CCTRL4331_SECI (1<<1) /* 0 SECI is disabled (JATG functional) */ -#define CCTRL4331_EXT_LNA_G (1<<2) /* 0 disable */ -#define CCTRL4331_SPROM_GPIO13_15 (1<<3) /* sprom/gpio13-15 mux */ -#define CCTRL4331_EXTPA_EN (1<<4) /* 0 ext pa disable, 1 ext pa enabled */ -#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) /* set drive out GPIO_CLK on sprom_cs pin */ -#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) /* use sprom_cs pin as PCIE mdio interface */ +#define CCTRL4331_BT_COEXIST (1<<0) /**< 0 disable */ +#define CCTRL4331_SECI (1<<1) /**< 0 SECI is disabled (JATG functional) */ +#define CCTRL4331_EXT_LNA_G (1<<2) /**< 0 disable */ +#define CCTRL4331_SPROM_GPIO13_15 (1<<3) /**< sprom/gpio13-15 mux */ +#define CCTRL4331_EXTPA_EN (1<<4) /**< 0 ext pa disable, 1 ext pa enabled */ +#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) /**< set drive out GPIO_CLK on sprom_cs pin */ +#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) /**< use sprom_cs pin as PCIE mdio interface */ #define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7) /* aband extpa will be at gpio2/5 
and sprom_dout */ -#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) /* override core control on pipe_AuxClkEnable */ -#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) /* override core control on pipe_AuxPowerDown */ -#define CCTRL4331_PCIE_AUXCLKEN (1<<10) /* pcie_auxclkenable */ -#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) /* pcie_pipe_pllpowerdown */ -#define CCTRL4331_EXTPA_EN2 (1<<12) /* 0 ext pa disable, 1 ext pa enabled */ -#define CCTRL4331_EXT_LNA_A (1<<13) /* 0 disable */ -#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) /* enable bt_shd0 at gpio4 */ -#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) /* enable bt_shd1 at gpio5 */ -#define CCTRL4331_EXTPA_ANA_EN (1<<24) /* 0 ext pa disable, 1 ext pa enabled */ +#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) /**< override core control on pipe_AuxClkEnable */ +#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) /**< override core control on pipe_AuxPowerDown */ +#define CCTRL4331_PCIE_AUXCLKEN (1<<10) /**< pcie_auxclkenable */ +#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) /**< pcie_pipe_pllpowerdown */ +#define CCTRL4331_EXTPA_EN2 (1<<12) /**< 0 ext pa disable, 1 ext pa enabled */ +#define CCTRL4331_EXT_LNA_A (1<<13) /**< 0 disable */ +#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) /**< enable bt_shd0 at gpio4 */ +#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) /**< enable bt_shd1 at gpio5 */ +#define CCTRL4331_EXTPA_ANA_EN (1<<24) /**< 0 ext pa disable, 1 ext pa enabled */ /* 4331 Chip specific ChipStatus register bits */ -#define CST4331_XTAL_FREQ 0x00000001 /* crystal frequency 20/40Mhz */ +#define CST4331_XTAL_FREQ 0x00000001 /**< crystal frequency 20/40Mhz */ #define CST4331_SPROM_OTP_SEL_MASK 0x00000006 #define CST4331_SPROM_OTP_SEL_SHIFT 1 #define CST4331_SPROM_PRESENT 0x00000002 @@ -2096,77 +2197,77 @@ typedef volatile struct { #define CST4331_LDO_PAR 0x00000010 /* 4315 resource */ -#define RES4315_CBUCK_LPOM 1 /* 0x00000002 */ -#define RES4315_CBUCK_BURST 2 /* 0x00000004 */ -#define RES4315_CBUCK_PWM 3 /* 0x00000008 */ -#define RES4315_CLDO_PU 4 
/* 0x00000010 */ -#define RES4315_PALDO_PU 5 /* 0x00000020 */ -#define RES4315_ILP_REQUEST 6 /* 0x00000040 */ -#define RES4315_LNLDO1_PU 9 /* 0x00000200 */ -#define RES4315_OTP_PU 10 /* 0x00000400 */ -#define RES4315_LNLDO2_PU 12 /* 0x00001000 */ -#define RES4315_XTAL_PU 13 /* 0x00002000 */ -#define RES4315_ALP_AVAIL 14 /* 0x00004000 */ -#define RES4315_RX_PWRSW_PU 15 /* 0x00008000 */ -#define RES4315_TX_PWRSW_PU 16 /* 0x00010000 */ -#define RES4315_RFPLL_PWRSW_PU 17 /* 0x00020000 */ -#define RES4315_LOGEN_PWRSW_PU 18 /* 0x00040000 */ -#define RES4315_AFE_PWRSW_PU 19 /* 0x00080000 */ -#define RES4315_BBPLL_PWRSW_PU 20 /* 0x00100000 */ -#define RES4315_HT_AVAIL 21 /* 0x00200000 */ +#define RES4315_CBUCK_LPOM 1 /**< 0x00000002 */ +#define RES4315_CBUCK_BURST 2 /**< 0x00000004 */ +#define RES4315_CBUCK_PWM 3 /**< 0x00000008 */ +#define RES4315_CLDO_PU 4 /**< 0x00000010 */ +#define RES4315_PALDO_PU 5 /**< 0x00000020 */ +#define RES4315_ILP_REQUEST 6 /**< 0x00000040 */ +#define RES4315_LNLDO1_PU 9 /**< 0x00000200 */ +#define RES4315_OTP_PU 10 /**< 0x00000400 */ +#define RES4315_LNLDO2_PU 12 /**< 0x00001000 */ +#define RES4315_XTAL_PU 13 /**< 0x00002000 */ +#define RES4315_ALP_AVAIL 14 /**< 0x00004000 */ +#define RES4315_RX_PWRSW_PU 15 /**< 0x00008000 */ +#define RES4315_TX_PWRSW_PU 16 /**< 0x00010000 */ +#define RES4315_RFPLL_PWRSW_PU 17 /**< 0x00020000 */ +#define RES4315_LOGEN_PWRSW_PU 18 /**< 0x00040000 */ +#define RES4315_AFE_PWRSW_PU 19 /**< 0x00080000 */ +#define RES4315_BBPLL_PWRSW_PU 20 /**< 0x00100000 */ +#define RES4315_HT_AVAIL 21 /**< 0x00200000 */ /* 4315 chip-specific ChipStatus register bits */ -#define CST4315_SPROM_OTP_SEL_MASK 0x00000003 /* gpio [7:6], SDIO CIS selection */ -#define CST4315_DEFCIS_SEL 0x00000000 /* use default CIS, OTP is powered up */ -#define CST4315_SPROM_SEL 0x00000001 /* use SPROM, OTP is powered up */ -#define CST4315_OTP_SEL 0x00000002 /* use OTP, OTP is powered up */ -#define CST4315_OTP_PWRDN 0x00000003 /* use SPROM, OTP is 
powered down */ -#define CST4315_SDIO_MODE 0x00000004 /* gpio [8], sdio/usb mode */ +#define CST4315_SPROM_OTP_SEL_MASK 0x00000003 /**< gpio [7:6], SDIO CIS selection */ +#define CST4315_DEFCIS_SEL 0x00000000 /**< use default CIS, OTP is powered up */ +#define CST4315_SPROM_SEL 0x00000001 /**< use SPROM, OTP is powered up */ +#define CST4315_OTP_SEL 0x00000002 /**< use OTP, OTP is powered up */ +#define CST4315_OTP_PWRDN 0x00000003 /**< use SPROM, OTP is powered down */ +#define CST4315_SDIO_MODE 0x00000004 /**< gpio [8], sdio/usb mode */ #define CST4315_RCAL_VALID 0x00000008 #define CST4315_RCAL_VALUE_MASK 0x000001f0 #define CST4315_RCAL_VALUE_SHIFT 4 -#define CST4315_PALDO_EXTPNP 0x00000200 /* PALDO is configured with external PNP */ +#define CST4315_PALDO_EXTPNP 0x00000200 /**< PALDO is configured with external PNP */ #define CST4315_CBUCK_MODE_MASK 0x00000c00 #define CST4315_CBUCK_MODE_BURST 0x00000400 #define CST4315_CBUCK_MODE_LPBURST 0x00000c00 /* 4319 resources */ -#define RES4319_CBUCK_LPOM 1 /* 0x00000002 */ -#define RES4319_CBUCK_BURST 2 /* 0x00000004 */ -#define RES4319_CBUCK_PWM 3 /* 0x00000008 */ -#define RES4319_CLDO_PU 4 /* 0x00000010 */ -#define RES4319_PALDO_PU 5 /* 0x00000020 */ -#define RES4319_ILP_REQUEST 6 /* 0x00000040 */ -#define RES4319_LNLDO1_PU 9 /* 0x00000200 */ -#define RES4319_OTP_PU 10 /* 0x00000400 */ -#define RES4319_LNLDO2_PU 12 /* 0x00001000 */ -#define RES4319_XTAL_PU 13 /* 0x00002000 */ -#define RES4319_ALP_AVAIL 14 /* 0x00004000 */ -#define RES4319_RX_PWRSW_PU 15 /* 0x00008000 */ -#define RES4319_TX_PWRSW_PU 16 /* 0x00010000 */ -#define RES4319_RFPLL_PWRSW_PU 17 /* 0x00020000 */ -#define RES4319_LOGEN_PWRSW_PU 18 /* 0x00040000 */ -#define RES4319_AFE_PWRSW_PU 19 /* 0x00080000 */ -#define RES4319_BBPLL_PWRSW_PU 20 /* 0x00100000 */ -#define RES4319_HT_AVAIL 21 /* 0x00200000 */ +#define RES4319_CBUCK_LPOM 1 /**< 0x00000002 */ +#define RES4319_CBUCK_BURST 2 /**< 0x00000004 */ +#define RES4319_CBUCK_PWM 3 /**< 0x00000008 */ +#define 
RES4319_CLDO_PU 4 /**< 0x00000010 */ +#define RES4319_PALDO_PU 5 /**< 0x00000020 */ +#define RES4319_ILP_REQUEST 6 /**< 0x00000040 */ +#define RES4319_LNLDO1_PU 9 /**< 0x00000200 */ +#define RES4319_OTP_PU 10 /**< 0x00000400 */ +#define RES4319_LNLDO2_PU 12 /**< 0x00001000 */ +#define RES4319_XTAL_PU 13 /**< 0x00002000 */ +#define RES4319_ALP_AVAIL 14 /**< 0x00004000 */ +#define RES4319_RX_PWRSW_PU 15 /**< 0x00008000 */ +#define RES4319_TX_PWRSW_PU 16 /**< 0x00010000 */ +#define RES4319_RFPLL_PWRSW_PU 17 /**< 0x00020000 */ +#define RES4319_LOGEN_PWRSW_PU 18 /**< 0x00040000 */ +#define RES4319_AFE_PWRSW_PU 19 /**< 0x00080000 */ +#define RES4319_BBPLL_PWRSW_PU 20 /**< 0x00100000 */ +#define RES4319_HT_AVAIL 21 /**< 0x00200000 */ /* 4319 chip-specific ChipStatus register bits */ #define CST4319_SPI_CPULESSUSB 0x00000001 #define CST4319_SPI_CLK_POL 0x00000002 #define CST4319_SPI_CLK_PH 0x00000008 -#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 /* gpio [7:6], SDIO CIS selection */ +#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 /**< gpio [7:6], SDIO CIS selection */ #define CST4319_SPROM_OTP_SEL_SHIFT 6 -#define CST4319_DEFCIS_SEL 0x00000000 /* use default CIS, OTP is powered up */ -#define CST4319_SPROM_SEL 0x00000040 /* use SPROM, OTP is powered up */ +#define CST4319_DEFCIS_SEL 0x00000000 /**< use default CIS, OTP is powered up */ +#define CST4319_SPROM_SEL 0x00000040 /**< use SPROM, OTP is powered up */ #define CST4319_OTP_SEL 0x00000080 /* use OTP, OTP is powered up */ #define CST4319_OTP_PWRDN 0x000000c0 /* use SPROM, OTP is powered down */ -#define CST4319_SDIO_USB_MODE 0x00000100 /* gpio [8], sdio/usb mode */ +#define CST4319_SDIO_USB_MODE 0x00000100 /**< gpio [8], sdio/usb mode */ #define CST4319_REMAP_SEL_MASK 0x00000600 #define CST4319_ILPDIV_EN 0x00000800 #define CST4319_XTAL_PD_POL 0x00001000 #define CST4319_LPO_SEL 0x00002000 #define CST4319_RES_INIT_MODE 0x0000c000 -#define CST4319_PALDO_EXTPNP 0x00010000 /* PALDO is configured with external PNP */ +#define 
CST4319_PALDO_EXTPNP 0x00010000 /**< PALDO is configured with external PNP */ #define CST4319_CBUCK_MODE_MASK 0x00060000 #define CST4319_CBUCK_MODE_BURST 0x00020000 #define CST4319_CBUCK_MODE_LPBURST 0x00060000 @@ -2260,16 +2361,16 @@ typedef volatile struct { #define RES4330_5g_LOGEN_PWRSW_PU 27 /* 4330 chip-specific ChipStatus register bits */ -#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) /* SDIO || gSPI */ -#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) /* USB || USBDA */ -#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) /* SDIO */ -#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) /* gSPI */ -#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) /* USB packet-oriented */ -#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) /* USB Direct Access */ +#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) /**< SDIO || gSPI */ +#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) /**< USB || USBDA */ +#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) /**< SDIO */ +#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) /**< gSPI */ +#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) /**< USB packet-oriented */ +#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) /**< USB Direct Access */ #define CST4330_OTP_PRESENT 0x00000010 #define CST4330_LPO_AUTODET_EN 0x00000020 #define CST4330_ARMREMAP_0 0x00000040 -#define CST4330_SPROM_PRESENT 0x00000080 /* takes priority over OTP if both set */ +#define CST4330_SPROM_PRESENT 0x00000080 /**< takes priority over OTP if both set */ #define CST4330_ILPDIV_EN 0x00000100 #define CST4330_LPO_SEL 0x00000200 #define CST4330_RES_INIT_MODE_SHIFT 10 @@ -2329,7 +2430,7 @@ typedef volatile struct { #define PMU_VREG4_LPLDO2_1p15V 1 #define PMU_VREG4_LPLDO2_1p20V 2 #define PMU_VREG4_LPLDO2_1p10V 3 -#define PMU_VREG4_LPLDO2_0p90V 4 /* 4 - 7 is 0.90V */ +#define PMU_VREG4_LPLDO2_0p90V 4 /**< 4 - 7 is 0.90V */ #define PMU_VREG4_HSICLDO_BYPASS_SHIFT 27 #define PMU_VREG4_HSICLDO_BYPASS_MASK 0x1 @@ -2416,7 
+2517,7 @@ typedef volatile struct { #define CCTRL1_4334_ERCX_SEL (1 << 1) /* 1=select ERCX BT coex to be muxed out */ #define CCTRL1_4334_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */ #define CCTRL1_4334_JTAG_DISABLE (1 << 3) /* 1=disable JTAG interface on mux'd pins */ -#define CCTRL1_4334_UART_ON_4_5 (1 << 28) /* 1=UART_TX/UART_RX muxed on GPIO_4/5 (4334B0/1) */ +#define CCTRL1_4334_UART_ON_4_5 (1 << 28) /**< 1=UART_TX/UART_RX muxed on GPIO_4/5 (4334B0/1) */ /* 4324 Chip specific ChipControl1 register bits */ #define CCTRL1_4324_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */ @@ -2441,30 +2542,30 @@ typedef volatile struct { /* 00: SECI is disabled (JATG functional), 01: 2 wire, 10: 4 wire */ #define CCTRL_43143_SECI (1<<0) #define CCTRL_43143_BT_LEGACY (1<<1) -#define CCTRL_43143_I2S_MODE (1<<2) /* 0: SDIO enabled */ -#define CCTRL_43143_I2S_MASTER (1<<3) /* 0: I2S MCLK input disabled */ -#define CCTRL_43143_I2S_FULL (1<<4) /* 0: I2S SDIN and SPDIF_TX inputs disabled */ -#define CCTRL_43143_GSIO (1<<5) /* 0: sFlash enabled */ -#define CCTRL_43143_RF_SWCTRL_MASK (7<<6) /* 0: disabled */ +#define CCTRL_43143_I2S_MODE (1<<2) /**< 0: SDIO enabled */ +#define CCTRL_43143_I2S_MASTER (1<<3) /**< 0: I2S MCLK input disabled */ +#define CCTRL_43143_I2S_FULL (1<<4) /**< 0: I2S SDIN and SPDIF_TX inputs disabled */ +#define CCTRL_43143_GSIO (1<<5) /**< 0: sFlash enabled */ +#define CCTRL_43143_RF_SWCTRL_MASK (7<<6) /**< 0: disabled */ #define CCTRL_43143_RF_SWCTRL_0 (1<<6) #define CCTRL_43143_RF_SWCTRL_1 (2<<6) #define CCTRL_43143_RF_SWCTRL_2 (4<<6) -#define CCTRL_43143_RF_XSWCTRL (1<<9) /* 0: UART enabled */ -#define CCTRL_43143_HOST_WAKE0 (1<<11) /* 1: SDIO separate interrupt output from GPIO4 */ +#define CCTRL_43143_RF_XSWCTRL (1<<9) /**< 0: UART enabled */ +#define CCTRL_43143_HOST_WAKE0 (1<<11) /**< 1: SDIO separate interrupt output from GPIO4 */ #define CCTRL_43143_HOST_WAKE1 (1<<12) /* 1: SDIO separate interrupt output from GPIO16 */ /* 
43143 resources, based on pmu_params.xls V1.19 */ -#define RES43143_EXT_SWITCHER_PWM 0 /* 0x00001 */ -#define RES43143_XTAL_PU 1 /* 0x00002 */ -#define RES43143_ILP_REQUEST 2 /* 0x00004 */ -#define RES43143_ALP_AVAIL 3 /* 0x00008 */ -#define RES43143_WL_CORE_READY 4 /* 0x00010 */ -#define RES43143_BBPLL_PWRSW_PU 5 /* 0x00020 */ -#define RES43143_HT_AVAIL 6 /* 0x00040 */ -#define RES43143_RADIO_PU 7 /* 0x00080 */ -#define RES43143_MACPHY_CLK_AVAIL 8 /* 0x00100 */ -#define RES43143_OTP_PU 9 /* 0x00200 */ -#define RES43143_LQ_AVAIL 10 /* 0x00400 */ +#define RES43143_EXT_SWITCHER_PWM 0 /**< 0x00001 */ +#define RES43143_XTAL_PU 1 /**< 0x00002 */ +#define RES43143_ILP_REQUEST 2 /**< 0x00004 */ +#define RES43143_ALP_AVAIL 3 /**< 0x00008 */ +#define RES43143_WL_CORE_READY 4 /**< 0x00010 */ +#define RES43143_BBPLL_PWRSW_PU 5 /**< 0x00020 */ +#define RES43143_HT_AVAIL 6 /**< 0x00040 */ +#define RES43143_RADIO_PU 7 /**< 0x00080 */ +#define RES43143_MACPHY_CLK_AVAIL 8 /**< 0x00100 */ +#define RES43143_OTP_PU 9 /**< 0x00200 */ +#define RES43143_LQ_AVAIL 10 /**< 0x00400 */ #define PMU43143_XTAL_CORE_SIZE_MASK 0x3F @@ -2555,18 +2656,18 @@ typedef volatile struct { /* 4706 flashstrconfig reg bits */ #define FLSTRCF4706_MASK 0x000000ff -#define FLSTRCF4706_SF1 0x00000001 /* 2nd serial flash present */ -#define FLSTRCF4706_PF1 0x00000002 /* 2nd parallel flash present */ -#define FLSTRCF4706_SF1_TYPE 0x00000004 /* 2nd serial flash type : 0 : ST, 1 : Atmel */ -#define FLSTRCF4706_NF1 0x00000008 /* 2nd NAND flash present */ -#define FLSTRCF4706_1ST_MADDR_SEG_MASK 0x000000f0 /* Valid value mask */ -#define FLSTRCF4706_1ST_MADDR_SEG_4MB 0x00000010 /* 4MB */ -#define FLSTRCF4706_1ST_MADDR_SEG_8MB 0x00000020 /* 8MB */ -#define FLSTRCF4706_1ST_MADDR_SEG_16MB 0x00000030 /* 16MB */ -#define FLSTRCF4706_1ST_MADDR_SEG_32MB 0x00000040 /* 32MB */ -#define FLSTRCF4706_1ST_MADDR_SEG_64MB 0x00000050 /* 64MB */ -#define FLSTRCF4706_1ST_MADDR_SEG_128MB 0x00000060 /* 128MB */ -#define 
FLSTRCF4706_1ST_MADDR_SEG_256MB 0x00000070 /* 256MB */ +#define FLSTRCF4706_SF1 0x00000001 /**< 2nd serial flash present */ +#define FLSTRCF4706_PF1 0x00000002 /**< 2nd parallel flash present */ +#define FLSTRCF4706_SF1_TYPE 0x00000004 /**< 2nd serial flash type : 0 : ST, 1 : Atmel */ +#define FLSTRCF4706_NF1 0x00000008 /**< 2nd NAND flash present */ +#define FLSTRCF4706_1ST_MADDR_SEG_MASK 0x000000f0 /**< Valid value mask */ +#define FLSTRCF4706_1ST_MADDR_SEG_4MB 0x00000010 /**< 4MB */ +#define FLSTRCF4706_1ST_MADDR_SEG_8MB 0x00000020 /**< 8MB */ +#define FLSTRCF4706_1ST_MADDR_SEG_16MB 0x00000030 /**< 16MB */ +#define FLSTRCF4706_1ST_MADDR_SEG_32MB 0x00000040 /**< 32MB */ +#define FLSTRCF4706_1ST_MADDR_SEG_64MB 0x00000050 /**< 64MB */ +#define FLSTRCF4706_1ST_MADDR_SEG_128MB 0x00000060 /**< 128MB */ +#define FLSTRCF4706_1ST_MADDR_SEG_256MB 0x00000070 /**< 256MB */ /* 4360 Chip specific ChipControl register bits */ #define CCTRL4360_I2C_MODE (1 << 0) @@ -2595,6 +2696,8 @@ typedef volatile struct { #define RES4360_BBPLLPWRSW_PU 6 #define RES4360_HT_AVAIL 7 #define RES4360_OTP_PU 8 +#define RES4360_AVB_PLL_PWRSW_PU 9 +#define RES4360_PCIE_TL_CLK_AVAIL 10 #define CST4360_XTAL_40MZ 0x00000001 #define CST4360_SFLASH 0x00000002 @@ -2672,6 +2775,22 @@ typedef volatile struct { #define PMU43602_CC3_ARMCR4_DBG_CLK (1 << 29) +/* 4365 PMU resources */ +#define RES4365_REGULATOR_PU 0 +#define RES4365_XTALLDO_PU 1 +#define RES4365_XTAL_PU 2 +#define RES4365_CPU_PLLLDO_PU 3 +#define RES4365_CPU_PLL_PU 4 +#define RES4365_WL_CORE_RDY 5 +#define RES4365_ILP_REQ 6 +#define RES4365_ALP_AVAIL 7 +#define RES4365_HT_AVAIL 8 +#define RES4365_BB_PLLLDO_PU 9 +#define RES4365_BB_PLL_PU 10 +#define RES4365_MINIMU_PU 11 +#define RES4365_RADIO_PU 12 +#define RES4365_MACPHY_CLK_AVAIL 13 + /* 4349 related */ #define RES4349_LPLDO_PU 0 #define RES4349_BG_PU 1 @@ -2706,13 +2825,32 @@ typedef volatile struct { #define RES4349_MACPHY_CLKAVAIL 30 #define CR4_4349_RAM_BASE (0x180000) -#define 
CC4_4349_SR_ASM_ADDR (0x48) +#define CR4_4349_RAM_BASE_FROM_REV_9 (0x160000) + +/* SR binary offset is at 8K */ +#define CC_SR1_4349_SR_ASM_ADDR (0x10) #define CST4349_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */ #define CST4349_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */ #define CST4349_SPROM_PRESENT 0x00000010 +#define CC2_4349_VDDM_PWRSW_EN_MASK (1 << 20) +#define CC2_4349_VDDM_PWRSW_EN_SHIFT (20) +#define CC2_4349_SDIO_AOS_WAKEUP_MASK (1 << 24) +#define CC2_4349_SDIO_AOS_WAKEUP_SHIFT (24) + + +#define CC6_4349_PCIE_CLKREQ_WAKEUP_MASK (1 << 4) +#define CC6_4349_PCIE_CLKREQ_WAKEUP_SHIFT (4) +#define CC6_4349_PMU_WAKEUP_ALPAVAIL_MASK (1 << 6) +#define CC6_4349_PMU_WAKEUP_ALPAVAIL_SHIFT (6) +#define CC6_4349_PMU_EN_EXT_PERST_MASK (1 << 13) +#define CC6_4349_PMU_ENABLE_L2REFCLKPAD_PWRDWN (1 << 15) +#define CC6_4349_PMU_EN_MDIO_MASK (1 << 16) +#define CC6_4349_PMU_EN_ASSERT_L2_MASK (1 << 25) + + /* 43430 PMU resources based on pmu_params.xls */ #define RES43430_LPLDO_PU 0 @@ -2763,6 +2901,12 @@ typedef volatile struct { #define CST43430_TRIM_EN 0x00800000 #define CST43430_DIN_PACKAGE_OPTION 0x10000000 +#define PMU_MACCORE_0_RES_REQ_TIMER 0x19000000 +#define PMU_MACCORE_0_RES_REQ_MASK 0x5FF2364F + +#define PMU_MACCORE_1_RES_REQ_TIMER 0x19000000 +#define PMU_MACCORE_1_RES_REQ_MASK 0x5FF2364F + /* defines to detect active host interface in use */ #define CHIP_HOSTIF_PCIEMODE 0x1 #define CHIP_HOSTIF_USBMODE 0x2 @@ -2812,7 +2956,7 @@ typedef volatile struct { #define CST4335_CHIPMODE_MASK 0xF #define CST4335_CHIPMODE_SDIOD(cs) (((cs) & (1 << 0)) != 0) /* SDIO */ #define CST4335_CHIPMODE_GSPI(cs) (((cs) & (1 << 1)) != 0) /* gSPI */ -#define CST4335_CHIPMODE_USB20D(cs) (((cs) & (1 << 2)) != 0) /* HSIC || USBDA */ +#define CST4335_CHIPMODE_USB20D(cs) (((cs) & (1 << 2)) != 0) /**< HSIC || USBDA */ #define CST4335_CHIPMODE_PCIE(cs) (((cs) & (1 << 3)) != 0) /* PCIE */ /* 4335 Chip specific ChipControl1 register bits */ @@ -2830,6 +2974,8 @@ typedef volatile 
struct { #define CR4_4350_RAM_BASE (0x180000) #define CR4_4360_RAM_BASE (0x0) #define CR4_43602_RAM_BASE (0x180000) +#define CA7_4365_RAM_BASE (0x200000) + /* 4335 chip OTP present & OTP select bits. */ #define SPROM4335_OTP_SELECT 0x00000010 @@ -2995,7 +3141,7 @@ typedef volatile struct { #define MUXENAB4350_UART_MASK (0x0000000f) #define MUXENAB4350_UART_SHIFT 0 -#define MUXENAB4350_HOSTWAKE_MASK (0x000000f0) /* configure GPIO for SDIO host_wake */ +#define MUXENAB4350_HOSTWAKE_MASK (0x000000f0) /**< configure GPIO for SDIO host_wake */ #define MUXENAB4350_HOSTWAKE_SHIFT 4 @@ -3160,6 +3306,7 @@ typedef volatile struct { #define CC_GCI_CHIPCTRL_06 (6) #define CC_GCI_CHIPCTRL_07 (7) #define CC_GCI_CHIPCTRL_08 (8) +#define CC_GCI_CHIPCTRL_11 (11) #define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12) #define CC_GCI_06_JTAG_SEL_SHIFT 4 @@ -3252,13 +3399,13 @@ typedef volatile struct { #define CC4335_FNSEL_TRI (15) /* GCI Core Control Reg */ -#define GCI_CORECTRL_SR_MASK (1 << 0) /* SECI block Reset */ -#define GCI_CORECTRL_RSL_MASK (1 << 1) /* ResetSECILogic */ -#define GCI_CORECTRL_ES_MASK (1 << 2) /* EnableSECI */ -#define GCI_CORECTRL_FSL_MASK (1 << 3) /* Force SECI Out Low */ -#define GCI_CORECTRL_SOM_MASK (7 << 4) /* SECI Op Mode */ -#define GCI_CORECTRL_US_MASK (1 << 7) /* Update SECI */ -#define GCI_CORECTRL_BOS_MASK (1 << 8) /* Break On Sleep */ +#define GCI_CORECTRL_SR_MASK (1 << 0) /**< SECI block Reset */ +#define GCI_CORECTRL_RSL_MASK (1 << 1) /**< ResetSECILogic */ +#define GCI_CORECTRL_ES_MASK (1 << 2) /**< EnableSECI */ +#define GCI_CORECTRL_FSL_MASK (1 << 3) /**< Force SECI Out Low */ +#define GCI_CORECTRL_SOM_MASK (7 << 4) /**< SECI Op Mode */ +#define GCI_CORECTRL_US_MASK (1 << 7) /**< Update SECI */ +#define GCI_CORECTRL_BOS_MASK (1 << 8) /**< Break On Sleep */ /* 4345 pins * note: only the values set as default/used are added here. @@ -3377,46 +3524,46 @@ typedef volatile struct { /* 4335 GCI Intstatus(Mask)/WakeMask Register bits. 
*/ -#define GCI_INTSTATUS_RBI (1 << 0) /* Rx Break Interrupt */ -#define GCI_INTSTATUS_UB (1 << 1) /* UART Break Interrupt */ -#define GCI_INTSTATUS_SPE (1 << 2) /* SECI Parity Error Interrupt */ -#define GCI_INTSTATUS_SFE (1 << 3) /* SECI Framing Error Interrupt */ -#define GCI_INTSTATUS_SRITI (1 << 9) /* SECI Rx Idle Timer Interrupt */ -#define GCI_INTSTATUS_STFF (1 << 10) /* SECI Tx FIFO Full Interrupt */ -#define GCI_INTSTATUS_STFAE (1 << 11) /* SECI Tx FIFO Almost Empty Intr */ -#define GCI_INTSTATUS_SRFAF (1 << 12) /* SECI Rx FIFO Almost Full */ -#define GCI_INTSTATUS_SRFNE (1 << 14) /* SECI Rx FIFO Not Empty */ -#define GCI_INTSTATUS_SRFOF (1 << 15) /* SECI Rx FIFO Not Empty Timeout */ -#define GCI_INTSTATUS_GPIOINT (1 << 25) /* GCIGpioInt */ -#define GCI_INTSTATUS_GPIOWAKE (1 << 26) /* GCIGpioWake */ +#define GCI_INTSTATUS_RBI (1 << 0) /**< Rx Break Interrupt */ +#define GCI_INTSTATUS_UB (1 << 1) /**< UART Break Interrupt */ +#define GCI_INTSTATUS_SPE (1 << 2) /**< SECI Parity Error Interrupt */ +#define GCI_INTSTATUS_SFE (1 << 3) /**< SECI Framing Error Interrupt */ +#define GCI_INTSTATUS_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */ +#define GCI_INTSTATUS_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */ +#define GCI_INTSTATUS_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */ +#define GCI_INTSTATUS_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */ +#define GCI_INTSTATUS_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */ +#define GCI_INTSTATUS_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */ +#define GCI_INTSTATUS_GPIOINT (1 << 25) /**< GCIGpioInt */ +#define GCI_INTSTATUS_GPIOWAKE (1 << 26) /**< GCIGpioWake */ /* 4335 GCI IntMask Register bits. 
*/ -#define GCI_INTMASK_RBI (1 << 0) /* Rx Break Interrupt */ -#define GCI_INTMASK_UB (1 << 1) /* UART Break Interrupt */ -#define GCI_INTMASK_SPE (1 << 2) /* SECI Parity Error Interrupt */ -#define GCI_INTMASK_SFE (1 << 3) /* SECI Framing Error Interrupt */ -#define GCI_INTMASK_SRITI (1 << 9) /* SECI Rx Idle Timer Interrupt */ -#define GCI_INTMASK_STFF (1 << 10) /* SECI Tx FIFO Full Interrupt */ -#define GCI_INTMASK_STFAE (1 << 11) /* SECI Tx FIFO Almost Empty Intr */ -#define GCI_INTMASK_SRFAF (1 << 12) /* SECI Rx FIFO Almost Full */ -#define GCI_INTMASK_SRFNE (1 << 14) /* SECI Rx FIFO Not Empty */ -#define GCI_INTMASK_SRFOF (1 << 15) /* SECI Rx FIFO Not Empty Timeout */ -#define GCI_INTMASK_GPIOINT (1 << 25) /* GCIGpioInt */ -#define GCI_INTMASK_GPIOWAKE (1 << 26) /* GCIGpioWake */ +#define GCI_INTMASK_RBI (1 << 0) /**< Rx Break Interrupt */ +#define GCI_INTMASK_UB (1 << 1) /**< UART Break Interrupt */ +#define GCI_INTMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */ +#define GCI_INTMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */ +#define GCI_INTMASK_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */ +#define GCI_INTMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */ +#define GCI_INTMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */ +#define GCI_INTMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */ +#define GCI_INTMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */ +#define GCI_INTMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */ +#define GCI_INTMASK_GPIOINT (1 << 25) /**< GCIGpioInt */ +#define GCI_INTMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */ /* 4335 GCI WakeMask Register bits. 
*/ -#define GCI_WAKEMASK_RBI (1 << 0) /* Rx Break Interrupt */ -#define GCI_WAKEMASK_UB (1 << 1) /* UART Break Interrupt */ -#define GCI_WAKEMASK_SPE (1 << 2) /* SECI Parity Error Interrupt */ -#define GCI_WAKEMASK_SFE (1 << 3) /* SECI Framing Error Interrupt */ -#define GCI_WAKE_SRITI (1 << 9) /* SECI Rx Idle Timer Interrupt */ -#define GCI_WAKEMASK_STFF (1 << 10) /* SECI Tx FIFO Full Interrupt */ -#define GCI_WAKEMASK_STFAE (1 << 11) /* SECI Tx FIFO Almost Empty Intr */ -#define GCI_WAKEMASK_SRFAF (1 << 12) /* SECI Rx FIFO Almost Full */ -#define GCI_WAKEMASK_SRFNE (1 << 14) /* SECI Rx FIFO Not Empty */ -#define GCI_WAKEMASK_SRFOF (1 << 15) /* SECI Rx FIFO Not Empty Timeout */ -#define GCI_WAKEMASK_GPIOINT (1 << 25) /* GCIGpioInt */ -#define GCI_WAKEMASK_GPIOWAKE (1 << 26) /* GCIGpioWake */ +#define GCI_WAKEMASK_RBI (1 << 0) /**< Rx Break Interrupt */ +#define GCI_WAKEMASK_UB (1 << 1) /**< UART Break Interrupt */ +#define GCI_WAKEMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */ +#define GCI_WAKEMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */ +#define GCI_WAKE_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */ +#define GCI_WAKEMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */ +#define GCI_WAKEMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */ +#define GCI_WAKEMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */ +#define GCI_WAKEMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */ +#define GCI_WAKEMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */ +#define GCI_WAKEMASK_GPIOINT (1 << 25) /**< GCIGpioInt */ +#define GCI_WAKEMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */ #define GCI_WAKE_ON_GCI_GPIO1 1 #define GCI_WAKE_ON_GCI_GPIO2 2 @@ -3434,7 +3581,7 @@ typedef volatile struct { #define MUXENAB4335_UART_MASK (0x0000000f) #define MUXENAB4335_UART_SHIFT 0 -#define MUXENAB4335_HOSTWAKE_MASK (0x000000f0) /* configure GPIO for SDIO host_wake */ +#define MUXENAB4335_HOSTWAKE_MASK (0x000000f0) /**< configure GPIO for SDIO host_wake */ 
#define MUXENAB4335_HOSTWAKE_SHIFT 4 #define MUXENAB4335_GETIX(val, name) \ ((((val) & MUXENAB4335_ ## name ## _MASK) >> MUXENAB4335_ ## name ## _SHIFT) - 1) diff --git a/drivers/amlogic/wifi/bcmdhd/include/sbconfig.h b/drivers/amlogic/wifi/bcmdhd/include/sbconfig.h new file mode 100644 index 0000000000000..53e26ae4e3206 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/sbconfig.h @@ -0,0 +1,285 @@ +/* + * Broadcom SiliconBackplane hardware register definitions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sbconfig.h 530150 2015-01-29 08:43:40Z $ + */ + +#ifndef _SBCONFIG_H +#define _SBCONFIG_H + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif + +/* enumeration in SB is based on the premise that cores are contiguos in the + * enumeration space. + */ +#define SB_BUS_SIZE 0x10000 /**< Each bus gets 64Kbytes for cores */ +#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE) +#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) /**< Max cores per bus */ + +/* + * Sonics Configuration Space Registers. + */ +#define SBCONFIGOFF 0xf00 /**< core sbconfig regs are top 256bytes of regs */ +#define SBCONFIGSIZE 256 /**< sizeof (sbconfig_t) */ + +#define SBIPSFLAG 0x08 +#define SBTPSFLAG 0x18 +#define SBTMERRLOGA 0x48 /**< sonics >= 2.3 */ +#define SBTMERRLOG 0x50 /**< sonics >= 2.3 */ +#define SBADMATCH3 0x60 +#define SBADMATCH2 0x68 +#define SBADMATCH1 0x70 +#define SBIMSTATE 0x90 +#define SBINTVEC 0x94 +#define SBTMSTATELOW 0x98 +#define SBTMSTATEHIGH 0x9c +#define SBBWA0 0xa0 +#define SBIMCONFIGLOW 0xa8 +#define SBIMCONFIGHIGH 0xac +#define SBADMATCH0 0xb0 +#define SBTMCONFIGLOW 0xb8 +#define SBTMCONFIGHIGH 0xbc +#define SBBCONFIG 0xc0 +#define SBBSTATE 0xc8 +#define SBACTCNFG 0xd8 +#define SBFLAGST 0xe8 +#define SBIDLOW 0xf8 +#define SBIDHIGH 0xfc + +/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have + * a few registers *below* that line. 
I think it would be very confusing to try + * and change the value of SBCONFIGOFF, so I'm definig them as absolute offsets here, + */ + +#define SBIMERRLOGA 0xea8 +#define SBIMERRLOG 0xeb0 +#define SBTMPORTCONNID0 0xed8 +#define SBTMPORTLOCK0 0xef8 + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +typedef volatile struct _sbconfig { + uint32 PAD[2]; + uint32 sbipsflag; /**< initiator port ocp slave flag */ + uint32 PAD[3]; + uint32 sbtpsflag; /**< target port ocp slave flag */ + uint32 PAD[11]; + uint32 sbtmerrloga; /**< (sonics >= 2.3) */ + uint32 PAD; + uint32 sbtmerrlog; /**< (sonics >= 2.3) */ + uint32 PAD[3]; + uint32 sbadmatch3; /**< address match3 */ + uint32 PAD; + uint32 sbadmatch2; /**< address match2 */ + uint32 PAD; + uint32 sbadmatch1; /**< address match1 */ + uint32 PAD[7]; + uint32 sbimstate; /**< initiator agent state */ + uint32 sbintvec; /**< interrupt mask */ + uint32 sbtmstatelow; /**< target state */ + uint32 sbtmstatehigh; /**< target state */ + uint32 sbbwa0; /**< bandwidth allocation table0 */ + uint32 PAD; + uint32 sbimconfiglow; /**< initiator configuration */ + uint32 sbimconfighigh; /**< initiator configuration */ + uint32 sbadmatch0; /**< address match0 */ + uint32 PAD; + uint32 sbtmconfiglow; /**< target configuration */ + uint32 sbtmconfighigh; /**< target configuration */ + uint32 sbbconfig; /**< broadcast configuration */ + uint32 PAD; + uint32 sbbstate; /**< broadcast state */ + uint32 PAD[3]; + uint32 sbactcnfg; /**< activate configuration */ + uint32 PAD[3]; + uint32 sbflagst; /**< current sbflags */ + uint32 PAD[3]; + uint32 sbidlow; /**< identification */ + uint32 sbidhigh; /**< identification */ +} sbconfig_t; + +#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ + +/* sbipsflag */ +#define SBIPS_INT1_MASK 0x3f /**< which sbflags get routed to mips interrupt 1 */ +#define SBIPS_INT1_SHIFT 0 +#define SBIPS_INT2_MASK 0x3f00 /**< which sbflags get routed to mips interrupt 2 */ +#define SBIPS_INT2_SHIFT 8 +#define 
SBIPS_INT3_MASK 0x3f0000 /**< which sbflags get routed to mips interrupt 3 */ +#define SBIPS_INT3_SHIFT 16 +#define SBIPS_INT4_MASK 0x3f000000 /**< which sbflags get routed to mips interrupt 4 */ +#define SBIPS_INT4_SHIFT 24 + +/* sbtpsflag */ +#define SBTPS_NUM0_MASK 0x3f /**< interrupt sbFlag # generated by this core */ +#define SBTPS_F0EN0 0x40 /**< interrupt is always sent on the backplane */ + +/* sbtmerrlog */ +#define SBTMEL_CM 0x00000007 /**< command */ +#define SBTMEL_CI 0x0000ff00 /**< connection id */ +#define SBTMEL_EC 0x0f000000 /**< error code */ +#define SBTMEL_ME 0x80000000 /**< multiple error */ + +/* sbimstate */ +#define SBIM_PC 0xf /**< pipecount */ +#define SBIM_AP_MASK 0x30 /**< arbitration policy */ +#define SBIM_AP_BOTH 0x00 /**< use both timeslaces and token */ +#define SBIM_AP_TS 0x10 /**< use timesliaces only */ +#define SBIM_AP_TK 0x20 /**< use token only */ +#define SBIM_AP_RSV 0x30 /**< reserved */ +#define SBIM_IBE 0x20000 /**< inbanderror */ +#define SBIM_TO 0x40000 /**< timeout */ +#define SBIM_BY 0x01800000 /**< busy (sonics >= 2.3) */ +#define SBIM_RJ 0x02000000 /**< reject (sonics >= 2.3) */ + +/* sbtmstatelow */ +#define SBTML_RESET 0x0001 /**< reset */ +#define SBTML_REJ_MASK 0x0006 /**< reject field */ +#define SBTML_REJ 0x0002 /**< reject */ +#define SBTML_TMPREJ 0x0004 /**< temporary reject, for error recovery */ + +#define SBTML_SICF_SHIFT 16 /**< Shift to locate the SI control flags in sbtml */ + +/* sbtmstatehigh */ +#define SBTMH_SERR 0x0001 /**< serror */ +#define SBTMH_INT 0x0002 /**< interrupt */ +#define SBTMH_BUSY 0x0004 /**< busy */ +#define SBTMH_TO 0x0020 /**< timeout (sonics >= 2.3) */ + +#define SBTMH_SISF_SHIFT 16 /**< Shift to locate the SI status flags in sbtmh */ + +/* sbbwa0 */ +#define SBBWA_TAB0_MASK 0xffff /**< lookup table 0 */ +#define SBBWA_TAB1_MASK 0xffff /**< lookup table 1 */ +#define SBBWA_TAB1_SHIFT 16 + +/* sbimconfiglow */ +#define SBIMCL_STO_MASK 0x7 /**< service timeout */ +#define 
SBIMCL_RTO_MASK 0x70 /**< request timeout */ +#define SBIMCL_RTO_SHIFT 4 +#define SBIMCL_CID_MASK 0xff0000 /**< connection id */ +#define SBIMCL_CID_SHIFT 16 + +/* sbimconfighigh */ +#define SBIMCH_IEM_MASK 0xc /**< inband error mode */ +#define SBIMCH_TEM_MASK 0x30 /**< timeout error mode */ +#define SBIMCH_TEM_SHIFT 4 +#define SBIMCH_BEM_MASK 0xc0 /**< bus error mode */ +#define SBIMCH_BEM_SHIFT 6 + +/* sbadmatch0 */ +#define SBAM_TYPE_MASK 0x3 /**< address type */ +#define SBAM_AD64 0x4 /**< reserved */ +#define SBAM_ADINT0_MASK 0xf8 /**< type0 size */ +#define SBAM_ADINT0_SHIFT 3 +#define SBAM_ADINT1_MASK 0x1f8 /**< type1 size */ +#define SBAM_ADINT1_SHIFT 3 +#define SBAM_ADINT2_MASK 0x1f8 /**< type2 size */ +#define SBAM_ADINT2_SHIFT 3 +#define SBAM_ADEN 0x400 /**< enable */ +#define SBAM_ADNEG 0x800 /**< negative decode */ +#define SBAM_BASE0_MASK 0xffffff00 /**< type0 base address */ +#define SBAM_BASE0_SHIFT 8 +#define SBAM_BASE1_MASK 0xfffff000 /**< type1 base address for the core */ +#define SBAM_BASE1_SHIFT 12 +#define SBAM_BASE2_MASK 0xffff0000 /**< type2 base address for the core */ +#define SBAM_BASE2_SHIFT 16 + +/* sbtmconfiglow */ +#define SBTMCL_CD_MASK 0xff /**< clock divide */ +#define SBTMCL_CO_MASK 0xf800 /**< clock offset */ +#define SBTMCL_CO_SHIFT 11 +#define SBTMCL_IF_MASK 0xfc0000 /**< interrupt flags */ +#define SBTMCL_IF_SHIFT 18 +#define SBTMCL_IM_MASK 0x3000000 /**< interrupt mode */ +#define SBTMCL_IM_SHIFT 24 + +/* sbtmconfighigh */ +#define SBTMCH_BM_MASK 0x3 /**< busy mode */ +#define SBTMCH_RM_MASK 0x3 /**< retry mode */ +#define SBTMCH_RM_SHIFT 2 +#define SBTMCH_SM_MASK 0x30 /**< stop mode */ +#define SBTMCH_SM_SHIFT 4 +#define SBTMCH_EM_MASK 0x300 /**< sb error mode */ +#define SBTMCH_EM_SHIFT 8 +#define SBTMCH_IM_MASK 0xc00 /**< int mode */ +#define SBTMCH_IM_SHIFT 10 + +/* sbbconfig */ +#define SBBC_LAT_MASK 0x3 /**< sb latency */ +#define SBBC_MAX0_MASK 0xf0000 /**< maxccntr0 */ +#define SBBC_MAX0_SHIFT 16 +#define 
SBBC_MAX1_MASK 0xf00000 /**< maxccntr1 */ +#define SBBC_MAX1_SHIFT 20 + +/* sbbstate */ +#define SBBS_SRD 0x1 /**< st reg disable */ +#define SBBS_HRD 0x2 /**< hold reg disable */ + +/* sbidlow */ +#define SBIDL_CS_MASK 0x3 /**< config space */ +#define SBIDL_AR_MASK 0x38 /**< # address ranges supported */ +#define SBIDL_AR_SHIFT 3 +#define SBIDL_SYNCH 0x40 /**< sync */ +#define SBIDL_INIT 0x80 /**< initiator */ +#define SBIDL_MINLAT_MASK 0xf00 /**< minimum backplane latency */ +#define SBIDL_MINLAT_SHIFT 8 +#define SBIDL_MAXLAT 0xf000 /**< maximum backplane latency */ +#define SBIDL_MAXLAT_SHIFT 12 +#define SBIDL_FIRST 0x10000 /**< this initiator is first */ +#define SBIDL_CW_MASK 0xc0000 /**< cycle counter width */ +#define SBIDL_CW_SHIFT 18 +#define SBIDL_TP_MASK 0xf00000 /**< target ports */ +#define SBIDL_TP_SHIFT 20 +#define SBIDL_IP_MASK 0xf000000 /**< initiator ports */ +#define SBIDL_IP_SHIFT 24 +#define SBIDL_RV_MASK 0xf0000000 /**< sonics backplane revision code */ +#define SBIDL_RV_SHIFT 28 +#define SBIDL_RV_2_2 0x00000000 /**< version 2.2 or earlier */ +#define SBIDL_RV_2_3 0x10000000 /**< version 2.3 */ + +/* sbidhigh */ +#define SBIDH_RC_MASK 0x000f /**< revision code */ +#define SBIDH_RCE_MASK 0x7000 /**< revision code extension field */ +#define SBIDH_RCE_SHIFT 8 +#define SBCOREREV(sbidh) \ + ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK)) +#define SBIDH_CC_MASK 0x8ff0 /**< core code */ +#define SBIDH_CC_SHIFT 4 +#define SBIDH_VC_MASK 0xffff0000 /**< vendor code */ +#define SBIDH_VC_SHIFT 16 + +#define SB_COMMIT 0xfd8 /**< update buffered registers value */ + +/* vendor codes */ +#define SB_VEND_BCM 0x4243 /**< Broadcom's SB vendor code */ + +#endif /* _SBCONFIG_H */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/sbhnddma.h b/drivers/amlogic/wifi/bcmdhd/include/sbhnddma.h new file mode 100644 index 0000000000000..5692ea954b359 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/sbhnddma.h @@ -0,0 +1,420 @@ +/* + 
* Generic Broadcom Home Networking Division (HND) DMA engine HW interface + * This supports the following chips: BCM42xx, 44xx, 47xx . + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sbhnddma.h 530150 2015-01-29 08:43:40Z $ + */ + +#ifndef _sbhnddma_h_ +#define _sbhnddma_h_ + +/* DMA structure: + * support two DMA engines: 32 bits address or 64 bit addressing + * basic DMA register set is per channel(transmit or receive) + * a pair of channels is defined for convenience + */ + + +/* 32 bits addressing */ + +/** dma registers per channel(xmt or rcv) */ +typedef volatile struct { + uint32 control; /**< enable, et al */ + uint32 addr; /**< descriptor ring base address (4K aligned) */ + uint32 ptr; /**< last descriptor posted to chip */ + uint32 status; /**< current active descriptor, et al */ +} dma32regs_t; + +typedef volatile struct { + dma32regs_t xmt; /**< dma tx channel */ + dma32regs_t rcv; /**< dma rx channel */ +} dma32regp_t; + +typedef volatile struct { /* diag access */ + uint32 fifoaddr; /**< diag address */ + uint32 fifodatalow; /**< low 32bits of data */ + uint32 fifodatahigh; /**< high 32bits of data */ + uint32 pad; /**< reserved */ +} dma32diag_t; + +/** + * DMA Descriptor + * Descriptors are only read by the hardware, never written back. + */ +typedef volatile struct { + uint32 ctrl; /**< misc control bits & bufcount */ + uint32 addr; /**< data buffer address */ +} dma32dd_t; + +/** Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page. 
*/ +#define D32RINGALIGN_BITS 12 +#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS) +#define D32RINGALIGN (1 << D32RINGALIGN_BITS) + +#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t)) + +/* transmit channel control */ +#define XC_XE ((uint32)1 << 0) /**< transmit enable */ +#define XC_SE ((uint32)1 << 1) /**< transmit suspend request */ +#define XC_LE ((uint32)1 << 2) /**< loopback enable */ +#define XC_FL ((uint32)1 << 4) /**< flush request */ +#define XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */ +#define XC_MR_SHIFT 6 +#define XC_PD ((uint32)1 << 11) /**< parity check disable */ +#define XC_AE ((uint32)3 << 16) /**< address extension bits */ +#define XC_AE_SHIFT 16 +#define XC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define XC_BL_SHIFT 18 +#define XC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define XC_PC_SHIFT 21 +#define XC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define XC_PT_SHIFT 24 + +/** Multiple outstanding reads */ +#define DMA_MR_1 0 +#define DMA_MR_2 1 +#define DMA_MR_4 2 +#define DMA_MR_8 3 +#define DMA_MR_12 4 +#define DMA_MR_16 5 +#define DMA_MR_20 6 +#define DMA_MR_32 7 + +/** DMA Burst Length in bytes */ +#define DMA_BL_16 0 +#define DMA_BL_32 1 +#define DMA_BL_64 2 +#define DMA_BL_128 3 +#define DMA_BL_256 4 +#define DMA_BL_512 5 +#define DMA_BL_1024 6 + +/** Prefetch control */ +#define DMA_PC_0 0 +#define DMA_PC_4 1 +#define DMA_PC_8 2 +#define DMA_PC_16 3 +/* others: reserved */ + +/** Prefetch threshold */ +#define DMA_PT_1 0 +#define DMA_PT_2 1 +#define DMA_PT_4 2 +#define DMA_PT_8 3 + +/* transmit descriptor table pointer */ +#define XP_LD_MASK 0xfff /**< last valid descriptor */ + +/* transmit channel status */ +#define XS_CD_MASK 0x0fff /**< current descriptor pointer */ +#define XS_XS_MASK 0xf000 /**< transmit state */ +#define XS_XS_SHIFT 12 +#define XS_XS_DISABLED 0x0000 /**< disabled */ +#define XS_XS_ACTIVE 0x1000 /**< active */ +#define XS_XS_IDLE 0x2000 /**< idle wait */ +#define XS_XS_STOPPED 0x3000 /**< 
stopped */ +#define XS_XS_SUSP 0x4000 /**< suspend pending */ +#define XS_XE_MASK 0xf0000 /**< transmit errors */ +#define XS_XE_SHIFT 16 +#define XS_XE_NOERR 0x00000 /**< no error */ +#define XS_XE_DPE 0x10000 /**< descriptor protocol error */ +#define XS_XE_DFU 0x20000 /**< data fifo underrun */ +#define XS_XE_BEBR 0x30000 /**< bus error on buffer read */ +#define XS_XE_BEDA 0x40000 /**< bus error on descriptor access */ +#define XS_AD_MASK 0xfff00000 /**< active descriptor */ +#define XS_AD_SHIFT 20 + +/* receive channel control */ +#define RC_RE ((uint32)1 << 0) /**< receive enable */ +#define RC_RO_MASK 0xfe /**< receive frame offset */ +#define RC_RO_SHIFT 1 +#define RC_FM ((uint32)1 << 8) /**< direct fifo receive (pio) mode */ +#define RC_SH ((uint32)1 << 9) /**< separate rx header descriptor enable */ +#define RC_OC ((uint32)1 << 10) /**< overflow continue */ +#define RC_PD ((uint32)1 << 11) /**< parity check disable */ +#define RC_AE ((uint32)3 << 16) /**< address extension bits */ +#define RC_AE_SHIFT 16 +#define RC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define RC_BL_SHIFT 18 +#define RC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define RC_PC_SHIFT 21 +#define RC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define RC_PT_SHIFT 24 + +/* receive descriptor table pointer */ +#define RP_LD_MASK 0xfff /**< last valid descriptor */ + +/* receive channel status */ +#define RS_CD_MASK 0x0fff /**< current descriptor pointer */ +#define RS_RS_MASK 0xf000 /**< receive state */ +#define RS_RS_SHIFT 12 +#define RS_RS_DISABLED 0x0000 /**< disabled */ +#define RS_RS_ACTIVE 0x1000 /**< active */ +#define RS_RS_IDLE 0x2000 /**< idle wait */ +#define RS_RS_STOPPED 0x3000 /**< reserved */ +#define RS_RE_MASK 0xf0000 /**< receive errors */ +#define RS_RE_SHIFT 16 +#define RS_RE_NOERR 0x00000 /**< no error */ +#define RS_RE_DPE 0x10000 /**< descriptor protocol error */ +#define RS_RE_DFO 0x20000 /**< data fifo overflow */ +#define RS_RE_BEBW 0x30000 /**< bus error on 
buffer write */ +#define RS_RE_BEDA 0x40000 /**< bus error on descriptor access */ +#define RS_AD_MASK 0xfff00000 /**< active descriptor */ +#define RS_AD_SHIFT 20 + +/* fifoaddr */ +#define FA_OFF_MASK 0xffff /**< offset */ +#define FA_SEL_MASK 0xf0000 /**< select */ +#define FA_SEL_SHIFT 16 +#define FA_SEL_XDD 0x00000 /**< transmit dma data */ +#define FA_SEL_XDP 0x10000 /**< transmit dma pointers */ +#define FA_SEL_RDD 0x40000 /**< receive dma data */ +#define FA_SEL_RDP 0x50000 /**< receive dma pointers */ +#define FA_SEL_XFD 0x80000 /**< transmit fifo data */ +#define FA_SEL_XFP 0x90000 /**< transmit fifo pointers */ +#define FA_SEL_RFD 0xc0000 /**< receive fifo data */ +#define FA_SEL_RFP 0xd0000 /**< receive fifo pointers */ +#define FA_SEL_RSD 0xe0000 /**< receive frame status data */ +#define FA_SEL_RSP 0xf0000 /**< receive frame status pointers */ + +/* descriptor control flags */ +#define CTRL_BC_MASK 0x00001fff /**< buffer byte count, real data len must <= 4KB */ +#define CTRL_AE ((uint32)3 << 16) /**< address extension bits */ +#define CTRL_AE_SHIFT 16 +#define CTRL_PARITY ((uint32)3 << 18) /**< parity bit */ +#define CTRL_EOT ((uint32)1 << 28) /**< end of descriptor table */ +#define CTRL_IOC ((uint32)1 << 29) /**< interrupt on completion */ +#define CTRL_EOF ((uint32)1 << 30) /**< end of frame */ +#define CTRL_SOF ((uint32)1 << 31) /**< start of frame */ + +/** control flags in the range [27:20] are core-specific and not defined here */ +#define CTRL_CORE_MASK 0x0ff00000 + +/* 64 bits addressing */ + +/** dma registers per channel(xmt or rcv) */ +typedef volatile struct { + uint32 control; /**< enable, et al */ + uint32 ptr; /**< last descriptor posted to chip */ + uint32 addrlow; /**< descriptor ring base address low 32-bits (8K aligned) */ + uint32 addrhigh; /**< descriptor ring base address bits 63:32 (8K aligned) */ + uint32 status0; /**< current descriptor, xmt state */ + uint32 status1; /**< active descriptor, xmt error */ +} dma64regs_t; + 
+typedef volatile struct { + dma64regs_t tx; /**< dma64 tx channel */ + dma64regs_t rx; /**< dma64 rx channel */ +} dma64regp_t; + +typedef volatile struct { /**< diag access */ + uint32 fifoaddr; /**< diag address */ + uint32 fifodatalow; /**< low 32bits of data */ + uint32 fifodatahigh; /**< high 32bits of data */ + uint32 pad; /**< reserved */ +} dma64diag_t; + +/** + * DMA Descriptor + * Descriptors are only read by the hardware, never written back. + */ +typedef volatile struct { + uint32 ctrl1; /**< misc control bits */ + uint32 ctrl2; /**< buffer count and address extension */ + uint32 addrlow; /**< memory address of the date buffer, bits 31:0 */ + uint32 addrhigh; /**< memory address of the date buffer, bits 63:32 */ +} dma64dd_t; + +/** + * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical addresss. + */ +#define D64RINGALIGN_BITS 13 +#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS) +#define D64RINGBOUNDARY (1 << D64RINGALIGN_BITS) + +#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t)) + +/** for cores with large descriptor ring support, descriptor ring size can be up to 4096 */ +#define D64MAXDD_LARGE ((1 << 16) / sizeof (dma64dd_t)) + +/** + * for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross + * 64K boundary + */ +#define D64RINGBOUNDARY_LARGE (1 << 16) + +/* + * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11. + * When this field contains the value N, the burst length is 2**(N + 4) bytes. 
+ */ +#define D64_DEF_USBBURSTLEN 2 +#define D64_DEF_SDIOBURSTLEN 1 + + +#ifndef D64_USBBURSTLEN +#define D64_USBBURSTLEN DMA_BL_64 +#endif +#ifndef D64_SDIOBURSTLEN +#define D64_SDIOBURSTLEN DMA_BL_32 +#endif + +/* transmit channel control */ +#define D64_XC_XE 0x00000001 /**< transmit enable */ +#define D64_XC_SE 0x00000002 /**< transmit suspend request */ +#define D64_XC_LE 0x00000004 /**< loopback enable */ +#define D64_XC_FL 0x00000010 /**< flush request */ +#define D64_XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */ +#define D64_XC_MR_SHIFT 6 +#define D64_XC_PD 0x00000800 /**< parity check disable */ +#define D64_XC_AE 0x00030000 /**< address extension bits */ +#define D64_XC_AE_SHIFT 16 +#define D64_XC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define D64_XC_BL_SHIFT 18 +#define D64_XC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define D64_XC_PC_SHIFT 21 +#define D64_XC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define D64_XC_PT_SHIFT 24 + +/* transmit descriptor table pointer */ +#define D64_XP_LD_MASK 0x00001fff /**< last valid descriptor */ + +/* transmit channel status */ +#define D64_XS0_CD_MASK (di->d64_xs0_cd_mask) /**< current descriptor pointer */ +#define D64_XS0_XS_MASK 0xf0000000 /**< transmit state */ +#define D64_XS0_XS_SHIFT 28 +#define D64_XS0_XS_DISABLED 0x00000000 /**< disabled */ +#define D64_XS0_XS_ACTIVE 0x10000000 /**< active */ +#define D64_XS0_XS_IDLE 0x20000000 /**< idle wait */ +#define D64_XS0_XS_STOPPED 0x30000000 /**< stopped */ +#define D64_XS0_XS_SUSP 0x40000000 /**< suspend pending */ + +#define D64_XS1_AD_MASK (di->d64_xs1_ad_mask) /**< active descriptor */ +#define D64_XS1_XE_MASK 0xf0000000 /**< transmit errors */ +#define D64_XS1_XE_SHIFT 28 +#define D64_XS1_XE_NOERR 0x00000000 /**< no error */ +#define D64_XS1_XE_DPE 0x10000000 /**< descriptor protocol error */ +#define D64_XS1_XE_DFU 0x20000000 /**< data fifo underrun */ +#define D64_XS1_XE_DTE 0x30000000 /**< data transfer error */ +#define 
D64_XS1_XE_DESRE 0x40000000 /**< descriptor read error */ +#define D64_XS1_XE_COREE 0x50000000 /**< core error */ + +/* receive channel control */ +#define D64_RC_RE 0x00000001 /**< receive enable */ +#define D64_RC_RO_MASK 0x000000fe /**< receive frame offset */ +#define D64_RC_RO_SHIFT 1 +#define D64_RC_FM 0x00000100 /**< direct fifo receive (pio) mode */ +#define D64_RC_SH 0x00000200 /**< separate rx header descriptor enable */ +#define D64_RC_SHIFT 9 /**< separate rx header descriptor enable */ +#define D64_RC_OC 0x00000400 /**< overflow continue */ +#define D64_RC_PD 0x00000800 /**< parity check disable */ +#define D64_RC_SA 0x00002000 /**< select active */ +#define D64_RC_GE 0x00004000 /**< Glom enable */ +#define D64_RC_AE 0x00030000 /**< address extension bits */ +#define D64_RC_AE_SHIFT 16 +#define D64_RC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define D64_RC_BL_SHIFT 18 +#define D64_RC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define D64_RC_PC_SHIFT 21 +#define D64_RC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define D64_RC_PT_SHIFT 24 + +/* flags for dma controller */ +#define DMA_CTRL_PEN (1 << 0) /**< partity enable */ +#define DMA_CTRL_ROC (1 << 1) /**< rx overflow continue */ +#define DMA_CTRL_RXMULTI (1 << 2) /**< allow rx scatter to multiple descriptors */ +#define DMA_CTRL_UNFRAMED (1 << 3) /**< Unframed Rx/Tx data */ +#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4) +#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5) /**< DMA avoidance WAR for 4331 */ +#define DMA_CTRL_RXSINGLE (1 << 6) /**< always single buffer */ +#define DMA_CTRL_SDIO_RXGLOM (1 << 7) /**< DMA Rx glome is enabled */ + +/* receive descriptor table pointer */ +#define D64_RP_LD_MASK 0x00001fff /**< last valid descriptor */ + +/* receive channel status */ +#define D64_RS0_CD_MASK (di->d64_rs0_cd_mask) /**< current descriptor pointer */ +#define D64_RS0_RS_MASK 0xf0000000 /**< receive state */ +#define D64_RS0_RS_SHIFT 28 +#define D64_RS0_RS_DISABLED 0x00000000 /**< disabled */ 
+#define D64_RS0_RS_ACTIVE 0x10000000 /**< active */ +#define D64_RS0_RS_IDLE 0x20000000 /**< idle wait */ +#define D64_RS0_RS_STOPPED 0x30000000 /**< stopped */ +#define D64_RS0_RS_SUSP 0x40000000 /**< suspend pending */ + +#define D64_RS1_AD_MASK 0x0001ffff /**< active descriptor */ +#define D64_RS1_RE_MASK 0xf0000000 /**< receive errors */ +#define D64_RS1_RE_SHIFT 28 +#define D64_RS1_RE_NOERR 0x00000000 /**< no error */ +#define D64_RS1_RE_DPO 0x10000000 /**< descriptor protocol error */ +#define D64_RS1_RE_DFU 0x20000000 /**< data fifo overflow */ +#define D64_RS1_RE_DTE 0x30000000 /**< data transfer error */ +#define D64_RS1_RE_DESRE 0x40000000 /**< descriptor read error */ +#define D64_RS1_RE_COREE 0x50000000 /**< core error */ + +/* fifoaddr */ +#define D64_FA_OFF_MASK 0xffff /**< offset */ +#define D64_FA_SEL_MASK 0xf0000 /**< select */ +#define D64_FA_SEL_SHIFT 16 +#define D64_FA_SEL_XDD 0x00000 /**< transmit dma data */ +#define D64_FA_SEL_XDP 0x10000 /**< transmit dma pointers */ +#define D64_FA_SEL_RDD 0x40000 /**< receive dma data */ +#define D64_FA_SEL_RDP 0x50000 /**< receive dma pointers */ +#define D64_FA_SEL_XFD 0x80000 /**< transmit fifo data */ +#define D64_FA_SEL_XFP 0x90000 /**< transmit fifo pointers */ +#define D64_FA_SEL_RFD 0xc0000 /**< receive fifo data */ +#define D64_FA_SEL_RFP 0xd0000 /**< receive fifo pointers */ +#define D64_FA_SEL_RSD 0xe0000 /**< receive frame status data */ +#define D64_FA_SEL_RSP 0xf0000 /**< receive frame status pointers */ + +/* descriptor control flags 1 */ +#define D64_CTRL_COREFLAGS 0x0ff00000 /**< core specific flags */ +#define D64_CTRL1_NOTPCIE ((uint32)1 << 18) /**< buirst size control */ +#define D64_CTRL1_EOT ((uint32)1 << 28) /**< end of descriptor table */ +#define D64_CTRL1_IOC ((uint32)1 << 29) /**< interrupt on completion */ +#define D64_CTRL1_EOF ((uint32)1 << 30) /**< end of frame */ +#define D64_CTRL1_SOF ((uint32)1 << 31) /**< start of frame */ + +/* descriptor control flags 2 */ +#define 
D64_CTRL2_BC_MASK 0x00007fff /**< buffer byte count. real data len must <= 16KB */ +#define D64_CTRL2_AE 0x00030000 /**< address extension bits */ +#define D64_CTRL2_AE_SHIFT 16 +#define D64_CTRL2_PARITY 0x00040000 /* parity bit */ + +/** control flags in the range [27:20] are core-specific and not defined here */ +#define D64_CTRL_CORE_MASK 0x0ff00000 + +#define D64_RX_FRM_STS_LEN 0x0000ffff /**< frame length mask */ +#define D64_RX_FRM_STS_OVFL 0x00800000 /**< RxOverFlow */ +#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /**< no. of descriptors used - 1, d11corerev >= 22 */ +#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /**< core-dependent data type */ + +/** receive frame status */ +typedef volatile struct { + uint16 len; + uint16 flags; +} dma_rxh_t; + +#endif /* _sbhnddma_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/sbpcmcia.h b/drivers/amlogic/wifi/bcmdhd/include/sbpcmcia.h new file mode 100644 index 0000000000000..d2e42ffffdbe3 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/sbpcmcia.h @@ -0,0 +1,116 @@ +/* + * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sbpcmcia.h 521344 2014-12-17 10:03:55Z $ + */ + +#ifndef _SBPCMCIA_H +#define _SBPCMCIA_H + +/* All the addresses that are offsets in attribute space are divided + * by two to account for the fact that odd bytes are invalid in + * attribute space and our read/write routines make the space appear + * as if they didn't exist. Still we want to show the original numbers + * as documented in the hnd_pcmcia core manual. + */ + +/* PCMCIA Function Configuration Registers */ +#define PCMCIA_FCR (0x700 / 2) + +#define FCR0_OFF 0 +#define FCR1_OFF (0x40 / 2) +#define FCR2_OFF (0x80 / 2) +#define FCR3_OFF (0xc0 / 2) + +#define PCMCIA_FCR0 (0x700 / 2) +#define PCMCIA_FCR1 (0x740 / 2) +#define PCMCIA_FCR2 (0x780 / 2) +#define PCMCIA_FCR3 (0x7c0 / 2) + +/* Standard PCMCIA FCR registers */ + +#define PCMCIA_COR 0 + +#define COR_RST 0x80 +#define COR_LEV 0x40 +#define COR_IRQEN 0x04 +#define COR_BLREN 0x01 +#define COR_FUNEN 0x01 + + +#define PCICIA_FCSR (2 / 2) +#define PCICIA_PRR (4 / 2) +#define PCICIA_SCR (6 / 2) +#define PCICIA_ESR (8 / 2) + + +#define PCM_MEMOFF 0x0000 +#define F0_MEMOFF 0x1000 +#define F1_MEMOFF 0x2000 +#define F2_MEMOFF 0x3000 +#define F3_MEMOFF 0x4000 + +/* Memory base in the function fcr's */ +#define MEM_ADDR0 (0x728 / 2) +#define MEM_ADDR1 (0x72a / 2) +#define MEM_ADDR2 (0x72c / 2) + +/* PCMCIA base plus Srom access in fcr0: */ +#define PCMCIA_ADDR0 (0x072e / 2) +#define PCMCIA_ADDR1 (0x0730 / 2) +#define PCMCIA_ADDR2 (0x0732 / 2) + +#define MEM_SEG (0x0734 / 2) +#define SROM_CS (0x0736 / 2) +#define SROM_DATAL (0x0738 / 2) +#define SROM_DATAH (0x073a / 2) +#define SROM_ADDRL (0x073c / 2) +#define SROM_ADDRH (0x073e 
/ 2) +#define SROM_INFO2 (0x0772 / 2) /* Corerev >= 2 && <= 5 */ +#define SROM_INFO (0x07be / 2) /* Corerev >= 6 */ + +/* Values for srom_cs: */ +#define SROM_IDLE 0 +#define SROM_WRITE 1 +#define SROM_READ 2 +#define SROM_WEN 4 +#define SROM_WDS 7 +#define SROM_DONE 8 + +/* Fields in srom_info: */ +#define SRI_SZ_MASK 0x03 +#define SRI_BLANK 0x04 +#define SRI_OTP 0x80 + + +/* sbtmstatelow */ +#define SBTML_INT_ACK 0x40000 /* ack the sb interrupt */ +#define SBTML_INT_EN 0x20000 /* enable sb interrupt */ + +/* sbtmstatehigh */ +#define SBTMH_INT_STATUS 0x40000 /* sb interrupt status */ + +#endif /* _SBPCMCIA_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sbsdio.h b/drivers/amlogic/wifi/bcmdhd/include/sbsdio.h similarity index 85% rename from drivers/net/wireless/bcmdhd/include/sbsdio.h rename to drivers/amlogic/wifi/bcmdhd/include/sbsdio.h index 1395c32ae28e0..f4760a22c0770 100644 --- a/drivers/net/wireless/bcmdhd/include/sbsdio.h +++ b/drivers/amlogic/wifi/bcmdhd/include/sbsdio.h @@ -4,9 +4,30 @@ * * SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode. * - * $Copyright Open 2003 Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: sbsdio.h 383835 2013-02-07 23:32:39Z $ + * + * <> + * + * $Id: sbsdio.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _SBSDIO_H diff --git a/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h b/drivers/amlogic/wifi/bcmdhd/include/sbsdpcmdev.h similarity index 89% rename from drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h rename to drivers/amlogic/wifi/bcmdhd/include/sbsdpcmdev.h index dbbd2f6b784cc..c0c889e5316f8 100644 --- a/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h +++ b/drivers/amlogic/wifi/bcmdhd/include/sbsdpcmdev.h @@ -2,9 +2,30 @@ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific * device core support * - * $Copyright Open 2005 Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: sbsdpcmdev.h 416730 2013-08-06 09:33:19Z $ + * + * <> + * + * $Id: sbsdpcmdev.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _sbsdpcmdev_h_ @@ -272,10 +293,6 @@ typedef volatile struct { /* HW frame tag */ #define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */ -#if !defined(NDISVER) || (NDISVER < 0x0630) #define SDPCM_HWEXT_LEN 8 -#else -#define SDPCM_HWEXT_LEN 0 -#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */ #endif /* _sbsdpcmdev_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/sbsocram.h b/drivers/amlogic/wifi/bcmdhd/include/sbsocram.h similarity index 79% rename from drivers/net/wireless/bcmdhd/include/sbsocram.h rename to drivers/amlogic/wifi/bcmdhd/include/sbsocram.h index 97e1a32ed76dd..ad7b1fcf2fe8d 100644 --- a/drivers/net/wireless/bcmdhd/include/sbsocram.h +++ b/drivers/amlogic/wifi/bcmdhd/include/sbsocram.h @@ -1,9 +1,30 @@ /* * BCM47XX Sonics SiliconBackplane embedded ram core * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: sbsocram.h 481602 2014-05-29 22:43:34Z $ + * + * <> + * + * $Id: sbsocram.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _SBSOCRAM_H @@ -102,6 +123,8 @@ typedef volatile struct sbsocramregs { #define SRCI_SRBSZ_MASK 0xf #define SRCI_SRBSZ_SHIFT 0 +#define SRCI_SRNB_MASK_EXT 0x100 + #define SR_BSZ_BASE 14 /* Standby control register */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/sbsysmem.h b/drivers/amlogic/wifi/bcmdhd/include/sbsysmem.h new file mode 100644 index 0000000000000..99a810c434e8a --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/sbsysmem.h @@ -0,0 +1,200 @@ +/* + * SiliconBackplane System Memory core + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sbsysmem.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _SBSYSMEM_H +#define _SBSYSMEM_H + +#ifndef _LANGUAGE_ASSEMBLY + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +/* sysmem core registers */ +typedef volatile struct sysmemregs { + uint32 coreinfo; + uint32 bwalloc; + uint32 extracoreinfo; + uint32 biststat; + uint32 bankidx; + uint32 standbyctrl; + + uint32 errlogstatus; + uint32 errlogaddr; + + uint32 cambankidx; + uint32 cambankstandbyctrl; + uint32 cambankpatchctrl; + uint32 cambankpatchtblbaseaddr; + uint32 cambankcmdreg; + uint32 cambankdatareg; + uint32 cambankmaskreg; + uint32 PAD[1]; + uint32 bankinfo; + uint32 PAD[15]; + uint32 extmemconfig; + uint32 extmemparitycsr; + uint32 extmemparityerrdata; + uint32 extmemparityerrcnt; + uint32 extmemwrctrlandsize; + uint32 PAD[84]; + uint32 workaround; + uint32 pwrctl; + uint32 PAD[133]; + uint32 sr_control; + uint32 sr_status; + uint32 sr_address; + uint32 sr_data; +} sysmemregs_t; + +#endif /* _LANGUAGE_ASSEMBLY */ + +/* Register offsets */ +#define SR_COREINFO 0x00 +#define SR_BWALLOC 0x04 +#define SR_BISTSTAT 0x0c +#define SR_BANKINDEX 0x10 +#define SR_BANKSTBYCTL 0x14 +#define SR_PWRCTL 0x1e8 + +/* Coreinfo register */ +#define SRCI_PT_MASK 0x00070000 /* port type[18:16] */ +#define SRCI_PT_SHIFT 16 +/* port types : SRCI_PT__ */ +#define SRCI_PT_OCP_OCP 0 +#define SRCI_PT_AXI_OCP 1 +#define SRCI_PT_ARM7AHB_OCP 2 +#define SRCI_PT_CM3AHB_OCP 3 +#define SRCI_PT_AXI_AXI 4 +#define SRCI_PT_AHB_AXI 5 + +#define SRCI_LSS_MASK 0x00f00000 +#define SRCI_LSS_SHIFT 20 +#define SRCI_LRS_MASK 0x0f000000 +#define 
SRCI_LRS_SHIFT 24 + +/* In corerev 0, the memory size is 2 to the power of the + * base plus 16 plus to the contents of the memsize field plus 1. + */ +#define SRCI_MS0_MASK 0xf +#define SR_MS0_BASE 16 + +/* + * In corerev 1 the bank size is 2 ^ the bank size field plus 14, + * the memory size is number of banks times bank size. + * The same applies to rom size. + */ +#define SRCI_ROMNB_MASK 0xf000 +#define SRCI_ROMNB_SHIFT 12 +#define SRCI_ROMBSZ_MASK 0xf00 +#define SRCI_ROMBSZ_SHIFT 8 +#define SRCI_SRNB_MASK 0xf0 +#define SRCI_SRNB_SHIFT 4 +#define SRCI_SRBSZ_MASK 0xf +#define SRCI_SRBSZ_SHIFT 0 + +#define SR_BSZ_BASE 14 + +/* Standby control register */ +#define SRSC_SBYOVR_MASK 0x80000000 +#define SRSC_SBYOVR_SHIFT 31 +#define SRSC_SBYOVRVAL_MASK 0x60000000 +#define SRSC_SBYOVRVAL_SHIFT 29 +#define SRSC_SBYEN_MASK 0x01000000 +#define SRSC_SBYEN_SHIFT 24 + +/* Power control register */ +#define SRPC_PMU_STBYDIS_MASK 0x00000010 +#define SRPC_PMU_STBYDIS_SHIFT 4 +#define SRPC_STBYOVRVAL_MASK 0x00000008 +#define SRPC_STBYOVRVAL_SHIFT 3 +#define SRPC_STBYOVR_MASK 0x00000007 +#define SRPC_STBYOVR_SHIFT 0 + +/* Extra core capability register */ +#define SRECC_NUM_BANKS_MASK 0x000000F0 +#define SRECC_NUM_BANKS_SHIFT 4 +#define SRECC_BANKSIZE_MASK 0x0000000F +#define SRECC_BANKSIZE_SHIFT 0 + +#define SRECC_BANKSIZE(value) (1 << (value)) + +/* CAM bank patch control */ +#define SRCBPC_PATCHENABLE 0x80000000 + +#define SRP_ADDRESS 0x0001FFFC +#define SRP_VALID 0x8000 + +/* CAM bank command reg */ +#define SRCMD_WRITE 0x00020000 +#define SRCMD_READ 0x00010000 +#define SRCMD_DONE 0x80000000 + +#define SRCMD_DONE_DLY 1000 + +/* bankidx and bankinfo reg defines */ +#define SYSMEM_BANKINFO_SZMASK 0x7f +#define SYSMEM_BANKIDX_ROM_MASK 0x100 + +#define SYSMEM_BANKIDX_MEMTYPE_SHIFT 8 +/* sysmem bankinfo memtype */ +#define SYSMEM_MEMTYPE_RAM 0 +#define SYSMEM_MEMTYPE_R0M 1 +#define SYSMEM_MEMTYPE_DEVRAM 2 + +#define SYSMEM_BANKINFO_REG 0x40 +#define SYSMEM_BANKIDX_REG 0x10 
+#define SYSMEM_BANKINFO_STDBY_MASK 0x400 +#define SYSMEM_BANKINFO_STDBY_TIMER 0x800 + +#define SYSMEM_BANKINFO_DEVRAMSEL_SHIFT 13 +#define SYSMEM_BANKINFO_DEVRAMSEL_MASK 0x2000 +#define SYSMEM_BANKINFO_DEVRAMPRO_SHIFT 14 +#define SYSMEM_BANKINFO_DEVRAMPRO_MASK 0x4000 +#define SYSMEM_BANKINFO_SLPSUPP_SHIFT 15 +#define SYSMEM_BANKINFO_SLPSUPP_MASK 0x8000 +#define SYSMEM_BANKINFO_RETNTRAM_SHIFT 16 +#define SYSMEM_BANKINFO_RETNTRAM_MASK 0x00010000 +#define SYSMEM_BANKINFO_PDASZ_SHIFT 17 +#define SYSMEM_BANKINFO_PDASZ_MASK 0x003E0000 +#define SYSMEM_BANKINFO_DEVRAMREMAP_SHIFT 24 +#define SYSMEM_BANKINFO_DEVRAMREMAP_MASK 0x01000000 + +/* extracoreinfo register */ +#define SYSMEM_DEVRAMBANK_MASK 0xF000 +#define SYSMEM_DEVRAMBANK_SHIFT 12 + +/* bank info to calculate bank size */ +#define SYSMEM_BANKINFO_SZBASE 8192 +#define SYSMEM_BANKSIZE_SHIFT 13 /* SYSMEM_BANKINFO_SZBASE */ + +#endif /* _SBSYSMEM_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sdio.h b/drivers/amlogic/wifi/bcmdhd/include/sdio.h similarity index 94% rename from drivers/net/wireless/bcmdhd/include/sdio.h rename to drivers/amlogic/wifi/bcmdhd/include/sdio.h index b8586b99cd88b..ca53afbcf3e93 100644 --- a/drivers/net/wireless/bcmdhd/include/sdio.h +++ b/drivers/amlogic/wifi/bcmdhd/include/sdio.h @@ -2,9 +2,30 @@ * SDIO spec header file * Protocol and standard (common) device definitions * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting 
executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: sdio.h 416730 2013-08-06 09:33:19Z $ + * + * <> + * + * $Id: sdio.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _SDIO_H diff --git a/drivers/net/wireless/bcmdhd/include/sdioh.h b/drivers/amlogic/wifi/bcmdhd/include/sdioh.h similarity index 92% rename from drivers/net/wireless/bcmdhd/include/sdioh.h rename to drivers/amlogic/wifi/bcmdhd/include/sdioh.h index fdb43f24d7ca6..bc1fcbc0a04b3 100644 --- a/drivers/net/wireless/bcmdhd/include/sdioh.h +++ b/drivers/amlogic/wifi/bcmdhd/include/sdioh.h @@ -2,9 +2,30 @@ * SDIO Host Controller Spec header file * Register map and definitions for the Standard Host Controller * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: sdioh.h 345499 2012-07-18 06:59:05Z $ + * + * <> + * + * $Id: sdioh.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _SDIOH_H diff --git a/drivers/amlogic/wifi/bcmdhd/include/sdiovar.h b/drivers/amlogic/wifi/bcmdhd/include/sdiovar.h new file mode 100644 index 0000000000000..335e53a2f65bf --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/sdiovar.h @@ -0,0 +1,62 @@ +/* + * Structure used by apps whose drivers access SDIO drivers. + * Pulled out separately so dhdu and wlu can both use it. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sdiovar.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _sdiovar_h_ +#define _sdiovar_h_ + +#include + +/* require default structure packing */ +#define BWL_DEFAULT_PACKING +#include + +typedef struct sdreg { + int func; + int offset; + int value; +} sdreg_t; + +/* Common msglevel constants */ +#define SDH_ERROR_VAL 0x0001 /* Error */ +#define SDH_TRACE_VAL 0x0002 /* Trace */ +#define SDH_INFO_VAL 0x0004 /* Info */ +#define SDH_DEBUG_VAL 0x0008 /* Debug */ +#define SDH_DATA_VAL 0x0010 /* Data */ +#define SDH_CTRL_VAL 0x0020 /* Control Regs */ +#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */ +#define SDH_DMA_VAL 0x0080 /* DMA */ +#define SDH_COST_VAL 0x8000 /* Control Regs */ + +#define NUM_PREV_TRANSACTIONS 16 + + +#include + +#endif /* _sdiovar_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/siutils.h b/drivers/amlogic/wifi/bcmdhd/include/siutils.h similarity index 77% rename from drivers/net/wireless/bcmdhd/include/siutils.h rename to drivers/amlogic/wifi/bcmdhd/include/siutils.h index 5a1ee51ffc7b4..4393a7426d349 100644 --- a/drivers/net/wireless/bcmdhd/include/siutils.h +++ b/drivers/amlogic/wifi/bcmdhd/include/siutils.h @@ -2,9 +2,30 @@ * Misc utility routines for accessing the SOC Interconnects * of Broadcom HNBU chips. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: siutils.h 481602 2014-05-29 22:43:34Z $ + * + * <> + * + * $Id: siutils.h 530150 2015-01-29 08:43:40Z $ */ #ifndef _siutils_h_ @@ -15,33 +36,33 @@ #endif /* SR_DEBUG */ -/* +/** * Data structure to export all chip specific common variables * public (read-only) portion of siutils handle returned by si_attach()/si_kattach() */ struct si_pub { - uint socitype; /* SOCI_SB, SOCI_AI */ - - uint bustype; /* SI_BUS, PCI_BUS */ - uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */ - uint buscorerev; /* buscore rev */ - uint buscoreidx; /* buscore index */ - int ccrev; /* chip common core rev */ - uint32 cccaps; /* chip common capabilities */ - uint32 cccaps_ext; /* chip common capabilities extension */ - int pmurev; /* pmu core rev */ - uint32 pmucaps; /* pmu capabilities */ - uint boardtype; /* board type */ + uint socitype; /**< SOCI_SB, SOCI_AI */ + + uint bustype; /**< SI_BUS, PCI_BUS */ + uint buscoretype; /**< PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */ + uint buscorerev; /**< buscore rev */ + uint buscoreidx; /**< buscore index */ + int ccrev; /**< chip common core rev */ + uint32 cccaps; /**< chip common capabilities */ + uint32 cccaps_ext; /**< chip common capabilities extension */ + int pmurev; /**< pmu core rev */ + uint32 pmucaps; /**< pmu capabilities */ + uint boardtype; /**< board type */ uint boardrev; /* board rev */ - uint boardvendor; /* board vendor */ - uint boardflags; /* board flags */ - uint boardflags2; /* board flags2 */ - uint chip; /* chip number */ - uint chiprev; /* chip revision */ - uint chippkg; /* chip package option */ - uint32 chipst; /* chip status */ - bool issim; /* chip is in simulation or emulation */ - uint socirev; /* SOC interconnect rev */ + uint boardvendor; /**< board vendor */ + uint boardflags; /**< board flags */ + uint boardflags2; /**< board flags2 */ + uint chip; /**< chip number */ + uint chiprev; /**< chip revision */ + uint chippkg; /**< chip package option */ + uint32 chipst; /**< chip status 
*/ + bool issim; /**< chip is in simulation or emulation */ + uint socirev; /**< SOC interconnect rev */ bool pci_pr32414; }; @@ -58,39 +79,39 @@ typedef const struct si_pub si_t; * (the "current core"). * Use si_setcore() or si_setcoreidx() to change the association to another core. */ -#define SI_OSH NULL /* Use for si_kattach when no osh is available */ +#define SI_OSH NULL /**< Use for si_kattach when no osh is available */ #define BADIDX (SI_MAXCORES + 1) /* clkctl xtal what flags */ -#define XTAL 0x1 /* primary crystal oscillator (2050) */ -#define PLL 0x2 /* main chip pll */ +#define XTAL 0x1 /**< primary crystal oscillator (2050) */ +#define PLL 0x2 /**< main chip pll */ /* clkctl clk mode */ -#define CLK_FAST 0 /* force fast (pll) clock */ -#define CLK_DYNAMIC 2 /* enable dynamic clock control */ +#define CLK_FAST 0 /**< force fast (pll) clock */ +#define CLK_DYNAMIC 2 /**< enable dynamic clock control */ /* GPIO usage priorities */ -#define GPIO_DRV_PRIORITY 0 /* Driver */ -#define GPIO_APP_PRIORITY 1 /* Application */ -#define GPIO_HI_PRIORITY 2 /* Highest priority. Ignore GPIO reservation */ +#define GPIO_DRV_PRIORITY 0 /**< Driver */ +#define GPIO_APP_PRIORITY 1 /**< Application */ +#define GPIO_HI_PRIORITY 2 /**< Highest priority. 
Ignore GPIO reservation */ /* GPIO pull up/down */ #define GPIO_PULLUP 0 #define GPIO_PULLDN 1 /* GPIO event regtype */ -#define GPIO_REGEVT 0 /* GPIO register event */ -#define GPIO_REGEVT_INTMSK 1 /* GPIO register event int mask */ -#define GPIO_REGEVT_INTPOL 2 /* GPIO register event int polarity */ +#define GPIO_REGEVT 0 /**< GPIO register event */ +#define GPIO_REGEVT_INTMSK 1 /**< GPIO register event int mask */ +#define GPIO_REGEVT_INTPOL 2 /**< GPIO register event int polarity */ /* device path */ -#define SI_DEVPATH_BUFSZ 16 /* min buffer size in bytes */ +#define SI_DEVPATH_BUFSZ 16 /**< min buffer size in bytes */ /* SI routine enumeration: to be used by update function with multiple hooks */ #define SI_DOATTACH 1 -#define SI_PCIDOWN 2 /* wireless interface is down */ -#define SI_PCIUP 3 /* wireless interface is up */ +#define SI_PCIDOWN 2 /**< wireless interface is down */ +#define SI_PCIUP 3 /**< wireless interface is up */ #ifdef SR_DEBUG #define PMU_RES 31 @@ -98,6 +119,8 @@ typedef const struct si_pub si_t; #define ISSIM_ENAB(sih) FALSE +#define INVALID_ADDR (~0) + /* PMU clock/power control */ #if defined(BCMPMUCTL) #define PMUCTL_ENAB(sih) (BCMPMUCTL) @@ -117,8 +140,8 @@ typedef const struct si_pub si_t; #define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK) #endif -typedef void (*gpio_handler_t)(uint32 stat, void *arg); typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg); + /* External BT Coex enable mask */ #define CC_BTCOEX_EN_MASK 0x01 /* External PA enable mask */ @@ -129,6 +152,7 @@ typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg); #define GPIO_OUT_7_EN_MASK 0x80 + /* CR4 specific defines used by the host driver */ #define SI_CR4_CAP (0x04) #define SI_CR4_BANKIDX (0x40) @@ -143,7 +167,9 @@ typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg); #define SICF_CPUHALT (0x0020) #define ARMCR4_BSZ_MASK 0x3f #define ARMCR4_BSZ_MULT 8192 - +#define SI_BPIND_1BYTE 0x1 +#define SI_BPIND_2BYTE 0x3 +#define SI_BPIND_4BYTE 0xF 
#include /* === exported functions === */ extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype, @@ -164,6 +190,8 @@ extern uint si_corevendor(si_t *sih); extern uint si_corerev(si_t *sih); extern void *si_osh(si_t *sih); extern void si_setosh(si_t *sih, osl_t *osh); +extern uint si_backplane_access(si_t *sih, uint addr, uint size, + uint *val, bool read); extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val); extern uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff); @@ -174,6 +202,8 @@ extern void *si_wrapperregs(si_t *sih); extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val); extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern void si_d11rsdb_core1_alt_reg_clk_dis(si_t *sih); +extern void si_d11rsdb_core1_alt_reg_clk_en(si_t *sih); extern bool si_iscoreup(si_t *sih); extern uint si_numcoreunits(si_t *sih, uint coreid); extern uint si_numd11coreunits(si_t *sih); @@ -209,6 +239,7 @@ extern int si_clkctl_xtal(si_t *sih, uint what, bool on); extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val); extern void si_btcgpiowar(si_t *sih); extern bool si_deviceremoved(si_t *sih); +extern uint32 si_sysmem_size(si_t *sih); extern uint32 si_socram_size(si_t *sih); extern uint32 si_socdevram_size(si_t *sih); extern uint32 si_socram_srmem_size(si_t *sih); @@ -239,11 +270,6 @@ extern void si_gci_enable_gpio(si_t *sih, uint8 gpio, uint32 mask, uint32 value) extern uint8 si_gci_host_wake_gpio_init(si_t *sih); extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state); -/* GPIO event handlers */ -extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg); -extern void si_gpio_handler_unregister(si_t *sih, void* gpioh); -extern void si_gpio_handler_process(si_t *sih); - /* 
GCI interrupt handlers */ extern void si_gci_handler_process(si_t *sih); @@ -262,6 +288,7 @@ extern void si_pci_pmeen(si_t *sih); extern void si_pci_pmestatclr(si_t *sih); extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset); extern uint si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val); +extern void si_deepsleep_count(si_t *sih, bool arm_wakeup); #ifdef BCMSDIO @@ -299,10 +326,10 @@ extern int si_cis_source(si_t *sih); #define CIS_OTP 2 /* Fab-id information */ -#define DEFAULT_FAB 0x0 /* Original/first fab used for this chip */ -#define CSM_FAB7 0x1 /* CSM Fab7 chip */ -#define TSMC_FAB12 0x2 /* TSMC Fab12/Fab14 chip */ -#define SMIC_FAB4 0x3 /* SMIC Fab4 chip */ +#define DEFAULT_FAB 0x0 /**< Original/first fab used for this chip */ +#define CSM_FAB7 0x1 /**< CSM Fab7 chip */ +#define TSMC_FAB12 0x2 /**< TSMC Fab12/Fab14 chip */ +#define SMIC_FAB4 0x3 /**< SMIC Fab4 chip */ extern int si_otp_fabid(si_t *sih, uint16 *fabid, bool rw); extern uint16 si_fabid(si_t *sih); @@ -347,6 +374,7 @@ extern uint32 si_chipcontrl_read(si_t *sih); extern void si_chipcontrl_epa4331(si_t *sih, bool on); extern void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl); extern void si_chipcontrl_srom4360(si_t *sih, bool on); +extern void si_clk_srom4365(si_t *sih); /* Enable BT-COEX & Ex-PA for 4313 */ extern void si_epa_4313war(si_t *sih); extern void si_btc_enable_chipcontrol(si_t *sih); @@ -354,19 +382,26 @@ extern void si_btc_enable_chipcontrol(si_t *sih); extern void si_btcombo_p250_4313_war(si_t *sih); extern void si_btcombo_43228_war(si_t *sih); extern void si_clk_pmu_htavail_set(si_t *sih, bool set_clear); +extern void si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag); extern void si_pmu_synth_pwrsw_4313_war(si_t *sih); extern uint si_pll_reset(si_t *sih); /* === debug routines === */ extern bool si_taclear(si_t *sih, bool details); +#if defined(BCMDBG_PHYDUMP) +struct bcmstrbuf; +extern int si_dump_pcieinfo(si_t *sih, struct bcmstrbuf 
*b); +#endif #if defined(BCMDBG_PHYDUMP) extern void si_dumpregs(si_t *sih, struct bcmstrbuf *b); -#endif +#endif extern uint32 si_ccreg(si_t *sih, uint32 offset, uint32 mask, uint32 val); extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type); +extern int si_bpind_access(si_t *sih, uint32 addr_high, uint32 addr_low, + int32* data, bool read); #ifdef SR_DEBUG extern void si_dump_pmu(si_t *sih, void *pmu_var); extern void si_pmu_keep_on(si_t *sih, int32 int_val); @@ -420,6 +455,7 @@ extern uint32 si_gci_preinit_upd_indirect(uint32 regidx, uint32 setval, uint32 m extern uint8 si_enable_device_wake(si_t *sih, uint8 *wake_status, uint8 *cur_status); extern void si_swdenable(si_t *sih, uint32 swdflag); +extern uint32 si_get_pmu_reg_addr(si_t *sih, uint32 offset); #define CHIPCTRLREG1 0x1 #define CHIPCTRLREG2 0x2 #define CHIPCTRLREG3 0x3 @@ -466,37 +502,42 @@ extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); #define PLL_DIV2_MASK (0x37 << PLL_DIV2_BIT_START) #define PLL_DIV2_DIS_OP (0x37 << PLL_DIV2_BIT_START) -#define PMUREG(si, member) \ - (AOB_ENAB(si) ? \ - si_corereg_addr(si, si_findcoreidx(si, PMU_CORE_ID, 0), \ - OFFSETOF(pmuregs_t, member)): \ - si_corereg_addr(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member))) - #define pmu_corereg(si, cc_idx, member, mask, val) \ (AOB_ENAB(si) ? \ - si_pmu_corereg(si, si_findcoreidx(sih, PMU_CORE_ID, 0), \ + si_pmu_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \ OFFSETOF(pmuregs_t, member), mask, val): \ si_pmu_corereg(si, cc_idx, OFFSETOF(chipcregs_t, member), mask, val)) +/* Used only for the regs present in the pmu core and not present in the old cc core */ +#define PMU_REG_NEW(si, member, mask, val) \ + si_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \ + OFFSETOF(pmuregs_t, member), mask, val) + +#define PMU_REG(si, member, mask, val) \ + (AOB_ENAB(si) ? 
\ + si_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \ + OFFSETOF(pmuregs_t, member), mask, val): \ + si_corereg(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member), mask, val)) + /* GCI Macros */ #define ALLONES_32 0xFFFFFFFF -#define GCI_CCTL_SECIRST_OFFSET 0 /* SeciReset */ -#define GCI_CCTL_RSTSL_OFFSET 1 /* ResetSeciLogic */ -#define GCI_CCTL_SECIEN_OFFSET 2 /* EnableSeci */ -#define GCI_CCTL_FSL_OFFSET 3 /* ForceSeciOutLow */ -#define GCI_CCTL_SMODE_OFFSET 4 /* SeciOpMode, 6:4 */ -#define GCI_CCTL_US_OFFSET 7 /* UpdateSeci */ -#define GCI_CCTL_BRKONSLP_OFFSET 8 /* BreakOnSleep */ -#define GCI_CCTL_SILOWTOUT_OFFSET 9 /* SeciInLowTimeout, 10:9 */ -#define GCI_CCTL_RSTOCC_OFFSET 11 /* ResetOffChipCoex */ -#define GCI_CCTL_ARESEND_OFFSET 12 /* AutoBTSigResend */ -#define GCI_CCTL_FGCR_OFFSET 16 /* ForceGciClkReq */ -#define GCI_CCTL_FHCRO_OFFSET 17 /* ForceHWClockReqOff */ -#define GCI_CCTL_FREGCLK_OFFSET 18 /* ForceRegClk */ -#define GCI_CCTL_FSECICLK_OFFSET 19 /* ForceSeciClk */ -#define GCI_CCTL_FGCA_OFFSET 20 /* ForceGciClkAvail */ -#define GCI_CCTL_FGCAV_OFFSET 21 /* ForceGciClkAvailValue */ -#define GCI_CCTL_SCS_OFFSET 24 /* SeciClkStretch, 31:24 */ +#define GCI_CCTL_SECIRST_OFFSET 0 /**< SeciReset */ +#define GCI_CCTL_RSTSL_OFFSET 1 /**< ResetSeciLogic */ +#define GCI_CCTL_SECIEN_OFFSET 2 /**< EnableSeci */ +#define GCI_CCTL_FSL_OFFSET 3 /**< ForceSeciOutLow */ +#define GCI_CCTL_SMODE_OFFSET 4 /**< SeciOpMode, 6:4 */ +#define GCI_CCTL_US_OFFSET 7 /**< UpdateSeci */ +#define GCI_CCTL_BRKONSLP_OFFSET 8 /**< BreakOnSleep */ +#define GCI_CCTL_SILOWTOUT_OFFSET 9 /**< SeciInLowTimeout, 10:9 */ +#define GCI_CCTL_RSTOCC_OFFSET 11 /**< ResetOffChipCoex */ +#define GCI_CCTL_ARESEND_OFFSET 12 /**< AutoBTSigResend */ +#define GCI_CCTL_FGCR_OFFSET 16 /**< ForceGciClkReq */ +#define GCI_CCTL_FHCRO_OFFSET 17 /**< ForceHWClockReqOff */ +#define GCI_CCTL_FREGCLK_OFFSET 18 /**< ForceRegClk */ +#define GCI_CCTL_FSECICLK_OFFSET 19 /**< ForceSeciClk */ +#define GCI_CCTL_FGCA_OFFSET 
20 /**< ForceGciClkAvail */ +#define GCI_CCTL_FGCAV_OFFSET 21 /**< ForceGciClkAvailValue */ +#define GCI_CCTL_SCS_OFFSET 24 /**< SeciClkStretch, 31:24 */ #define GCI_MODE_UART 0x0 #define GCI_MODE_SECI 0x1 @@ -532,7 +573,7 @@ extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); #define GCI_GPIOIDX_OFFSET 16 -#define GCI_LTECX_SECI_ID 0 /* SECI port for LTECX */ +#define GCI_LTECX_SECI_ID 0 /**< SECI port for LTECX */ /* To access per GCI bit registers */ #define GCI_REG_WIDTH 32 @@ -564,8 +605,11 @@ extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); #define PMU_OOB 0x2 #define D11_OOB 0x3 #define SDIOD_OOB 0x4 -#define PMU_OOB_BIT (0x10 | PMU_OOB) +#define WLAN_OOB 0x5 +#define PMU_OOB_BIT 0x12 #endif /* REROUTE_OOBINT */ +extern void si_pll_sr_reinit(si_t *sih); +extern void si_pll_closeloop(si_t *sih); #endif /* _siutils_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/spid.h b/drivers/amlogic/wifi/bcmdhd/include/spid.h new file mode 100644 index 0000000000000..9a39aaf0dd3f5 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/spid.h @@ -0,0 +1,168 @@ +/* + * SPI device spec header file + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: spid.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _SPI_H +#define _SPI_H + +/* + * Brcm SPI Device Register Map. + * + */ + +typedef volatile struct { + uint8 config; /* 0x00, len, endian, clock, speed, polarity, wakeup */ + uint8 response_delay; /* 0x01, read response delay in bytes (corerev < 3) */ + uint8 status_enable; /* 0x02, status-enable, intr with status, response_delay + * function selection, command/data error check + */ + uint8 reset_bp; /* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */ + uint16 intr_reg; /* 0x04, Intr status register */ + uint16 intr_en_reg; /* 0x06, Intr mask register */ + uint32 status_reg; /* 0x08, RO, Status bits of last spi transfer */ + uint16 f1_info_reg; /* 0x0c, RO, enabled, ready for data transfer, blocksize */ + uint16 f2_info_reg; /* 0x0e, RO, enabled, ready for data transfer, blocksize */ + uint16 f3_info_reg; /* 0x10, RO, enabled, ready for data transfer, blocksize */ + uint32 test_read; /* 0x14, RO 0xfeedbead signature */ + uint32 test_rw; /* 0x18, RW */ + uint8 resp_delay_f0; /* 0x1c, read resp delay bytes for F0 (corerev >= 3) */ + uint8 resp_delay_f1; /* 0x1d, read resp delay bytes for F1 (corerev >= 3) */ + uint8 resp_delay_f2; /* 0x1e, read resp delay bytes for F2 (corerev >= 3) */ + uint8 resp_delay_f3; /* 0x1f, read resp delay bytes for F3 (corerev >= 3) */ +} spi_regs_t; + +/* SPI device register offsets */ +#define SPID_CONFIG 0x00 +#define SPID_RESPONSE_DELAY 0x01 +#define SPID_STATUS_ENABLE 0x02 +#define SPID_RESET_BP 0x03 /* (corerev >= 1) */ +#define SPID_INTR_REG 0x04 /* 16 bits - Interrupt status */ +#define SPID_INTR_EN_REG 0x06 /* 16 bits - Interrupt mask 
*/ +#define SPID_STATUS_REG 0x08 /* 32 bits */ +#define SPID_F1_INFO_REG 0x0C /* 16 bits */ +#define SPID_F2_INFO_REG 0x0E /* 16 bits */ +#define SPID_F3_INFO_REG 0x10 /* 16 bits */ +#define SPID_TEST_READ 0x14 /* 32 bits */ +#define SPID_TEST_RW 0x18 /* 32 bits */ +#define SPID_RESP_DELAY_F0 0x1c /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F1 0x1d /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F2 0x1e /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F3 0x1f /* 8 bits (corerev >= 3) */ + +/* Bit masks for SPID_CONFIG device register */ +#define WORD_LENGTH_32 0x1 /* 0/1 16/32 bit word length */ +#define ENDIAN_BIG 0x2 /* 0/1 Little/Big Endian */ +#define CLOCK_PHASE 0x4 /* 0/1 clock phase delay */ +#define CLOCK_POLARITY 0x8 /* 0/1 Idle state clock polarity is low/high */ +#define HIGH_SPEED_MODE 0x10 /* 1/0 High Speed mode / Normal mode */ +#define INTR_POLARITY 0x20 /* 1/0 Interrupt active polarity is high/low */ +#define WAKE_UP 0x80 /* 0/1 Wake-up command from Host to WLAN */ + +/* Bit mask for SPID_RESPONSE_DELAY device register */ +#define RESPONSE_DELAY_MASK 0xFF /* Configurable rd response delay in multiples of 8 bits */ + +/* Bit mask for SPID_STATUS_ENABLE device register */ +#define STATUS_ENABLE 0x1 /* 1/0 Status sent/not sent to host after read/write */ +#define INTR_WITH_STATUS 0x2 /* 0/1 Do-not / do-interrupt if status is sent */ +#define RESP_DELAY_ALL 0x4 /* Applicability of resp delay to F1 or all func's read */ +#define DWORD_PKT_LEN_EN 0x8 /* Packet len denoted in dwords instead of bytes */ +#define CMD_ERR_CHK_EN 0x20 /* Command error check enable */ +#define DATA_ERR_CHK_EN 0x40 /* Data error check enable */ + +/* Bit mask for SPID_RESET_BP device register */ +#define RESET_ON_WLAN_BP_RESET 0x4 /* enable reset for WLAN backplane */ +#define RESET_ON_BT_BP_RESET 0x8 /* enable reset for BT backplane */ +#define RESET_SPI 0x80 /* reset the above enabled logic */ + +/* Bit mask for SPID_INTR_REG device register */ +#define 
DATA_UNAVAILABLE 0x0001 /* Requested data not available; Clear by writing a "1" */ +#define F2_F3_FIFO_RD_UNDERFLOW 0x0002 +#define F2_F3_FIFO_WR_OVERFLOW 0x0004 +#define COMMAND_ERROR 0x0008 /* Cleared by writing 1 */ +#define DATA_ERROR 0x0010 /* Cleared by writing 1 */ +#define F2_PACKET_AVAILABLE 0x0020 +#define F3_PACKET_AVAILABLE 0x0040 +#define F1_OVERFLOW 0x0080 /* Due to last write. Bkplane has pending write requests */ +#define MISC_INTR0 0x0100 +#define MISC_INTR1 0x0200 +#define MISC_INTR2 0x0400 +#define MISC_INTR3 0x0800 +#define MISC_INTR4 0x1000 +#define F1_INTR 0x2000 +#define F2_INTR 0x4000 +#define F3_INTR 0x8000 + +/* Bit mask for 32bit SPID_STATUS_REG device register */ +#define STATUS_DATA_NOT_AVAILABLE 0x00000001 +#define STATUS_UNDERFLOW 0x00000002 +#define STATUS_OVERFLOW 0x00000004 +#define STATUS_F2_INTR 0x00000008 +#define STATUS_F3_INTR 0x00000010 +#define STATUS_F2_RX_READY 0x00000020 +#define STATUS_F3_RX_READY 0x00000040 +#define STATUS_HOST_CMD_DATA_ERR 0x00000080 +#define STATUS_F2_PKT_AVAILABLE 0x00000100 +#define STATUS_F2_PKT_LEN_MASK 0x000FFE00 +#define STATUS_F2_PKT_LEN_SHIFT 9 +#define STATUS_F3_PKT_AVAILABLE 0x00100000 +#define STATUS_F3_PKT_LEN_MASK 0xFFE00000 +#define STATUS_F3_PKT_LEN_SHIFT 21 + +/* Bit mask for 16 bits SPID_F1_INFO_REG device register */ +#define F1_ENABLED 0x0001 +#define F1_RDY_FOR_DATA_TRANSFER 0x0002 +#define F1_MAX_PKT_SIZE 0x01FC + +/* Bit mask for 16 bits SPID_F2_INFO_REG device register */ +#define F2_ENABLED 0x0001 +#define F2_RDY_FOR_DATA_TRANSFER 0x0002 +#define F2_MAX_PKT_SIZE 0x3FFC + +/* Bit mask for 16 bits SPID_F3_INFO_REG device register */ +#define F3_ENABLED 0x0001 +#define F3_RDY_FOR_DATA_TRANSFER 0x0002 +#define F3_MAX_PKT_SIZE 0x3FFC + +/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */ +#define TEST_RO_DATA_32BIT_LE 0xFEEDBEAD + +/* Maximum number of I/O funcs */ +#define SPI_MAX_IOFUNCS 4 + +#define SPI_MAX_PKT_LEN (2048*4) + +/* Misc defines */ 
+#define SPI_FUNC_0 0 +#define SPI_FUNC_1 1 +#define SPI_FUNC_2 2 +#define SPI_FUNC_3 3 + +#define WAIT_F2RXFIFORDY 100 +#define WAIT_F2RXFIFORDY_DELAY 20 + +#endif /* _SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/trxhdr.h b/drivers/amlogic/wifi/bcmdhd/include/trxhdr.h similarity index 66% rename from drivers/net/wireless/bcmdhd/include/trxhdr.h rename to drivers/amlogic/wifi/bcmdhd/include/trxhdr.h index 249527cd3420a..f7404be99b0e5 100644 --- a/drivers/net/wireless/bcmdhd/include/trxhdr.h +++ b/drivers/amlogic/wifi/bcmdhd/include/trxhdr.h @@ -1,9 +1,30 @@ /* * TRX image file header format. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: trxhdr.h 349211 2012-08-07 09:45:24Z $ + * + * <> + * + * $Id: trxhdr.h 520026 2014-12-10 01:29:40Z $ */ #ifndef _TRX_HDR_H @@ -59,7 +80,7 @@ struct trx_header { #define TRX_V2_MAX_OFFSETS 5 #define SIZEOF_TRXHDR_V1 (sizeof(struct trx_header)+(TRX_V1_MAX_OFFSETS-1)*sizeof(uint32)) #define SIZEOF_TRXHDR_V2 (sizeof(struct trx_header)+(TRX_V2_MAX_OFFSETS-1)*sizeof(uint32)) -#define TRX_VER(trx) (trx->flag_version>>16) +#define TRX_VER(trx) ((trx)->flag_version>>16) #define ISTRX_V1(trx) (TRX_VER(trx) == TRX_V1) #define ISTRX_V2(trx) (TRX_VER(trx) == TRX_V2) /* For V2, return size of V2 size: others, return V1 size */ diff --git a/drivers/net/wireless/bcmdhd/include/typedefs.h b/drivers/amlogic/wifi/bcmdhd/include/typedefs.h similarity index 83% rename from drivers/net/wireless/bcmdhd/include/typedefs.h rename to drivers/amlogic/wifi/bcmdhd/include/typedefs.h index 75fbc49a831fc..0e110a1908ed4 100644 --- a/drivers/net/wireless/bcmdhd/include/typedefs.h +++ b/drivers/amlogic/wifi/bcmdhd/include/typedefs.h @@ -1,6 +1,28 @@ /* - * $Copyright Open Broadcom Corporation$ - * $Id: typedefs.h 484281 2014-06-12 22:42:26Z $ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: typedefs.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _TYPEDEFS_H_ @@ -75,7 +97,6 @@ typedef long unsigned int size_t; * a duplicate typedef error; there is no way to "undefine" a typedef. * We know when it's per-port code because each file defines LINUX_PORT at the top. */ -#if !defined(LINUX_HYBRID) || defined(LINUX_PORT) #define TYPEDEF_UINT #ifndef TARGETENV_android #define TYPEDEF_USHORT @@ -94,7 +115,6 @@ typedef long unsigned int size_t; #endif #endif /* == 2.6.18 */ #endif /* __KERNEL__ */ -#endif /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */ /* Do not support the (u)int64 types with strict ansi for GNU C */ @@ -122,9 +142,7 @@ typedef long unsigned int size_t; #if defined(__KERNEL__) /* See note above */ -#if !defined(LINUX_HYBRID) || defined(LINUX_PORT) #include /* sys/types.h and linux/types.h are oil and water */ -#endif /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */ #else @@ -132,7 +150,7 @@ typedef long unsigned int size_t; #endif /* linux && __KERNEL__ */ -#endif +#endif /* use the default typedefs in the next section of this file */ @@ -272,7 +290,7 @@ typedef float64 float_t; #define BWL_COMPILER_ARMCC #else #error "Unknown compiler!" 
-#endif +#endif #ifndef INLINE @@ -284,7 +302,7 @@ typedef float64 float_t; #define INLINE __inline #else #define INLINE - #endif + #endif #endif /* INLINE */ #undef TYPEDEF_BOOL diff --git a/drivers/net/wireless/bcmdhd/include/wlfc_proto.h b/drivers/amlogic/wifi/bcmdhd/include/wlfc_proto.h similarity index 74% rename from drivers/net/wireless/bcmdhd/include/wlfc_proto.h rename to drivers/amlogic/wifi/bcmdhd/include/wlfc_proto.h index a7ccc5bf8f1ab..0d5b434198eeb 100644 --- a/drivers/net/wireless/bcmdhd/include/wlfc_proto.h +++ b/drivers/amlogic/wifi/bcmdhd/include/wlfc_proto.h @@ -1,8 +1,34 @@ /* -* $Copyright Open 2009 Broadcom Corporation$ -* $Id: wlfc_proto.h 499510 2014-08-28 23:40:47Z $ -* -*/ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wlfc_proto.h 542895 2015-03-22 14:13:12Z $ + * + */ + +/** WL flow control for PROP_TXSTATUS. 
Related to host AMPDU reordering. */ + + #ifndef __wlfc_proto_definitions_h__ #define __wlfc_proto_definitions_h__ @@ -17,7 +43,7 @@ | 3 | 2 | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn --------------------------------------------------------------------------- | 4 | 4+ | see pkttag comments | TXSTATUS - | | | TX status & timestamps | Present only when pkt timestamp is enabled + | | 12 | TX status & timestamps | Present only when pkt timestamp is enabled --------------------------------------------------------------------------- | 5 | 4 | see pkttag comments | PKKTTAG [host->firmware] --------------------------------------------------------------------------- @@ -59,7 +85,7 @@ #define WLFC_CTL_TYPE_MAC_CLOSE 2 #define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT 3 #define WLFC_CTL_TYPE_TXSTATUS 4 -#define WLFC_CTL_TYPE_PKTTAG 5 +#define WLFC_CTL_TYPE_PKTTAG 5 /** host<->dongle */ #define WLFC_CTL_TYPE_MACDESC_ADD 6 #define WLFC_CTL_TYPE_MACDESC_DEL 7 @@ -70,13 +96,13 @@ #define WLFC_CTL_TYPE_FIFO_CREDITBACK 11 -#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP 12 +#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP 12 /** host->dongle */ #define WLFC_CTL_TYPE_MAC_REQUEST_PACKET 13 #define WLFC_CTL_TYPE_HOST_REORDER_RXPKTS 14 - #define WLFC_CTL_TYPE_TX_ENTRY_STAMP 15 #define WLFC_CTL_TYPE_RX_STAMP 16 +#define WLFC_CTL_TYPE_TX_STATUS_STAMP 17 /** obsolete */ #define WLFC_CTL_TYPE_TRANS_ID 18 #define WLFC_CTL_TYPE_COMP_TXSTATUS 19 @@ -87,9 +113,9 @@ #define WLFC_CTL_TYPE_FILLER 255 -#define WLFC_CTL_VALUE_LEN_MACDESC 8 /* handle, interface, MAC */ +#define WLFC_CTL_VALUE_LEN_MACDESC 8 /** handle, interface, MAC */ -#define WLFC_CTL_VALUE_LEN_MAC 1 /* MAC-handle */ +#define WLFC_CTL_VALUE_LEN_MAC 1 /** MAC-handle */ #define WLFC_CTL_VALUE_LEN_RSSI 1 #define WLFC_CTL_VALUE_LEN_INTERFACE 1 @@ -97,9 +123,15 @@ #define WLFC_CTL_VALUE_LEN_TXSTATUS 4 #define WLFC_CTL_VALUE_LEN_PKTTAG 4 +#define WLFC_CTL_VALUE_LEN_TIMESTAMP 12 /** 4-byte rate info + 2 TSF */ #define 
WLFC_CTL_VALUE_LEN_SEQ 2 +/* The high bits of ratespec report in timestamp are used for various status */ +#define WLFC_TSFLAGS_RX_RETRY (1 << 31) +#define WLFC_TSFLAGS_PM_ENABLED (1 << 30) +#define WLFC_TSFLAGS_MASK (WLFC_TSFLAGS_RX_RETRY | WLFC_TSFLAGS_PM_ENABLED) + /* enough space to host all 4 ACs, bc/mc and atim fifo credit */ #define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK 6 @@ -107,9 +139,8 @@ #define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */ -#define WLFC_PKTFLAG_PKTFROMHOST 0x01 /* packet originated from hot side */ -#define WLFC_PKTFLAG_PKT_REQUESTED 0x02 /* packet requsted by firmware side */ -#define WLFC_PKTFLAG_PKT_FORCELOWRATE 0x04 /* force low rate for this packet */ +#define WLFC_PKTFLAG_PKTFROMHOST 0x01 +#define WLFC_PKTFLAG_PKT_REQUESTED 0x02 #define WL_TXSTATUS_STATUS_MASK 0xff /* allow 8 bits */ #define WL_TXSTATUS_STATUS_SHIFT 24 @@ -120,6 +151,12 @@ #define WL_TXSTATUS_GET_STATUS(x) (((x) >> WL_TXSTATUS_STATUS_SHIFT) & \ WL_TXSTATUS_STATUS_MASK) +/** + * Bit 31 of the 32-bit packet tag is defined as 'generation ID'. It is set by the host to the + * "current" generation, and by the firmware to the "expected" generation, toggling on suppress. The + * firmware accepts a packet when the generation matches; on reset (startup) both "current" and + * "expected" are set to 0. 
+ */ #define WL_TXSTATUS_GENERATION_MASK 1 /* allow 1 bit */ #define WL_TXSTATUS_GENERATION_SHIFT 31 @@ -168,6 +205,16 @@ ((ctr) & WL_TXSTATUS_FREERUNCTR_MASK)) #define WL_TXSTATUS_GET_FREERUNCTR(x) ((x)& WL_TXSTATUS_FREERUNCTR_MASK) +/* Seq number part of AMSDU */ +#define WL_SEQ_AMSDU_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_AMSDU_SHIFT 14 +#define WL_SEQ_SET_AMSDU(x, val) ((x) = \ + ((x) & ~(WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT)) | \ + (((val) & WL_SEQ_AMSDU_MASK) << WL_SEQ_AMSDU_SHIFT)) +#define WL_SEQ_GET_AMSDU(x) (((x) >> WL_SEQ_AMSDU_SHIFT) & \ + WL_SEQ_AMSDU_MASK) + +/* Seq number is valid coming from FW */ #define WL_SEQ_FROMFW_MASK 0x1 /* allow 1 bit */ #define WL_SEQ_FROMFW_SHIFT 13 #define WL_SEQ_SET_FROMFW(x, val) ((x) = \ @@ -176,6 +223,13 @@ #define WL_SEQ_GET_FROMFW(x) (((x) >> WL_SEQ_FROMFW_SHIFT) & \ WL_SEQ_FROMFW_MASK) +/** + * Proptxstatus related. + * + * Pkt from bus layer (DHD for SDIO and pciedev for PCIE) + * is re-using seq number previously suppressed + * so FW should not assign new one + */ #define WL_SEQ_FROMDRV_MASK 0x1 /* allow 1 bit */ #define WL_SEQ_FROMDRV_SHIFT 12 #define WL_SEQ_SET_FROMDRV(x, val) ((x) = \ @@ -192,6 +246,10 @@ #define WL_SEQ_GET_NUM(x) (((x) >> WL_SEQ_NUM_SHIFT) & \ WL_SEQ_NUM_MASK) +#define WL_SEQ_AMSDU_SUPPR_MASK ((WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT) | \ + (WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT) | \ + (WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) + /* 32 STA should be enough??, 6 bits; Must be power of 2 */ #define WLFC_MAC_DESC_TABLE_SIZE 32 #define WLFC_MAX_IFNUM 16 @@ -200,6 +258,13 @@ /* b[7:5] -reuse guard, b[4:0] -value */ #define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f) +#define WLFC_PKTFLAG_SET_PKTREQUESTED(x) (x) |= \ + (WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT) + +#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \ + ~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT) + + #define WLFC_MAX_PENDING_DATALEN 120 /* host is free to discard the packet */ @@ -214,12 +279,14 @@ 
#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC 3 /* Firmware tossed after retries */ #define WLFC_CTL_PKTFLAG_DISCARD_NOACK 4 +/* Firmware wrongly reported suppressed previously,now fixing to acked */ +#define WLFC_CTL_PKTFLAG_SUPPRESS_ACKED 5 #define WLFC_D11_STATUS_INTERPRET(txs) \ - (((txs)->status.suppr_ind != TX_STATUS_SUPR_NONE) ? \ - WLFC_CTL_PKTFLAG_D11SUPPRESS : \ - ((txs)->status.was_acked ? \ - WLFC_CTL_PKTFLAG_DISCARD : WLFC_CTL_PKTFLAG_DISCARD_NOACK)) + ((txs)->status.was_acked ? WLFC_CTL_PKTFLAG_DISCARD : \ + (TXS_SUPR_MAGG_DONE((txs)->status.suppr_ind) ? \ + WLFC_CTL_PKTFLAG_DISCARD_NOACK : WLFC_CTL_PKTFLAG_D11SUPPRESS)) + #ifdef PROP_TXSTATUS_DEBUG #define WLFC_DBGMESG(x) printf x @@ -259,7 +326,7 @@ #define WLFC_TYPE_TRANS_ID_LEN 6 #define WLFC_MODE_HANGER 1 /* use hanger */ -#define WLFC_MODE_AFQ 2 /* use afq */ +#define WLFC_MODE_AFQ 2 /* use afq (At Firmware Queue) */ #define WLFC_IS_OLD_DEF(x) ((x & 1) || (x & 2)) #define WLFC_MODE_AFQ_SHIFT 2 /* afq bit */ diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl.h b/drivers/amlogic/wifi/bcmdhd/include/wlioctl.h similarity index 58% rename from drivers/net/wireless/bcmdhd/include/wlioctl.h rename to drivers/amlogic/wifi/bcmdhd/include/wlioctl.h index ea727bae425c0..4447ca0107b52 100644 --- a/drivers/net/wireless/bcmdhd/include/wlioctl.h +++ b/drivers/amlogic/wifi/bcmdhd/include/wlioctl.h @@ -1,12 +1,33 @@ /* * Custom OID/ioctl definitions for + * + * * Broadcom 802.11abg Networking Device Driver * * Definitions subject to change without notice. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * <> * - * $Id: wlioctl.h 504503 2014-09-24 11:28:56Z $ + * $Id: wlioctl.h 609280 2016-01-01 06:31:38Z $ */ #ifndef _wlioctl_h_ @@ -23,18 +44,23 @@ #include #include #include +#include - -#ifndef LINUX_POSTMOGRIFY_REMOVAL #include #include -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ -#ifndef LINUX_POSTMOGRIFY_REMOVAL +typedef struct { + uint32 num; + chanspec_t list[1]; +} chanspec_list_t; + +#define RSN_KCK_LENGTH 16 +#define RSN_KEK_LENGTH 16 + #ifndef INTF_NAME_SIZ #define INTF_NAME_SIZ 16 @@ -48,10 +74,6 @@ typedef struct remote_ioctl { } rem_ioctl_t; #define REMOTE_SIZE sizeof(rem_ioctl_t) -typedef struct { - uint32 num; - chanspec_t list[1]; -} chanspec_list_t; /* DFS Forced param */ typedef struct wl_dfs_forced_params { @@ -69,16 +91,21 @@ typedef struct wl_dfs_forced_params { /* association decision information */ typedef struct { - bool assoc_approved; /* (re)association approved */ - uint16 reject_reason; /* reason code for rejecting association */ + bool assoc_approved; /**< (re)association approved */ + uint16 reject_reason; /**< reason code for rejecting association */ struct ether_addr da; -#if 0 && (NDISVER >= 0x0620) - LARGE_INTEGER sys_time; /* current system time */ -#else - int64 sys_time; /* current system time */ -#endif + int64 sys_time; /**< current system time */ } assoc_decision_t; +#define DFS_SCAN_S_IDLE -1 +#define DFS_SCAN_S_RADAR_FREE 0 +#define DFS_SCAN_S_RADAR_FOUND 1 +#define DFS_SCAN_S_INPROGESS 2 +#define DFS_SCAN_S_SCAN_ABORTED 3 +#define DFS_SCAN_S_SCAN_MODESW_INPROGRESS 4 +#define DFS_SCAN_S_MAX 5 + + #define ACTION_FRAME_SIZE 1800 typedef struct wl_action_frame { @@ -92,8 +119,8 @@ typedef struct wl_action_frame { typedef struct ssid_info { - uint8 ssid_len; /* the length of SSID */ - uint8 ssid[32]; /* SSID string */ + uint8 ssid_len; /**< the length of SSID */ + uint8 ssid[32]; /**< SSID string */ } ssid_info_t; typedef struct wl_af_params { @@ -114,8 +141,6 @@ typedef struct wl_sa_query { struct ether_addr da; } 
wl_sa_query_t; -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ - /* require default structure packing */ #define BWL_DEFAULT_PACKING #include @@ -157,39 +182,65 @@ typedef BWL_PRE_PACKED_STRUCT struct { } BWL_POST_PACKED_STRUCT wlc_prot_dynbwsw_config_t; typedef BWL_PRE_PACKED_STRUCT struct { - uint32 version; /* version field */ + uint32 version; /**< version field */ uint32 config_mask; uint32 reset_mask; wlc_prot_dynbwsw_config_t config_params; } BWL_POST_PACKED_STRUCT obss_config_params_t; +/* bsscfg type */ +typedef enum bsscfg_type_t { + BSSCFG_TYPE_GENERIC = 0, /**< default */ + BSSCFG_TYPE_P2P = 1, /**< The BSS is for p2p link */ + BSSCFG_TYPE_BTA = 2, + BSSCFG_TYPE_TDLS = 4, + BSSCFG_TYPE_AWDL = 5, + BSSCFG_TYPE_PROXD = 6, + BSSCFG_TYPE_NAN = 7, + BSSCFG_TYPE_MAX +} bsscfg_type_t; + +/* bsscfg subtype */ +enum { + BSSCFG_GENERIC_STA = 1, /* GENERIC */ + BSSCFG_GENERIC_AP = 2, /* GENERIC */ + BSSCFG_P2P_GC = 3, /* P2P */ + BSSCFG_P2P_GO = 4, /* P2P */ + BSSCFG_P2P_DISC = 5, /* P2P */ +}; + +typedef struct wlc_bsscfg_info { + uint32 type; + uint32 subtype; +} wlc_bsscfg_info_t; + + -#ifndef LINUX_POSTMOGRIFY_REMOVAL /* Legacy structure to help keep backward compatible wl tool and tray app */ -#define LEGACY_WL_BSS_INFO_VERSION 107 /* older version of wl_bss_info struct */ +#define LEGACY_WL_BSS_INFO_VERSION 107 /**< older version of wl_bss_info struct */ typedef struct wl_bss_info_107 { - uint32 version; /* version field */ - uint32 length; /* byte length of data in this record, + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, * starting at version and including IEs */ struct ether_addr BSSID; - uint16 beacon_period; /* units are Kusec */ - uint16 capability; /* Capability information */ + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ uint8 SSID_len; uint8 SSID[32]; struct { - uint count; /* # rates in this set */ - uint8 rates[16]; /* rates in 500kbps units w/hi bit 
set if basic */ - } rateset; /* supported rates */ - uint8 channel; /* Channel no. */ - uint16 atim_window; /* units are Kusec */ - uint8 dtim_period; /* DTIM period */ - int16 RSSI; /* receive signal strength (in dBm) */ - int8 phy_noise; /* noise (in dBm) */ - uint32 ie_length; /* byte length of Information Elements */ + uint count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + uint8 channel; /**< Channel no. */ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + uint32 ie_length; /**< byte length of Information Elements */ /* variable length Information Elements */ } wl_bss_info_107_t; @@ -197,93 +248,103 @@ typedef struct wl_bss_info_107 { * Per-BSS information structure. */ -#define LEGACY2_WL_BSS_INFO_VERSION 108 /* old version of wl_bss_info struct */ +#define LEGACY2_WL_BSS_INFO_VERSION 108 /**< old version of wl_bss_info struct */ /* BSS info structure * Applications MUST CHECK ie_offset field and length field to access IEs and * next bss_info structure in a vector (in wl_scan_results_t) */ typedef struct wl_bss_info_108 { - uint32 version; /* version field */ - uint32 length; /* byte length of data in this record, + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, * starting at version and including IEs */ struct ether_addr BSSID; - uint16 beacon_period; /* units are Kusec */ - uint16 capability; /* Capability information */ + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ uint8 SSID_len; uint8 SSID[32]; struct { - uint count; /* # rates in this set */ - uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */ - } rateset; /* supported rates */ - chanspec_t chanspec; /* chanspec for bss */ - uint16 atim_window; /* units are 
Kusec */ - uint8 dtim_period; /* DTIM period */ - int16 RSSI; /* receive signal strength (in dBm) */ - int8 phy_noise; /* noise (in dBm) */ - - uint8 n_cap; /* BSS is 802.11N Capable */ - uint32 nbss_cap; /* 802.11N BSS Capabilities (based on HT_CAP_*) */ - uint8 ctl_ch; /* 802.11N BSS control channel number */ - uint32 reserved32[1]; /* Reserved for expansion of BSS properties */ - uint8 flags; /* flags */ - uint8 reserved[3]; /* Reserved for expansion of BSS properties */ - uint8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */ - - uint16 ie_offset; /* offset at which IEs start, from beginning */ - uint32 ie_length; /* byte length of Information Elements */ + uint count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + chanspec_t chanspec; /**< chanspec for bss */ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + + uint8 n_cap; /**< BSS is 802.11N Capable */ + uint32 nbss_cap; /**< 802.11N BSS Capabilities (based on HT_CAP_*) */ + uint8 ctl_ch; /**< 802.11N BSS control channel number */ + uint32 reserved32[1]; /**< Reserved for expansion of BSS properties */ + uint8 flags; /**< flags */ + uint8 reserved[3]; /**< Reserved for expansion of BSS properties */ + uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */ + + uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint32 ie_length; /**< byte length of Information Elements */ /* Add new fields here */ /* variable length Information Elements */ } wl_bss_info_108_t; -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ - -#define WL_BSS_INFO_VERSION 109 /* current version of wl_bss_info struct */ +#define WL_BSS_INFO_VERSION 109 /**< current version of wl_bss_info struct */ /* BSS info structure * Applications MUST CHECK ie_offset field and length field to access IEs 
and * next bss_info structure in a vector (in wl_scan_results_t) */ typedef struct wl_bss_info { - uint32 version; /* version field */ - uint32 length; /* byte length of data in this record, + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, * starting at version and including IEs */ struct ether_addr BSSID; - uint16 beacon_period; /* units are Kusec */ - uint16 capability; /* Capability information */ + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ uint8 SSID_len; uint8 SSID[32]; struct { - uint count; /* # rates in this set */ - uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */ - } rateset; /* supported rates */ - chanspec_t chanspec; /* chanspec for bss */ - uint16 atim_window; /* units are Kusec */ - uint8 dtim_period; /* DTIM period */ - int16 RSSI; /* receive signal strength (in dBm) */ - int8 phy_noise; /* noise (in dBm) */ - - uint8 n_cap; /* BSS is 802.11N Capable */ - uint32 nbss_cap; /* 802.11N+AC BSS Capabilities */ - uint8 ctl_ch; /* 802.11N BSS control channel number */ - uint8 padding1[3]; /* explicit struct alignment padding */ - uint16 vht_rxmcsmap; /* VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ - uint16 vht_txmcsmap; /* VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ - uint8 flags; /* flags */ - uint8 vht_cap; /* BSS is vht capable */ - uint8 reserved[2]; /* Reserved for expansion of BSS properties */ - uint8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */ - - uint16 ie_offset; /* offset at which IEs start, from beginning */ - uint32 ie_length; /* byte length of Information Elements */ - int16 SNR; /* average SNR of during frame reception */ + uint count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + chanspec_t chanspec; /**< chanspec for bss */ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM 
period */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + + uint8 n_cap; /**< BSS is 802.11N Capable */ + uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */ + uint8 ctl_ch; /**< 802.11N BSS control channel number */ + uint8 padding1[3]; /**< explicit struct alignment padding */ + uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint8 flags; /**< flags */ + uint8 vht_cap; /**< BSS is vht capable */ + uint8 reserved[2]; /**< Reserved for expansion of BSS properties */ + uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */ + + uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint32 ie_length; /**< byte length of Information Elements */ + int16 SNR; /**< average SNR of during frame reception */ + uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */ + uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */ + uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */ /* Add new fields here */ /* variable length Information Elements */ } wl_bss_info_t; -#ifndef LINUX_POSTMOGRIFY_REMOVAL +#define WL_GSCAN_BSS_INFO_VERSION 1 /* current version of wl_gscan_bss_info struct */ +#define WL_GSCAN_INFO_FIXED_FIELD_SIZE (sizeof(wl_gscan_bss_info_t) - sizeof(wl_bss_info_t)) + +typedef struct wl_gscan_bss_info { + uint32 timestamp[2]; + wl_bss_info_t info; + /* Do not add any more members below, fixed */ + /* and variable length Information Elements to follow */ +} wl_gscan_bss_info_t; + typedef struct wl_bsscfg { uint32 bsscfg_idx; @@ -305,6 +366,7 @@ typedef struct wl_if_add { uint32 if_flags; uint32 ap; struct ether_addr mac_addr; + uint32 wlc_index; } wl_if_add_t; typedef struct wl_bss_config { @@ -313,15 +375,17 @@ typedef struct wl_bss_config { uint32 chanspec; } wl_bss_config_t; -#define WL_BSS_USER_RADAR_CHAN_SELECT 0x1 /* User application will randomly select +#define 
WL_BSS_USER_RADAR_CHAN_SELECT 0x1 /**< User application will randomly select * radar channel. */ -#define DLOAD_HANDLER_VER 1 /* Downloader version */ -#define DLOAD_FLAG_VER_MASK 0xf000 /* Downloader version mask */ -#define DLOAD_FLAG_VER_SHIFT 12 /* Downloader version shift */ +#define DLOAD_HANDLER_VER 1 /**< Downloader version */ +#define DLOAD_FLAG_VER_MASK 0xf000 /**< Downloader version mask */ +#define DLOAD_FLAG_VER_SHIFT 12 /**< Downloader version shift */ -#define DL_CRC_NOT_INUSE 0x0001 +#define DL_CRC_NOT_INUSE 0x0001 +#define DL_BEGIN 0x0002 +#define DL_END 0x0004 /* generic download types & flags */ enum { @@ -364,14 +428,17 @@ struct wl_clm_dload_info { }; typedef struct wl_clm_dload_info wl_clm_dload_info_t; -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ - typedef struct wlc_ssid { uint32 SSID_len; uchar SSID[DOT11_MAX_SSID_LEN]; } wlc_ssid_t; -#ifndef LINUX_POSTMOGRIFY_REMOVAL +typedef struct wlc_ssid_ext { + bool hidden; + uint32 SSID_len; + uchar SSID[DOT11_MAX_SSID_LEN]; +} wlc_ssid_ext_t; + #define MAX_PREFERRED_AP_NUM 5 typedef struct wlc_fastssidinfo { @@ -379,15 +446,124 @@ typedef struct wlc_fastssidinfo { wlc_ssid_t SSID_info[MAX_PREFERRED_AP_NUM]; } wlc_fastssidinfo_t; +#ifdef CUSTOMER_HW_31_1 + +#define AP_NORM 0 +#define AP_STEALTH 1 +#define STREET_PASS_AP 2 + +#define NSC_MAX_TGT_SSID 20 +typedef struct nsc_ssid_entry_list { + wlc_ssid_t ssid_info; + int ssid_type; +} nsc_ssid_entry_list_t; + +typedef struct nsc_ssid_list { + uint32 num_entries; /* N wants 150 */ + nsc_ssid_entry_list_t ssid_entry[1]; +} nsc_ssid_list_t; + +#define NSC_TGT_SSID_BUFSZ (sizeof(nsc_ssid_entry_list_t) * \ + (NSC_MAX_TGT_SSID - 1) + sizeof(nsc_ssid_list_t)) + +/* Default values from N */ +#define NSC_SCPATT_ARRSZ 32 + +/* scan types */ +#define UNI_SCAN 0 +#define SP_SCAN_ACTIVE 1 +#define SP_SCAN_PASSIVE 2 +#define DOZE 3 + +/* what we found */ +typedef struct nsc_scan_results { + wlc_ssid_t ssid; + struct ether_addr mac; + int scantype; + uint16 channel; +} 
nsc_scan_results_t; + +typedef BWL_PRE_PACKED_STRUCT struct nsc_af_body { + uint8 type; /* should be 0x7f */ + uint8 oui[DOT11_OUI_LEN]; /* just like it says */ + uint8 subtype; + uint8 ielen; /* */ + uint8 data[1]; /* variable */ +} BWL_POST_PACKED_STRUCT nsc_af_body_t; + +typedef BWL_PRE_PACKED_STRUCT struct nsc_sdlist { + uint8 scantype; + uint16 duration; + uint16 channel; /* SP only */ + uint8 ssid_index; /* SP only */ + uint16 rate; /* SP only */ +} BWL_POST_PACKED_STRUCT nsc_sdlist_t; + +typedef struct nsc_scandes { + uint32 num_entries; /* number of list entries */ + nsc_sdlist_t sdlist[1]; /* variable */ +} nsc_scandes_t; + +#define NSC_MAX_SDLIST_ENTRIES 8 +#define NSC_SDDESC_BUFSZ (sizeof(nsc_sdlist_t) * \ + (NSC_MAX_SDLIST_ENTRIES - 1) + sizeof(nsc_scandes_t)) + +#define SCAN_ARR_END (NSC_MAX_SDLIST_ENTRIES) +#endif /* CUSTOMER_HW_31_1 */ + typedef BWL_PRE_PACKED_STRUCT struct wnm_url { uint8 len; uint8 data[1]; } BWL_POST_PACKED_STRUCT wnm_url_t; +#define WNM_BSS_SELECT_TYPE_RSSI 0 +#define WNM_BSS_SELECT_TYPE_CU 1 + +#define WNM_BSSLOAD_MONITOR_VERSION 1 +typedef struct wnm_bssload_monitor_cfg { + uint8 version; + uint8 band; + uint8 duration; /* duration between 1 to 20sec */ +} wnm_bssload_monitor_cfg_t; + +#define BSS_MAXTABLE_SIZE 10 +#define WNM_BSS_SELECT_FACTOR_VERSION 1 +typedef struct wnm_bss_select_factor_params { + uint8 low; + uint8 high; + uint8 factor; + uint8 pad; +} wnm_bss_select_factor_params_t; + +typedef struct wnm_bss_select_factor_cfg { + uint8 version; + uint8 band; + uint16 type; + uint16 pad; + uint16 count; + wnm_bss_select_factor_params_t params[1]; +} wnm_bss_select_factor_cfg_t; + +#define WNM_BSS_SELECT_WEIGHT_VERSION 1 +typedef struct wnm_bss_select_weight_cfg { + uint8 version; + uint8 band; + uint16 type; + uint16 weight; /* weightage for each type between 0 to 100 */ +} wnm_bss_select_weight_cfg_t; + +#define WNM_ROAM_TRIGGER_VERSION 1 +typedef struct wnm_roam_trigger_cfg { + uint8 version; + uint8 band; + uint16 
type; + int16 trigger; /* trigger for each type in new roam algorithm */ +} wnm_roam_trigger_cfg_t; + typedef struct chan_scandata { uint8 txpower; uint8 pad; - chanspec_t channel; /* Channel num, bw, ctrl_sb and band */ + chanspec_t channel; /**< Channel num, bw, ctrl_sb and band */ uint32 channel_mintime; uint32 channel_maxtime; } chan_scandata_t; @@ -401,15 +577,15 @@ typedef enum wl_scan_type { #define WLC_EXTDSCAN_MAX_SSID 5 typedef struct wl_extdscan_params { - int8 nprobes; /* 0, passive, otherwise active */ - int8 split_scan; /* split scan */ - int8 band; /* band */ + int8 nprobes; /**< 0, passive, otherwise active */ + int8 split_scan; /**< split scan */ + int8 band; /**< band */ int8 pad; wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /* ssid list */ - uint32 tx_rate; /* in 500ksec units */ - wl_scan_type_t scan_type; /* enum */ + uint32 tx_rate; /**< in 500ksec units */ + wl_scan_type_t scan_type; /**< enum */ int32 channel_num; - chan_scandata_t channel_list[1]; /* list of chandata structs */ + chan_scandata_t channel_list[1]; /**< list of chandata structs */ } wl_extdscan_params_t; #define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t)) @@ -417,23 +593,23 @@ typedef struct wl_extdscan_params { #define WL_SCAN_PARAMS_SSID_MAX 10 typedef struct wl_scan_params { - wlc_ssid_t ssid; /* default: {0, ""} */ - struct ether_addr bssid; /* default: bcast */ - int8 bss_type; /* default: any, + wlc_ssid_t ssid; /**< default: {0, ""} */ + struct ether_addr bssid; /**< default: bcast */ + int8 bss_type; /**< default: any, * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT */ - uint8 scan_type; /* flags, 0 use default */ - int32 nprobes; /* -1 use default, number of probes per channel */ - int32 active_time; /* -1 use default, dwell time per channel for + uint8 scan_type; /**< flags, 0 use default */ + int32 nprobes; /**< -1 use default, number of probes per channel */ + int32 active_time; /**< -1 use default, dwell time per channel for * 
active scanning */ - int32 passive_time; /* -1 use default, dwell time per channel + int32 passive_time; /**< -1 use default, dwell time per channel * for passive scanning */ - int32 home_time; /* -1 use default, dwell time for the home channel + int32 home_time; /**< -1 use default, dwell time for the home channel * between channel scans */ - int32 channel_num; /* count of channels and ssids that follow + int32 channel_num; /**< count of channels and ssids that follow * * low half is count of channels in channel_list, 0 * means default (use all available channels) @@ -447,7 +623,7 @@ typedef struct wl_scan_params { * parameter portion is assumed, otherwise ssid in * the fixed portion is ignored */ - uint16 channel_list[1]; /* list of chanspecs */ + uint16 channel_list[1]; /**< list of chanspecs */ } wl_scan_params_t; /* size of wl_scan_params not including variable length array */ @@ -466,7 +642,6 @@ typedef struct wl_iscan_params { /* 3 fields + size of wl_scan_params, not including variable length array */ #define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t)) -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ typedef struct wl_scan_results { uint32 buflen; @@ -475,13 +650,13 @@ typedef struct wl_scan_results { wl_bss_info_t bss_info[1]; } wl_scan_results_t; -#ifndef LINUX_POSTMOGRIFY_REMOVAL /* size of wl_scan_results not including variable length array */ #define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t)) #define ESCAN_REQ_VERSION 1 +/** event scan reduces amount of SOC memory needed to store scan results */ typedef struct wl_escan_params { uint32 version; uint16 action; @@ -491,6 +666,7 @@ typedef struct wl_escan_params { #define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t)) +/** event scan reduces amount of SOC memory needed to store scan results */ typedef struct wl_escan_result { uint32 buflen; uint32 version; @@ -501,6 +677,14 @@ typedef struct 
wl_escan_result { #define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t)) +typedef struct wl_gscan_result { + uint32 buflen; + uint32 version; + wl_gscan_bss_info_t bss_info[1]; +} wl_gscan_result_t; + +#define WL_GSCAN_RESULTS_FIXED_SIZE (sizeof(wl_gscan_result_t) - sizeof(wl_gscan_bss_info_t)) + /* incremental scan results struct */ typedef struct wl_iscan_results { uint32 status; @@ -515,10 +699,10 @@ typedef struct wl_iscan_results { typedef struct scanol_params { uint32 version; - uint32 flags; /* offload scanning flags */ - int32 active_time; /* -1 use default, dwell time per channel for active scanning */ - int32 passive_time; /* -1 use default, dwell time per channel for passive scanning */ - int32 idle_rest_time; /* -1 use default, time idle between scan cycle */ + uint32 flags; /**< offload scanning flags */ + int32 active_time; /**< -1 use default, dwell time per channel for active scanning */ + int32 passive_time; /**< -1 use default, dwell time per channel for passive scanning */ + int32 idle_rest_time; /**< -1 use default, time idle between scan cycle */ int32 idle_rest_time_multiplier; int32 active_rest_time; int32 active_rest_time_multiplier; @@ -528,7 +712,7 @@ typedef struct scanol_params { int32 scan_cycle_active_rest_multiplier; int32 max_rest_time; int32 max_scan_cycles; - int32 nprobes; /* -1 use default, number of probes per channel */ + int32 nprobes; /**< -1 use default, number of probes per channel */ int32 scan_start_delay; uint32 nchannels; uint32 ssid_count; @@ -540,17 +724,16 @@ typedef struct wl_probe_params { struct ether_addr bssid; struct ether_addr mac; } wl_probe_params_t; -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ -#define WL_MAXRATES_IN_SET 16 /* max # of rates in a rateset */ +#define WL_MAXRATES_IN_SET 16 /**< max # of rates in a rateset */ typedef struct wl_rateset { - uint32 count; /* # rates in this set */ - uint8 rates[WL_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */ + 
uint32 count; /**< # rates in this set */ + uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */ } wl_rateset_t; typedef struct wl_rateset_args { - uint32 count; /* # rates in this set */ - uint8 rates[WL_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */ + uint32 count; /**< # rates in this set */ + uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */ uint8 mcs[MCSSET_LEN]; /* supported mcs index bit map */ uint16 vht_mcs[VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */ } wl_rateset_args_t; @@ -560,11 +743,11 @@ typedef struct wl_rateset_args { #define TXBF_RATE_OFDM_ALL 8 typedef struct wl_txbf_rateset { - uint8 txbf_rate_mcs[TXBF_RATE_MCS_ALL]; /* one for each stream */ - uint8 txbf_rate_mcs_bcm[TXBF_RATE_MCS_ALL]; /* one for each stream */ - uint16 txbf_rate_vht[TXBF_RATE_VHT_ALL]; /* one for each stream */ - uint16 txbf_rate_vht_bcm[TXBF_RATE_VHT_ALL]; /* one for each stream */ - uint8 txbf_rate_ofdm[TXBF_RATE_OFDM_ALL]; /* bitmap of ofdm rates that enables txbf */ + uint8 txbf_rate_mcs[TXBF_RATE_MCS_ALL]; /**< one for each stream */ + uint8 txbf_rate_mcs_bcm[TXBF_RATE_MCS_ALL]; /**< one for each stream */ + uint16 txbf_rate_vht[TXBF_RATE_VHT_ALL]; /**< one for each stream */ + uint16 txbf_rate_vht_bcm[TXBF_RATE_VHT_ALL]; /**< one for each stream */ + uint8 txbf_rate_ofdm[TXBF_RATE_OFDM_ALL]; /**< bitmap of ofdm rates that enables txbf */ uint8 txbf_rate_ofdm_bcm[TXBF_RATE_OFDM_ALL]; /* bitmap of ofdm rates that enables txbf */ uint8 txbf_rate_ofdm_cnt; uint8 txbf_rate_ofdm_cnt_bcm; @@ -594,17 +777,17 @@ typedef struct wl_uint32_list { /* used for association with a specific BSSID and chanspec list */ typedef struct wl_assoc_params { - struct ether_addr bssid; /* 00:00:00:00:00:00: broadcast scan */ - uint16 bssid_cnt; /* 0: use chanspec_num, and the single bssid, + struct ether_addr bssid; /**< 00:00:00:00:00:00: broadcast scan */ + uint16 bssid_cnt; /**< 0: 
use chanspec_num, and the single bssid, * otherwise count of chanspecs in chanspec_list * AND paired bssids following chanspec_list * also, chanspec_num has to be set to zero * for bssid list to be used */ - int32 chanspec_num; /* 0: all available channels, + int32 chanspec_num; /**< 0: all available channels, * otherwise count of chanspecs in chanspec_list */ - chanspec_t chanspec_list[1]; /* list of chanspecs */ + chanspec_t chanspec_list[1]; /**< list of chanspecs */ } wl_assoc_params_t; #define WL_ASSOC_PARAMS_FIXED_SIZE OFFSETOF(wl_assoc_params_t, chanspec_list) @@ -620,34 +803,33 @@ typedef wl_assoc_params_t wl_join_assoc_params_t; /* used for join with or without a specific bssid and channel list */ typedef struct wl_join_params { wlc_ssid_t ssid; - wl_assoc_params_t params; /* optional field, but it must include the fixed portion + wl_assoc_params_t params; /**< optional field, but it must include the fixed portion * of the wl_assoc_params_t struct when it does present. */ } wl_join_params_t; -#ifndef LINUX_POSTMOGRIFY_REMOVAL #define WL_JOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_join_params_t, params) + \ WL_ASSOC_PARAMS_FIXED_SIZE) /* scan params for extended join */ typedef struct wl_join_scan_params { - uint8 scan_type; /* 0 use default, active or passive scan */ - int32 nprobes; /* -1 use default, number of probes per channel */ - int32 active_time; /* -1 use default, dwell time per channel for + uint8 scan_type; /**< 0 use default, active or passive scan */ + int32 nprobes; /**< -1 use default, number of probes per channel */ + int32 active_time; /**< -1 use default, dwell time per channel for * active scanning */ - int32 passive_time; /* -1 use default, dwell time per channel + int32 passive_time; /**< -1 use default, dwell time per channel * for passive scanning */ - int32 home_time; /* -1 use default, dwell time for the home channel + int32 home_time; /**< -1 use default, dwell time for the home channel * between channel scans */ } wl_join_scan_params_t; 
/* extended join params */ typedef struct wl_extjoin_params { - wlc_ssid_t ssid; /* {0, ""}: wildcard scan */ + wlc_ssid_t ssid; /**< {0, ""}: wildcard scan */ wl_join_scan_params_t scan; - wl_join_assoc_params_t assoc; /* optional field, but it must include the fixed portion + wl_join_assoc_params_t assoc; /**< optional field, but it must include the fixed portion * of the wl_join_assoc_params_t struct when it does * present. */ @@ -655,70 +837,111 @@ typedef struct wl_extjoin_params { #define WL_EXTJOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_extjoin_params_t, assoc) + \ WL_JOIN_ASSOC_PARAMS_FIXED_SIZE) -#define ANT_SELCFG_MAX 4 /* max number of antenna configurations */ -#define MAX_STREAMS_SUPPORTED 4 /* max number of streams supported */ +#define ANT_SELCFG_MAX 4 /**< max number of antenna configurations */ +#define MAX_STREAMS_SUPPORTED 4 /**< max number of streams supported */ typedef struct { - uint8 ant_config[ANT_SELCFG_MAX]; /* antenna configuration */ - uint8 num_antcfg; /* number of available antenna configurations */ + uint8 ant_config[ANT_SELCFG_MAX]; /**< antenna configuration */ + uint8 num_antcfg; /**< number of available antenna configurations */ } wlc_antselcfg_t; typedef struct { - uint32 duration; /* millisecs spent sampling this channel */ - uint32 congest_ibss; /* millisecs in our bss (presumably this traffic will */ - /* move if cur bss moves channels) */ - uint32 congest_obss; /* traffic not in our bss */ - uint32 interference; /* millisecs detecting a non 802.11 interferer. */ - uint32 timestamp; /* second timestamp */ + uint32 duration; /**< millisecs spent sampling this channel */ + uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */ + /**< move if cur bss moves channels) */ + uint32 congest_obss; /**< traffic not in our bss */ + uint32 interference; /**< millisecs detecting a non 802.11 interferer. 
*/ + uint32 timestamp; /**< second timestamp */ } cca_congest_t; typedef struct { - chanspec_t chanspec; /* Which channel? */ - uint8 num_secs; /* How many secs worth of data */ - cca_congest_t secs[1]; /* Data */ + chanspec_t chanspec; /**< Which channel? */ + uint16 num_secs; /**< How many secs worth of data */ + cca_congest_t secs[1]; /**< Data */ } cca_congest_channel_req_t; +typedef struct { + uint32 duration; /**< millisecs spent sampling this channel */ + uint32 congest; /**< millisecs detecting busy CCA */ + uint32 timestamp; /**< second timestamp */ +} cca_congest_simple_t; + +typedef struct { + uint16 status; + uint16 id; + chanspec_t chanspec; /**< Which channel? */ + uint16 len; + union { + cca_congest_simple_t cca_busy; /**< CCA busy */ + int noise; /**< noise floor */ + }; +} cca_chan_qual_event_t; + /* interference sources */ enum interference_source { - ITFR_NONE = 0, /* interference */ - ITFR_PHONE, /* wireless phone */ - ITFR_VIDEO_CAMERA, /* wireless video camera */ - ITFR_MICROWAVE_OVEN, /* microwave oven */ - ITFR_BABY_MONITOR, /* wireless baby monitor */ - ITFR_BLUETOOTH, /* bluetooth */ - ITFR_VIDEO_CAMERA_OR_BABY_MONITOR, /* wireless camera or baby monitor */ - ITFR_BLUETOOTH_OR_BABY_MONITOR, /* bluetooth or baby monitor */ - ITFR_VIDEO_CAMERA_OR_PHONE, /* video camera or phone */ - ITFR_UNIDENTIFIED /* interference from unidentified source */ + ITFR_NONE = 0, /**< interference */ + ITFR_PHONE, /**< wireless phone */ + ITFR_VIDEO_CAMERA, /**< wireless video camera */ + ITFR_MICROWAVE_OVEN, /**< microwave oven */ + ITFR_BABY_MONITOR, /**< wireless baby monitor */ + ITFR_BLUETOOTH, /**< bluetooth */ + ITFR_VIDEO_CAMERA_OR_BABY_MONITOR, /**< wireless camera or baby monitor */ + ITFR_BLUETOOTH_OR_BABY_MONITOR, /**< bluetooth or baby monitor */ + ITFR_VIDEO_CAMERA_OR_PHONE, /**< video camera or phone */ + ITFR_UNIDENTIFIED /**< interference from unidentified source */ }; /* structure for interference source report */ typedef struct { - uint32 
flags; /* flags. bit definitions below */ - uint32 source; /* last detected interference source */ - uint32 timestamp; /* second timestamp on interferenced flag change */ + uint32 flags; /**< flags. bit definitions below */ + uint32 source; /**< last detected interference source */ + uint32 timestamp; /**< second timestamp on interferenced flag change */ } interference_source_rep_t; -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ -#define WLC_CNTRY_BUF_SZ 4 /* Country string is 3 bytes + NUL */ +#define WLC_CNTRY_BUF_SZ 4 /**< Country string is 3 bytes + NUL */ -#ifndef LINUX_POSTMOGRIFY_REMOVAL typedef struct wl_country { - char country_abbrev[WLC_CNTRY_BUF_SZ]; /* nul-terminated country code used in + char country_abbrev[WLC_CNTRY_BUF_SZ]; /**< nul-terminated country code used in * the Country IE */ - int32 rev; /* revision specifier for ccode + int32 rev; /**< revision specifier for ccode * on set, -1 indicates unspecified. * on get, rev >= 0 */ - char ccode[WLC_CNTRY_BUF_SZ]; /* nul-terminated built-in country code. + char ccode[WLC_CNTRY_BUF_SZ]; /**< nul-terminated built-in country code. * variable length, but fixed size in * struct allows simple allocation for * expected country strings <= 3 chars. 
*/ } wl_country_t; +#define CCODE_INFO_VERSION 1 + +typedef enum wl_ccode_role { + WLC_CCODE_ROLE_ACTIVE = 0, + WLC_CCODE_ROLE_HOST, + WLC_CCODE_ROLE_80211D_ASSOC, + WLC_CCODE_ROLE_80211D_SCAN, + WLC_CCODE_ROLE_DEFAULT, + WLC_CCODE_LAST +} wl_ccode_role_t; +#define WLC_NUM_CCODE_INFO WLC_CCODE_LAST + +typedef struct wl_ccode_entry { + uint16 reserved; + uint8 band; + uint8 role; + char ccode[WLC_CNTRY_BUF_SZ]; +} wl_ccode_entry_t; + +typedef struct wl_ccode_info { + uint16 version; + uint16 count; /* Number of ccodes entries in the set */ + wl_ccode_entry_t ccodelist[1]; +} wl_ccode_info_t; +#define WL_CCODE_INFO_FIXED_LEN OFFSETOF(wl_ccode_info_t, ccodelist) + typedef struct wl_channels_in_country { uint32 buflen; uint32 band; @@ -739,18 +962,18 @@ typedef struct wl_rm_req_elt { int8 type; int8 flags; chanspec_t chanspec; - uint32 token; /* token for this measurement */ - uint32 tsf_h; /* TSF high 32-bits of Measurement start time */ - uint32 tsf_l; /* TSF low 32-bits */ - uint32 dur; /* TUs */ + uint32 token; /**< token for this measurement */ + uint32 tsf_h; /**< TSF high 32-bits of Measurement start time */ + uint32 tsf_l; /**< TSF low 32-bits */ + uint32 dur; /**< TUs */ } wl_rm_req_elt_t; typedef struct wl_rm_req { - uint32 token; /* overall measurement set token */ - uint32 count; /* number of measurement requests */ - void *cb; /* completion callback function: may be NULL */ - void *cb_arg; /* arg to completion callback function */ - wl_rm_req_elt_t req[1]; /* variable length block of requests */ + uint32 token; /**< overall measurement set token */ + uint32 count; /**< number of measurement requests */ + void *cb; /**< completion callback function: may be NULL */ + void *cb_arg; /**< arg to completion callback function */ + wl_rm_req_elt_t req[1]; /**< variable length block of requests */ } wl_rm_req_t; #define WL_RM_REQ_FIXED_LEN OFFSETOF(wl_rm_req_t, req) @@ -758,14 +981,14 @@ typedef struct wl_rm_rep_elt { int8 type; int8 flags; chanspec_t chanspec; - 
uint32 token; /* token for this measurement */ - uint32 tsf_h; /* TSF high 32-bits of Measurement start time */ - uint32 tsf_l; /* TSF low 32-bits */ - uint32 dur; /* TUs */ - uint32 len; /* byte length of data block */ - uint8 data[1]; /* variable length data block */ + uint32 token; /**< token for this measurement */ + uint32 tsf_h; /**< TSF high 32-bits of Measurement start time */ + uint32 tsf_l; /**< TSF low 32-bits */ + uint32 dur; /**< TUs */ + uint32 len; /**< byte length of data block */ + uint8 data[1]; /**< variable length data block */ } wl_rm_rep_elt_t; -#define WL_RM_REP_ELT_FIXED_LEN 24 /* length excluding data block */ +#define WL_RM_REP_ELT_FIXED_LEN 24 /**< length excluding data block */ #define WL_RPI_REP_BIN_NUM 8 typedef struct wl_rm_rpi_rep { @@ -774,35 +997,12 @@ typedef struct wl_rm_rpi_rep { } wl_rm_rpi_rep_t; typedef struct wl_rm_rep { - uint32 token; /* overall measurement set token */ - uint32 len; /* length of measurement report block */ - wl_rm_rep_elt_t rep[1]; /* variable length block of reports */ + uint32 token; /**< overall measurement set token */ + uint32 len; /**< length of measurement report block */ + wl_rm_rep_elt_t rep[1]; /**< variable length block of reports */ } wl_rm_rep_t; #define WL_RM_REP_FIXED_LEN 8 -#ifdef BCMCCX - -#define LEAP_USER_MAX 32 -#define LEAP_DOMAIN_MAX 32 -#define LEAP_PASSWORD_MAX 32 - -typedef struct wl_leap_info { - wlc_ssid_t ssid; - uint8 user_len; - uchar user[LEAP_USER_MAX]; - uint8 password_len; - uchar password[LEAP_PASSWORD_MAX]; - uint8 domain_len; - uchar domain[LEAP_DOMAIN_MAX]; -} wl_leap_info_t; - -typedef struct wl_leap_list { - uint32 buflen; - uint32 version; - uint32 count; - wl_leap_info_t leap_info[1]; -} wl_leap_list_t; -#endif /* BCMCCX */ typedef enum sup_auth_status { /* Basic supplicant authentication states */ @@ -823,30 +1023,29 @@ typedef enum sup_auth_status { WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE, /* Waiting to receive handshake msg M3 */ 
WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE, - WLC_SUP_KEYXCHANGE_PREP_M4, /* Preparing to send handshake msg M4 */ - WLC_SUP_KEYXCHANGE_WAIT_G1, /* Waiting to receive handshake msg G1 */ - WLC_SUP_KEYXCHANGE_PREP_G2 /* Preparing to send handshake msg G2 */ + WLC_SUP_KEYXCHANGE_PREP_M4, /**< Preparing to send handshake msg M4 */ + WLC_SUP_KEYXCHANGE_WAIT_G1, /**< Waiting to receive handshake msg G1 */ + WLC_SUP_KEYXCHANGE_PREP_G2 /**< Preparing to send handshake msg G2 */ } sup_auth_status_t; -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ typedef struct wl_wsec_key { - uint32 index; /* key index */ - uint32 len; /* key length */ - uint8 data[DOT11_MAX_KEY_SIZE]; /* key data */ + uint32 index; /**< key index */ + uint32 len; /**< key length */ + uint8 data[DOT11_MAX_KEY_SIZE]; /**< key data */ uint32 pad_1[18]; - uint32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */ - uint32 flags; /* misc flags */ + uint32 algo; /**< CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */ + uint32 flags; /**< misc flags */ uint32 pad_2[2]; int pad_3; - int iv_initialized; /* has IV been initialized already? */ + int iv_initialized; /**< has IV been initialized already? 
*/ int pad_4; /* Rx IV */ struct { - uint32 hi; /* upper 32 bits of IV */ - uint16 lo; /* lower 16 bits of IV */ + uint32 hi; /**< upper 32 bits of IV */ + uint16 lo; /**< lower 16 bits of IV */ } rxiv; uint32 pad_5[2]; - struct ether_addr ea; /* per station */ + struct ether_addr ea; /**< per station */ } wl_wsec_key_t; #define WSEC_MIN_PSK_LEN 8 @@ -857,9 +1056,9 @@ typedef struct wl_wsec_key { /* receptacle for WLC_SET_WSEC_PMK parameter */ typedef struct { - ushort key_len; /* octets in key material */ - ushort flags; /* key handling qualification */ - uint8 key[WSEC_MAX_PSK_LEN]; /* PMK material */ + ushort key_len; /**< octets in key material */ + ushort flags; /**< key handling qualification */ + uint8 key[WSEC_MAX_PSK_LEN]; /**< PMK material */ } wsec_pmk_t; typedef struct _pmkid { @@ -882,9 +1081,8 @@ typedef struct _pmkid_cand_list { pmkid_cand_t pmkid_cand[1]; } pmkid_cand_list_t; -#define WL_STA_ANT_MAX 4 /* max possible rx antennas */ +#define WL_STA_ANT_MAX 4 /**< max possible rx antennas */ -#ifndef LINUX_POSTMOGRIFY_REMOVAL typedef struct wl_assoc_info { uint32 req_len; uint32 resp_len; @@ -903,36 +1101,36 @@ typedef struct wl_led_info { /* srom read/write struct passed through ioctl */ typedef struct { - uint byteoff; /* byte offset */ - uint nbytes; /* number of bytes */ + uint byteoff; /**< byte offset */ + uint nbytes; /**< number of bytes */ uint16 buf[1]; } srom_rw_t; #define CISH_FLAG_PCIECIS (1 << 15) /* write CIS format bit for PCIe CIS */ /* similar cis (srom or otp) struct [iovar: may not be aligned] */ typedef struct { - uint16 source; /* cis source */ - uint16 flags; /* flags */ - uint32 byteoff; /* byte offset */ - uint32 nbytes; /* number of bytes */ + uint16 source; /**< cis source */ + uint16 flags; /**< flags */ + uint32 byteoff; /**< byte offset */ + uint32 nbytes; /**< number of bytes */ /* data follows here */ } cis_rw_t; /* R_REG and W_REG struct passed through ioctl */ typedef struct { - uint32 byteoff; /* byte offset of the 
field in d11regs_t */ - uint32 val; /* read/write value of the field */ - uint32 size; /* sizeof the field */ - uint band; /* band (optional) */ + uint32 byteoff; /**< byte offset of the field in d11regs_t */ + uint32 val; /**< read/write value of the field */ + uint32 size; /**< sizeof the field */ + uint band; /**< band (optional) */ } rw_reg_t; /* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */ /* PCL - Power Control Loop */ typedef struct { - uint16 auto_ctrl; /* WL_ATTEN_XX */ - uint16 bb; /* Baseband attenuation */ - uint16 radio; /* Radio attenuation */ - uint16 txctl1; /* Radio TX_CTL1 value */ + uint16 auto_ctrl; /**< WL_ATTEN_XX */ + uint16 bb; /**< Baseband attenuation */ + uint16 radio; /**< Radio attenuation */ + uint16 txctl1; /**< Radio TX_CTL1 value */ } atten_t; /* Per-AC retry parameters */ @@ -948,22 +1146,6 @@ typedef struct wme_tx_params_s wme_tx_params_t; #define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT) -typedef struct wl_plc_nodelist { - uint32 count; /* Number of nodes */ - struct _node { - struct ether_addr ea; /* Node ether address */ - uint32 node_type; /* Node type */ - uint32 cost; /* PLC affinity */ - } node[1]; -} wl_plc_nodelist_t; - -typedef struct wl_plc_params { - uint32 cmd; /* Command */ - uint8 plc_failover; /* PLC failover control/status */ - struct ether_addr node_ea; /* Node ether address */ - uint32 cost; /* Link cost or mac cost */ -} wl_plc_params_t; - /* Used to get specific link/ac parameters */ typedef struct { int32 ac; @@ -975,48 +1157,48 @@ typedef struct { #define WL_PM_MUTE_TX_VER 1 typedef struct wl_pm_mute_tx { - uint16 version; /* version */ - uint16 len; /* length */ - uint16 deadline; /* deadline timer (in milliseconds) */ - uint8 enable; /* set to 1 to enable mode; set to 0 to disable it */ + uint16 version; /**< version */ + uint16 len; /**< length */ + uint16 deadline; /**< deadline timer (in milliseconds) */ + uint8 enable; /**< set to 1 to enable mode; set 
to 0 to disable it */ } wl_pm_mute_tx_t; typedef struct { - uint16 ver; /* version of this struct */ - uint16 len; /* length in bytes of this structure */ - uint16 cap; /* sta's advertised capabilities */ - uint32 flags; /* flags defined below */ - uint32 idle; /* time since data pkt rx'd from sta */ - struct ether_addr ea; /* Station address */ - wl_rateset_t rateset; /* rateset in use */ - uint32 in; /* seconds elapsed since associated */ + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint16 cap; /**< sta's advertised capabilities */ + uint32 flags; /**< flags defined below */ + uint32 idle; /**< time since data pkt rx'd from sta */ + struct ether_addr ea; /**< Station address */ + wl_rateset_t rateset; /**< rateset in use */ + uint32 in; /**< seconds elapsed since associated */ uint32 listen_interval_inms; /* Min Listen interval in ms for this STA */ - uint32 tx_pkts; /* # of user packets transmitted (unicast) */ - uint32 tx_failures; /* # of user packets failed */ - uint32 rx_ucast_pkts; /* # of unicast packets received */ - uint32 rx_mcast_pkts; /* # of multicast packets received */ - uint32 tx_rate; /* Rate used by last tx frame */ - uint32 rx_rate; /* Rate of last successful rx frame */ - uint32 rx_decrypt_succeeds; /* # of packet decrypted successfully */ - uint32 rx_decrypt_failures; /* # of packet decrypted unsuccessfully */ - uint32 tx_tot_pkts; /* # of user tx pkts (ucast + mcast) */ - uint32 rx_tot_pkts; /* # of data packets recvd (uni + mcast) */ - uint32 tx_mcast_pkts; /* # of mcast pkts txed */ - uint64 tx_tot_bytes; /* data bytes txed (ucast + mcast) */ - uint64 rx_tot_bytes; /* data bytes recvd (ucast + mcast) */ - uint64 tx_ucast_bytes; /* data bytes txed (ucast) */ - uint64 tx_mcast_bytes; /* # data bytes txed (mcast) */ - uint64 rx_ucast_bytes; /* data bytes recvd (ucast) */ - uint64 rx_mcast_bytes; /* data bytes recvd (mcast) */ + uint32 tx_pkts; /**< # of user packets transmitted 
(unicast) */ + uint32 tx_failures; /**< # of user packets failed */ + uint32 rx_ucast_pkts; /**< # of unicast packets received */ + uint32 rx_mcast_pkts; /**< # of multicast packets received */ + uint32 tx_rate; /**< Rate used by last tx frame */ + uint32 rx_rate; /**< Rate of last successful rx frame */ + uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */ + uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */ + uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */ + uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */ + uint32 tx_mcast_pkts; /**< # of mcast pkts txed */ + uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */ + uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */ + uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */ + uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */ + uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */ + uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */ int8 rssi[WL_STA_ANT_MAX]; /* average rssi per antenna * of data frames */ - int8 nf[WL_STA_ANT_MAX]; /* per antenna noise floor */ - uint16 aid; /* association ID */ - uint16 ht_capabilities; /* advertised ht caps */ - uint16 vht_flags; /* converted vht flags */ - uint32 tx_pkts_retried; /* # of frames where a retry was + int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */ + uint16 aid; /**< association ID */ + uint16 ht_capabilities; /**< advertised ht caps */ + uint16 vht_flags; /**< converted vht flags */ + uint32 tx_pkts_retried; /**< # of frames where a retry was * necessary */ uint32 tx_pkts_retry_exhausted; /* # of user frames where a retry @@ -1029,31 +1211,59 @@ typedef struct { * Separated for host requested frames and WLAN locally generated frames. * Include unicast frame only where the retries/failures can be counted. 
*/ - uint32 tx_pkts_total; /* # user frames sent successfully */ - uint32 tx_pkts_retries; /* # user frames retries */ - uint32 tx_pkts_fw_total; /* # FW generated sent successfully */ - uint32 tx_pkts_fw_retries; /* # retries for FW generated frames */ - uint32 tx_pkts_fw_retry_exhausted; /* # FW generated where a retry + uint32 tx_pkts_total; /**< # user frames sent successfully */ + uint32 tx_pkts_retries; /**< # user frames retries */ + uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */ + uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */ + uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry * was exhausted */ - uint32 rx_pkts_retried; /* # rx with retry bit set */ - uint32 tx_rate_fallback; /* lowest fallback TX rate */ + uint32 rx_pkts_retried; /**< # rx with retry bit set */ + uint32 tx_rate_fallback; /**< lowest fallback TX rate */ } sta_info_t; #define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_tot_pkts) #define WL_STA_VER 4 -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ - -#define WLC_NUMRATES 16 /* max # of rates in a rateset */ +typedef struct { + uint32 auto_en; + uint32 active_ant; + uint32 rxcount; + int32 avg_snr_per_ant0; + int32 avg_snr_per_ant1; + int32 avg_snr_per_ant2; + uint32 swap_ge_rxcount0; + uint32 swap_ge_rxcount1; + uint32 swap_ge_snrthresh0; + uint32 swap_ge_snrthresh1; + uint32 swap_txfail0; + uint32 swap_txfail1; + uint32 swap_timer0; + uint32 swap_timer1; + uint32 swap_alivecheck0; + uint32 swap_alivecheck1; + uint32 rxcount_per_ant0; + uint32 rxcount_per_ant1; + uint32 acc_rxcount; + uint32 acc_rxcount_per_ant0; + uint32 acc_rxcount_per_ant1; + uint32 tx_auto_en; + uint32 tx_active_ant; + uint32 rx_policy; + uint32 tx_policy; + uint32 cell_policy; +} wlc_swdiv_stats_t; + +#define WLC_NUMRATES 16 /**< max # of rates in a rateset */ typedef struct wlc_rateset { - uint32 count; /* number of rates in rates[] */ - uint8 rates[WLC_NUMRATES]; /* rates in 500kbps units w/hi bit set if basic */ - 
uint8 htphy_membership; /* HT PHY Membership */ - uint8 mcs[MCSSET_LEN]; /* supported mcs index bit map */ - uint16 vht_mcsmap; /* supported vht mcs nss bit map */ + uint32 count; /**< number of rates in rates[] */ + uint8 rates[WLC_NUMRATES]; /**< rates in 500kbps units w/hi bit set if basic */ + uint8 htphy_membership; /**< HT PHY Membership */ + uint8 mcs[MCSSET_LEN]; /**< supported mcs index bit map */ + uint16 vht_mcsmap; /**< supported vht mcs nss bit map */ + uint16 vht_mcsmap_prop; /**< supported prop vht mcs nss bit map */ } wlc_rateset_t; /* Used to get specific STA parameters */ @@ -1077,11 +1287,10 @@ typedef struct channel_info { /* For ioctls that take a list of MAC addresses */ typedef struct maclist { - uint count; /* number of MAC addresses */ - struct ether_addr ea[1]; /* variable length array of MAC addresses */ + uint count; /**< number of MAC addresses */ + struct ether_addr ea[1]; /**< variable length array of MAC addresses */ } maclist_t; -#ifndef LINUX_POSTMOGRIFY_REMOVAL /* get pkt count struct passed through ioctl */ typedef struct get_pktcnt { uint rx_good_pkt; @@ -1125,39 +1334,38 @@ typedef struct { /* Get MAC specific rate histogram command */ typedef struct { - struct ether_addr ea; /* MAC Address */ - uint8 ac_cat; /* Access Category */ - uint8 num_pkts; /* Number of packet entries to be averaged */ -} wl_mac_ratehisto_cmd_t; /* MAC Specific Rate Histogram command */ + struct ether_addr ea; /**< MAC Address */ + uint8 ac_cat; /**< Access Category */ + uint8 num_pkts; /**< Number of packet entries to be averaged */ +} wl_mac_ratehisto_cmd_t; /**< MAC Specific Rate Histogram command */ /* Get MAC rate histogram response */ typedef struct { - uint32 rate[DOT11_RATE_MAX + 1]; /* Rates */ - uint32 mcs[WL_RATESET_SZ_HT_MCS * WL_TX_CHAINS_MAX]; /* MCS counts */ - uint32 vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX]; /* VHT counts */ - uint32 tsf_timer[2][2]; /* Start and End time for 8bytes value */ -} wl_mac_ratehisto_res_t; /* MAC Specific 
Rate Histogram Response */ - -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ + uint32 rate[DOT11_RATE_MAX + 1]; /**< Rates */ + uint32 mcs[WL_RATESET_SZ_HT_IOCTL * WL_TX_CHAINS_MAX]; /**< MCS counts */ + uint32 vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX]; /**< VHT counts */ + uint32 tsf_timer[2][2]; /**< Start and End time for 8bytes value */ + uint32 prop11n_mcs[WLC_11N_LAST_PROP_MCS - WLC_11N_FIRST_PROP_MCS + 1]; /* MCS counts */ +} wl_mac_ratehisto_res_t; /**< MAC Specific Rate Histogram Response */ /* Linux network driver ioctl encoding */ typedef struct wl_ioctl { - uint cmd; /* common ioctl definition */ - void *buf; /* pointer to user buffer */ - uint len; /* length of user buffer */ - uint8 set; /* 1=set IOCTL; 0=query IOCTL */ - uint used; /* bytes read or written (optional) */ - uint needed; /* bytes needed (optional) */ + uint cmd; /**< common ioctl definition */ + void *buf; /**< pointer to user buffer */ + uint len; /**< length of user buffer */ + uint8 set; /**< 1=set IOCTL; 0=query IOCTL */ + uint used; /**< bytes read or written (optional) */ + uint needed; /**< bytes needed (optional) */ } wl_ioctl_t; #ifdef CONFIG_COMPAT typedef struct compat_wl_ioctl { - uint cmd; /* common ioctl definition */ - uint32 buf; /* pointer to user buffer */ - uint len; /* length of user buffer */ - uint8 set; /* 1=set IOCTL; 0=query IOCTL */ - uint used; /* bytes read or written (optional) */ - uint needed; /* bytes needed (optional) */ + uint cmd; /**< common ioctl definition */ + uint32 buf; /**< pointer to user buffer */ + uint len; /**< length of user buffer */ + uint8 set; /**< 1=set IOCTL; 0=query IOCTL */ + uint used; /**< bytes read or written (optional) */ + uint needed; /**< bytes needed (optional) */ } compat_wl_ioctl_t; #endif /* CONFIG_COMPAT */ @@ -1168,30 +1376,29 @@ typedef struct compat_wl_ioctl { #define WL_NUM_RATES_VHT 10 #define WL_NUM_RATES_MCS32 1 -#ifndef LINUX_POSTMOGRIFY_REMOVAL /* * Structure for passing hardware and software * revision info up 
from the driver. */ typedef struct wlc_rev_info { - uint vendorid; /* PCI vendor id */ - uint deviceid; /* device id of chip */ - uint radiorev; /* radio revision */ - uint chiprev; /* chip revision */ - uint corerev; /* core revision */ - uint boardid; /* board identifier (usu. PCI sub-device id) */ - uint boardvendor; /* board vendor (usu. PCI sub-vendor id) */ - uint boardrev; /* board revision */ - uint driverrev; /* driver version */ - uint ucoderev; /* microcode version */ - uint bus; /* bus type */ - uint chipnum; /* chip number */ - uint phytype; /* phy type */ - uint phyrev; /* phy revision */ - uint anarev; /* anacore rev */ - uint chippkg; /* chip package info */ - uint nvramrev; /* nvram revision number */ + uint vendorid; /**< PCI vendor id */ + uint deviceid; /**< device id of chip */ + uint radiorev; /**< radio revision */ + uint chiprev; /**< chip revision */ + uint corerev; /**< core revision */ + uint boardid; /**< board identifier (usu. PCI sub-device id) */ + uint boardvendor; /**< board vendor (usu. PCI sub-vendor id) */ + uint boardrev; /**< board revision */ + uint driverrev; /**< driver version */ + uint ucoderev; /**< microcode version */ + uint bus; /**< bus type */ + uint chipnum; /**< chip number */ + uint phytype; /**< phy type */ + uint phyrev; /**< phy revision */ + uint anarev; /**< anacore rev */ + uint chippkg; /**< chip package info */ + uint nvramrev; /**< nvram revision number */ } wlc_rev_info_t; #define WL_REV_INFO_LEGACY_LENGTH 48 @@ -1231,29 +1438,31 @@ typedef struct { #endif -#define WL_PHY_PAVARS_LEN 32 /* Phy type, Band range, chain, a1[0], b0[0], b1[0] ... */ -#define WL_PHY_PAVAR_VER 1 /* pavars version */ -#define WL_PHY_PAVARS2_NUM 3 /* a1, b0, b1 */ +#define WL_PHY_PAVARS_LEN 32 /**< Phytype, Bandrange, chain, a[0], b[0], c[0], d[0] .. 
*/ + + +#define WL_PHY_PAVAR_VER 1 /**< pavars version */ +#define WL_PHY_PAVARS2_NUM 3 /**< a1, b0, b1 */ typedef struct wl_pavars2 { - uint16 ver; /* version of this struct */ - uint16 len; /* len of this structure */ - uint16 inuse; /* driver return 1 for a1,b0,b1 in current band range */ - uint16 phy_type; /* phy type */ + uint16 ver; /**< version of this struct */ + uint16 len; /**< len of this structure */ + uint16 inuse; /**< driver return 1 for a1,b0,b1 in current band range */ + uint16 phy_type; /**< phy type */ uint16 bandrange; uint16 chain; - uint16 inpa[WL_PHY_PAVARS2_NUM]; /* phy pavars for one band range */ + uint16 inpa[WL_PHY_PAVARS2_NUM]; /**< phy pavars for one band range */ } wl_pavars2_t; typedef struct wl_po { - uint16 phy_type; /* Phy type */ + uint16 phy_type; /**< Phy type */ uint16 band; uint16 cckpo; uint32 ofdmpo; uint16 mcspo[8]; } wl_po_t; -#define WL_NUM_RPCALVARS 5 /* number of rpcal vars */ +#define WL_NUM_RPCALVARS 5 /**< number of rpcal vars */ typedef struct wl_rpcal { uint16 value; @@ -1265,14 +1474,14 @@ typedef struct wl_aci_args { int exit_aci_thresh; /* Trigger level to exit ACI mode */ int usec_spin; /* microsecs to delay between rssi samples */ int glitch_delay; /* interval between ACI scans when glitch count is consistently high */ - uint16 nphy_adcpwr_enter_thresh; /* ADC power to enter ACI mitigation mode */ - uint16 nphy_adcpwr_exit_thresh; /* ADC power to exit ACI mitigation mode */ - uint16 nphy_repeat_ctr; /* Number of tries per channel to compute power */ - uint16 nphy_num_samples; /* Number of samples to compute power on one channel */ - uint16 nphy_undetect_window_sz; /* num of undetects to exit ACI Mitigation mode */ - uint16 nphy_b_energy_lo_aci; /* low ACI power energy threshold for bphy */ - uint16 nphy_b_energy_md_aci; /* mid ACI power energy threshold for bphy */ - uint16 nphy_b_energy_hi_aci; /* high ACI power energy threshold for bphy */ + uint16 nphy_adcpwr_enter_thresh; /**< ADC power to enter ACI 
mitigation mode */ + uint16 nphy_adcpwr_exit_thresh; /**< ADC power to exit ACI mitigation mode */ + uint16 nphy_repeat_ctr; /**< Number of tries per channel to compute power */ + uint16 nphy_num_samples; /**< Number of samples to compute power on one channel */ + uint16 nphy_undetect_window_sz; /**< num of undetects to exit ACI Mitigation mode */ + uint16 nphy_b_energy_lo_aci; /**< low ACI power energy threshold for bphy */ + uint16 nphy_b_energy_md_aci; /**< mid ACI power energy threshold for bphy */ + uint16 nphy_b_energy_hi_aci; /**< high ACI power energy threshold for bphy */ uint16 nphy_noise_noassoc_glitch_th_up; /* wl interference 4 */ uint16 nphy_noise_noassoc_glitch_th_dn; uint16 nphy_noise_assoc_glitch_th_up; @@ -1287,8 +1496,8 @@ typedef struct wl_aci_args { uint16 nphy_noise_crsidx_decr; } wl_aci_args_t; -#define WL_ACI_ARGS_LEGACY_LENGTH 16 /* bytes of pre NPHY aci args */ -#define WL_SAMPLECOLLECT_T_VERSION 2 /* version of wl_samplecollect_args_t struct */ +#define WL_ACI_ARGS_LEGACY_LENGTH 16 /**< bytes of pre NPHY aci args */ +#define WL_SAMPLECOLLECT_T_VERSION 2 /**< version of wl_samplecollect_args_t struct */ typedef struct wl_samplecollect_args { /* version 0 fields */ uint8 coll_us; @@ -1304,8 +1513,8 @@ typedef struct wl_samplecollect_args { uint8 gpio_sel; uint8 downsamp; uint8 be_deaf; - uint8 agc; /* loop from init gain and going down */ - uint8 filter; /* override high pass corners to lowest */ + uint8 agc; /**< loop from init gain and going down */ + uint8 filter; /**< override high pass corners to lowest */ /* add'l version 2 fields */ uint8 trigger_state; uint8 module_sel1; @@ -1315,73 +1524,83 @@ typedef struct wl_samplecollect_args { uint32 gpioCapMask; } wl_samplecollect_args_t; -#define WL_SAMPLEDATA_T_VERSION 1 /* version of wl_samplecollect_args_t struct */ +#define WL_SAMPLEDATA_T_VERSION 1 /**< version of wl_samplecollect_args_t struct */ /* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */ #define 
WL_SAMPLEDATA_T_VERSION_SPEC_AN 2 typedef struct wl_sampledata { - uint16 version; /* structure version */ - uint16 size; /* size of structure */ - uint16 tag; /* Header/Data */ - uint16 length; /* data length */ - uint32 flag; /* bit def */ + uint16 version; /**< structure version */ + uint16 size; /**< size of structure */ + uint16 tag; /**< Header/Data */ + uint16 length; /**< data length */ + uint32 flag; /**< bit def */ } wl_sampledata_t; /* WL_OTA START */ /* OTA Test Status */ enum { - WL_OTA_TEST_IDLE = 0, /* Default Idle state */ - WL_OTA_TEST_ACTIVE = 1, /* Test Running */ - WL_OTA_TEST_SUCCESS = 2, /* Successfully Finished Test */ - WL_OTA_TEST_FAIL = 3 /* Test Failed in the Middle */ + WL_OTA_TEST_IDLE = 0, /**< Default Idle state */ + WL_OTA_TEST_ACTIVE = 1, /**< Test Running */ + WL_OTA_TEST_SUCCESS = 2, /**< Successfully Finished Test */ + WL_OTA_TEST_FAIL = 3 /**< Test Failed in the Middle */ }; /* OTA SYNC Status */ enum { - WL_OTA_SYNC_IDLE = 0, /* Idle state */ - WL_OTA_SYNC_ACTIVE = 1, /* Waiting for Sync */ - WL_OTA_SYNC_FAIL = 2 /* Sync pkt not recieved */ + WL_OTA_SYNC_IDLE = 0, /**< Idle state */ + WL_OTA_SYNC_ACTIVE = 1, /**< Waiting for Sync */ + WL_OTA_SYNC_FAIL = 2 /**< Sync pkt not recieved */ }; /* Various error states dut can get stuck during test */ enum { - WL_OTA_SKIP_TEST_CAL_FAIL = 1, /* Phy calibration failed */ - WL_OTA_SKIP_TEST_SYNCH_FAIL = 2, /* Sync Packet not recieved */ - WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3, /* Cmd flow file download failed */ - WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4, /* No test found in Flow file */ - WL_OTA_SKIP_TEST_WL_NOT_UP = 5, /* WL UP failed */ - WL_OTA_SKIP_TEST_UNKNOWN_CALL /* Unintentional scheduling on ota test */ + WL_OTA_SKIP_TEST_CAL_FAIL = 1, /**< Phy calibration failed */ + WL_OTA_SKIP_TEST_SYNCH_FAIL = 2, /**< Sync Packet not recieved */ + WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3, /**< Cmd flow file download failed */ + WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4, /**< No test found in Flow file */ + 
WL_OTA_SKIP_TEST_WL_NOT_UP = 5, /**< WL UP failed */ + WL_OTA_SKIP_TEST_UNKNOWN_CALL /**< Unintentional scheduling on ota test */ }; /* Differentiator for ota_tx and ota_rx */ enum { - WL_OTA_TEST_TX = 0, /* ota_tx */ - WL_OTA_TEST_RX = 1, /* ota_rx */ + WL_OTA_TEST_TX = 0, /**< ota_tx */ + WL_OTA_TEST_RX = 1, /**< ota_rx */ }; /* Catch 3 modes of operation: 20Mhz, 40Mhz, 20 in 40 Mhz */ enum { - WL_OTA_TEST_BW_20_IN_40MHZ = 0, /* 20 in 40 operation */ - WL_OTA_TEST_BW_20MHZ = 1, /* 20 Mhz operation */ - WL_OTA_TEST_BW_40MHZ = 2 /* full 40Mhz operation */ + WL_OTA_TEST_BW_20_IN_40MHZ = 0, /**< 20 in 40 operation */ + WL_OTA_TEST_BW_20MHZ = 1, /**< 20 Mhz operation */ + WL_OTA_TEST_BW_40MHZ = 2, /**< full 40Mhz operation */ + WL_OTA_TEST_BW_80MHZ = 3 /* full 80Mhz operation */ }; + +#define HT_MCS_INUSE 0x00000080 /* HT MCS in use,indicates b0-6 holds an mcs */ +#define VHT_MCS_INUSE 0x00000100 /* VHT MCS in use,indicates b0-6 holds an mcs */ +#define OTA_RATE_MASK 0x0000007f /* rate/mcs value */ +#define OTA_STF_SISO 0 +#define OTA_STF_CDD 1 +#define OTA_STF_STBC 2 +#define OTA_STF_SDM 3 + typedef struct ota_rate_info { - uint8 rate_cnt; /* Total number of rates */ - uint8 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE]; /* array of rates from 1mbps to 130mbps */ - /* for legacy rates : ratein mbps * 2 */ - /* for HT rates : mcs index */ + uint8 rate_cnt; /**< Total number of rates */ + uint16 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE]; /**< array of rates from 1mbps to 130mbps */ + /**< for legacy rates : ratein mbps * 2 */ + /**< for HT rates : mcs index */ } ota_rate_info_t; typedef struct ota_power_info { - int8 pwr_ctrl_on; /* power control on/off */ - int8 start_pwr; /* starting power/index */ - int8 delta_pwr; /* delta power/index */ - int8 end_pwr; /* end power/index */ + int8 pwr_ctrl_on; /**< power control on/off */ + int8 start_pwr; /**< starting power/index */ + int8 delta_pwr; /**< delta power/index */ + int8 end_pwr; /**< end power/index */ } ota_power_info_t; 
typedef struct ota_packetengine { uint16 delay; /* Inter-packet delay */ - /* for ota_tx, delay is tx ifs in micro seconds */ + /**< for ota_tx, delay is tx ifs in micro seconds */ /* for ota_rx, delay is wait time in milliseconds */ uint16 nframes; /* Number of frames */ uint16 length; /* Packet length */ @@ -1389,61 +1608,82 @@ typedef struct ota_packetengine { /* Test info vector */ typedef struct wl_ota_test_args { - uint8 cur_test; /* test phase */ - uint8 chan; /* channel */ - uint8 bw; /* bandwidth */ - uint8 control_band; /* control band */ - uint8 stf_mode; /* stf mode */ - ota_rate_info_t rt_info; /* Rate info */ - ota_packetengine_t pkteng; /* packeteng info */ - uint8 txant; /* tx antenna */ - uint8 rxant; /* rx antenna */ - ota_power_info_t pwr_info; /* power sweep info */ - uint8 wait_for_sync; /* wait for sync or not */ + uint8 cur_test; /**< test phase */ + uint8 chan; /**< channel */ + uint8 bw; /**< bandwidth */ + uint8 control_band; /**< control band */ + uint8 stf_mode; /**< stf mode */ + ota_rate_info_t rt_info; /**< Rate info */ + ota_packetengine_t pkteng; /**< packeteng info */ + uint8 txant; /**< tx antenna */ + uint8 rxant; /**< rx antenna */ + ota_power_info_t pwr_info; /**< power sweep info */ + uint8 wait_for_sync; /**< wait for sync or not */ + uint8 ldpc; + uint8 sgi; + /* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */ } wl_ota_test_args_t; +#define WL_OTA_TESTVEC_T_VERSION 1 /* version of wl_ota_test_vector_t struct */ typedef struct wl_ota_test_vector { - wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ]; /* Test argument struct */ - uint16 test_cnt; /* Total no of test */ - uint8 file_dwnld_valid; /* File successfully downloaded */ - uint8 sync_timeout; /* sync packet timeout */ - int8 sync_fail_action; /* sync fail action */ - struct ether_addr sync_mac; /* macaddress for sync pkt */ - struct ether_addr tx_mac; /* macaddress for tx */ - struct ether_addr rx_mac; /* macaddress for rx */ - int8 
loop_test; /* dbg feature to loop the test */ + uint16 version; + wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ]; /**< Test argument struct */ + uint16 test_cnt; /**< Total no of test */ + uint8 file_dwnld_valid; /**< File successfully downloaded */ + uint8 sync_timeout; /**< sync packet timeout */ + int8 sync_fail_action; /**< sync fail action */ + struct ether_addr sync_mac; /**< macaddress for sync pkt */ + struct ether_addr tx_mac; /**< macaddress for tx */ + struct ether_addr rx_mac; /**< macaddress for rx */ + int8 loop_test; /**< dbg feature to loop the test */ + uint16 test_rxcnt; + /* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */ } wl_ota_test_vector_t; /* struct copied back form dongle to host to query the status */ typedef struct wl_ota_test_status { - int16 cur_test_cnt; /* test phase */ - int8 skip_test_reason; /* skip test reasoin */ - wl_ota_test_args_t test_arg; /* cur test arg details */ - uint16 test_cnt; /* total no of test downloaded */ - uint8 file_dwnld_valid; /* file successfully downloaded ? */ - uint8 sync_timeout; /* sync timeout */ - int8 sync_fail_action; /* sync fail action */ - struct ether_addr sync_mac; /* macaddress for sync pkt */ - struct ether_addr tx_mac; /* tx mac address */ - struct ether_addr rx_mac; /* rx mac address */ - uint8 test_stage; /* check the test status */ - int8 loop_test; /* Debug feature to puts test enfine in a loop */ - uint8 sync_status; /* sync status */ + int16 cur_test_cnt; /**< test phase */ + int8 skip_test_reason; /**< skip test reasoin */ + wl_ota_test_args_t test_arg; /**< cur test arg details */ + uint16 test_cnt; /**< total no of test downloaded */ + uint8 file_dwnld_valid; /**< file successfully downloaded ? 
*/ + uint8 sync_timeout; /**< sync timeout */ + int8 sync_fail_action; /**< sync fail action */ + struct ether_addr sync_mac; /**< macaddress for sync pkt */ + struct ether_addr tx_mac; /**< tx mac address */ + struct ether_addr rx_mac; /**< rx mac address */ + uint8 test_stage; /**< check the test status */ + int8 loop_test; /**< Debug feature to puts test enfine in a loop */ + uint8 sync_status; /**< sync status */ } wl_ota_test_status_t; +typedef struct wl_ota_rx_rssi { + uint16 pktcnt; /* Pkt count used for this rx test */ + chanspec_t chanspec; /* Channel info on which the packets are received */ + int16 rssi; /* Average RSSI of the first 50% packets received */ +} wl_ota_rx_rssi_t; + +#define WL_OTARSSI_T_VERSION 1 /* version of wl_ota_test_rssi_t struct */ +#define WL_OTA_TEST_RSSI_FIXED_SIZE OFFSETOF(wl_ota_test_rssi_t, rx_rssi) + +typedef struct wl_ota_test_rssi { + uint8 version; + uint8 testcnt; /* total measured RSSI values, valid on output only */ + wl_ota_rx_rssi_t rx_rssi[1]; /* Variable length array of wl_ota_rx_rssi_t */ +} wl_ota_test_rssi_t; /* WL_OTA END */ /* wl_radar_args_t */ typedef struct { - int npulses; /* required number of pulses at n * t_int */ - int ncontig; /* required number of pulses at t_int */ - int min_pw; /* minimum pulse width (20 MHz clocks) */ - int max_pw; /* maximum pulse width (20 MHz clocks) */ - uint16 thresh0; /* Radar detection, thresh 0 */ - uint16 thresh1; /* Radar detection, thresh 1 */ - uint16 blank; /* Radar detection, blank control */ - uint16 fmdemodcfg; /* Radar detection, fmdemod config */ + int npulses; /**< required number of pulses at n * t_int */ + int ncontig; /**< required number of pulses at t_int */ + int min_pw; /**< minimum pulse width (20 MHz clocks) */ + int max_pw; /**< maximum pulse width (20 MHz clocks) */ + uint16 thresh0; /**< Radar detection, thresh 0 */ + uint16 thresh1; /**< Radar detection, thresh 1 */ + uint16 blank; /**< Radar detection, blank control */ + uint16 fmdemodcfg; /**< Radar 
detection, fmdemod config */ int npulses_lp; /* Radar detection, minimum long pulses */ int min_pw_lp; /* Minimum pulsewidth for long pulses */ int max_pw_lp; /* Maximum pulsewidth for long pulses */ @@ -1451,20 +1691,20 @@ typedef struct { int max_span_lp; /* Maximum deltat for long pulses */ int min_deltat; /* Minimum spacing between pulses */ int max_deltat; /* Maximum spacing between pulses */ - uint16 autocorr; /* Radar detection, autocorr on or off */ - uint16 st_level_time; /* Radar detection, start_timing level */ + uint16 autocorr; /**< Radar detection, autocorr on or off */ + uint16 st_level_time; /**< Radar detection, start_timing level */ uint16 t2_min; /* minimum clocks needed to remain in state 2 */ uint32 version; /* version */ - uint32 fra_pulse_err; /* sample error margin for detecting French radar pulsed */ + uint32 fra_pulse_err; /**< sample error margin for detecting French radar pulsed */ int npulses_fra; /* Radar detection, minimum French pulses set */ int npulses_stg2; /* Radar detection, minimum staggered-2 pulses set */ int npulses_stg3; /* Radar detection, minimum staggered-3 pulses set */ - uint16 percal_mask; /* defines which period cal is masked from radar detection */ - int quant; /* quantization resolution to pulse positions */ - uint32 min_burst_intv_lp; /* minimum burst to burst interval for bin3 radar */ - uint32 max_burst_intv_lp; /* maximum burst to burst interval for bin3 radar */ - int nskip_rst_lp; /* number of skipped pulses before resetting lp buffer */ - int max_pw_tol; /* maximum tollerance allowed in detected pulse width for radar detection */ + uint16 percal_mask; /**< defines which period cal is masked from radar detection */ + int quant; /**< quantization resolution to pulse positions */ + uint32 min_burst_intv_lp; /**< minimum burst to burst interval for bin3 radar */ + uint32 max_burst_intv_lp; /**< maximum burst to burst interval for bin3 radar */ + int nskip_rst_lp; /**< number of skipped pulses before resetting lp 
buffer */ + int max_pw_tol; /**< maximum tolerance allowd in detected pulse width for radar detection */ uint16 feature_mask; /* 16-bit mask to specify enabled features */ } wl_radar_args_t; @@ -1496,15 +1736,15 @@ typedef struct { /* RSSI per antenna */ typedef struct { - uint32 version; /* version field */ - uint32 count; /* number of valid antenna rssi */ - int8 rssi_ant[WL_RSSI_ANT_MAX]; /* rssi per antenna */ + uint32 version; /**< version field */ + uint32 count; /**< number of valid antenna rssi */ + int8 rssi_ant[WL_RSSI_ANT_MAX]; /**< rssi per antenna */ } wl_rssi_ant_t; /* data structure used in 'dfs_status' wl interface, which is used to query dfs status */ typedef struct { - uint state; /* noted by WL_DFS_CACSTATE_XX. */ - uint duration; /* time spent in ms in state. */ + uint state; /**< noted by WL_DFS_CACSTATE_XX. */ + uint duration; /**< time spent in ms in state. */ /* as dfs enters ISM state, it removes the operational channel from quiet channel * list and notes the channel in channel_cleared. 
set to 0 if no channel is cleared */ @@ -1513,6 +1753,31 @@ typedef struct { uint16 pad; } wl_dfs_status_t; +typedef struct { + uint state; /* noted by WL_DFS_CACSTATE_XX */ + uint duration; /* time spent in ms in state */ + chanspec_t chanspec; /* chanspec of this core */ + chanspec_t chanspec_last_cleared; /* chanspec last cleared for operation by scanning */ + uint16 sub_type; /* currently just the index of the core or the respective PLL */ + uint16 pad; +} wl_dfs_sub_status_t; + +#define WL_DFS_STATUS_ALL_VERSION (1) +typedef struct { + uint16 version; /* version field; current max version 1 */ + uint16 num_sub_status; + wl_dfs_sub_status_t dfs_sub_status[1]; /* struct array of length num_sub_status */ +} wl_dfs_status_all_t; + +#define WL_DFS_AP_MOVE_VERSION (1) +typedef struct wl_dfs_ap_move_status { + int8 version; /* version field; current max version 1 */ + int8 move_status; /* DFS move status */ + chanspec_t chanspec; /* New AP Chanspec */ + wl_dfs_status_all_t scan_status; /* status; see dfs_status_all for wl_dfs_status_all_t */ +} wl_dfs_ap_move_status_t; + + /* data structure used in 'radar_status' wl interface, which is use to query radar det status */ typedef struct { bool detected; @@ -1533,19 +1798,19 @@ typedef struct { #define NUM_PWRCTRL_RATES 12 typedef struct { - uint8 txpwr_band_max[NUM_PWRCTRL_RATES]; /* User set target */ - uint8 txpwr_limit[NUM_PWRCTRL_RATES]; /* reg and local power limit */ - uint8 txpwr_local_max; /* local max according to the AP */ - uint8 txpwr_local_constraint; /* local constraint according to the AP */ - uint8 txpwr_chan_reg_max; /* Regulatory max for this channel */ - uint8 txpwr_target[2][NUM_PWRCTRL_RATES]; /* Latest target for 2.4 and 5 Ghz */ - uint8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */ - uint8 txpwr_opo[NUM_PWRCTRL_RATES]; /* On G phy, OFDM power offset */ - uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES]; /* Max CCK power for this band (SROM) */ - uint8 txpwr_bphy_ofdm_max; /* Max OFDM power 
for this band (SROM) */ - uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES]; /* Max power for A band (SROM) */ - int8 txpwr_antgain[2]; /* Ant gain for each band - from SROM */ - uint8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */ + uint8 txpwr_band_max[NUM_PWRCTRL_RATES]; /**< User set target */ + uint8 txpwr_limit[NUM_PWRCTRL_RATES]; /**< reg and local power limit */ + uint8 txpwr_local_max; /**< local max according to the AP */ + uint8 txpwr_local_constraint; /**< local constraint according to the AP */ + uint8 txpwr_chan_reg_max; /**< Regulatory max for this channel */ + uint8 txpwr_target[2][NUM_PWRCTRL_RATES]; /**< Latest target for 2.4 and 5 Ghz */ + uint8 txpwr_est_Pout[2]; /**< Latest estimate for 2.4 and 5 Ghz */ + uint8 txpwr_opo[NUM_PWRCTRL_RATES]; /**< On G phy, OFDM power offset */ + uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES]; /**< Max CCK power for this band (SROM) */ + uint8 txpwr_bphy_ofdm_max; /**< Max OFDM power for this band (SROM) */ + uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES]; /**< Max power for A band (SROM) */ + int8 txpwr_antgain[2]; /**< Ant gain for each band - from SROM */ + uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */ } tx_power_legacy_t; #define WL_TX_POWER_RATES_LEGACY 45 @@ -1572,29 +1837,18 @@ typedef struct { uint8 target[WL_TX_POWER_RATES_LEGACY]; /* Latest target power */ } tx_power_legacy2_t; -/* TX Power index defines */ -#define WLC_NUM_RATES_CCK WL_NUM_RATES_CCK -#define WLC_NUM_RATES_OFDM WL_NUM_RATES_OFDM -#define WLC_NUM_RATES_MCS_1_STREAM WL_NUM_RATES_MCS_1STREAM -#define WLC_NUM_RATES_MCS_2_STREAM WL_NUM_RATES_MCS_1STREAM -#define WLC_NUM_RATES_MCS32 WL_NUM_RATES_MCS32 -#define WL_TX_POWER_CCK_NUM WL_NUM_RATES_CCK -#define WL_TX_POWER_OFDM_NUM WL_NUM_RATES_OFDM -#define WL_TX_POWER_MCS_1_STREAM_NUM WL_NUM_RATES_MCS_1STREAM -#define WL_TX_POWER_MCS_2_STREAM_NUM WL_NUM_RATES_MCS_1STREAM -#define WL_TX_POWER_MCS_32_NUM WL_NUM_RATES_MCS32 - #define WL_NUM_2x2_ELEMENTS 4 #define WL_NUM_3x3_ELEMENTS 6 +#define 
WL_NUM_4x4_ELEMENTS 10 typedef struct { - uint16 ver; /* version of this struct */ - uint16 len; /* length in bytes of this structure */ + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ uint32 flags; - chanspec_t chanspec; /* txpwr report for this channel */ - chanspec_t local_chanspec; /* channel on which we are associated */ - uint32 buflen; /* ppr buffer length */ - uint8 pprbuf[1]; /* Latest target power buffer */ + chanspec_t chanspec; /**< txpwr report for this channel */ + chanspec_t local_chanspec; /**< channel on which we are associated */ + uint32 buflen; /**< ppr buffer length */ + uint8 pprbuf[1]; /**< Latest target power buffer */ } wl_txppr_t; #define WL_TXPPR_VERSION 1 @@ -1610,23 +1864,23 @@ typedef struct chanspec_txpwr_max { } chanspec_txpwr_max_t; typedef struct wl_chanspec_txpwr_max { - uint16 ver; /* version of this struct */ - uint16 len; /* length in bytes of this structure */ - uint32 count; /* number of elements of (chanspec, txpwr_max) pair */ - chanspec_txpwr_max_t txpwr[1]; /* array of (chanspec, max_txpwr) pair */ + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint32 count; /**< number of elements of (chanspec, txpwr_max) pair */ + chanspec_txpwr_max_t txpwr[1]; /**< array of (chanspec, max_txpwr) pair */ } wl_chanspec_txpwr_max_t; #define WL_CHANSPEC_TXPWR_MAX_VER 1 #define WL_CHANSPEC_TXPWR_MAX_LEN (sizeof(wl_chanspec_txpwr_max_t)) typedef struct tx_inst_power { - uint8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */ - uint8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */ + uint8 txpwr_est_Pout[2]; /**< Latest estimate for 2.4 and 5 Ghz */ + uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */ } tx_inst_power_t; #define WL_NUM_TXCHAIN_MAX 4 typedef struct wl_txchain_pwr_offsets { - int8 offset[WL_NUM_TXCHAIN_MAX]; /* quarter dBm signed offset for each chain */ + int8 offset[WL_NUM_TXCHAIN_MAX]; /**< 
quarter dBm signed offset for each chain */ } wl_txchain_pwr_offsets_t; /* maximum channels returned by the get valid channels iovar */ #define WL_NUMCHANNELS 64 @@ -1675,7 +1929,6 @@ typedef struct wl_txchain_pwr_offsets { struct tsinfo_arg { uint8 octets[3]; }; -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ #define RATE_CCK_1MBPS 0 #define RATE_CCK_2MBPS 1 @@ -1694,11 +1947,11 @@ struct tsinfo_arg { #define WL_BSSTRANS_RSSI_RATE_MAP_VERSION 1 typedef struct wl_bsstrans_rssi { - int8 rssi_2g; /* RSSI in dbm for 2.4 G */ - int8 rssi_5g; /* RSSI in dbm for 5G, unused for cck */ + int8 rssi_2g; /**< RSSI in dbm for 2.4 G */ + int8 rssi_5g; /**< RSSI in dbm for 5G, unused for cck */ } wl_bsstrans_rssi_t; -#define RSSI_RATE_MAP_MAX_STREAMS 4 /* max streams supported */ +#define RSSI_RATE_MAP_MAX_STREAMS 4 /**< max streams supported */ /* RSSI to rate mapping, all 20Mhz, no SGI */ typedef struct wl_bsstrans_rssi_rate_map { @@ -1719,189 +1972,701 @@ typedef struct wl_bsstrans_roamthrottle { uint16 scans_allowed; } wl_bsstrans_roamthrottle_t; -#define NFIFO 6 /* # tx/rx fifopairs */ +#define NFIFO 6 /**< # tx/rx fifopairs */ #define NREINITREASONCOUNT 8 #define REINITREASONIDX(_x) (((_x) < NREINITREASONCOUNT) ? (_x) : 0) -#define WL_CNT_T_VERSION 10 /* current version of wl_cnt_t struct */ +#define WL_CNT_T_VERSION 30 /**< current version of wl_cnt_t struct */ +#define WL_CNT_VERSION_6 6 +#define WL_CNT_VERSION_11 11 + +#define WLC_WITH_XTLV_CNT + +/* + * tlv IDs uniquely identifies counter component + * packed into wl_cmd_t container + */ +enum wl_cnt_xtlv_id { + WL_CNT_XTLV_WLC = 0x100, /**< WLC layer counters */ + WL_CNT_XTLV_CNTV_LE10_UCODE = 0x200, /**< wl counter ver < 11 UCODE MACSTAT */ + WL_CNT_XTLV_LT40_UCODE_V1 = 0x300, /**< corerev < 40 UCODE MACSTAT */ + WL_CNT_XTLV_GE40_UCODE_V1 = 0x400, /**< corerev >= 40 UCODE MACSTAT */ + WL_CNT_XTLV_GE64_UCODEX_V1 = 0x800 /* corerev >= 64 UCODEX MACSTAT */ +}; + +/* The number of variables in wl macstat cnt struct. 
+ * (wl_cnt_ge40mcst_v1_t, wl_cnt_lt40mcst_v1_t, wl_cnt_v_le10_mcst_t) + */ +#define WL_CNT_MCST_VAR_NUM 64 +/* sizeof(wl_cnt_ge40mcst_v1_t), sizeof(wl_cnt_lt40mcst_v1_t), and sizeof(wl_cnt_v_le10_mcst_t) */ +#define WL_CNT_MCST_STRUCT_SZ ((uint)sizeof(uint32) * WL_CNT_MCST_VAR_NUM) +#define INVALID_CNT_VAL (uint32)(-1) +#define WL_CNT_MCXST_STRUCT_SZ ((uint)sizeof(wl_cnt_ge64mcxst_v1_t)) + +#define WL_XTLV_CNTBUF_MAX_SIZE ((uint)(OFFSETOF(wl_cnt_info_t, data)) + \ + (uint)BCM_XTLV_HDR_SIZE + (uint)sizeof(wl_cnt_wlc_t) + \ + (uint)BCM_XTLV_HDR_SIZE + WL_CNT_MCST_STRUCT_SZ + \ + (uint)BCM_XTLV_HDR_SIZE + WL_CNT_MCXST_STRUCT_SZ) + +#define WL_CNTBUF_MAX_SIZE MAX(WL_XTLV_CNTBUF_MAX_SIZE, (uint)sizeof(wl_cnt_ver_11_t)) + +/* Top structure of counters IOVar buffer */ typedef struct { - uint16 version; /* see definition of WL_CNT_T_VERSION */ - uint16 length; /* length of entire structure */ + uint16 version; /**< see definition of WL_CNT_T_VERSION */ + uint16 datalen; /**< length of data including all paddings. */ + uint8 data [1]; /**< variable length payload: + * 1 or more bcm_xtlv_t type of tuples. + * each tuple is padded to multiple of 4 bytes. + * 'datalen' field of this structure includes all paddings. 
+ */ +} wl_cnt_info_t; +/* wlc layer counters */ +typedef struct { /* transmit stat counters */ - uint32 txframe; /* tx data frames */ - uint32 txbyte; /* tx data bytes */ - uint32 txretrans; /* tx mac retransmits */ - uint32 txerror; /* tx data errors (derived: sum of others) */ - uint32 txctl; /* tx management frames */ - uint32 txprshort; /* tx short preamble frames */ - uint32 txserr; /* tx status errors */ - uint32 txnobuf; /* tx out of buffers errors */ - uint32 txnoassoc; /* tx discard because we're not associated */ - uint32 txrunt; /* tx runt frames */ - uint32 txchit; /* tx header cache hit (fastpath) */ - uint32 txcmiss; /* tx header cache miss (slowpath) */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txerror; /**< tx data errors (derived: sum of others) */ + uint32 txctl; /**< tx management frames */ + uint32 txprshort; /**< tx short preamble frames */ + uint32 txserr; /**< tx status errors */ + uint32 txnobuf; /**< tx out of buffers errors */ + uint32 txnoassoc; /**< tx discard because we're not associated */ + uint32 txrunt; /**< tx runt frames */ + uint32 txchit; /**< tx header cache hit (fastpath) */ + uint32 txcmiss; /**< tx header cache miss (slowpath) */ /* transmit chip error counters */ - uint32 txuflo; /* tx fifo underflows */ - uint32 txphyerr; /* tx phy errors (indicated in tx status) */ + uint32 txuflo; /**< tx fifo underflows */ + uint32 txphyerr; /**< tx phy errors (indicated in tx status) */ uint32 txphycrs; /* receive stat counters */ - uint32 rxframe; /* rx data frames */ - uint32 rxbyte; /* rx data bytes */ - uint32 rxerror; /* rx data errors (derived: sum of others) */ - uint32 rxctl; /* rx management frames */ - uint32 rxnobuf; /* rx out of buffers errors */ - uint32 rxnondata; /* rx non data frames in the data channel errors */ - uint32 rxbadds; /* rx bad DS errors */ - uint32 rxbadcm; /* rx bad control or management frames */ - uint32 
rxfragerr; /* rx fragmentation errors */ - uint32 rxrunt; /* rx runt frames */ - uint32 rxgiant; /* rx giant frames */ - uint32 rxnoscb; /* rx no scb error */ - uint32 rxbadproto; /* rx invalid frames */ - uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */ - uint32 rxbadda; /* rx frames tossed for invalid da */ - uint32 rxfilter; /* rx frames filtered out */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + uint32 rxerror; /**< rx data errors (derived: sum of others) */ + uint32 rxctl; /**< rx management frames */ + uint32 rxnobuf; /**< rx out of buffers errors */ + uint32 rxnondata; /**< rx non data frames in the data channel errors */ + uint32 rxbadds; /**< rx bad DS errors */ + uint32 rxbadcm; /**< rx bad control or management frames */ + uint32 rxfragerr; /**< rx fragmentation errors */ + uint32 rxrunt; /**< rx runt frames */ + uint32 rxgiant; /**< rx giant frames */ + uint32 rxnoscb; /**< rx no scb error */ + uint32 rxbadproto; /**< rx invalid frames */ + uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */ + uint32 rxbadda; /**< rx frames tossed for invalid da */ + uint32 rxfilter; /**< rx frames filtered out */ /* receive chip error counters */ - uint32 rxoflo; /* rx fifo overflow errors */ - uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */ + uint32 rxoflo; /**< rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */ - uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */ - uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */ - uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */ + uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */ /* misc counters */ - uint32 dmade; /* tx/rx dma descriptor errors */ - uint32 dmada; /* tx/rx dma data errors */ - uint32 
dmape; /* tx/rx dma descriptor protocol errors */ - uint32 reset; /* reset count */ - uint32 tbtt; /* cnts the TBTT int's */ + uint32 dmade; /**< tx/rx dma descriptor errors */ + uint32 dmada; /**< tx/rx dma data errors */ + uint32 dmape; /**< tx/rx dma descriptor protocol errors */ + uint32 reset; /**< reset count */ + uint32 tbtt; /**< cnts the TBTT int's */ uint32 txdmawar; - uint32 pkt_callback_reg_fail; /* callbacks register failure */ + uint32 pkt_callback_reg_fail; /**< callbacks register failure */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ + uint32 txfrag; /**< dot11TransmittedFragmentCount */ + uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */ + uint32 txfail; /**< dot11FailedCount */ + uint32 txretry; /**< dot11RetryCount */ + uint32 txretrie; /**< dot11MultipleRetryCount */ + uint32 rxdup; /**< dot11FrameduplicateCount */ + uint32 txrts; /**< dot11RTSSuccessCount */ + uint32 txnocts; /**< dot11RTSFailureCount */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 rxfrag; /**< dot11ReceivedFragmentCount */ + uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /**< dot11FCSErrorCount */ + uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) 
*/ + uint32 rxundec; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /**< TKIPReplays */ + uint32 ccmpfmterr; /**< CCMPFormatErrors */ + uint32 ccmpreplay; /**< CCMPReplays */ + uint32 ccmpundec; /**< CCMPDecryptErrors */ + uint32 fourwayfail; /**< FourWayHandshakeFailures */ + uint32 wepundec; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr; /**< dot11WEPICVErrorCount */ + uint32 decsuccess; /**< DecryptSuccessCount */ + uint32 tkipicverr; /**< TKIPICVErrorCount */ + uint32 wepexcluded; /**< dot11WEPExcludedCount */ + + uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */ + uint32 psmwds; /**< Count PSM watchdogs */ + uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /**< PRQ entries read in */ + uint32 prq_undirected_entries; /**< which were bcast bss & ssid */ + uint32 prq_bad_entries; /**< which could not be translated to info */ + uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... 
*/ + uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ + /* per-rate receive stat counters */ + uint32 rx1mbps; /* packets rx at 1Mbps */ + uint32 rx2mbps; /* packets rx at 2Mbps */ + uint32 rx5mbps5; /* packets rx at 5.5Mbps */ + uint32 rx6mbps; /* packets rx at 6Mbps */ + uint32 rx9mbps; /* packets rx at 9Mbps */ + uint32 rx11mbps; /* packets rx at 11Mbps */ + uint32 rx12mbps; /* packets rx at 12Mbps */ + uint32 rx18mbps; /* packets rx at 18Mbps */ + uint32 rx24mbps; /* packets rx at 24Mbps */ + uint32 rx36mbps; /* packets rx at 36Mbps */ + uint32 rx48mbps; /* packets rx at 48Mbps */ + uint32 rx54mbps; /* packets rx at 54Mbps */ + uint32 rx108mbps; /* packets rx at 108mbps */ + uint32 rx162mbps; /* packets rx at 162mbps */ + uint32 rx216mbps; /* packets rx at 216 mbps */ + uint32 rx270mbps; /* packets rx at 270 mbps */ + uint32 rx324mbps; /* packets rx at 324 mbps */ + uint32 rx378mbps; /* packets rx at 378 mbps */ + uint32 rx432mbps; /* packets rx at 432 mbps */ + uint32 rx486mbps; /* packets rx at 486 mbps */ + uint32 rx540mbps; /* packets rx at 540 mbps */ + + uint32 rfdisable; /**< count of radio disables */ + + uint32 txexptime; /**< Tx frames suppressed due to timer expiration */ + + uint32 txmpdu_sgi; /**< count for sgi transmit */ + uint32 rxmpdu_sgi; /**< count for sgi received */ + uint32 txmpdu_stbc; /**< count for stbc transmit */ + uint32 rxmpdu_stbc; /**< count for stbc received */ + + uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /**< TKIPReplays */ + uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /**< CCMPReplays */ + uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /**< 
FourWayHandshakeFailures */ + uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /**< DecryptSuccessCount */ + uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */ + + uint32 dma_hang; /**< count for dma hang */ + uint32 reinit; /**< count for reinit */ + + uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */ + uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */ + uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */ + uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */ + uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */ + + uint32 cso_passthrough; /* hw cso required but passthrough */ + uint32 cso_normal; /**< hw cso hdr for normal process */ + uint32 chained; /**< number of frames chained */ + uint32 chainedsz1; /**< number of chain size 1 frames */ + uint32 unchained; /**< number of frames not chained */ + uint32 maxchainsz; /**< max chain size so far */ + uint32 currchainsz; /**< current chain size */ + uint32 pciereset; /**< Secondary Bus Reset issued by driver */ + uint32 cfgrestore; /**< configspace restore by driver */ + uint32 reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */ + uint32 rxrtry; + + uint32 rxmpdu_mu; /* Number of MU MPDUs received */ + + /* detailed control/management frames */ + uint32 txbar; /**< Number of TX BAR */ + uint32 rxbar; /**< Number of RX BAR */ + uint32 txpspoll; /**< Number of TX PS-poll */ + uint32 rxpspoll; /**< Number of RX PS-poll */ + uint32 txnull; /**< Number of TX NULL_DATA */ + uint32 rxnull; /**< Number of RX NULL_DATA */ + uint32 txqosnull; /**< Number of TX NULL_QoSDATA */ + uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */ + uint32 txassocreq; /**< Number of TX ASSOC request */ + uint32 rxassocreq; /**< Number of RX ASSOC request 
*/ + uint32 txreassocreq; /**< Number of TX REASSOC request */ + uint32 rxreassocreq; /**< Number of RX REASSOC request */ + uint32 txdisassoc; /**< Number of TX DISASSOC */ + uint32 rxdisassoc; /**< Number of RX DISASSOC */ + uint32 txassocrsp; /**< Number of TX ASSOC response */ + uint32 rxassocrsp; /**< Number of RX ASSOC response */ + uint32 txreassocrsp; /**< Number of TX REASSOC response */ + uint32 rxreassocrsp; /**< Number of RX REASSOC response */ + uint32 txauth; /**< Number of TX AUTH */ + uint32 rxauth; /**< Number of RX AUTH */ + uint32 txdeauth; /**< Number of TX DEAUTH */ + uint32 rxdeauth; /**< Number of RX DEAUTH */ + uint32 txprobereq; /**< Number of TX probe request */ + uint32 rxprobereq; /**< Number of RX probe request */ + uint32 txprobersp; /**< Number of TX probe response */ + uint32 rxprobersp; /**< Number of RX probe response */ + uint32 txaction; /**< Number of TX action frame */ + uint32 rxaction; /**< Number of RX action frame */ +} wl_cnt_wlc_t; + +/* MACXSTAT counters for ucodex (corerev >= 64) */ +typedef struct { + uint32 macxsusp; + uint32 m2vmsg; + uint32 v2mmsg; + uint32 mboxout; + uint32 musnd; + uint32 sfb2v; +} wl_cnt_ge64mcxst_v1_t; + +/* MACSTAT counters for ucode (corerev >= 40) */ +typedef struct { /* MAC counters: 32-bit version of d11.h's macstat_t */ - uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS, + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, * Control Management (includes retransmissions) */ - uint32 txrtsfrm; /* number of RTS sent out by the MAC */ - uint32 txctsfrm; /* number of CTS sent out by the MAC */ - uint32 txackfrm; /* number of ACK frames sent out */ - uint32 txdnlfrm; /* Not used */ - uint32 txbcnfrm; /* beacons transmitted */ - uint32 txfunfl[6]; /* per-fifo tx underflows */ - uint32 rxtoolate; /* receive too late */ - uint32 txfbw; /* transmit at fallback bw (dynamic bw) */ - uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txampdu; /**< number of AMPDUs transmitted */ + uint32 txmpdu; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS * or BCN) */ - uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for * driver enqueued frames */ - uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */ - uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */ - uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not + uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxanyerr; /**< Any RX error that is not counted by other counters. 
*/ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. passing parity check) + */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */ + uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastobss; /* number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon 
(IBSS) */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< number of receive fifo 1 overflows */ + uint32 rxhlovfl; /**< number of length / header fifo overflows */ + uint32 missbcn_dbg; /**< number of beacon missed to receive */ + uint32 pmqovfl; /**< number of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ +} wl_cnt_ge40mcst_v1_t; + +/* MACSTAT counters for ucode (corerev < 40) */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txampdu; /**< number of AMPDUs transmitted */ + uint32 txmpdu; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */ + uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastobss; /* number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 dbgoff46; + uint32 dbgoff47; + uint32 dbgoff48; /**< Used for counting txstatus queue overflow (corerev <= 4) */ + uint32 pmqovfl; /**< number of PMQ overflows 
*/ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 phywatch; + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ +} wl_cnt_lt40mcst_v1_t; + +/* MACSTAT counters for "wl counter" version <= 10 */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */ + uint32 PAD0; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdfrmucastmbss; /* number of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 PAD1; + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /**< Number of transmit status fifo overflows 
(obsolete) */ + uint32 pmqovfl; /**< number of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; /**< obsolete */ + uint32 frmscons; /**< obsolete */ + uint32 txnack; /**< obsolete */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ +} wl_cnt_v_le10_mcst_t; + +typedef struct { + uint16 version; /**< see definition of WL_CNT_T_VERSION */ + uint16 length; /**< length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txerror; /**< tx data errors (derived: sum of others) */ + uint32 txctl; /**< tx management frames */ + uint32 txprshort; /**< tx short preamble frames */ + uint32 txserr; /**< tx status errors */ + uint32 txnobuf; /**< tx out of buffers errors */ + uint32 txnoassoc; /**< tx discard because we're not associated */ + uint32 txrunt; /**< tx runt frames */ + uint32 txchit; /**< tx header cache hit (fastpath) */ + uint32 txcmiss; /**< tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /**< tx fifo underflows */ + uint32 txphyerr; /**< tx phy errors (indicated in tx status) */ + uint32 txphycrs; + + /* receive 
stat counters */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + uint32 rxerror; /**< rx data errors (derived: sum of others) */ + uint32 rxctl; /**< rx management frames */ + uint32 rxnobuf; /**< rx out of buffers errors */ + uint32 rxnondata; /**< rx non data frames in the data channel errors */ + uint32 rxbadds; /**< rx bad DS errors */ + uint32 rxbadcm; /**< rx bad control or management frames */ + uint32 rxfragerr; /**< rx fragmentation errors */ + uint32 rxrunt; /**< rx runt frames */ + uint32 rxgiant; /**< rx giant frames */ + uint32 rxnoscb; /**< rx no scb error */ + uint32 rxbadproto; /**< rx invalid frames */ + uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */ + uint32 rxbadda; /**< rx frames tossed for invalid da */ + uint32 rxfilter; /**< rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /**< rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */ + + uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /**< tx/rx dma descriptor errors */ + uint32 dmada; /**< tx/rx dma data errors */ + uint32 dmape; /**< tx/rx dma descriptor protocol errors */ + uint32 reset; /**< reset count */ + uint32 tbtt; /**< cnts the TBTT int's */ + uint32 txdmawar; + uint32 pkt_callback_reg_fail; /**< callbacks register failure */ + + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< Not used */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 rxtoolate; /**< receive too late */ + uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not * data/control/management */ - uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */ - uint32 rxbadplcp; /* parity check of the PLCP header failed */ - uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */ - uint32 rxstrt; /* Number of received frames with a good PLCP + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP * (i.e. 
passing parity check) */ uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */ uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ - uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */ - uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */ - uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */ - uint32 rxackucast; /* number of ucast ACKS received (good FCS) */ - uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */ - uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */ - uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */ - uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */ - uint32 rxctsocast; /* number of received CTS not addressed to the MAC */ - uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */ - uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */ - uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC + uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /**< number of RX 
Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC * (unlikely to see these) */ - uint32 rxbeaconmbss; /* beacons received from member of BSS */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from * other BSS (WDS FRAME) */ - uint32 rxbeaconobss; /* beacons received from other BSS */ - uint32 rxrsptmout; /* Number of response timeouts for transmitted frames + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< Number of response timeouts for transmitted frames * expecting a response */ - uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */ - uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */ - uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */ - uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */ - uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */ - uint32 pmqovfl; /* Number of PMQ overflows */ - uint32 rxcgprqfrm; /* Number of received Probe requests that made it into + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxf0ovfl; /**< Number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /**< Number of PMQ overflows */ + uint32 rxcgprqfrm; /**< Number of received Probe requests that made it into * the PRQ fifo */ - uint32 rxcgprsqovfl; /* Rx Probe Request Que overflow in the AP */ - uint32 txcgprsfail; /* Tx Probe Response Fail. 
AP sent probe response but did + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did * not get ACK */ - uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */ - uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< Number of probe requests that were dropped from the PRQ * fifo because a probe response could not be sent out within * the time limit defined in M_PRS_MAXTIME */ - uint32 rxnack; /* obsolete */ - uint32 frmscons; /* obsolete */ - uint32 txnack; /* obsolete */ - uint32 rxback; /* blockack rxcnt */ - uint32 txback; /* blockack txcnt */ + uint32 rxnack; /**< obsolete */ + uint32 frmscons; /**< obsolete */ + uint32 txnack; /**< obsolete */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ - uint32 txfrag; /* dot11TransmittedFragmentCount */ - uint32 txmulti; /* dot11MulticastTransmittedFrameCount */ - uint32 txfail; /* dot11FailedCount */ - uint32 txretry; /* dot11RetryCount */ - uint32 txretrie; /* dot11MultipleRetryCount */ - uint32 rxdup; /* dot11FrameduplicateCount */ - uint32 txrts; /* dot11RTSSuccessCount */ - uint32 txnocts; /* dot11RTSFailureCount */ - uint32 txnoack; /* dot11ACKFailureCount */ - uint32 rxfrag; /* dot11ReceivedFragmentCount */ - uint32 rxmulti; /* dot11MulticastReceivedFrameCount */ - uint32 rxcrc; /* dot11FCSErrorCount */ - uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) 
*/ - uint32 rxundec; /* dot11WEPUndecryptableCount */ + uint32 txfrag; /**< dot11TransmittedFragmentCount */ + uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */ + uint32 txfail; /**< dot11FailedCount */ + uint32 txretry; /**< dot11RetryCount */ + uint32 txretrie; /**< dot11MultipleRetryCount */ + uint32 rxdup; /**< dot11FrameduplicateCount */ + uint32 txrts; /**< dot11RTSSuccessCount */ + uint32 txnocts; /**< dot11RTSFailureCount */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 rxfrag; /**< dot11ReceivedFragmentCount */ + uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /**< dot11FCSErrorCount */ + uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) */ + uint32 rxundec; /**< dot11WEPUndecryptableCount */ /* WPA2 counters (see rxundec for DecryptFailureCount) */ - uint32 tkipmicfaill; /* TKIPLocalMICFailures */ - uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */ - uint32 tkipreplay; /* TKIPReplays */ - uint32 ccmpfmterr; /* CCMPFormatErrors */ - uint32 ccmpreplay; /* CCMPReplays */ - uint32 ccmpundec; /* CCMPDecryptErrors */ - uint32 fourwayfail; /* FourWayHandshakeFailures */ - uint32 wepundec; /* dot11WEPUndecryptableCount */ - uint32 wepicverr; /* dot11WEPICVErrorCount */ - uint32 decsuccess; /* DecryptSuccessCount */ - uint32 tkipicverr; /* TKIPICVErrorCount */ - uint32 wepexcluded; /* dot11WEPExcludedCount */ - - uint32 txchanrej; /* Tx frames suppressed due to channel rejection */ - uint32 psmwds; /* Count PSM watchdogs */ - uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */ + uint32 tkipmicfaill; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /**< TKIPReplays */ + uint32 ccmpfmterr; /**< CCMPFormatErrors */ + uint32 ccmpreplay; /**< CCMPReplays */ + uint32 ccmpundec; /**< CCMPDecryptErrors */ + uint32 fourwayfail; /**< FourWayHandshakeFailures */ + uint32 wepundec; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr; /**< 
dot11WEPICVErrorCount */ + uint32 decsuccess; /**< DecryptSuccessCount */ + uint32 tkipicverr; /**< TKIPICVErrorCount */ + uint32 wepexcluded; /**< dot11WEPExcludedCount */ + + uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */ + uint32 psmwds; /**< Count PSM watchdogs */ + uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */ /* MBSS counters, AP only */ - uint32 prq_entries_handled; /* PRQ entries read in */ - uint32 prq_undirected_entries; /* which were bcast bss & ssid */ - uint32 prq_bad_entries; /* which could not be translated to info */ - uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */ - uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... */ + uint32 prq_entries_handled; /**< PRQ entries read in */ + uint32 prq_undirected_entries; /**< which were bcast bss & ssid */ + uint32 prq_bad_entries; /**< which could not be translated to info */ + uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... 
*/ uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ - uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */ + uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ /* per-rate receive stat counters */ uint32 rx1mbps; /* packets rx at 1Mbps */ @@ -1930,56 +2695,93 @@ typedef struct { uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ - uint32 rfdisable; /* count of radio disables */ - uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */ + uint32 rfdisable; /**< count of radio disables */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ uint32 bphy_badplcp; - uint32 txexptime; /* Tx frames suppressed due to timer expiration */ + uint32 txexptime; /**< Tx frames suppressed due to timer expiration */ - uint32 txmpdu_sgi; /* count for sgi transmit */ - uint32 rxmpdu_sgi; /* count for sgi received */ - uint32 txmpdu_stbc; /* count for stbc transmit */ - uint32 rxmpdu_stbc; /* count for stbc received */ + uint32 txmpdu_sgi; /**< count for sgi transmit */ + uint32 rxmpdu_sgi; /**< count for sgi received */ + uint32 txmpdu_stbc; /**< count for stbc transmit */ + uint32 rxmpdu_stbc; /**< count for stbc received */ - uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */ + uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */ /* WPA2 counters (see rxundec for DecryptFailureCount) */ - uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */ - uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */ - uint32 tkipreplay_mcst; /* TKIPReplays */ - uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */ - uint32 ccmpreplay_mcst; /* CCMPReplays */ - uint32 ccmpundec_mcst; /* CCMPDecryptErrors */ - uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */ - uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */ - uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */ - uint32 decsuccess_mcst; /* DecryptSuccessCount */ - uint32 tkipicverr_mcst; /* 
TKIPICVErrorCount */ - uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */ - - uint32 dma_hang; /* count for dma hang */ - uint32 reinit; /* count for reinit */ - - uint32 pstatxucast; /* count of ucast frames xmitted on all psta assoc */ - uint32 pstatxnoassoc; /* count of txnoassoc frames xmitted on all psta assoc */ - uint32 pstarxucast; /* count of ucast frames received on all psta assoc */ - uint32 pstarxbcmc; /* count of bcmc frames received on all psta */ - uint32 pstatxbcmc; /* count of bcmc frames transmitted on all psta */ + uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /**< TKIPReplays */ + uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /**< CCMPReplays */ + uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */ + uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /**< DecryptSuccessCount */ + uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */ + + uint32 dma_hang; /**< count for dma hang */ + uint32 reinit; /**< count for reinit */ + + uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */ + uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */ + uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */ + uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */ + uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */ uint32 cso_passthrough; /* hw cso required but passthrough */ - uint32 cso_normal; /* hw cso hdr for normal process */ - uint32 chained; /* number of frames chained */ - uint32 chainedsz1; /* number of chain size 1 frames */ - uint32 unchained; /* number of frames not chained */ - uint32 maxchainsz; /* max chain size so far */ - 
uint32 currchainsz; /* current chain size */ - uint32 rxdrop20s; /* drop secondary cnt */ - uint32 pciereset; /* Secondary Bus Reset issued by driver */ - uint32 cfgrestore; /* configspace restore by driver */ + uint32 cso_normal; /**< hw cso hdr for normal process */ + uint32 chained; /**< number of frames chained */ + uint32 chainedsz1; /**< number of chain size 1 frames */ + uint32 unchained; /**< number of frames not chained */ + uint32 maxchainsz; /**< max chain size so far */ + uint32 currchainsz; /**< current chain size */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 pciereset; /**< Secondary Bus Reset issued by driver */ + uint32 cfgrestore; /**< configspace restore by driver */ uint32 reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */ -} wl_cnt_t; + uint32 rxrtry; /**< num of received packets with retry bit on */ + uint32 txmpdu; /**< macstat cnt only valid in ver 11. number of MPDUs txed. */ + uint32 rxnodelim; /**< macstat cnt only valid in ver 11. + * number of occasions that no valid delimiter is detected + * by ampdu parser. 
+ */ + uint32 rxmpdu_mu; /* Number of MU MPDUs received */ + + /* detailed control/management frames */ + uint32 txbar; /**< Number of TX BAR */ + uint32 rxbar; /**< Number of RX BAR */ + uint32 txpspoll; /**< Number of TX PS-poll */ + uint32 rxpspoll; /**< Number of RX PS-poll */ + uint32 txnull; /**< Number of TX NULL_DATA */ + uint32 rxnull; /**< Number of RX NULL_DATA */ + uint32 txqosnull; /**< Number of TX NULL_QoSDATA */ + uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */ + uint32 txassocreq; /**< Number of TX ASSOC request */ + uint32 rxassocreq; /**< Number of RX ASSOC request */ + uint32 txreassocreq; /**< Number of TX REASSOC request */ + uint32 rxreassocreq; /**< Number of RX REASSOC request */ + uint32 txdisassoc; /**< Number of TX DISASSOC */ + uint32 rxdisassoc; /**< Number of RX DISASSOC */ + uint32 txassocrsp; /**< Number of TX ASSOC response */ + uint32 rxassocrsp; /**< Number of RX ASSOC response */ + uint32 txreassocrsp; /**< Number of TX REASSOC response */ + uint32 rxreassocrsp; /**< Number of RX REASSOC response */ + uint32 txauth; /**< Number of TX AUTH */ + uint32 rxauth; /**< Number of RX AUTH */ + uint32 txdeauth; /**< Number of TX DEAUTH */ + uint32 rxdeauth; /**< Number of RX DEAUTH */ + uint32 txprobereq; /**< Number of TX probe request */ + uint32 rxprobereq; /**< Number of RX probe request */ + uint32 txprobersp; /**< Number of TX probe response */ + uint32 rxprobersp; /**< Number of RX probe response */ + uint32 txaction; /**< Number of TX action frame */ + uint32 rxaction; /**< Number of RX action frame */ + +} wl_cnt_ver_11_t; -#ifndef LINUX_POSTMOGRIFY_REMOVAL typedef struct { uint16 version; /* see definition of WL_CNT_T_VERSION */ uint16 length; /* length of entire structure */ @@ -2212,8 +3014,7 @@ typedef struct { uint32 rxmpdu_stbc; /* count for stbc received */ uint32 rxdrop20s; /* drop secondary cnt */ - -} wl_cnt_ver_six_t; +} wl_cnt_ver_6_t; #define WL_DELTA_STATS_T_VERSION 2 /* current version of wl_delta_stats_t 
struct */ @@ -2262,26 +3063,6 @@ typedef struct { } wl_delta_stats_t; -#if defined(CUSTOM_PLATFORM_NV_TEGRA) -/* structure to store per-rate rx statistics */ -typedef struct wl_scb_rx_rate_stats { - uint32 rx1mbps[2]; /* packets rx at 1Mbps */ - uint32 rx2mbps[2]; /* packets rx at 2Mbps */ - uint32 rx5mbps5[2]; /* packets rx at 5.5Mbps */ - uint32 rx6mbps[2]; /* packets rx at 6Mbps */ - uint32 rx9mbps[2]; /* packets rx at 9Mbps */ - uint32 rx11mbps[2]; /* packets rx at 11Mbps */ - uint32 rx12mbps[2]; /* packets rx at 12Mbps */ - uint32 rx18mbps[2]; /* packets rx at 18Mbps */ - uint32 rx24mbps[2]; /* packets rx at 24Mbps */ - uint32 rx36mbps[2]; /* packets rx at 36Mbps */ - uint32 rx48mbps[2]; /* packets rx at 48Mbps */ - uint32 rx54mbps[2]; /* packets rx at 54Mbps */ -} wl_scb_rx_rate_stats_t; -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ - -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ - typedef struct { uint32 packets; uint32 bytes; @@ -2302,7 +3083,6 @@ typedef struct { } wl_wme_cnt_t; -#ifndef LINUX_POSTMOGRIFY_REMOVAL struct wl_msglevel2 { uint32 low; uint32 high; @@ -2507,6 +3287,7 @@ typedef struct wl_lifetime { uint32 lifetime; /* Packet lifetime value in ms */ } wl_lifetime_t; + /* Channel Switch Announcement param */ typedef struct wl_chan_switch { uint8 mode; /* value 0 or 1 */ @@ -2574,6 +3355,9 @@ enum { #define PFN_PARTIAL_SCAN_BIT 0 #define PFN_PARTIAL_SCAN_MASK 1 +#define PFN_SWC_RSSI_WINDOW_MAX 8 +#define PFN_SWC_MAX_NUM_APS 16 +#define PFN_HOTLIST_MAX_NUM_APS 64 /* PFN network info structure */ typedef struct wl_pfn_subnet_info { @@ -2613,6 +3397,21 @@ typedef struct wl_pfn_scanresults { wl_pfn_net_info_t netinfo[1]; } wl_pfn_scanresults_t; +typedef struct wl_pfn_significant_net { + uint16 flags; + uint16 channel; + struct ether_addr BSSID; + int8 rssi[PFN_SWC_RSSI_WINDOW_MAX]; +} wl_pfn_significant_net_t; + + +typedef struct wl_pfn_swc_results { + uint32 version; + uint32 pkt_count; + uint32 total_count; + wl_pfn_significant_net_t list[1]; +} 
wl_pfn_swc_results_t; + /* used to report exactly one scan result */ /* plus reports detailed scan info in bss_info */ typedef struct wl_pfn_scanresult { @@ -2651,6 +3450,12 @@ typedef struct wl_pfn_bssid { /* Bit4: suppress_lost, Bit3: suppress_found */ uint16 flags; } wl_pfn_bssid_t; + +typedef struct wl_pfn_significant_bssid { + struct ether_addr macaddr; + int8 rssi_low_threshold; + int8 rssi_high_threshold; +} wl_pfn_significant_bssid_t; #define WL_PFN_SUPPRESSFOUND_MASK 0x08 #define WL_PFN_SUPPRESSLOST_MASK 0x10 #define WL_PFN_RSSI_MASK 0xff00 @@ -2662,10 +3467,45 @@ typedef struct wl_pfn_cfg { uint16 channel_list[WL_NUMCHANNELS]; uint32 flags; } wl_pfn_cfg_t; + +#define CH_BUCKET_REPORT_REGULAR 0 +#define CH_BUCKET_REPORT_FULL_RESULT 2 +#define CH_BUCKET_GSCAN 4 + + +typedef struct wl_pfn_gscan_channel_bucket { + uint16 bucket_end_index; + uint8 bucket_freq_multiple; + uint8 report_flag; +} wl_pfn_gscan_channel_bucket_t; + +#define GSCAN_SEND_ALL_RESULTS_MASK (1 << 0) +#define GSCAN_CFG_FLAGS_ONLY_MASK (1 << 7) + +typedef struct wl_pfn_gscan_cfg { + /* BIT0 1 = send probes/beacons to HOST + * BIT2 Reserved + * Add any future flags here + * BIT7 1 = no other useful cfg sent + */ + uint8 flags; + /* Buffer filled threshold in % to generate an event */ + uint8 buffer_threshold; + /* No. 
of BSSIDs with "change" to generate an evt + * change - crosses rssi threshold/lost + */ + uint8 swc_nbssid_threshold; + /* Max=8 (for now) Size of rssi cache buffer */ + uint8 swc_rssi_window_size; + uint16 count_of_channel_buckets; + uint16 lost_ap_window; + wl_pfn_gscan_channel_bucket_t channel_bucket[1]; +} wl_pfn_gscan_cfg_t; + + #define WL_PFN_REPORT_ALLNET 0 #define WL_PFN_REPORT_SSIDNET 1 #define WL_PFN_REPORT_BSSIDNET 2 - #define WL_PFN_CFG_FLAGS_PROHIBITED 0x00000001 /* Accept and use prohibited channels */ #define WL_PFN_CFG_FLAGS_RESERVED 0xfffffffe /* Remaining reserved for future use */ @@ -2685,6 +3525,15 @@ typedef struct wl_pfn_list { wl_pfn_t pfn[1]; } wl_pfn_list_t; +#define WL_PFN_MAC_OUI_ONLY_MASK 1 +#define WL_PFN_SET_MAC_UNASSOC_MASK 2 +/* To configure pfn_macaddr */ +typedef struct wl_pfn_macaddr_cfg { + uint8 version; + uint8 flags; + struct ether_addr macaddr; +} wl_pfn_macaddr_cfg_t; +#define WL_PFN_MACADDR_CFG_VER 1 typedef BWL_PRE_PACKED_STRUCT struct pfn_olmsg_params_t { wlc_ssid_t ssid; uint32 cipher_type; @@ -2703,7 +3552,18 @@ typedef BWL_PRE_PACKED_STRUCT struct pfn_olmsg_params_t { #define MSCAN_MAX 90 #endif -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ +/* + * WLFCTS definition + */ +typedef struct wl_txstatus_additional_info { + uint32 rspec; + uint32 enq_ts; + uint32 last_ts; + uint32 entry_ts; + uint16 seq; + uint8 rts_cnt; + uint8 tx_cnt; +} wl_txstatus_additional_info_t; /* Service discovery */ typedef struct { @@ -2717,6 +3577,8 @@ typedef struct { typedef struct { uint16 period; /* extended listen period */ uint16 interval; /* extended listen interval */ + uint16 count; /* count to repeat */ + uint16 pad; /* pad for 32bit align */ } wl_p2po_listen_t; /* GAS state machine tunable parameters. Structure field values of 0 means use the default. 
*/ @@ -2848,7 +3710,6 @@ typedef struct { struct ether_addr bssid[1]; /* max ANQPO_MAX_IGNORE_BSSID */ } wl_anqpo_ignore_bssid_list_t; -#ifndef LINUX_POSTMOGRIFY_REMOVAL struct toe_ol_stats_t { /* Num of tx packets that don't need to be checksummed */ @@ -2933,20 +3794,6 @@ typedef struct wl_keep_alive_pkt { * Dongle pattern matching filter. */ -/* Packet filter operation mode */ -/* True: 1; False: 0 */ -#define PKT_FILTER_MODE_FORWARD_ON_MATCH 1 -/* Enable and disable pkt_filter as a whole */ -#define PKT_FILTER_MODE_DISABLE 2 -/* Cache first matched rx pkt(be queried by host later) */ -#define PKT_FILTER_MODE_PKT_CACHE_ON_MATCH 4 -/* If pkt_filter is enabled and no filter is set, don't forward anything */ -#define PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT 8 -#if defined(CUSTOM_PLATFORM_NV_TEGRA) -/* Ports only filter mode */ -#define PKT_FILTER_MODE_PORTS_ONLY 16 -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ - #define MAX_WAKE_PACKET_CACHE_BYTES 128 /* Maximum cached wake packet */ #define MAX_WAKE_PACKET_BYTES (DOT11_A3_HDR_LEN + \ @@ -2980,8 +3827,8 @@ typedef enum wl_pkt_filter_type { /* Secured WOWL packet was encrypted, need decrypted before check filter match */ typedef struct wl_pkt_decrypter { - uint8* (*dec_cb)(void* dec_ctx, const void *sdu, int sending); - void* dec_ctx; + uint8* (*dec_cb)(void* dec_ctx, const void *sdu, int sending); + void* dec_ctx; } wl_pkt_decrypter_t; /* Pattern matching filter. Specifies an offset within received packets to @@ -2989,16 +3836,14 @@ typedef struct wl_pkt_decrypter { * that indicates which bits within the pattern should be matched. */ typedef struct wl_pkt_filter_pattern { -// terence 20150525: fix pkt filter error -14 in 64bit OS -// union { - uint32 offset; /* Offset within received packet to start pattern matching. + uint32 offset; /* Offset within received packet to start pattern matching. * Offset '0' is the first byte of the ethernet header. 
*/ -// wl_pkt_decrypter_t* decrypt_ctx; /* Decrypt context */ -// }; uint32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */ uint8 mask_and_pattern[1]; /* Variable length mask and pattern data. mask starts - * at offset 0. Pattern immediately follows mask. + * at offset 0. Pattern immediately follows mask. for + * secured pattern, put the descrypter pointer to the + * beginning, mask and pattern postponed correspondingly */ } wl_pkt_filter_pattern_t; @@ -3076,8 +3921,6 @@ typedef struct wl_pkt_filter_ports { #define WL_PKT_FILTER_PORTS_VERSION 0 #define WL_PKT_FILTER_PORTS_MAX 128 -#define RSN_KCK_LENGTH 16 -#define RSN_KEK_LENGTH 16 #define RSN_REPLAY_LEN 8 typedef struct _gtkrefresh { uchar KCK[RSN_KCK_LENGTH]; @@ -3194,6 +4037,31 @@ typedef struct wl_rssi_event { */ } wl_rssi_event_t; +/* CCA based channel quality event configuration */ +#define WL_CHAN_QUAL_CCA 0 +#define WL_CHAN_QUAL_NF 1 +#define WL_CHAN_QUAL_NF_LTE 2 +#define WL_CHAN_QUAL_TOTAL 3 + +#define MAX_CHAN_QUAL_LEVELS 8 + +typedef struct wl_chan_qual_metric { + uint8 id; /* metric ID */ + uint8 num_levels; /* Number of entries in rssi_levels[] below */ + uint16 flags; + int16 htol[MAX_CHAN_QUAL_LEVELS]; /* threshold level array: hi-to-lo */ + int16 ltoh[MAX_CHAN_QUAL_LEVELS]; /* threshold level array: lo-to-hi */ +} wl_chan_qual_metric_t; + +typedef struct wl_chan_qual_event { + uint32 rate_limit_msec; /* # of events posted to application will be limited to + * one per specified period (0 to disable rate limit). 
+ */ + uint16 flags; + uint16 num_metrics; + wl_chan_qual_metric_t metric[WL_CHAN_QUAL_TOTAL]; /* metric array */ +} wl_chan_qual_event_t; + typedef struct wl_action_obss_coex_req { uint8 info; uint8 num; @@ -3309,6 +4177,15 @@ typedef struct { #define PKTQ_LOG_AUTO (1 << 31) #define PKTQ_LOG_DEF_PREC (1 << 30) + +#define LEGACY1_WL_PFN_MACADDR_CFG_VER 0 + +#define WL_PFN_MAC_OUI_ONLY_MASK 1 +#define WL_PFN_SET_MAC_UNASSOC_MASK 2 +#define WL_PFN_RESTRICT_LA_MAC_MASK 4 +#define WL_PFN_MACADDR_FLAG_MASK 0x7 + + /* * SCB_BS_DATA iovar definitions start. */ @@ -3436,9 +4313,7 @@ typedef struct { } wl_ioctl_overlay_t; #endif /* DONGLEOVERLAYS */ -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ - -/* 11k Neighbor Report element */ +/* 11k Neighbor Report element (unversioned, deprecated) */ typedef struct nbr_element { uint8 id; uint8 len; @@ -3450,6 +4325,24 @@ typedef struct nbr_element { uint8 pad; } nbr_element_t; +#define WL_RRM_NBR_RPT_VER 1 +/* 11k Neighbor Report element */ +typedef struct nbr_rpt_elem { + uint8 version; + uint8 id; + uint8 len; + uint8 pad; + struct ether_addr bssid; + uint8 pad_1[2]; + uint32 bssid_info; + uint8 reg; + uint8 channel; + uint8 phytype; + uint8 pad_2; + wlc_ssid_t ssid; + uint8 bss_trans_preference; + uint8 pad_3[3]; +} nbr_rpt_elem_t; typedef enum event_msgs_ext_command { EVENTMSGS_NONE = 0, @@ -3492,7 +4385,7 @@ typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_stats { #define MAX_ROAMOFFL_BSSID_NUM 100 typedef BWL_PRE_PACKED_STRUCT struct roamoffl_bssid_list { - int cnt; + int32 cnt; struct ether_addr bssid[1]; } BWL_POST_PACKED_STRUCT roamoffl_bssid_list_t; @@ -3545,9 +4438,10 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats { #define WL_PWRSTATS_TYPE_PHY 0 /* struct wl_pwr_phy_stats */ #define WL_PWRSTATS_TYPE_SCAN 1 /* struct wl_pwr_scan_stats */ #define WL_PWRSTATS_TYPE_USB_HSIC 2 /* struct wl_pwr_usb_hsic_stats */ -#define WL_PWRSTATS_TYPE_PM_AWAKE 3 /* struct wl_pwr_pm_awake_stats */ +#define WL_PWRSTATS_TYPE_PM_AWAKE1 3 /* 
struct wl_pwr_pm_awake_stats_v1 */ #define WL_PWRSTATS_TYPE_CONNECTION 4 /* struct wl_pwr_connect_stats; assoc and key-exch time */ #define WL_PWRSTATS_TYPE_PCIE 6 /* struct wl_pwr_pcie_stats */ +#define WL_PWRSTATS_TYPE_PM_AWAKE2 7 /* struct wl_pwr_pm_awake_stats_v2 */ /* Bits for wake reasons */ #define WLC_PMD_WAKE_SET 0x1 @@ -3571,13 +4465,48 @@ typedef BWL_PRE_PACKED_STRUCT struct wlc_pm_debug { uint32 reason; /* reason(s) for staying awake */ } BWL_POST_PACKED_STRUCT wlc_pm_debug_t; +/* WL_PWRSTATS_TYPE_PM_AWAKE1 structures (for 6.25 firmware) */ +#define WLC_STA_AWAKE_STATES_MAX_V1 30 +#define WLC_PMD_EVENT_MAX_V1 32 +/* Data sent as part of pwrstats IOVAR (and EXCESS_PM_WAKE event) */ +typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v1 { + uint32 curr_time; /* ms */ + uint32 hw_macc; /* HW maccontrol */ + uint32 sw_macc; /* SW maccontrol */ + uint32 pm_dur; /* Total sleep time in PM, msecs */ + uint32 mpc_dur; /* Total sleep time in MPC, msecs */ + + /* int32 drifts = remote - local; +ve drift => local-clk slow */ + int32 last_drift; /* Most recent TSF drift from beacon */ + int32 min_drift; /* Min TSF drift from beacon in magnitude */ + int32 max_drift; /* Max TSF drift from beacon in magnitude */ + + uint32 avg_drift; /* Avg TSF drift from beacon */ + + /* Wake history tracking */ + uint8 pmwake_idx; /* for stepping through pm_state */ + wlc_pm_debug_t pm_state[WLC_STA_AWAKE_STATES_MAX_V1]; /* timestamped wake bits */ + uint32 pmd_event_wake_dur[WLC_PMD_EVENT_MAX_V1]; /* cumulative usecs per wake reason */ + uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */ +} BWL_POST_PACKED_STRUCT pm_awake_data_v1_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v1 { + uint16 type; /* WL_PWRSTATS_TYPE_PM_AWAKE */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + pm_awake_data_v1_t awake_data; + uint32 frts_time; /* Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /* No of times frts 
ended since driver load */ +} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v1_t; + +/* WL_PWRSTATS_TYPE_PM_AWAKE2 structures */ /* Data sent as part of pwrstats IOVAR */ -typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data { +typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v2 { uint32 curr_time; /* ms */ uint32 hw_macc; /* HW maccontrol */ uint32 sw_macc; /* SW maccontrol */ - uint32 pm_dur; /* Total sleep time in PM, usecs */ - uint32 mpc_dur; /* Total sleep time in MPC, usecs */ + uint32 pm_dur; /* Total sleep time in PM, msecs */ + uint32 mpc_dur; /* Total sleep time in MPC, msecs */ /* int32 drifts = remote - local; +ve drift => local-clk slow */ int32 last_drift; /* Most recent TSF drift from beacon */ @@ -3603,14 +4532,14 @@ typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data { uint8 pad[3]; uint32 frts_time; /* Cumulative ms spent in frts since driver load */ uint32 frts_end_cnt; /* No of times frts ended since driver load */ -} BWL_POST_PACKED_STRUCT pm_awake_data_t; +} BWL_POST_PACKED_STRUCT pm_awake_data_v2_t; -typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats { +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v2 { uint16 type; /* WL_PWRSTATS_TYPE_PM_AWAKE */ uint16 len; /* Up to 4K-1, top 4 bits are reserved */ - pm_awake_data_t awake_data; -} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_t; + pm_awake_data_v2_t awake_data; +} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v2_t; /* Original bus structure is for HSIC */ typedef BWL_PRE_PACKED_STRUCT struct bus_metrics { @@ -3649,6 +4578,21 @@ typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_metrics { uint32 l1_2_usecs; /* L1_2ss duration in usecs */ uint32 l2_cnt; /* L2 entry count */ uint32 l2_usecs; /* L2 duration in usecs */ + uint32 timestamp; /* Timestamp on when stats are collected */ + uint32 num_h2d_doorbell; /* # of doorbell interrupts - h2d */ + uint32 num_d2h_doorbell; /* # of doorbell interrupts - d2h */ + uint32 num_submissions; /* # of submissions */ + uint32 num_completions; 
/* # of completions */ + uint32 num_rxcmplt; /* # of rx completions */ + uint32 num_rxcmplt_drbl; /* of drbl interrupts for rx complt. */ + uint32 num_txstatus; /* # of tx completions */ + uint32 num_txstatus_drbl; /* of drbl interrupts for tx complt. */ + uint32 ltr_active_ct; /* # of times chip went to LTR ACTIVE */ + uint32 ltr_active_dur; /* # of msecs chip was in LTR ACTIVE */ + uint32 ltr_sleep_ct; /* # of times chip went to LTR SLEEP */ + uint32 ltr_sleep_dur; /* # of msecs chip was in LTR SLEEP */ + uint32 deepsleep_count; /* # of times chip went to deepsleep */ + uint32 deepsleep_dur; /* # of msecs chip was in deepsleep */ } BWL_POST_PACKED_STRUCT pcie_bus_metrics_t; /* Bus interface info for PCIE */ @@ -3701,6 +4645,19 @@ BWL_PRE_PACKED_STRUCT struct hostip_id { uint8 id; } BWL_POST_PACKED_STRUCT; +/* Return values */ +#define ND_REPLY_PEER 0x1 /* Reply was sent to service NS request from peer */ +#define ND_REQ_SINK 0x2 /* Input packet should be discarded */ +#define ND_FORCE_FORWARD 0X3 /* For the dongle to forward req to HOST */ + +/* Neighbor Solicitation Response Offload IOVAR param */ +typedef BWL_PRE_PACKED_STRUCT struct nd_param { + struct ipv6_addr host_ip[2]; + struct ipv6_addr solicit_ip; + struct ipv6_addr remote_ip; + uint8 host_mac[ETHER_ADDR_LEN]; + uint32 offload_id; +} BWL_POST_PACKED_STRUCT nd_param_t; typedef BWL_PRE_PACKED_STRUCT struct wl_pfn_roam_thresh { uint32 pfn_alert_thresh; /* time in ms */ @@ -3745,16 +4702,19 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert { #define WL_PMALERT_PMSTATE 1 /* struct wl_pmalert_pmstate_t, variable */ #define WL_PMALERT_EVENT_DUR 2 /* struct wl_pmalert_event_dur_t, variable */ #define WL_PMALERT_UCODE_DBG 3 /* struct wl_pmalert_ucode_dbg_t, variable */ +#define WL_PMALERT_PS_ALLOWED_HIST 4 /* struct wl_pmalert_ps_allowed_history, variable */ +#define WL_PMALERT_EXT_UCODE_DBG 5 /* struct wl_pmalert_ext_ucode_dbg_t, variable */ +#define WL_PMALERT_EPM_START_EVENT_DUR 6 /* struct 
wl_pmalert_event_dur_t, variable */ typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_fixed { uint16 type; /* WL_PMALERT_FIXED */ uint16 len; /* Up to 4K-1, top 4 bits are reserved */ uint32 prev_stats_time; /* msecs */ uint32 curr_time; /* ms */ - uint32 prev_pm_dur; /* usecs */ - uint32 pm_dur; /* Total sleep time in PM, usecs */ - uint32 prev_mpc_dur; /* usecs */ - uint32 mpc_dur; /* Total sleep time in MPC, usecs */ + uint32 prev_pm_dur; /* msecs */ + uint32 pm_dur; /* Total sleep time in PM, msecs */ + uint32 prev_mpc_dur; /* msecs */ + uint32 mpc_dur; /* Total sleep time in MPC, msecs */ uint32 hw_macc; /* HW maccontrol */ uint32 sw_macc; /* SW maccontrol */ @@ -3765,8 +4725,11 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_fixed { uint32 avg_drift; /* Avg TSF drift from beacon */ uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */ - uint32 frts_time; /* Cumulative ms spent in frts since driver load */ + uint32 frts_time; /* Cumulative ms spent in data frts since driver load */ uint32 frts_end_cnt; /* No of times frts ended since driver load */ + uint32 prev_frts_dur; /* Data frts duration at start of pm-period */ + uint32 cal_dur; /* Cumulative ms spent in calibration */ + uint32 prev_cal_dur; /* cal duration at start of pm-period */ } BWL_POST_PACKED_STRUCT wl_pmalert_fixed_t; typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_pmstate { @@ -3799,7 +4762,6 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg { uint32 phydebug[20]; } BWL_POST_PACKED_STRUCT wl_pmalert_ucode_dbg_t; -#ifndef LINUX_POSTMOGRIFY_REMOVAL /* Structures and constants used for "vndr_ie" IOVar interface */ #define VNDR_IE_CMD_LEN 4 /* length of the set command string: @@ -3915,7 +4877,6 @@ typedef BWL_PRE_PACKED_STRUCT struct { } BWL_POST_PACKED_STRUCT ibss_route_tbl_t; #define MAX_IBSS_ROUTE_TBL_ENTRY 64 -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ #define TXPWR_TARGET_VERSION 0 typedef BWL_PRE_PACKED_STRUCT struct { @@ -3967,6 +4928,8 @@ typedef 
BWL_PRE_PACKED_STRUCT struct { } BWL_POST_PACKED_STRUCT aibss_bcn_force_config_t; #define AIBSS_TXFAIL_CONFIG_VER_0 0 +#define AIBSS_TXFAIL_CONFIG_VER_1 1 +#define AIBSS_TXFAIL_CONFIG_CUR_VER AIBSS_TXFAIL_CONFIG_VER_1 /* structure used to configure aibss tx fail event */ typedef BWL_PRE_PACKED_STRUCT struct { @@ -3974,6 +4937,7 @@ typedef BWL_PRE_PACKED_STRUCT struct { uint16 len; uint32 bcn_timeout; /* dur in seconds to receive 1 bcn */ uint32 max_tx_retry; /* no of consecutive no acks to send txfail event */ + uint32 max_atim_failure; /* no of consecutive atim failure */ } BWL_POST_PACKED_STRUCT aibss_txfail_config_t; typedef BWL_PRE_PACKED_STRUCT struct wl_aibss_if { @@ -4000,7 +4964,6 @@ typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl { /* no strict structure packing */ #include -#ifndef LINUX_POSTMOGRIFY_REMOVAL /* Global ASSERT Logging */ #define ASSERTLOG_CUR_VER 0x0100 #define MAX_ASSRTSTR_LEN 64 @@ -4021,20 +4984,6 @@ typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl { #define LOGRRC_FIX_LEN 8 #define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type)) -#ifdef BCMWAPI_WAI -#define IV_LEN 16 - struct wapi_sta_msg_t - { - uint16 msg_type; - uint16 datalen; - uint8 vap_mac[6]; - uint8 reserve_data1[2]; - uint8 sta_mac[6]; - uint8 reserve_data2[2]; - uint8 gsn[IV_LEN]; - uint8 wie[256]; - }; -#endif /* BCMWAPI_WAI */ /* chanim acs record */ typedef struct { @@ -4044,6 +4993,7 @@ typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl { int8 bgnoise; uint32 glitch_cnt; uint8 ccastats; + uint8 chan_idle; uint timestamp; } chanim_acs_record_t; @@ -4057,9 +5007,9 @@ typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl { uint32 glitchcnt; /* normalized as per second count */ uint32 badplcp; /* normalized as per second count */ uint8 ccastats[CCASTATS_MAX]; /* normalized as 0-255 */ - int8 bgnoise; /* background noise level (in dBm) */ - chanspec_t chanspec; - uint32 timestamp; + int8 bgnoise; /* background noise level (in 
dBm) */ + chanspec_t chanspec; /* ctrl chanspec of the interface */ + uint32 timestamp; /* time stamp at which the stats are collected */ uint32 bphy_glitchcnt; /* normalized as per second count */ uint32 bphy_badplcp; /* normalized as per second count */ uint8 chan_idle; /* normalized as 0~255 */ @@ -4301,9 +5251,14 @@ typedef struct { #define WLC_TXCAL_CORE_MAX 2 /* max number of txcore supports for txcal */ #define MAX_NUM_TXCAL_MEAS 128 - +#define MAX_NUM_PWR_STEP 40 +#define TXCAL_ROUNDING_FIX 1 typedef struct wl_txcal_meas { +#ifdef TXCAL_ROUNDING_FIX + uint16 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS]; +#else uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS]; +#endif /* TXCAL_ROUNDING_FIX */ int16 pwr[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS]; uint8 valid_cnt; } wl_txcal_meas_t; @@ -4311,9 +5266,11 @@ typedef struct wl_txcal_meas { typedef struct wl_txcal_power_tssi { uint8 set_core; uint8 channel; + int16 tempsense[WLC_TXCAL_CORE_MAX]; int16 pwr_start[WLC_TXCAL_CORE_MAX]; + uint8 pwr_start_idx[WLC_TXCAL_CORE_MAX]; uint8 num_entries[WLC_TXCAL_CORE_MAX]; - uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS]; + uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_PWR_STEP]; bool gen_tbl; } wl_txcal_power_tssi_t; @@ -4839,6 +5796,7 @@ enum proxd_method { #define WL_PROXD_FLAG_SEQ_EN 0x80 #define WL_PROXD_RANDOM_WAKEUP 0x8000 +#define WL_PROXD_MAXREPORT 8 typedef struct wl_proxd_iovar { uint16 method; /* Proxmity Detection method */ @@ -4913,6 +5871,17 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_method { /* add more params required for other methods can be added here */ } BWL_POST_PACKED_STRUCT wl_proxd_params_tof_method_t; +typedef struct wl_proxd_seq_config +{ + int16 N_tx_log2; + int16 N_rx_log2; + int16 N_tx_scale; + int16 N_rx_scale; + int16 w_len; + int16 w_offset; +} wl_proxd_seq_config_t; + + typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune { uint32 Ki; /* h/w delay K factor for initiator */ uint32 Kt; /* h/w delay K factor for target */ 
@@ -4932,6 +5901,9 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune { uint8 hw_adj; /* enable hw assisted timestamp adjustment */ uint8 seq_en; /* enable ranging sequence */ uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /* number of ftm frames based on bandwidth */ + int16 N_log2_2g; /* simple threshold crossing for 2g channel */ + int16 N_scale_2g; /* simple threshold crossing for 2g channel */ + wl_proxd_seq_config_t seq_5g20; } BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_t; typedef struct wl_proxd_params_iovar { @@ -4953,6 +5925,7 @@ typedef struct wl_proxd_params_iovar { #define PROXD_COLLECT_QUERY_DATA 3 #define PROXD_COLLECT_QUERY_DEBUG 4 #define PROXD_COLLECT_REMOTE_REQUEST 5 +#define PROXD_COLLECT_DONE 6 typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_query { uint32 method; /* method */ @@ -4988,9 +5961,12 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_header { } BWL_POST_PACKED_STRUCT wl_proxd_collect_header_t; +#ifdef WL_NAN /* ********************** NAN wl interface struct types and defs ******************** */ #define WL_NAN_IOCTL_VERSION 0x1 +#define NAN_IOC_BUFSZ 256 /**< some sufficient ioc buff size for our module */ +#define NAN_IOC_BUFSZ_EXT 1024 /* some sufficient ioc buff size for dump commands */ /* wl_nan_sub_cmd may also be used in dhd */ typedef struct wl_nan_sub_cmd wl_nan_sub_cmd_t; @@ -5009,6 +5985,7 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_nan_ioc { uint16 version; /* interface command or event version */ uint16 id; /* nan ioctl cmd ID */ uint16 len; /* total length of all tlv records in data[] */ + uint16 pad; /* pad to be 32 bit aligment */ uint8 data [1]; /* var len payload of bcm_xtlv_t type */ } BWL_POST_PACKED_STRUCT wl_nan_ioc_t; @@ -5025,8 +6002,16 @@ typedef struct wl_nan_status { uint32 cnt_svc_disc_tx; /* TX svc disc frame count */ uint32 cnt_svc_disc_rx; /* RX svc disc frame count */ struct ether_addr cid; + uint32 chspec_5g; } wl_nan_status_t; +typedef struct wl_nan_count { + uint32 cnt_bcn_tx; /* TX 
disc/sync beacon count */ + uint32 cnt_bcn_rx; /* RX disc/sync beacon count */ + uint32 cnt_svc_disc_tx; /* TX svc disc frame count */ + uint32 cnt_svc_disc_rx; /* RX svc disc frame count */ +} wl_nan_count_t; + /* various params and ctl swithce for nan_debug instance */ typedef struct nan_debug_params { uint8 enabled; /* runtime debuging enabled */ @@ -5036,17 +6021,24 @@ typedef struct nan_debug_params { uint16 status; } nan_debug_params_t; +/* time slot */ +#define NAN_MAX_TIMESLOT 32 +typedef struct nan_timeslot { + uint32 abitmap; /* available bitmap */ + uint32 chanlist[NAN_MAX_TIMESLOT]; +} nan_timeslot_t; /* nan passive scan params */ #define NAN_SCAN_MAX_CHCNT 8 -typedef BWL_PRE_PACKED_STRUCT struct nan_scan_params { +typedef struct nan_scan_params { uint16 scan_time; uint16 home_time; uint16 ms_intvl; /* interval between merge scan */ uint16 ms_dur; /* duration of merge scan */ uint16 chspec_num; + uint8 pad[2]; chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /* act. used 3, 5 rfu */ -} BWL_POST_PACKED_STRUCT nan_scan_params_t; +} nan_scan_params_t; enum wl_nan_role { WL_NAN_ROLE_AUTO = 0, @@ -5065,6 +6057,14 @@ enum wl_nan_cmds { WL_NAN_CMD_LEAVE = 4, WL_NAN_CMD_MERGE = 5, WL_NAN_CMD_STATUS = 6, + WL_NAN_CMD_TSRESERVE = 7, + WL_NAN_CMD_TSSCHEDULE = 8, + WL_NAN_CMD_TSRELEASE = 9, + WL_NAN_CMD_OUI = 10, + + WL_NAN_CMD_COUNT = 15, + WL_NAN_CMD_CLEARCOUNT = 16, + /* discovery engine commands */ WL_NAN_CMD_PUBLISH = 20, WL_NAN_CMD_SUBSCRIBE = 21, @@ -5080,11 +6080,15 @@ enum wl_nan_cmds { WL_NAN_CMD_SCAN_RESULTS = 48, WL_NAN_CMD_EVENT_MASK = 49, WL_NAN_CMD_EVENT_CHECK = 50, + WL_NAN_CMD_DUMP = 51, + WL_NAN_CMD_CLEAR = 52, + WL_NAN_CMD_RSSI = 53, WL_NAN_CMD_DEBUG = 60, WL_NAN_CMD_TEST1 = 61, WL_NAN_CMD_TEST2 = 62, - WL_NAN_CMD_TEST3 = 63 + WL_NAN_CMD_TEST3 = 63, + WL_NAN_CMD_DISC_RESULTS = 64 }; /* @@ -5102,10 +6106,9 @@ enum wl_nan_cmd_xtlv_id { /* 0x02 ~ 0xFF: reserved. 
In case to use with the same data format as NAN attribute TLV */ /* 0x100 ~ : private TLV ID defined just for NAN command */ /* common types */ - WL_NAN_XTLV_BUFFER = 0x101, /* generic type, function depends on cmd context */ WL_NAN_XTLV_MAC_ADDR = 0x102, /* used in various cmds */ WL_NAN_XTLV_REASON = 0x103, - WL_NAN_XTLV_ENABLE = 0x104, + WL_NAN_XTLV_ENABLED = 0x104, /* explicit types, primarily for discovery engine iovars */ WL_NAN_XTLV_SVC_PARAMS = 0x120, /* Contains required params: wl_nan_disc_params_t */ WL_NAN_XTLV_MATCH_RX = 0x121, /* Matching filter to evaluate on receive */ @@ -5116,6 +6119,9 @@ enum wl_nan_cmd_xtlv_id { WL_NAN_XTLV_PRIORITY = 0x126, /* used in transmit cmd context */ WL_NAN_XTLV_REQUESTOR_ID = 0x127, /* Requestor instance ID */ WL_NAN_XTLV_VNDR = 0x128, /* Vendor specific attribute */ + WL_NAN_XTLV_SR_FILTER = 0x129, /* Service Response Filter */ + WL_NAN_XTLV_FOLLOWUP = 0x130, /* Service Info for Follow-Up SDF */ + WL_NAN_XTLV_PEER_INSTANCE_ID = 0x131, /* Used to parse remote instance Id */ /* explicit types, primarily for NAN MAC iovars */ WL_NAN_XTLV_DW_LEN = 0x140, /* discovery win length */ WL_NAN_XTLV_BCN_INTERVAL = 0x141, /* beacon interval, both sync and descovery bcns? 
*/ @@ -5142,7 +6148,26 @@ enum wl_nan_cmd_xtlv_id { WL_NAN_XTLV_SUBSCR_ID = 0x154, /* subscriber id */ WL_NAN_XTLV_PUBLR_ID = 0x155, /* publisher id */ WL_NAN_XTLV_EVENT_MASK = 0x156, - WL_NAN_XTLV_MERGE = 0x157 + WL_NAN_XTLV_MASTER_RANK = 0x158, + WL_NAN_XTLV_WARM_UP_TIME = 0x159, + WL_NAN_XTLV_PM_OPTION = 0x15a, + WL_NAN_XTLV_OUI = 0x15b, /* NAN OUI */ + WL_NAN_XTLV_MAC_COUNT = 0x15c, /* xtlv payload is nan_count_t */ + /* nan timeslot management */ + WL_NAN_XTLV_TSRESERVE = 0x160, + WL_NAN_XTLV_TSRELEASE = 0x161, + WL_NAN_XTLV_IDLE_DW_TIMEOUT = 0x162, + WL_NAN_XTLV_IDLE_DW_LEN = 0x163, + WL_NAN_XTLV_RND_FACTOR = 0x164, + WL_NAN_XTLV_SVC_DISC_TXTIME = 0x165, /* svc disc frame tx time in DW */ + WL_NAN_XTLV_OPERATING_BAND = 0x166, + WL_NAN_XTLV_STOP_BCN_TX = 0x167, + WL_NAN_XTLV_CONCUR_SCAN = 0x168, + WL_NAN_XTLV_DUMP_CLR_TYPE = 0x175, /* wl nan dump/clear subtype */ + WL_NAN_XTLV_PEER_RSSI = 0x176, /* xtlv payload for wl nan dump rssi */ + WL_NAN_XTLV_MAC_CHANSPEC_1 = 0x17A, /* to get chanspec[1] */ + WL_NAN_XTLV_DISC_RESULTS = 0x17B, /* get disc results */ + WL_NAN_XTLV_MAC_STATS = 0x17C /* xtlv payload for wl nan dump stats */ }; /* Flag bits for Publish and Subscribe (wl_nan_disc_params_t flags) */ @@ -5177,10 +6202,16 @@ enum wl_nan_cmd_xtlv_id { /* The service hash (service id) is exactly this many bytes. */ #define WL_NAN_SVC_HASH_LEN 6 +/* Number of hash functions per bloom filter */ +#define WL_NAN_HASHES_PER_BLOOM 4 + /* Instance ID type (unique identifier) */ typedef uint8 wl_nan_instance_id_t; -/* Mandatory parameters for publish/subscribe iovars - NAN_TLV_SVC_PARAMS */ +/* no. of max last disc results */ +#define WL_NAN_MAX_DISC_RESULTS 3 + +/** Mandatory parameters for publish/subscribe iovars - NAN_TLV_SVC_PARAMS */ typedef struct wl_nan_disc_params_s { /* Periodicity of unsolicited/query transmissions, in DWs */ uint32 period; @@ -5190,10 +6221,27 @@ typedef struct wl_nan_disc_params_s { uint32 flags; /* Publish or subscribe service id, i.e. 
hash of the service name */ uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; + /* pad to make 4 byte alignment, can be used for something else in the future */ + uint8 pad; /* Publish or subscribe id */ wl_nan_instance_id_t instance_id; } wl_nan_disc_params_t; +/* recent discovery results */ +typedef struct wl_nan_disc_result_s +{ + wl_nan_instance_id_t instance_id; /* instance id of pub/sub req */ + wl_nan_instance_id_t peer_instance_id; /* peer instance id of pub/sub req/resp */ + uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; /* service descp string */ + struct ether_addr peer_mac; /* peer mac address */ +} wl_nan_disc_result_t; + +/* list of recent discovery results */ +typedef struct wl_nan_disc_results_s +{ + wl_nan_disc_result_t disc_result[WL_NAN_MAX_DISC_RESULTS]; +} wl_nan_disc_results_list_t; + /* * desovery interface event structures * */ @@ -5269,8 +6317,80 @@ typedef struct nan_ranging_event_data { uint8 count; /* number of peers in the list */ wl_nan_ranging_result_t rr[1]; /* variable array of ranging peers */ } wl_nan_ranging_event_data_t; +enum { + WL_NAN_RSSI_DATA = 1, + WL_NAN_STATS_DATA = 2, +/* + * ***** ADD before this line **** + */ + WL_NAN_INVALID +}; + +typedef struct wl_nan_stats { + /* general */ + uint32 cnt_dw; /* DW slots */ + uint32 cnt_disc_bcn_sch; /* disc beacon slots */ + uint32 cnt_amr_exp; /* count of ambtt expiries resetting roles */ + uint32 cnt_bcn_upd; /* count of beacon template updates */ + uint32 cnt_bcn_tx; /* count of sync & disc bcn tx */ + uint32 cnt_bcn_rx; /* count of sync & disc bcn rx */ + uint32 cnt_sync_bcn_tx; /* count of sync bcn tx within DW */ + uint32 cnt_disc_bcn_tx; /* count of disc bcn tx */ + uint32 cnt_sdftx_bcmc; /* count of bcast/mcast sdf tx */ + uint32 cnt_sdftx_uc; /* count of unicast sdf tx */ + uint32 cnt_sdftx_fail; /* count of unicast sdf tx fails */ + uint32 cnt_sdf_rx; /* count of sdf rx */ + /* NAN roles */ + uint32 cnt_am; /* anchor master */ + uint32 cnt_master; /* master */ + uint32 cnt_nms; /* non master 
sync */ + uint32 cnt_nmns; /* non master non sync */ + /* TX */ + uint32 cnt_err_txtime; /* error in txtime */ + uint32 cnt_err_unsch_tx; /* tx while not in DW/ disc bcn slot */ + uint32 cnt_err_bcn_tx; /* beacon tx error */ + uint32 cnt_sync_bcn_tx_miss; /* no. of times time delta between 2 cosequetive + * sync beacons is more than dw interval + */ + /* SCANS */ + uint32 cnt_mrg_scan; /* count of merge scans completed */ + uint32 cnt_err_ms_rej; /* number of merge scan failed */ + uint32 cnt_scan_results; /* no. of nan beacons scanned */ + uint32 cnt_join_scan_rej; /* no. of join scans rejected */ + uint32 cnt_nan_scan_abort; /* no. of join scans rejected */ + /* enable/disable */ + uint32 cnt_nan_enab; /* no. of times nan feature got enabled */ + uint32 cnt_nan_disab; /* no. of times nan feature got disabled */ +} wl_nan_stats_t; + +#define WL_NAN_MAC_MAX_NAN_PEERS 6 +#define WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER 10 + +typedef struct wl_nan_nbr_rssi { + uint8 rx_chan; /* channel number on which bcn rcvd */ + int rssi_raw; /* received rssi value */ + int rssi_avg; /* normalized rssi value */ +} wl_nan_peer_rssi_t; + +typedef struct wl_nan_peer_rssi_entry { + struct ether_addr mac; /* peer mac address */ + uint8 flags; /* TODO:rssi data order: latest first, oldest first etc */ + uint8 rssi_cnt; /* rssi data sample present */ + wl_nan_peer_rssi_t rssi[WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER]; /* RSSI data frm peer */ +} wl_nan_peer_rssi_entry_t; + +#define WL_NAN_PEER_RSSI 0x1 +#define WL_NAN_PEER_RSSI_LIST 0x2 + +typedef struct wl_nan_nbr_rssi_data { + uint8 flags; /* this is a list or single rssi data */ + uint8 peer_cnt; /* number of peers */ + uint16 pad; /* padding */ + wl_nan_peer_rssi_entry_t peers[1]; /* peers data list */ +} wl_nan_peer_rssi_data_t; /* ********************* end of NAN section ******************************** */ +#endif /* WL_NAN */ #define RSSI_THRESHOLD_SIZE 16 @@ -5279,7 +6399,7 @@ typedef struct nan_ranging_event_data { typedef 
BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias { int32 version; /* version */ int32 threshold[RSSI_THRESHOLD_SIZE]; /* threshold */ - int32 peak_offset; /* peak offset */ + int32 peak_offset; /* peak offset */ int32 bias; /* rssi bias */ int32 gd_delta; /* GD - GD_ADJ */ int32 imp_resp[MAX_IMP_RESP_SIZE]; /* (Hi*Hi)+(Hr*Hr) */ @@ -5403,6 +6523,47 @@ typedef struct wl_wsec_info { wl_wsec_info_tlv_t tlvs[1]; /* tlv data follows */ } wl_wsec_info_t; +/* + * scan MAC definitions + */ + +/* common iovar struct */ +typedef struct wl_scanmac { + uint16 subcmd_id; /* subcommand id */ + uint16 len; /* total length of data[] */ + uint8 data[1]; /* subcommand data */ +} wl_scanmac_t; + +/* subcommand ids */ +#define WL_SCANMAC_SUBCMD_ENABLE 0 +#define WL_SCANMAC_SUBCMD_BSSCFG 1 /* only GET supported */ +#define WL_SCANMAC_SUBCMD_CONFIG 2 + +/* scanmac enable data struct */ +typedef struct wl_scanmac_enable { + uint8 enable; /* 1 - enable, 0 - disable */ + uint8 pad[3]; /* 4-byte struct alignment */ +} wl_scanmac_enable_t; + +/* scanmac bsscfg data struct */ +typedef struct wl_scanmac_bsscfg { + uint32 bsscfg; /* bsscfg index */ +} wl_scanmac_bsscfg_t; + +/* scanmac config data struct */ +typedef struct wl_scanmac_config { + struct ether_addr mac; /* 6 bytes of MAC address or MAC prefix (i.e. 
OUI) */ + struct ether_addr random_mask; /* randomized bits on each scan */ + uint16 scan_bitmap; /* scans to use this MAC address */ + uint8 pad[2]; /* 4-byte struct alignment */ +} wl_scanmac_config_t; + +/* scan bitmap */ +#define WL_SCANMAC_SCAN_UNASSOC (0x01 << 0) /* unassociated scans */ +#define WL_SCANMAC_SCAN_ASSOC_ROAM (0x01 << 1) /* associated roam scans */ +#define WL_SCANMAC_SCAN_ASSOC_PNO (0x01 << 2) /* associated PNO scans */ +#define WL_SCANMAC_SCAN_ASSOC_HOST (0x01 << 3) /* associated host scans */ + /* no default structure packing */ #include @@ -5547,8 +6708,7 @@ typedef struct net_detect_wake_data { #endif /* NET_DETECT */ -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ - +/* (unversioned, deprecated) */ typedef struct bcnreq { uint8 bcn_mode; int dur; @@ -5559,6 +6719,22 @@ typedef struct bcnreq { uint16 reps; } bcnreq_t; +#define WL_RRM_BCN_REQ_VER 1 +typedef struct bcn_req { + uint8 version; + uint8 bcn_mode; + uint8 pad_1[2]; + int32 dur; + int32 channel; + struct ether_addr da; + uint16 random_int; + wlc_ssid_t ssid; + uint16 reps; + uint8 req_elements; + uint8 pad_2; + chanspec_list_t chspec_list; +} bcn_req_t; + typedef struct rrmreq { struct ether_addr da; uint8 reg; @@ -5588,10 +6764,11 @@ typedef struct statreq { } statreq_t; #define WL_RRM_RPT_VER 0 -#define WL_RRM_RPT_MAX_PAYLOAD 64 +#define WL_RRM_RPT_MAX_PAYLOAD 256 #define WL_RRM_RPT_MIN_PAYLOAD 7 #define WL_RRM_RPT_FALG_ERR 0 -#define WL_RRM_RPT_FALG_OK 1 +#define WL_RRM_RPT_FALG_GRP_ID_PROPR (1 << 0) +#define WL_RRM_RPT_FALG_GRP_ID_0 (1 << 1) typedef struct { uint16 ver; /* version */ struct ether_addr addr; /* STA MAC addr */ @@ -5700,6 +6877,16 @@ typedef struct wl_bssload_static { } wl_bssload_static_t; +/* IO Var Operations - the Value of iov_op In wlc_ap_doiovar */ +typedef enum wlc_ap_iov_operation { + WLC_AP_IOV_OP_DELETE = -1, + WLC_AP_IOV_OP_DISABLE = 0, + WLC_AP_IOV_OP_ENABLE = 1, + WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE = 2, + WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE = 3, + 
WLC_AP_IOV_OP_MOVE = 4 +} wlc_ap_iov_oper_t; + /* LTE coex info */ /* Analogue of HCI Set MWS Signaling cmd */ typedef struct { @@ -5814,6 +7001,598 @@ wl_wlc_version_t; #define WLC_VERSION_MAJOR 3 #define WLC_VERSION_MINOR 0 +/* begin proxd definitions */ +#include + +#define WL_PROXD_API_VERSION 0x0300 /* version 3.0 */ + +/* Minimum supported API version */ +#define WL_PROXD_API_MIN_VERSION 0x0300 + +/* proximity detection methods */ +enum { + WL_PROXD_METHOD_NONE = 0, + WL_PROXD_METHOD_RSVD1 = 1, /* backward compatibility - RSSI, not supported */ + WL_PROXD_METHOD_TOF = 2, + WL_PROXD_METHOD_RSVD2 = 3, /* 11v only - if needed */ + WL_PROXD_METHOD_FTM = 4, /* IEEE rev mc/2014 */ + WL_PROXD_METHOD_MAX +}; +typedef int16 wl_proxd_method_t; + +/* global and method configuration flags */ +enum { + WL_PROXD_FLAG_NONE = 0x00000000, + WL_PROXD_FLAG_RX_ENABLED = 0x00000001, /* respond to requests */ + WL_PROXD_FLAG_RX_RANGE_REQ = 0x00000002, /* 11mc range requests enabled */ + WL_PROXD_FLAG_TX_LCI = 0x00000004, /* transmit location, if available */ + WL_PROXD_FLAG_TX_CIVIC = 0x00000008, /* tx civic loc, if available */ + WL_PROXD_FLAG_RX_AUTO_BURST = 0x00000010, /* respond to requests w/o host action */ + WL_PROXD_FLAG_TX_AUTO_BURST = 0x00000020, /* continue requests w/o host action */ + WL_PROXD_FLAG_AVAIL_PUBLISH = 0x00000040, /* publish availability */ + WL_PROXD_FLAG_AVAIL_SCHEDULE = 0x00000080, /* schedule using availability */ + WL_PROXD_FLAG_ALL = 0xffffffff +}; +typedef uint32 wl_proxd_flags_t; + +#define WL_PROXD_FLAGS_AVAIL (WL_PROXD_FLAG_AVAIL_PUBLISH | \ + WL_PROXD_FLAG_AVAIL_SCHEDULE) + +/* session flags */ +enum { + WL_PROXD_SESSION_FLAG_NONE = 0x00000000, /* no flags */ + WL_PROXD_SESSION_FLAG_INITIATOR = 0x00000001, /* local device is initiator */ + WL_PROXD_SESSION_FLAG_TARGET = 0x00000002, /* local device is target */ + WL_PROXD_SESSION_FLAG_ONE_WAY = 0x00000004, /* (initiated) 1-way rtt */ + WL_PROXD_SESSION_FLAG_AUTO_BURST = 0x00000008, /* created w/ 
rx_auto_burst */ + WL_PROXD_SESSION_FLAG_PERSIST = 0x00000010, /* good until cancelled */ + WL_PROXD_SESSION_FLAG_RTT_DETAIL = 0x00000020, /* rtt detail in results */ + WL_PROXD_SESSION_FLAG_TOF_COMPAT = 0x00000040, /* TOF compatibility - TBD */ + WL_PROXD_SESSION_FLAG_AOA = 0x00000080, /* AOA along w/ RTT */ + WL_PROXD_SESSION_FLAG_RX_AUTO_BURST = 0x00000100, /* Same as proxd flags above */ + WL_PROXD_SESSION_FLAG_TX_AUTO_BURST = 0x00000200, /* Same as proxd flags above */ + WL_PROXD_SESSION_FLAG_NAN_BSS = 0x00000400, /* Use NAN BSS, if applicable */ + WL_PROXD_SESSION_FLAG_TS1 = 0x00000800, /* e.g. FTM1 - cap or rx */ + WL_PROXD_SESSION_FLAG_REPORT_FAILURE= 0x00002000, /* report failure to target */ + WL_PROXD_SESSION_FLAG_INITIATOR_RPT = 0x00004000, /* report distance to target */ + WL_PROXD_SESSION_FLAG_NOCHANSWT = 0x00008000, /* No channel switching */ + WL_PROXD_SESSION_FLAG_NETRUAL = 0x00010000, /* netrual mode */ + WL_PROXD_SESSION_FLAG_SEQ_EN = 0x00020000, /* Toast */ + WL_PROXD_SESSION_FLAG_NO_PARAM_OVRD = 0x00040000, /* no param override from target */ + WL_PROXD_SESSION_FLAG_ASAP = 0x00080000, /* ASAP session */ + WL_PROXD_SESSION_FLAG_REQ_LCI = 0x00100000, /* transmit LCI req */ + WL_PROXD_SESSION_FLAG_REQ_CIV = 0x00200000, /* transmit civic loc req */ + WL_PROXD_SESSION_FLAG_COLLECT = 0x80000000, /* debug - collect */ + WL_PROXD_SESSION_FLAG_ALL = 0xffffffff +}; +typedef uint32 wl_proxd_session_flags_t; + +/* time units - mc supports up to 0.1ns resolution */ +enum { + WL_PROXD_TMU_TU = 0, /* 1024us */ + WL_PROXD_TMU_SEC = 1, + WL_PROXD_TMU_MILLI_SEC = 2, + WL_PROXD_TMU_MICRO_SEC = 3, + WL_PROXD_TMU_NANO_SEC = 4, + WL_PROXD_TMU_PICO_SEC = 5 +}; +typedef int16 wl_proxd_tmu_t; + +/* time interval e.g. 
10ns */ +typedef struct wl_proxd_intvl { + uint32 intvl; + wl_proxd_tmu_t tmu; + uint8 pad[2]; +} wl_proxd_intvl_t; + +/* commands that can apply to proxd, method or a session */ +enum { + WL_PROXD_CMD_NONE = 0, + WL_PROXD_CMD_GET_VERSION = 1, + WL_PROXD_CMD_ENABLE = 2, + WL_PROXD_CMD_DISABLE = 3, + WL_PROXD_CMD_CONFIG = 4, + WL_PROXD_CMD_START_SESSION = 5, + WL_PROXD_CMD_BURST_REQUEST = 6, + WL_PROXD_CMD_STOP_SESSION = 7, + WL_PROXD_CMD_DELETE_SESSION = 8, + WL_PROXD_CMD_GET_RESULT = 9, + WL_PROXD_CMD_GET_INFO = 10, + WL_PROXD_CMD_GET_STATUS = 11, + WL_PROXD_CMD_GET_SESSIONS = 12, + WL_PROXD_CMD_GET_COUNTERS = 13, + WL_PROXD_CMD_CLEAR_COUNTERS = 14, + WL_PROXD_CMD_COLLECT = 15, + WL_PROXD_CMD_TUNE = 16, + WL_PROXD_CMD_DUMP = 17, + WL_PROXD_CMD_START_RANGING = 18, + WL_PROXD_CMD_STOP_RANGING = 19, + WL_PROXD_CMD_GET_RANGING_INFO = 20, + WL_PROXD_CMD_IS_TLV_SUPPORTED = 21, + + WL_PROXD_CMD_MAX +}; +typedef int16 wl_proxd_cmd_t; + +/* session ids: + * id 0 is reserved + * ids 1..0x7fff - allocated by host/app + * 0x8000-0xffff - allocated by firmware, used for auto/rx + */ +enum { + WL_PROXD_SESSION_ID_GLOBAL = 0 +}; + +#define WL_PROXD_SID_HOST_MAX 0x7fff +#define WL_PROXD_SID_HOST_ALLOC(_sid) ((_sid) > 0 && (_sid) <= WL_PROXD_SID_HOST_MAX) + +/* maximum number sessions that can be allocated, may be less if tunable */ +#define WL_PROXD_MAX_SESSIONS 16 + +typedef uint16 wl_proxd_session_id_t; + +/* status - TBD BCME_ vs proxd status - range reserved for BCME_ */ +enum { + WL_PROXD_E_POLICY = -1045, + WL_PROXD_E_INCOMPLETE = -1044, + WL_PROXD_E_OVERRIDDEN = -1043, + WL_PROXD_E_ASAP_FAILED = -1042, + WL_PROXD_E_NOTSTARTED = -1041, + WL_PROXD_E_INVALIDAVB = -1040, + WL_PROXD_E_INCAPABLE = -1039, + WL_PROXD_E_MISMATCH = -1038, + WL_PROXD_E_DUP_SESSION = -1037, + WL_PROXD_E_REMOTE_FAIL = -1036, + WL_PROXD_E_REMOTE_INCAPABLE = -1035, + WL_PROXD_E_SCHED_FAIL = -1034, + WL_PROXD_E_PROTO = -1033, + WL_PROXD_E_EXPIRED = -1032, + WL_PROXD_E_TIMEOUT = -1031, + WL_PROXD_E_NOACK = 
-1030, + WL_PROXD_E_DEFERRED = -1029, + WL_PROXD_E_INVALID_SID = -1028, + WL_PROXD_E_REMOTE_CANCEL = -1027, + WL_PROXD_E_CANCELED = -1026, /* local */ + WL_PROXD_E_INVALID_SESSION = -1025, + WL_PROXD_E_BAD_STATE = -1024, + WL_PROXD_E_ERROR = -1, + WL_PROXD_E_OK = 0 +}; +typedef int32 wl_proxd_status_t; + +/* session states */ +enum { + WL_PROXD_SESSION_STATE_NONE = 0, + WL_PROXD_SESSION_STATE_CREATED = 1, + WL_PROXD_SESSION_STATE_CONFIGURED = 2, + WL_PROXD_SESSION_STATE_STARTED = 3, + WL_PROXD_SESSION_STATE_DELAY = 4, + WL_PROXD_SESSION_STATE_USER_WAIT = 5, + WL_PROXD_SESSION_STATE_SCHED_WAIT = 6, + WL_PROXD_SESSION_STATE_BURST = 7, + WL_PROXD_SESSION_STATE_STOPPING = 8, + WL_PROXD_SESSION_STATE_ENDED = 9, + WL_PROXD_SESSION_STATE_DESTROYING = -1 +}; +typedef int16 wl_proxd_session_state_t; + +/* RTT sample flags */ +enum { + WL_PROXD_RTT_SAMPLE_NONE = 0x00, + WL_PROXD_RTT_SAMPLE_DISCARD = 0x01 +}; +typedef uint8 wl_proxd_rtt_sample_flags_t; + +typedef struct wl_proxd_rtt_sample { + uint8 id; /* id for the sample - non-zero */ + wl_proxd_rtt_sample_flags_t flags; + int16 rssi; + wl_proxd_intvl_t rtt; /* round trip time */ + uint32 ratespec; +} wl_proxd_rtt_sample_t; + +/* result flags */ +enum { + WL_PRXOD_RESULT_FLAG_NONE = 0x0000, + WL_PROXD_RESULT_FLAG_NLOS = 0x0001, /* LOS - if available */ + WL_PROXD_RESULT_FLAG_LOS = 0x0002, /* NLOS - if available */ + WL_PROXD_RESULT_FLAG_FATAL = 0x0004, /* Fatal error during burst */ + WL_PROXD_RESULT_FLAG_ALL = 0xffff +}; +typedef int16 wl_proxd_result_flags_t; + +/* rtt measurement result */ +typedef struct wl_proxd_rtt_result { + wl_proxd_session_id_t sid; + wl_proxd_result_flags_t flags; + wl_proxd_status_t status; + struct ether_addr peer; + wl_proxd_session_state_t state; /* current state */ + union { + wl_proxd_intvl_t retry_after; /* hint for errors */ + wl_proxd_intvl_t burst_duration; /* burst duration */ + } u; + wl_proxd_rtt_sample_t avg_rtt; + uint32 avg_dist; /* 1/256m units */ + uint16 sd_rtt; /* RTT standard 
deviation */ + uint8 num_valid_rtt; /* valid rtt cnt */ + uint8 num_ftm; /* actual num of ftm cnt */ + uint16 burst_num; /* in a session */ + uint16 num_rtt; /* 0 if no detail */ + wl_proxd_rtt_sample_t rtt[1]; /* variable */ +} wl_proxd_rtt_result_t; + +/* aoa measurement result */ +typedef struct wl_proxd_aoa_result { + wl_proxd_session_id_t sid; + wl_proxd_result_flags_t flags; + wl_proxd_status_t status; + struct ether_addr peer; + wl_proxd_session_state_t state; + uint16 burst_num; + uint8 pad[2]; + /* wl_proxd_aoa_sample_t sample_avg; TBD */ +} BWL_POST_PACKED_STRUCT wl_proxd_aoa_result_t; + +/* global stats */ +typedef struct wl_proxd_counters { + uint32 tx; /* tx frame count */ + uint32 rx; /* rx frame count */ + uint32 burst; /* total number of burst */ + uint32 sessions; /* total number of sessions */ + uint32 max_sessions; /* max concurrency */ + uint32 sched_fail; /* scheduling failures */ + uint32 timeouts; /* timeouts */ + uint32 protoerr; /* protocol errors */ + uint32 noack; /* tx w/o ack */ + uint32 txfail; /* any tx falure */ + uint32 lci_req_tx; /* tx LCI requests */ + uint32 lci_req_rx; /* rx LCI requests */ + uint32 lci_rep_tx; /* tx LCI reports */ + uint32 lci_rep_rx; /* rx LCI reports */ + uint32 civic_req_tx; /* tx civic requests */ + uint32 civic_req_rx; /* rx civic requests */ + uint32 civic_rep_tx; /* tx civic reports */ + uint32 civic_rep_rx; /* rx civic reports */ + uint32 rctx; /* ranging contexts created */ + uint32 rctx_done; /* count of ranging done */ + uint32 publish_err; /* availability publishing errors */ + uint32 on_chan; /* count of scheduler onchan */ + uint32 off_chan; /* count of scheduler offchan */ +} wl_proxd_counters_t; + +typedef struct wl_proxd_counters wl_proxd_session_counters_t; + +enum { + WL_PROXD_CAP_NONE = 0x0000, + WL_PROXD_CAP_ALL = 0xffff +}; +typedef int16 wl_proxd_caps_t; + +/* method capabilities */ +enum { + WL_PROXD_FTM_CAP_NONE = 0x0000, + WL_PROXD_FTM_CAP_FTM1 = 0x0001 +}; +typedef uint16 
wl_proxd_ftm_caps_t; + +typedef struct BWL_PRE_PACKED_STRUCT wl_proxd_tlv_id_list { + uint16 num_ids; + uint16 ids[1]; +} BWL_POST_PACKED_STRUCT wl_proxd_tlv_id_list_t; + +typedef struct wl_proxd_session_id_list { + uint16 num_ids; + wl_proxd_session_id_t ids[1]; +} wl_proxd_session_id_list_t; + +/* tlvs returned for get_info on ftm method + * configuration: + * proxd flags + * event mask + * debug mask + * session defaults (session tlvs) + * status tlv - not supported for ftm method + * info tlv + */ +typedef struct wl_proxd_ftm_info { + wl_proxd_ftm_caps_t caps; + uint16 max_sessions; + uint16 num_sessions; + uint16 rx_max_burst; +} wl_proxd_ftm_info_t; + +/* tlvs returned for get_info on session + * session config (tlvs) + * session info tlv + */ +typedef struct wl_proxd_ftm_session_info { + uint16 sid; + uint8 bss_index; + uint8 pad; + struct ether_addr bssid; + wl_proxd_session_state_t state; + wl_proxd_status_t status; + uint16 burst_num; +} wl_proxd_ftm_session_info_t; + +typedef struct wl_proxd_ftm_session_status { + uint16 sid; + wl_proxd_session_state_t state; + wl_proxd_status_t status; + uint16 burst_num; +} wl_proxd_ftm_session_status_t; + +/* rrm range request */ +typedef struct wl_proxd_range_req { + uint16 num_repeat; + uint16 init_delay_range; /* in TUs */ + uint8 pad; + uint8 num_nbr; /* number of (possible) neighbors */ + nbr_element_t nbr[1]; +} wl_proxd_range_req_t; + +#define WL_PROXD_LCI_LAT_OFF 0 +#define WL_PROXD_LCI_LONG_OFF 5 +#define WL_PROXD_LCI_ALT_OFF 10 + +#define WL_PROXD_LCI_GET_LAT(_lci, _lat, _lat_err) { \ + unsigned _off = WL_PROXD_LCI_LAT_OFF; \ + _lat_err = (_lci)->data[(_off)] & 0x3f; \ + _lat = (_lci)->data[(_off)+1]; \ + _lat |= (_lci)->data[(_off)+2] << 8; \ + _lat |= (_lci)->data[_(_off)+3] << 16; \ + _lat |= (_lci)->data[(_off)+4] << 24; \ + _lat <<= 2; \ + _lat |= (_lci)->data[(_off)] >> 6; \ +} + +#define WL_PROXD_LCI_GET_LONG(_lci, _lcilong, _long_err) { \ + unsigned _off = WL_PROXD_LCI_LONG_OFF; \ + _long_err = 
(_lci)->data[(_off)] & 0x3f; \ + _lcilong = (_lci)->data[(_off)+1]; \ + _lcilong |= (_lci)->data[(_off)+2] << 8; \ + _lcilong |= (_lci)->data[_(_off)+3] << 16; \ + _lcilong |= (_lci)->data[(_off)+4] << 24; \ + __lcilong <<= 2; \ + _lcilong |= (_lci)->data[(_off)] >> 6; \ +} + +#define WL_PROXD_LCI_GET_ALT(_lci, _alt_type, _alt, _alt_err) { \ + unsigned _off = WL_PROXD_LCI_ALT_OFF; \ + _alt_type = (_lci)->data[_off] & 0x0f; \ + _alt_err = (_lci)->data[(_off)] >> 4; \ + _alt_err |= ((_lci)->data[(_off)+1] & 0x03) << 4; \ + _alt = (_lci)->data[(_off)+2]; \ + _alt |= (_lci)->data[(_off)+3] << 8; \ + _alt |= (_lci)->data[_(_off)+4] << 16; \ + _alt <<= 6; \ + _alt |= (_lci)->data[(_off) + 1] >> 2; \ +} + +#define WL_PROXD_LCI_VERSION(_lci) ((_lci)->data[15] >> 6) + +/* availability. advertising mechanism bss specific */ +/* availablity flags */ +enum { + WL_PROXD_AVAIL_NONE = 0, + WL_PROXD_AVAIL_NAN_PUBLISHED = 0x0001, + WL_PROXD_AVAIL_SCHEDULED = 0x0002 /* scheduled by proxd */ +}; +typedef int16 wl_proxd_avail_flags_t; + +/* time reference */ +enum { + WL_PROXD_TREF_NONE = 0, + WL_PROXD_TREF_DEV_TSF = 1, + WL_PROXD_TREF_NAN_DW = 2, + WL_PROXD_TREF_TBTT = 3, + WL_PROXD_TREF_MAX /* last entry */ +}; +typedef int16 wl_proxd_time_ref_t; + +/* proxd channel-time slot */ +typedef struct { + wl_proxd_intvl_t start; /* from ref */ + wl_proxd_intvl_t duration; /* from start */ + uint32 chanspec; +} wl_proxd_time_slot_t; + +typedef struct wl_proxd_avail24 { + wl_proxd_avail_flags_t flags; /* for query only */ + wl_proxd_time_ref_t time_ref; + uint16 max_slots; /* for query only */ + uint16 num_slots; + wl_proxd_time_slot_t slots[1]; /* ROM compat - not used */ + wl_proxd_intvl_t repeat; + wl_proxd_time_slot_t ts0[1]; +} wl_proxd_avail24_t; +#define WL_PROXD_AVAIL24_TIMESLOT(_avail24, _i) (&(_avail24)->ts0[(_i)]) +#define WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) OFFSETOF(wl_proxd_avail24_t, ts0) +#define WL_PROXD_AVAIL24_TIMESLOTS(_avail24) WL_PROXD_AVAIL24_TIMESLOT(_avail24, 
0) +#define WL_PROXD_AVAIL24_SIZE(_avail24, _num_slots) (\ + WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) + \ + (_num_slots) * sizeof(*WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0))) + +typedef struct wl_proxd_avail { + wl_proxd_avail_flags_t flags; /* for query only */ + wl_proxd_time_ref_t time_ref; + uint16 max_slots; /* for query only */ + uint16 num_slots; + wl_proxd_intvl_t repeat; + wl_proxd_time_slot_t slots[1]; +} wl_proxd_avail_t; +#define WL_PROXD_AVAIL_TIMESLOT(_avail, _i) (&(_avail)->slots[(_i)]) +#define WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) OFFSETOF(wl_proxd_avail_t, slots) + +#define WL_PROXD_AVAIL_TIMESLOTS(_avail) WL_PROXD_AVAIL_TIMESLOT(_avail, 0) +#define WL_PROXD_AVAIL_SIZE(_avail, _num_slots) (\ + WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) + \ + (_num_slots) * sizeof(*WL_PROXD_AVAIL_TIMESLOT(_avail, 0))) + +/* collect support TBD */ + +/* debugging */ +enum { + WL_PROXD_DEBUG_NONE = 0x00000000, + WL_PROXD_DEBUG_LOG = 0x00000001, + WL_PROXD_DEBUG_IOV = 0x00000002, + WL_PROXD_DEBUG_EVENT = 0x00000004, + WL_PROXD_DEBUG_SESSION = 0x00000008, + WL_PROXD_DEBUG_PROTO = 0x00000010, + WL_PROXD_DEBUG_SCHED = 0x00000020, + WL_PROXD_DEBUG_RANGING = 0x00000040, + WL_PROXD_DEBUG_ALL = 0xffffffff +}; +typedef uint32 wl_proxd_debug_mask_t; + +/* tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */ +enum { + WL_PROXD_TLV_ID_NONE = 0, + WL_PROXD_TLV_ID_METHOD = 1, + WL_PROXD_TLV_ID_FLAGS = 2, + WL_PROXD_TLV_ID_CHANSPEC = 3, /* note: uint32 */ + WL_PROXD_TLV_ID_TX_POWER = 4, + WL_PROXD_TLV_ID_RATESPEC = 5, + WL_PROXD_TLV_ID_BURST_DURATION = 6, /* intvl - length of burst */ + WL_PROXD_TLV_ID_BURST_PERIOD = 7, /* intvl - between bursts */ + WL_PROXD_TLV_ID_BURST_FTM_SEP = 8, /* intvl - between FTMs */ + WL_PROXD_TLV_ID_BURST_NUM_FTM = 9, /* uint16 - per burst */ + WL_PROXD_TLV_ID_NUM_BURST = 10, /* uint16 */ + WL_PROXD_TLV_ID_FTM_RETRIES = 11, /* uint16 at FTM level */ + WL_PROXD_TLV_ID_BSS_INDEX = 12, /* uint8 */ + WL_PROXD_TLV_ID_BSSID = 13, + 
WL_PROXD_TLV_ID_INIT_DELAY = 14, /* intvl - optional, non-standalone only */ + WL_PROXD_TLV_ID_BURST_TIMEOUT = 15, /* expect response within - intvl */ + WL_PROXD_TLV_ID_EVENT_MASK = 16, /* interested events - in/out */ + WL_PROXD_TLV_ID_FLAGS_MASK = 17, /* interested flags - in only */ + WL_PROXD_TLV_ID_PEER_MAC = 18, /* mac address of peer */ + WL_PROXD_TLV_ID_FTM_REQ = 19, /* dot11_ftm_req */ + WL_PROXD_TLV_ID_LCI_REQ = 20, + WL_PROXD_TLV_ID_LCI = 21, + WL_PROXD_TLV_ID_CIVIC_REQ = 22, + WL_PROXD_TLV_ID_CIVIC = 23, + WL_PROXD_TLV_ID_AVAIL24 = 24, /* ROM compatibility */ + WL_PROXD_TLV_ID_SESSION_FLAGS = 25, + WL_PROXD_TLV_ID_SESSION_FLAGS_MASK = 26, /* in only */ + WL_PROXD_TLV_ID_RX_MAX_BURST = 27, /* uint16 - limit bursts per session */ + WL_PROXD_TLV_ID_RANGING_INFO = 28, /* ranging info */ + WL_PROXD_TLV_ID_RANGING_FLAGS = 29, /* uint16 */ + WL_PROXD_TLV_ID_RANGING_FLAGS_MASK = 30, /* uint16, in only */ + WL_PROXD_TLV_ID_NAN_MAP_ID = 31, + WL_PROXD_TLV_ID_DEV_ADDR = 32, + WL_PROXD_TLV_ID_AVAIL = 33, /* wl_proxd_avail_t */ + WL_PROXD_TLV_ID_TLV_ID = 34, /* uint16 tlv-id */ + WL_PROXD_TLV_ID_FTM_REQ_RETRIES = 35, /* uint16 FTM request retries */ + + /* output - 512 + x */ + WL_PROXD_TLV_ID_STATUS = 512, + WL_PROXD_TLV_ID_COUNTERS = 513, + WL_PROXD_TLV_ID_INFO = 514, + WL_PROXD_TLV_ID_RTT_RESULT = 515, + WL_PROXD_TLV_ID_AOA_RESULT = 516, + WL_PROXD_TLV_ID_SESSION_INFO = 517, + WL_PROXD_TLV_ID_SESSION_STATUS = 518, + WL_PROXD_TLV_ID_SESSION_ID_LIST = 519, + + /* debug tlvs can be added starting 1024 */ + WL_PROXD_TLV_ID_DEBUG_MASK = 1024, + WL_PROXD_TLV_ID_COLLECT = 1025, /* output only */ + WL_PROXD_TLV_ID_STRBUF = 1026, + + WL_PROXD_TLV_ID_MAX +}; + +typedef struct wl_proxd_tlv { + uint16 id; + uint16 len; + uint8 data[1]; +} wl_proxd_tlv_t; + +/* proxd iovar - applies to proxd, method or session */ +typedef struct wl_proxd_iov { + uint16 version; + uint16 len; + wl_proxd_cmd_t cmd; + wl_proxd_method_t method; + wl_proxd_session_id_t sid; + uint8 pad[2]; + 
wl_proxd_tlv_t tlvs[1]; /* variable */ +} wl_proxd_iov_t; + +#define WL_PROXD_IOV_HDR_SIZE OFFSETOF(wl_proxd_iov_t, tlvs) + +/* The following event definitions may move to bcmevent.h, but sharing proxd types + * across needs more invasive changes unrelated to proxd + */ +enum { + WL_PROXD_EVENT_NONE = 0, /* not an event, reserved */ + WL_PROXD_EVENT_SESSION_CREATE = 1, + WL_PROXD_EVENT_SESSION_START = 2, + WL_PROXD_EVENT_FTM_REQ = 3, + WL_PROXD_EVENT_BURST_START = 4, + WL_PROXD_EVENT_BURST_END = 5, + WL_PROXD_EVENT_SESSION_END = 6, + WL_PROXD_EVENT_SESSION_RESTART = 7, + WL_PROXD_EVENT_BURST_RESCHED = 8, /* burst rescheduled - e.g. partial TSF */ + WL_PROXD_EVENT_SESSION_DESTROY = 9, + WL_PROXD_EVENT_RANGE_REQ = 10, + WL_PROXD_EVENT_FTM_FRAME = 11, + WL_PROXD_EVENT_DELAY = 12, + WL_PROXD_EVENT_VS_INITIATOR_RPT = 13, /* (target) rx initiator-report */ + WL_PROXD_EVENT_RANGING = 14, + WL_PROXD_EVENT_LCI_MEAS_REP = 15, /* LCI measurement report */ + WL_PROXD_EVENT_CIVIC_MEAS_REP = 16, /* civic measurement report */ + + WL_PROXD_EVENT_MAX +}; +typedef int16 wl_proxd_event_type_t; + +/* proxd event mask - upto 32 events for now */ +typedef uint32 wl_proxd_event_mask_t; + +#define WL_PROXD_EVENT_MASK_ALL 0xfffffffe +#define WL_PROXD_EVENT_MASK_EVENT(_event_type) (1 << (_event_type)) +#define WL_PROXD_EVENT_ENABLED(_mask, _event_type) (\ + ((_mask) & WL_PROXD_EVENT_MASK_EVENT(_event_type)) != 0) + +/* proxd event - applies to proxd, method or session */ +typedef struct wl_proxd_event { + uint16 version; + uint16 len; + wl_proxd_event_type_t type; + wl_proxd_method_t method; + wl_proxd_session_id_t sid; + uint8 pad[2]; + wl_proxd_tlv_t tlvs[1]; /* variable */ +} wl_proxd_event_t; + +enum { + WL_PROXD_RANGING_STATE_NONE = 0, + WL_PROXD_RANGING_STATE_NOTSTARTED = 1, + WL_PROXD_RANGING_STATE_INPROGRESS = 2, + WL_PROXD_RANGING_STATE_DONE = 3 +}; +typedef int16 wl_proxd_ranging_state_t; + +/* proxd ranging flags */ +enum { + WL_PROXD_RANGING_FLAG_NONE = 0x0000, /* no flags */ + 
WL_PROXD_RANGING_FLAG_DEL_SESSIONS_ON_STOP = 0x0001, + WL_PROXD_RANGING_FLAG_ALL = 0xffff +}; +typedef uint16 wl_proxd_ranging_flags_t; + +struct wl_proxd_ranging_info { + wl_proxd_status_t status; + wl_proxd_ranging_state_t state; + wl_proxd_ranging_flags_t flags; + uint16 num_sids; + uint16 num_done; +}; +typedef struct wl_proxd_ranging_info wl_proxd_ranging_info_t; +#include +/* end proxd definitions */ /* require strict packing */ #include @@ -5852,7 +7631,7 @@ typedef struct wl_bssload_cfg { /* Multiple roaming profile suport */ #define WL_MAX_ROAM_PROF_BRACKETS 4 -#define WL_MAX_ROAM_PROF_VER 0 +#define WL_MAX_ROAM_PROF_VER 1 #define WL_ROAM_PROF_NONE (0 << 0) #define WL_ROAM_PROF_LAZY (1 << 0) @@ -5861,6 +7640,8 @@ typedef struct wl_bssload_cfg { #define WL_ROAM_PROF_SYNC_DTIM (1 << 6) #define WL_ROAM_PROF_DEFAULT (1 << 7) /* backward compatible single default profile */ +#define WL_FACTOR_TABLE_MAX_LIMIT 5 + typedef struct wl_roam_prof { int8 roam_flags; /* bit flags */ int8 roam_trigger; /* RSSI trigger level per profile/RSSI bracket */ @@ -5873,6 +7654,8 @@ typedef struct wl_roam_prof { uint16 init_scan_period; uint16 backoff_multiplier; uint16 max_scan_period; + uint8 channel_usage; + uint8 cu_avg_calc_dur; } wl_roam_prof_t; typedef struct wl_roam_prof_band { @@ -5927,4 +7710,356 @@ typedef struct wl_interface_info { /* no default structure packing */ #include +#define TBOW_MAX_SSID_LEN 32 +#define TBOW_MAX_PASSPHRASE_LEN 63 + +#define WL_TBOW_SETUPINFO_T_VERSION 1 /* version of tbow_setup_netinfo_t */ +typedef struct tbow_setup_netinfo { + uint32 version; + uint8 opmode; + uint8 pad; + uint8 macaddr[ETHER_ADDR_LEN]; + uint32 ssid_len; + uint8 ssid[TBOW_MAX_SSID_LEN]; + uint8 passphrase_len; + uint8 passphrase[TBOW_MAX_PASSPHRASE_LEN]; + chanspec_t chanspec; +} tbow_setup_netinfo_t; + +typedef enum tbow_ho_opmode { + TBOW_HO_MODE_START_GO = 0, + TBOW_HO_MODE_START_STA, + TBOW_HO_MODE_START_GC, + TBOW_HO_MODE_TEST_GO, + TBOW_HO_MODE_STOP_GO = 0x10, + 
TBOW_HO_MODE_STOP_STA, + TBOW_HO_MODE_STOP_GC, + TBOW_HO_MODE_TEARDOWN +} tbow_ho_opmode_t; + +/* Beacon trim feature statistics */ +/* Configuration params */ +#define M_BCNTRIM_N (0) /* Enable/Disable Beacon Trim */ +#define M_BCNTRIM_TIMEND (1) /* Waiting time for TIM IE to end */ +#define M_BCNTRIM_TSFTLRN (2) /* TSF tolerance value (usecs) */ +/* PSM internal use */ +#define M_BCNTRIM_PREVBCNLEN (3) /* Beacon length excluding the TIM IE */ +#define M_BCNTRIM_N_COUNTER (4) /* PSM's local beacon trim counter */ +#define M_BCNTRIM_STATE (5) /* PSM's Beacon trim status register */ +#define M_BCNTRIM_TIMLEN (6) /* TIM IE Length */ +#define M_BCNTRIM_BMPCTL (7) /* Bitmap control word */ +#define M_BCNTRIM_TSF_L (8) /* Lower TSF word */ +#define M_BCNTRIM_TSF_ML (9) /* Lower middle TSF word */ +#define M_BCNTRIM_RSSI (10) /* Partial beacon RSSI */ +#define M_BCNTRIM_CHANNEL (11) /* Partial beacon channel */ +/* Trimming Counters */ +#define M_BCNTRIM_SBCNRXED (12) /* Self-BSSID beacon received */ +#define M_BCNTRIM_CANTRIM (13) /* Num of beacons which can be trimmed */ +#define M_BCNTRIM_TRIMMED (14) /* # beacons which were trimmed */ +#define M_BCNTRIM_BCNLENCNG (15) /* # beacons trimmed due to length change */ +#define M_BCNTRIM_TSFADJ (16) /* # beacons not trimmed due to large TSF delta */ +#define M_BCNTRIM_TIMNOTFOUND (17) /* # beacons not trimmed due to TIM missing */ +#define M_RXTSFTMRVAL_WD0 (18) +#define M_RXTSFTMRVAL_WD1 (19) +#define M_RXTSFTMRVAL_WD2 (20) +#define M_RXTSFTMRVAL_WD3 (21) +#define BCNTRIM_STATS_NUMPARAMS (22) /* 16 bit words */ + +#define TXPWRCAP_MAX_NUM_CORES 8 +#define TXPWRCAP_MAX_NUM_ANTENNAS (TXPWRCAP_MAX_NUM_CORES * 2) + +typedef struct wl_txpwrcap_tbl { + uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES]; + /* Stores values for valid antennas */ + int8 pwrcap_cell_on[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */ + int8 pwrcap_cell_off[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */ +} wl_txpwrcap_tbl_t; + +/* -------------- dynamic 
BTCOEX --------------- */ +/* require strict packing */ +#include + +#define DCTL_TROWS 2 /* currently practical number of rows */ +#define DCTL_TROWS_MAX 4 /* 2 extra rows RFU */ +/* DYNCTL profile flags */ +#define DCTL_FLAGS_DYNCTL (1 << 0) /* 1 - enabled, 0 - legacy only */ +#define DCTL_FLAGS_DESENSE (1 << 1) /* auto desense is enabled */ +#define DCTL_FLAGS_MSWITCH (1 << 2) /* mode switching is enabled */ +/* for now AGG on/off is handled separately */ +#define DCTL_FLAGS_TX_AGG_OFF (1 << 3) /* TBD: allow TX agg Off */ +#define DCTL_FLAGS_RX_AGG_OFF (1 << 4) /* TBD: allow RX agg Off */ +/* used for dry run testing only */ +#define DCTL_FLAGS_DRYRUN (1 << 7) /* Eenables dynctl dry run mode */ +#define IS_DYNCTL_ON(prof) ((prof->flags & DCTL_FLAGS_DYNCTL) != 0) +#define IS_DESENSE_ON(prof) ((prof->flags & DCTL_FLAGS_DESENSE) != 0) +#define IS_MSWITCH_ON(prof) ((prof->flags & DCTL_FLAGS_MSWITCH) != 0) +/* desense level currently in use */ +#define DESENSE_OFF 0 +#define DFLT_DESENSE_MID 12 +#define DFLT_DESENSE_HIGH 2 + +/* + * dynctl data points(a set of btpwr & wlrssi thresholds) + * for mode & desense switching + */ +typedef struct btc_thr_data { + int8 mode; /* used by desense sw */ + int8 bt_pwr; /* BT tx power threshold */ + int8 bt_rssi; /* BT rssi threshold */ + /* wl rssi range when mode or desense change may be needed */ + int8 wl_rssi_high; + int8 wl_rssi_low; +} btc_thr_data_t; + +/* dynctl. 
profile data structure */ +#define DCTL_PROFILE_VER 0x01 +typedef BWL_PRE_PACKED_STRUCT struct dctl_prof { + uint8 version; /* dynctl profile version */ + /* dynctl profile flags bit:0 - dynctl On, bit:1 dsns On, bit:2 mode sw On, */ + uint8 flags; /* bit[6:3] reserved, bit7 - Dryrun (sim) - On */ + /* wl desense levels to apply */ + uint8 dflt_dsns_level; + uint8 low_dsns_level; + uint8 mid_dsns_level; + uint8 high_dsns_level; + /* mode switching hysteresis in dBm */ + int8 msw_btrssi_hyster; + /* default btcoex mode */ + uint8 default_btc_mode; + /* num of active rows in mode switching table */ + uint8 msw_rows; + /* num of rows in desense table */ + uint8 dsns_rows; + /* dynctl mode switching data table */ + btc_thr_data_t msw_data[DCTL_TROWS_MAX]; + /* dynctl desense switching data table */ + btc_thr_data_t dsns_data[DCTL_TROWS_MAX]; +} BWL_POST_PACKED_STRUCT dctl_prof_t; + +/* dynctl status info */ +typedef BWL_PRE_PACKED_STRUCT struct dynctl_status { + bool sim_on; /* true if simulation is On */ + uint16 bt_pwr_shm; /* BT per/task power as read from ucode */ + int8 bt_pwr; /* BT pwr extracted & converted to dBm */ + int8 bt_rssi; /* BT rssi in dBm */ + int8 wl_rssi; /* last wl rssi reading used by btcoex */ + uint8 dsns_level; /* current desense level */ + uint8 btc_mode; /* current btcoex mode */ + /* add more status items if needed, pad to 4 BB if needed */ +} BWL_POST_PACKED_STRUCT dynctl_status_t; + +/* dynctl simulation (dryrun data) */ +typedef BWL_PRE_PACKED_STRUCT struct dynctl_sim { + bool sim_on; /* simulation mode on/off */ + int8 btpwr; /* simulated BT power in dBm */ + int8 btrssi; /* simulated BT rssi in dBm */ + int8 wlrssi; /* simulated WL rssi in dBm */ +} BWL_POST_PACKED_STRUCT dynctl_sim_t; +/* no default structure packing */ +#include + +/* PTK key maintained per SCB */ +#define RSN_TEMP_ENCR_KEY_LEN 16 +typedef struct wpa_ptk { + uint8 kck[RSN_KCK_LENGTH]; /* EAPOL-Key Key Confirmation Key (KCK) */ + uint8 kek[RSN_KEK_LENGTH]; /* 
EAPOL-Key Key Encryption Key (KEK) */ + uint8 tk1[RSN_TEMP_ENCR_KEY_LEN]; /* Temporal Key 1 (TK1) */ + uint8 tk2[RSN_TEMP_ENCR_KEY_LEN]; /* Temporal Key 2 (TK2) */ +} wpa_ptk_t; + +/* GTK key maintained per SCB */ +typedef struct wpa_gtk { + uint32 idx; + uint32 key_len; + uint8 key[DOT11_MAX_KEY_SIZE]; +} wpa_gtk_t; + +/* FBT Auth Response Data structure */ +typedef struct wlc_fbt_auth_resp { + uint8 macaddr[ETHER_ADDR_LEN]; /* station mac address */ + uint8 pad[2]; + uint8 pmk_r1_name[WPA2_PMKID_LEN]; + wpa_ptk_t ptk; /* pairwise key */ + wpa_gtk_t gtk; /* group key */ + uint32 ie_len; + uint8 status; /* Status of parsing FBT authentication + Request in application + */ + uint8 ies[1]; /* IEs contains MDIE, RSNIE, + FBTIE (ANonce, SNonce,R0KH-ID, R1KH-ID) + */ +} wlc_fbt_auth_resp_t; + +/* FBT Action Response frame */ +typedef struct wlc_fbt_action_resp { + uint16 version; /* structure version */ + uint16 length; /* length of structure */ + uint8 macaddr[ETHER_ADDR_LEN]; /* station mac address */ + uint8 data_len; /* len of ie from Category */ + uint8 data[1]; /* data contains category, action, sta address, target ap, + status code,fbt response frame body + */ +} wlc_fbt_action_resp_t; + +#define MACDBG_PMAC_ADDR_INPUT_MAXNUM 16 +#define MACDBG_PMAC_OBJ_TYPE_LEN 8 + +typedef struct _wl_macdbg_pmac_param_t { + char type[MACDBG_PMAC_OBJ_TYPE_LEN]; + uint8 step; + uint8 num; + uint32 bitmap; + bool addr_raw; + uint8 addr_num; + uint16 addr[MACDBG_PMAC_ADDR_INPUT_MAXNUM]; +} wl_macdbg_pmac_param_t; + +/* IOVAR 'svmp_mem' parameter. 
Used to read/clear svmp memory */ +typedef struct svmp_mem { + uint32 addr; /* offset to read svmp memory from vasip base address */ + uint16 len; /* length in count of uint16's */ + uint16 val; /* set the range of addr/len with a value */ +} svmp_mem_t; + +#define WL_NAN_BAND_STR_SIZE 5 /* sizeof ("auto") */ + +/* Definitions of different NAN Bands */ +enum { /* mode selection for reading/writing tx iqlo cal coefficients */ + NAN_BAND_AUTO, + NAN_BAND_B, + NAN_BAND_A, + NAN_BAND_INVALID = 0xFF +}; + +#if defined(WL_LINKSTAT) +typedef struct { + uint32 preamble; + uint32 nss; + uint32 bw; + uint32 rateMcsIdx; + uint32 reserved; + uint32 bitrate; +} wifi_rate; + +typedef struct { + uint16 version; + uint16 length; + uint32 tx_mpdu; + uint32 rx_mpdu; + uint32 mpdu_lost; + uint32 retries; + uint32 retries_short; + uint32 retries_long; + wifi_rate rate; +} wifi_rate_stat_t; + +typedef int32 wifi_radio; + +typedef struct { + uint16 version; + uint16 length; + wifi_radio radio; + uint32 on_time; + uint32 tx_time; + uint32 rx_time; + uint32 on_time_scan; + uint32 on_time_nbd; + uint32 on_time_gscan; + uint32 on_time_roam_scan; + uint32 on_time_pno_scan; + uint32 on_time_hs20; + uint32 num_channels; + uint8 channels[1]; +} wifi_radio_stat; +#endif /* WL_LINKSTAT */ + +#ifdef WL11ULB +/* ULB Mode configured via "ulb_mode" IOVAR */ +enum { + ULB_MODE_DISABLED = 0, + ULB_MODE_STD_ALONE_MODE = 1, /* Standalone ULB Mode */ + ULB_MODE_DYN_MODE = 2, /* Dynamic ULB Mode */ + /* Add all other enums before this */ + MAX_SUPP_ULB_MODES +}; + +/* ULB BWs configured via "ulb_bw" IOVAR during Standalone Mode Only. 
+ * Values of this enumeration are also used to specify 'Current Operational Bandwidth' + * and 'Primary Operational Bandwidth' sub-fields in 'ULB Operations' field (used in + * 'ULB Operations' Attribute or 'ULB Mode Switch' Attribute) + */ +typedef enum { + ULB_BW_DISABLED = 0, + ULB_BW_10MHZ = 1, /* Standalone ULB BW in 10 MHz BW */ + ULB_BW_5MHZ = 2, /* Standalone ULB BW in 5 MHz BW */ + ULB_BW_2P5MHZ = 3, /* Standalone ULB BW in 2.5 MHz BW */ + /* Add all other enums before this */ + MAX_SUPP_ULB_BW +} ulb_bw_type_t; +#endif /* WL11ULB */ + +#if defined(WLRCC) +#define MAX_ROAM_CHANNEL 20 + +typedef struct { + int n; + chanspec_t channels[MAX_ROAM_CHANNEL]; +} wl_roam_channel_list_t; +#endif + + +/* + * Neighbor Discover Offload: enable NDO feature + * Called by ipv6 event handler when interface comes up + * Set RA rate limit interval value(%) + */ +typedef struct nd_ra_ol_limits { + uint16 version; /* version of the iovar buffer */ + uint16 type; /* type of data provided */ + uint16 length; /* length of the entire structure */ + uint16 pad1; /* pad union to 4 byte boundary */ + union { + struct { + uint16 min_time; /* seconds, min time for RA offload hold */ + uint16 lifetime_percent; + /* percent, lifetime percentage for offload hold time */ + } lifetime_relative; + struct { + uint16 hold_time; /* seconds, RA offload hold time */ + uint16 pad2; /* unused */ + } fixed; + } limits; +} nd_ra_ol_limits_t; + +#define ND_RA_OL_LIMITS_VER 1 + +/* nd_ra_ol_limits sub-types */ +#define ND_RA_OL_LIMITS_REL_TYPE 0 /* relative, percent of RA lifetime */ +#define ND_RA_OL_LIMITS_FIXED_TYPE 1 /* fixed time */ + +/* buffer lengths for the different nd_ra_ol_limits types */ +#define ND_RA_OL_LIMITS_REL_TYPE_LEN 12 +#define ND_RA_OL_LIMITS_FIXED_TYPE_LEN 10 + +#define ND_RA_OL_SET "SET" +#define ND_RA_OL_GET "GET" +#define ND_PARAM_SIZE 50 +#define ND_VALUE_SIZE 5 +#define ND_PARAMS_DELIMETER " " +#define ND_PARAM_VALUE_DELLIMETER '=' +#define ND_LIMIT_STR_FMT ("%50s %50s") 
+ +#define ND_RA_TYPE "TYPE" +#define ND_RA_MIN_TIME "MIN" +#define ND_RA_PER "PER" +#define ND_RA_HOLD "HOLD" + +/* + * Temperature Throttling control mode + */ +typedef struct wl_temp_control { + bool enable; + uint16 control_bit; +} wl_temp_control_t; + #endif /* _wlioctl_h_ */ diff --git a/drivers/amlogic/wifi/bcmdhd/include/wlioctl_utils.h b/drivers/amlogic/wifi/bcmdhd/include/wlioctl_utils.h new file mode 100644 index 0000000000000..c3fe428580b46 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/include/wlioctl_utils.h @@ -0,0 +1,53 @@ +/* + * Custom OID/ioctl related helper functions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * <> + * + * $Id: wlioctl_utils.h 555740 2015-05-11 10:16:23Z $ + */ + +#ifndef _wlioctl_utils_h_ +#define _wlioctl_utils_h_ + +#include + +#ifndef BCMDRIVER +#define CCA_THRESH_MILLI 14 +#define CCA_THRESH_INTERFERE 6 + +extern cca_congest_channel_req_t * cca_per_chan_summary(cca_congest_channel_req_t *input, + cca_congest_channel_req_t *avg, bool percent); + +extern int cca_analyze(cca_congest_channel_req_t *input[], int num_chans, + uint flags, chanspec_t *answer); +#endif /* BCMDRIVER */ + +extern int wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, + int buflen, uint32 corerev); + +/* Get data pointer of wlc layer counters tuple from xtlv formatted counters IOVar buffer. */ +#define GET_WLCCNT_FROM_CNTBUF(cntbuf) \ + bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)cntbuf)->data, \ + ((wl_cnt_info_t *)cntbuf)->datalen, WL_CNT_XTLV_WLC, \ + NULL, BCM_XTLV_OPTION_ALIGN32) + +#endif /* _wlioctl_utils_h_ */ diff --git a/drivers/net/wireless/bcmdhd/linux_osl.c b/drivers/amlogic/wifi/bcmdhd/linux_osl.c similarity index 76% rename from drivers/net/wireless/bcmdhd/linux_osl.c rename to drivers/amlogic/wifi/bcmdhd/linux_osl.c index 23014835032be..d8dac6c48420c 100644 --- a/drivers/net/wireless/bcmdhd/linux_osl.c +++ b/drivers/amlogic/wifi/bcmdhd/linux_osl.c @@ -1,9 +1,30 @@ /* * Linux OS Independent Layer * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * 
you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: linux_osl.c 503131 2014-09-17 12:16:08Z $ + * + * <> + * + * $Id: linux_osl.c 602478 2015-11-26 04:46:12Z $ */ #define LINUX_PORT @@ -13,9 +34,12 @@ #include #include -#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__) + +#if !defined(STBLINUX) +#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) #include -#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */ +#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */ +#endif /* STBLINUX */ #include @@ -23,8 +47,9 @@ #include #include #include +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0)) #include - +#endif #ifdef BCM_SECURE_DMA @@ -50,16 +75,10 @@ #include -#ifdef BCM47XX_ACP_WAR -#include -extern spinlock_t l2x0_reg_lock; -#endif -#if defined(BCMPCIE) -#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING) -#include -#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */ -#endif /* BCMPCIE */ +#ifdef BCM_OBJECT_TRACE +#include +#endif /* BCM_OBJECT_TRACE */ #define PCI_CFG_RETRY 10 @@ -67,11 +86,25 @@ extern spinlock_t l2x0_reg_lock; #define BCM_MEM_FILENAME_LEN 24 /* Mem. 
filename length */ #define DUMPBUFSZ 1024 +/* dependancy check */ +#if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF) +#error "DHD_USE_STATIC_CTRLBUF suppored PCIE target only" +#endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */ + #ifdef CONFIG_DHD_USE_STATIC_BUF +#ifdef DHD_USE_STATIC_CTRLBUF +#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1) +#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2) +#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4) + +#define PREALLOC_FREE_MAGIC 0xFEDC +#define PREALLOC_USED_MAGIC 0xFCDE +#else #define DHD_SKB_HDRSIZE 336 #define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE) #define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE) #define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE) +#endif /* DHD_USE_STATIC_CTRLBUF */ #define STATIC_BUF_MAX_NUM 16 #define STATIC_BUF_SIZE (PAGE_SIZE*2) @@ -85,41 +118,49 @@ typedef struct bcm_static_buf { static bcm_static_buf_t *bcm_static_buf = 0; -#define STATIC_PKT_MAX_NUM 8 -#if defined(ENHANCED_STATIC_BUF) +#ifdef DHD_USE_STATIC_CTRLBUF +#define STATIC_PKT_4PAGE_NUM 0 +#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE +#elif defined(ENHANCED_STATIC_BUF) #define STATIC_PKT_4PAGE_NUM 1 #define DHD_SKB_MAX_BUFSIZE DHD_SKB_4PAGE_BUFSIZE #else #define STATIC_PKT_4PAGE_NUM 0 -#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE -#endif /* ENHANCED_STATIC_BUF */ +#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE +#endif /* DHD_USE_STATIC_CTRLBUF */ + +#ifdef DHD_USE_STATIC_CTRLBUF +#define STATIC_PKT_1PAGE_NUM 0 +#define STATIC_PKT_2PAGE_NUM 64 +#else +#define STATIC_PKT_1PAGE_NUM 8 +#define STATIC_PKT_2PAGE_NUM 8 +#endif /* DHD_USE_STATIC_CTRLBUF */ + +#define STATIC_PKT_1_2PAGE_NUM \ + ((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM)) +#define STATIC_PKT_MAX_NUM \ + ((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM)) typedef struct bcm_static_pkt { - struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM]; - struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM]; +#ifdef DHD_USE_STATIC_CTRLBUF + struct sk_buff 
*skb_8k[STATIC_PKT_2PAGE_NUM]; + unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM]; + spinlock_t osl_pkt_lock; + uint32 last_allocated_index; +#else + struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM]; + struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM]; #ifdef ENHANCED_STATIC_BUF struct sk_buff *skb_16k; -#endif +#endif /* ENHANCED_STATIC_BUF */ struct semaphore osl_pkt_sem; - unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM]; +#endif /* DHD_USE_STATIC_CTRLBUF */ + unsigned char pkt_use[STATIC_PKT_MAX_NUM]; } bcm_static_pkt_t; static bcm_static_pkt_t *bcm_static_skb = 0; -#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING) -#define STATIC_BUF_FLOWRING_SIZE ((PAGE_SIZE)*(7)) -#define STATIC_BUF_FLOWRING_NUM 42 -#define RINGID_TO_FLOWID(idx) ((idx) + (BCMPCIE_H2D_COMMON_MSGRINGS) \ - - (BCMPCIE_H2D_TXFLOWRINGID)) -typedef struct bcm_static_flowring_buf { - spinlock_t flowring_lock; - void *buf_ptr[STATIC_BUF_FLOWRING_NUM]; - unsigned char buf_use[STATIC_BUF_FLOWRING_NUM]; -} bcm_static_flowring_buf_t; - -bcm_static_flowring_buf_t *bcm_static_flowring = 0; -#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */ - void* wifi_platform_prealloc(void *adapter, int section, unsigned long size); #endif /* CONFIG_DHD_USE_STATIC_BUF */ @@ -144,6 +185,7 @@ typedef struct osl_cmn_info osl_cmn_t; struct osl_info { osl_pubinfo_t pub; + uint32 flags; /* If specific cases to be handled in the OSL */ #ifdef CTFPOOL ctfpool_t *ctfpool; #endif /* CTFPOOL */ @@ -159,7 +201,6 @@ struct osl_info { struct list_head ctrace_list; int ctrace_num; #endif /* BCMDBG_CTRACE */ - uint32 flags; /* If specific cases to be handled in the OSL */ #ifdef BCM_SECURE_DMA struct cma_dev *cma; struct sec_mem_elem *sec_list_512; @@ -182,7 +223,6 @@ struct osl_info { } sec_cma_coherent[SEC_CMA_COHERENT_MAX]; #endif /* BCM_SECURE_DMA */ - }; #ifdef BCM_SECURE_DMA phys_addr_t g_contig_delta_va_pa; @@ -205,6 +245,18 @@ static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bi static 
void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa); #endif /* BCM_SECURE_DMA */ +#ifdef BCM_OBJECT_TRACE +/* don't clear the first 4 byte that is the pkt sn */ +#define OSL_PKTTAG_CLEAR(p) \ +do { \ + struct sk_buff *s = (struct sk_buff *)(p); \ + ASSERT(OSL_PKTTAG_SZ == 32); \ + *(uint32 *)(&s->cb[4]) = 0; \ + *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \ + *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \ + *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \ +} while (0) +#else #define OSL_PKTTAG_CLEAR(p) \ do { \ struct sk_buff *s = (struct sk_buff *)(p); \ @@ -214,11 +266,12 @@ do { \ *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \ *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \ } while (0) +#endif /* BCM_OBJECT_TRACE */ /* PCMCIA attribute space access macros */ /* Global ASSERT type flag */ -uint32 g_assert_type = 0; +uint32 g_assert_type = 1; module_param(g_assert_type, int, 0); static int16 linuxbcmerrormap[] = @@ -275,16 +328,18 @@ static int16 linuxbcmerrormap[] = -EIO, /* BCME_MICERR */ -ERANGE, /* BCME_REPLAY */ -EINVAL, /* BCME_IE_NOTFOUND */ + -EINVAL, /* BCME_DATA_NOTFOUND */ /* When an new error code is added to bcmutils.h, add os * specific error translation here as well */ /* check if BCME_LAST changed since the last time this function was updated */ -#if BCME_LAST != -52 +#if BCME_LAST != -53 #error "You need to add a OS error translation in the linuxbcmerrormap \ for new error code defined in bcmutils.h" #endif }; +uint lmtest = FALSE; /* translate bcmerrors into linux errors */ int @@ -339,6 +394,8 @@ osl_attach(void *pdev, uint bustype, bool pkttag) } atomic_add(1, &osh->cmn->refcount); + bcm_object_trace_init(); + /* Check that error map has the right number of entries in it */ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1)); @@ -351,15 +408,9 @@ osl_attach(void *pdev, uint bustype, bool pkttag) osl_sec_dma_setup_contig_mem(osh, CMA_MEMBLOCK, 
CONT_ARMREGION); -#ifdef BCM47XX_CA9 - osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh, - phys_to_page((u32)osh->contig_base_alloc), - CMA_DMA_DESC_MEMBLOCK, TRUE, TRUE); -#else osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh, phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE); -#endif /* BCM47XX_CA9 */ osh->contig_base_alloc_coherent = osh->contig_base_alloc; osl_sec_dma_init_consistent(osh); @@ -412,70 +463,50 @@ osl_attach(void *pdev, uint bustype, bool pkttag) int osl_static_mem_init(osl_t *osh, void *adapter) { #ifdef CONFIG_DHD_USE_STATIC_BUF - if (!bcm_static_buf && adapter) { - if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter, - 3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) { - printk("can not alloc static buf!\n"); - bcm_static_skb = NULL; - ASSERT(osh->magic == OS_HANDLE_MAGIC); - return -ENOMEM; - } - else - printk("alloc static buf at %p!\n", bcm_static_buf); - - - sema_init(&bcm_static_buf->static_sem, 1); + if (!bcm_static_buf && adapter) { + if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter, + 3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) { + printk("can not alloc static buf!\n"); + bcm_static_skb = NULL; + ASSERT(osh->magic == OS_HANDLE_MAGIC); + return -ENOMEM; + } else { + printk("alloc static buf at %p!\n", bcm_static_buf); + } - bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE; - } + sema_init(&bcm_static_buf->static_sem, 1); -#ifdef BCMSDIO - if (!bcm_static_skb && adapter) { - int i; - void *skb_buff_ptr = 0; - bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048); - skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0); - if (!skb_buff_ptr) { - printk("cannot alloc static buf!\n"); - bcm_static_buf = NULL; - bcm_static_skb = NULL; - ASSERT(osh->magic == OS_HANDLE_MAGIC); - return -ENOMEM; + bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE; } - bcopy(skb_buff_ptr, 
bcm_static_skb, sizeof(struct sk_buff *) * - (STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM)); - for (i = 0; i < STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM; i++) - bcm_static_skb->pkt_use[i] = 0; +#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF) + if (!bcm_static_skb && adapter) { + int i; + void *skb_buff_ptr = 0; + bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048); + skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0); + if (!skb_buff_ptr) { + printk("cannot alloc static buf!\n"); + bcm_static_buf = NULL; + bcm_static_skb = NULL; + ASSERT(osh->magic == OS_HANDLE_MAGIC); + return -ENOMEM; + } - sema_init(&bcm_static_skb->osl_pkt_sem, 1); - } -#endif /* BCMSDIO */ -#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING) - if (!bcm_static_flowring && adapter) { - int i; - void *flowring_ptr = 0; - bcm_static_flowring = - (bcm_static_flowring_buf_t *)((char *)bcm_static_buf + 4096); - flowring_ptr = wifi_platform_prealloc(adapter, 10, 0); - if (!flowring_ptr) { - printk("%s: flowring_ptr is NULL\n", __FUNCTION__); - bcm_static_buf = NULL; - bcm_static_skb = NULL; - bcm_static_flowring = NULL; - ASSERT(osh->magic == OS_HANDLE_MAGIC); - return -ENOMEM; - } + bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) * + (STATIC_PKT_MAX_NUM)); + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + bcm_static_skb->pkt_use[i] = 0; + } - bcopy(flowring_ptr, bcm_static_flowring->buf_ptr, - sizeof(void *) * STATIC_BUF_FLOWRING_NUM); - for (i = 0; i < STATIC_BUF_FLOWRING_NUM; i++) { - bcm_static_flowring->buf_use[i] = 0; +#ifdef DHD_USE_STATIC_CTRLBUF + spin_lock_init(&bcm_static_skb->osl_pkt_lock); + bcm_static_skb->last_allocated_index = 0; +#else + sema_init(&bcm_static_skb->osl_pkt_sem, 1); +#endif /* DHD_USE_STATIC_CTRLBUF */ } - - spin_lock_init(&bcm_static_flowring->flowring_lock); - } -#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */ +#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */ #endif /* CONFIG_DHD_USE_STATIC_BUF */ return 0; @@ -496,6 
+527,7 @@ osl_detach(osl_t *osh) { if (osh == NULL) return; + #ifdef BCM_SECURE_DMA osl_sec_dma_free_contig_mem(osh, CMA_MEMBLOCK, CONT_ARMREGION); osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512); @@ -504,6 +536,9 @@ osl_detach(osl_t *osh) osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_MEMBLOCK); #endif /* BCM_SECURE_DMA */ + + bcm_object_trace_deinit(); + ASSERT(osh->magic == OS_HANDLE_MAGIC); atomic_sub(1, &osh->cmn->refcount); if (atomic_read(&osh->cmn->refcount) == 0) { @@ -523,11 +558,6 @@ int osl_static_mem_deinit(osl_t *osh, void *adapter) bcm_static_skb = 0; } #endif /* BCMSDIO */ -#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING) - if (bcm_static_flowring) { - bcm_static_flowring = 0; - } -#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */ #endif /* CONFIG_DHD_USE_STATIC_BUF */ return 0; } @@ -540,6 +570,9 @@ static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len) #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA) flags |= GFP_ATOMIC; #endif +#ifdef DHD_USE_ATOMIC_PKTGET + flags = GFP_ATOMIC; +#endif /* DHD_USE_ATOMIC_PKTGET */ skb = __dev_alloc_skb(len, flags); #else skb = dev_alloc_skb(len); @@ -697,11 +730,6 @@ osl_ctfpool_stats(osl_t *osh, void *b) bcm_static_skb = 0; } #endif /* BCMSDIO */ -#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING) - if (bcm_static_flowring) { - bcm_static_flowring = 0; - } -#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */ #endif /* CONFIG_DHD_USE_STATIC_BUF */ bb = b; @@ -802,7 +830,7 @@ osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt) #if defined(BCMDBG_CTRACE) int i; struct sk_buff *skb; -#endif +#endif #if defined(BCMDBG_CTRACE) if (skb_cnt > 1) { @@ -821,7 +849,7 @@ osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt) ADD_CTRACE(osh, skb, file, line); #endif /* BCMDBG_CTRACE */ } -#endif +#endif atomic_add(skb_cnt, &osh->cmn->pktalloced); } @@ -906,10 +934,20 @@ void * BCMFASTPATH #ifdef BCMDBG_CTRACE osl_pktget(osl_t *osh, uint len, int 
line, char *file) #else +#ifdef BCM_OBJECT_TRACE +osl_pktget(osl_t *osh, uint len, int line, const char *caller) +#else osl_pktget(osl_t *osh, uint len) +#endif /* BCM_OBJECT_TRACE */ #endif /* BCMDBG_CTRACE */ { struct sk_buff *skb; + uchar num = 0; + if (lmtest != FALSE) { + get_random_bytes(&num, sizeof(uchar)); + if ((num + 1) <= (256 * lmtest / 100)) + return NULL; + } #ifdef CTFPOOL /* Allocate from local pool */ @@ -927,6 +965,9 @@ osl_pktget(osl_t *osh, uint len) ADD_CTRACE(osh, skb, file, line); #endif atomic_inc(&osh->cmn->pktalloced); +#ifdef BCM_OBJECT_TRACE + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line); +#endif /* BCM_OBJECT_TRACE */ } return ((void*) skb); @@ -979,7 +1020,11 @@ osl_pktfastfree(osl_t *osh, struct sk_buff *skb) /* Free the driver packet. Free the tag if present */ void BCMFASTPATH +#ifdef BCM_OBJECT_TRACE +osl_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller) +#else osl_pktfree(osl_t *osh, void *p, bool send) +#endif /* BCM_OBJECT_TRACE */ { struct sk_buff *skb, *nskb; if (osh == NULL) @@ -992,6 +1037,22 @@ osl_pktfree(osl_t *osh, void *p, bool send) PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE); +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF) + if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) { + printk("%s: pkt %p is from static pool\n", + __FUNCTION__, p); + dump_stack(); + return; + } + + if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) { + printk("%s: pkt %p is from static pool and not in used\n", + __FUNCTION__, p); + dump_stack(); + return; + } +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */ + /* perversion: we use skb->next to chain multi-skb packets */ while (skb) { nskb = skb->next; @@ -1002,6 +1063,10 @@ osl_pktfree(osl_t *osh, void *p, bool send) #endif +#ifdef BCM_OBJECT_TRACE + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line); +#endif /* BCM_OBJECT_TRACE */ + #ifdef CTFPOOL if (PKTISFAST(osh, skb)) { if (atomic_read(&skb->users) 
== 1) @@ -1028,6 +1093,9 @@ osl_pktget_static(osl_t *osh, uint len) { int i = 0; struct sk_buff *skb; +#ifdef DHD_USE_STATIC_CTRLBUF + unsigned long flags; +#endif /* DHD_USE_STATIC_CTRLBUF */ if (!bcm_static_skb) return osl_pktget(osh, len); @@ -1037,12 +1105,57 @@ osl_pktget_static(osl_t *osh, uint len) return osl_pktget(osh, len); } +#ifdef DHD_USE_STATIC_CTRLBUF + spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags); + + if (len <= DHD_SKB_2PAGE_BUFSIZE) { + uint32 index; + for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) { + index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM; + bcm_static_skb->last_allocated_index++; + if (bcm_static_skb->skb_8k[index] && + bcm_static_skb->pkt_use[index] == 0) { + break; + } + } + + if ((i != STATIC_PKT_2PAGE_NUM) && + (index >= 0) && (index < STATIC_PKT_2PAGE_NUM)) { + bcm_static_skb->pkt_use[index] = 1; + skb = bcm_static_skb->skb_8k[index]; + skb->data = skb->head; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, NET_SKB_PAD); +#else + skb->tail = skb->data + NET_SKB_PAD; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->data += NET_SKB_PAD; + skb->cloned = 0; + skb->priority = 0; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, len); +#else + skb->tail = skb->data + len; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->len = len; + skb->mac_len = PREALLOC_USED_MAGIC; + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + return skb; + } + } + + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + printk("%s: all static pkt in use!\n", __FUNCTION__); + return NULL; +#else down(&bcm_static_skb->osl_pkt_sem); if (len <= DHD_SKB_1PAGE_BUFSIZE) { for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { - if (bcm_static_skb->pkt_use[i] == 0) + if (bcm_static_skb->skb_4k[i] && + bcm_static_skb->pkt_use[i] == 0) { break; + } } if (i != STATIC_PKT_MAX_NUM) { @@ -1062,15 +1175,16 @@ osl_pktget_static(osl_t *osh, uint len) } if (len <= DHD_SKB_2PAGE_BUFSIZE) { - for (i = 0; i < 
STATIC_PKT_MAX_NUM; i++) { - if (bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] - == 0) + for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) { + if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] && + bcm_static_skb->pkt_use[i] == 0) { break; + } } - if (i != STATIC_PKT_MAX_NUM) { - bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 1; - skb = bcm_static_skb->skb_8k[i]; + if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) { + bcm_static_skb->pkt_use[i] = 1; + skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]; #ifdef NET_SKBUFF_DATA_USES_OFFSET skb_set_tail_pointer(skb, len); #else @@ -1084,8 +1198,9 @@ osl_pktget_static(osl_t *osh, uint len) } #if defined(ENHANCED_STATIC_BUF) - if (bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] == 0) { - bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 1; + if (bcm_static_skb->skb_16k && + bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) { + bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1; skb = bcm_static_skb->skb_16k; #ifdef NET_SKBUFF_DATA_USES_OFFSET @@ -1103,19 +1218,55 @@ osl_pktget_static(osl_t *osh, uint len) up(&bcm_static_skb->osl_pkt_sem); printk("%s: all static pkt in use!\n", __FUNCTION__); return osl_pktget(osh, len); +#endif /* DHD_USE_STATIC_CTRLBUF */ } void osl_pktfree_static(osl_t *osh, void *p, bool send) { int i; +#ifdef DHD_USE_STATIC_CTRLBUF + struct sk_buff *skb = (struct sk_buff *)p; + unsigned long flags; +#endif /* DHD_USE_STATIC_CTRLBUF */ + + if (!p) { + return; + } + if (!bcm_static_skb) { osl_pktfree(osh, p, send); return; } +#ifdef DHD_USE_STATIC_CTRLBUF + spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags); + + for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) { + if (p == bcm_static_skb->skb_8k[i]) { + if (bcm_static_skb->pkt_use[i] == 0) { + printk("%s: static pkt idx %d(%p) is double free\n", + __FUNCTION__, i, p); + } else { + bcm_static_skb->pkt_use[i] = 0; + } + + if (skb->mac_len != PREALLOC_USED_MAGIC) { + printk("%s: static pkt idx %d(%p) is not in 
used\n", + __FUNCTION__, i, p); + } + + skb->mac_len = PREALLOC_FREE_MAGIC; + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + return; + } + } + + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p); +#else down(&bcm_static_skb->osl_pkt_sem); - for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) { if (p == bcm_static_skb->skb_4k[i]) { bcm_static_skb->pkt_use[i] = 0; up(&bcm_static_skb->osl_pkt_sem); @@ -1123,109 +1274,24 @@ osl_pktfree_static(osl_t *osh, void *p, bool send) } } - for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { - if (p == bcm_static_skb->skb_8k[i]) { - bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0; + for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) { + if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) { + bcm_static_skb->pkt_use[i] = 0; up(&bcm_static_skb->osl_pkt_sem); return; } } #ifdef ENHANCED_STATIC_BUF if (p == bcm_static_skb->skb_16k) { - bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 0; + bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0; up(&bcm_static_skb->osl_pkt_sem); return; } #endif up(&bcm_static_skb->osl_pkt_sem); osl_pktfree(osh, p, send); +#endif /* DHD_USE_STATIC_CTRLBUF */ } - -#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING) -void* -osl_dma_alloc_consistent_static(osl_t *osh, uint size, uint16 align_bits, - uint *alloced, dmaaddr_t *pap, uint16 idx) -{ - void *va = NULL; - uint16 align = (1 << align_bits); - uint16 flow_id = RINGID_TO_FLOWID(idx); - unsigned long flags; - - ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); - - if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align)) - size += align; - - if ((flow_id < 0) || (flow_id >= STATIC_BUF_FLOWRING_NUM)) { - printk("%s: flow_id %d is wrong\n", __FUNCTION__, flow_id); - return osl_dma_alloc_consistent(osh, size, align_bits, - alloced, pap); - } - - if (!bcm_static_flowring) { - printk("%s: bcm_static_flowring is not 
initialized\n", - __FUNCTION__); - return osl_dma_alloc_consistent(osh, size, align_bits, - alloced, pap); - } - - if (size > STATIC_BUF_FLOWRING_SIZE) { - printk("%s: attempt to allocate huge packet, size=%d\n", - __FUNCTION__, size); - return osl_dma_alloc_consistent(osh, size, align_bits, - alloced, pap); - } - - *alloced = size; - - spin_lock_irqsave(&bcm_static_flowring->flowring_lock, flags); - if (bcm_static_flowring->buf_use[flow_id]) { - printk("%s: flowring %d is already alloced\n", - __FUNCTION__, flow_id); - spin_unlock_irqrestore(&bcm_static_flowring->flowring_lock, flags); - return NULL; - } - - va = bcm_static_flowring->buf_ptr[flow_id]; - if (va) { - *pap = (ulong)__virt_to_phys((ulong)va); - bcm_static_flowring->buf_use[flow_id] = 1; - } - spin_unlock_irqrestore(&bcm_static_flowring->flowring_lock, flags); - - return va; -} - -void -osl_dma_free_consistent_static(osl_t *osh, void *va, uint size, - dmaaddr_t pa, uint16 idx) -{ - uint16 flow_id = RINGID_TO_FLOWID(idx); - unsigned long flags; - - ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); - - if ((flow_id < 0) || (flow_id >= STATIC_BUF_FLOWRING_NUM)) { - printk("%s: flow_id %d is wrong\n", __FUNCTION__, flow_id); - return osl_dma_free_consistent(osh, va, size, pa); - } - - if (!bcm_static_flowring) { - printk("%s: bcm_static_flowring is not initialized\n", - __FUNCTION__); - return osl_dma_free_consistent(osh, va, size, pa); - } - - spin_lock_irqsave(&bcm_static_flowring->flowring_lock, flags); - if (bcm_static_flowring->buf_use[flow_id]) { - bcm_static_flowring->buf_use[flow_id] = 0; - } else { - printk("%s: flowring %d is already freed\n", - __FUNCTION__, flow_id); - } - spin_unlock_irqrestore(&bcm_static_flowring->flowring_lock, flags); -} -#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */ #endif /* CONFIG_DHD_USE_STATIC_BUF */ uint32 @@ -1352,7 +1418,7 @@ osl_malloc(osl_t *osh, uint size) if (bcm_static_buf) { int i = 0; - if ((size >= PAGE_SIZE) && (size <= STATIC_BUF_SIZE)) + if ((size >= 
PAGE_SIZE)&&(size <= STATIC_BUF_SIZE)) { down(&bcm_static_buf->static_sem); @@ -1486,7 +1552,7 @@ osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced *alloced = size; #ifndef BCM_SECURE_DMA -#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__) +#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO); if (va) *pap = (ulong)__virt_to_phys((ulong)va); @@ -1494,14 +1560,21 @@ osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced { dma_addr_t pap_lin; struct pci_dev *hwdev = osh->pdev; -#ifdef PCIE_TX_DEFERRAL - va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, GFP_KERNEL); + gfp_t flags; +#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL + flags = GFP_ATOMIC; +#else + flags = GFP_KERNEL; +#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */ + va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags); +#ifdef BCMDMA64OSL + PHYSADDRLOSET(*pap, pap_lin & 0xffffffff); + PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff); #else - va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, GFP_ATOMIC); -#endif *pap = (dmaaddr_t)pap_lin; +#endif /* BCMDMA64OSL */ } -#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */ +#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */ #else va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap); #endif /* BCM_SECURE_DMA */ @@ -1511,17 +1584,22 @@ osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa) { -#ifndef BCM_SECURE_DMA -#if !defined(BCM47XX_CA9) || !defined(__ARM_ARCH_7A__) - struct pci_dev *hwdev = osh->pdev; -#endif +#ifdef BCMDMA64OSL + dma_addr_t paddr; +#endif /* BCMDMA64OSL */ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); -#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__) +#ifndef BCM_SECURE_DMA +#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) kfree(va); #else - 
dma_free_coherent(&hwdev->dev, size, va, (dma_addr_t)pa); -#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */ +#ifdef BCMDMA64OSL + PHYSADDRTOULONG(pa, paddr); + pci_free_consistent(osh->pdev, size, va, paddr); +#else + pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa); +#endif /* BCMDMA64OSL */ +#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */ #else osl_sec_dma_free_consistent(osh, va, size, pa); #endif /* BCM_SECURE_DMA */ @@ -1531,94 +1609,115 @@ dmaaddr_t BCMFASTPATH osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah) { int dir; -#ifdef BCM47XX_ACP_WAR - uint pa; -#endif +#ifdef BCMDMA64OSL + dmaaddr_t ret; + dma_addr_t map_addr; +#endif /* BCMDMA64OSL */ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; -#if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL) - if (dmah != NULL) { - int32 nsegs, i, totsegs = 0, totlen = 0; - struct scatterlist *sg, _sg[MAX_DMA_SEGS * 2]; -#ifdef BCM47XX_ACP_WAR - struct scatterlist *s; -#endif - struct sk_buff *skb; - for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) { - sg = &_sg[totsegs]; - if (skb_is_nonlinear(skb)) { - nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb)); - ASSERT((nsegs > 0) && (totsegs + nsegs <= MAX_DMA_SEGS)); -#ifdef BCM47XX_ACP_WAR - for_each_sg(sg, s, nsegs, i) { - if (sg_phys(s) >= ACP_WIN_LIMIT) { - dma_map_page(&((struct pci_dev *)osh->pdev)->dev, - sg_page(s), s->offset, s->length, dir); - } - } -#else - pci_map_sg(osh->pdev, sg, nsegs, dir); -#endif - } else { - nsegs = 1; - ASSERT(totsegs + nsegs <= MAX_DMA_SEGS); - sg->page_link = 0; - sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb)); -#ifdef BCM47XX_ACP_WAR - if (virt_to_phys(PKTDATA(osh, skb)) >= ACP_WIN_LIMIT) -#endif - pci_map_single(osh->pdev, PKTDATA(osh, skb), PKTLEN(osh, skb), dir); - } - totsegs += nsegs; - totlen += PKTLEN(osh, skb); - } - dmah->nsegs = totsegs; - dmah->origsize = totlen; - 
for (i = 0, sg = _sg; i < totsegs; i++, sg++) { - dmah->segs[i].addr = sg_phys(sg); - dmah->segs[i].length = sg->length; - } - return dmah->segs[0].addr; - } -#endif /* __ARM_ARCH_7A__ && BCMDMASGLISTOSL */ -#ifdef BCM47XX_ACP_WAR - pa = virt_to_phys(va); - if (pa < ACP_WIN_LIMIT) - return (pa); -#endif + + +#ifdef BCMDMA64OSL + map_addr = pci_map_single(osh->pdev, va, size, dir); + PHYSADDRLOSET(ret, map_addr & 0xffffffff); + PHYSADDRHISET(ret, (map_addr >> 32) & 0xffffffff); + return ret; +#else return (pci_map_single(osh->pdev, va, size, dir)); +#endif /* BCMDMA64OSL */ } void BCMFASTPATH -osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction) +osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction) { int dir; +#ifdef BCMDMA64OSL + dma_addr_t paddr; +#endif /* BCMDMA64OSL */ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); -#ifdef BCM47XX_ACP_WAR - if (pa < ACP_WIN_LIMIT) - return; -#endif + + dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; +#ifdef BCMDMA64OSL + PHYSADDRTOULONG(pa, paddr); + pci_unmap_single(osh->pdev, paddr, size, dir); +#else pci_unmap_single(osh->pdev, (uint32)pa, size, dir); +#endif /* BCMDMA64OSL */ +} + +/* OSL function for CPU relax */ +inline void BCMFASTPATH +osl_cpu_relax(void) +{ + cpu_relax(); } -#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__) +#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) || \ + defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)) + +#include +/* + * Note that its gauranteed that the Ring is cache line aligned, but + * the messages are not. And we see that __dma_inv_range in + * arch/arm64/mm/cache.S invalidates only if the request size is + * cache line aligned. If not, it will Clean and invalidate. + * So we'll better invalidate the whole ring. + * + * Also, the latest Kernel versions invoke cache maintenance operations + * from arch/arm64/mm/dma-mapping.c, __swiotlb_sync_single_for_device + * Only if is_device_dma_coherent returns 0. 
Since we don't have BSP + * source, assuming that its the case, since we pass NULL for the dev ptr + */ inline void BCMFASTPATH osl_cache_flush(void *va, uint size) { + /* + * using long for address arithmatic is OK, in linux + * 32 bit its 4 bytes and 64 bit its 8 bytes + */ + unsigned long end_cache_line_start; + unsigned long end_addr; + unsigned long next_cache_line_start; + + end_addr = (unsigned long)va + size; + + /* Start address beyond the cache line we plan to operate */ + end_cache_line_start = (end_addr & ~(L1_CACHE_BYTES - 1)); + next_cache_line_start = end_cache_line_start + L1_CACHE_BYTES; + + /* Align the start address to cache line boundary */ + va = (void *)((unsigned long)va & ~(L1_CACHE_BYTES - 1)); + + /* Ensure that size is also aligned and extends partial line to full */ + size = next_cache_line_start - (unsigned long)va; + #ifndef BCM_SECURE_DMA -#ifdef BCM47XX_ACP_WAR - if (virt_to_phys(va) < ACP_WIN_LIMIT) - return; -#endif + +#ifdef CONFIG_ARM64 + /* + * virt_to_dma is not present in arm64/include/dma-mapping.h + * So have to convert the va to pa first and then get the dma addr + * of the same. 
+ */ + { + phys_addr_t pa; + dma_addr_t dma_addr; + pa = virt_to_phys(va); + dma_addr = phys_to_dma(NULL, pa); + if (size > 0) + dma_sync_single_for_device(OSH_NULL, dma_addr, size, DMA_TX); + } +#else if (size > 0) dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TX); +#endif /* !CONFIG_ARM64 */ #else phys_addr_t orig_pa = (phys_addr_t)(va - g_contig_delta_va_pa); if (size > 0) @@ -1629,12 +1728,44 @@ osl_cache_flush(void *va, uint size) inline void BCMFASTPATH osl_cache_inv(void *va, uint size) { + /* + * using long for address arithmatic is OK, in linux + * 32 bit its 4 bytes and 64 bit its 8 bytes + */ + unsigned long end_cache_line_start; + unsigned long end_addr; + unsigned long next_cache_line_start; + + end_addr = (unsigned long)va + size; + + /* Start address beyond the cache line we plan to operate */ + end_cache_line_start = (end_addr & ~(L1_CACHE_BYTES - 1)); + next_cache_line_start = end_cache_line_start + L1_CACHE_BYTES; + + /* Align the start address to cache line boundary */ + va = (void *)((unsigned long)va & ~(L1_CACHE_BYTES - 1)); + + /* Ensure that size is also aligned and extends partial line to full */ + size = next_cache_line_start - (unsigned long)va; + #ifndef BCM_SECURE_DMA -#ifdef BCM47XX_ACP_WAR - if (virt_to_phys(va) < ACP_WIN_LIMIT) - return; -#endif + +#ifdef CONFIG_ARM64 + /* + * virt_to_dma is not present in arm64/include/dma-mapping.h + * So have to convert the va to pa first and then get the dma addr + * of the same. 
+ */ + { + phys_addr_t pa; + dma_addr_t dma_addr; + pa = virt_to_phys(va); + dma_addr = phys_to_dma(NULL, pa); + dma_sync_single_for_cpu(OSH_NULL, dma_addr, size, DMA_RX); + } +#else dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_RX); +#endif /* !CONFIG_ARM64 */ #else phys_addr_t orig_pa = (phys_addr_t)(va - g_contig_delta_va_pa); dma_sync_single_for_cpu(OSH_NULL, orig_pa, size, DMA_RX); @@ -1643,23 +1774,24 @@ osl_cache_inv(void *va, uint size) inline void osl_prefetch(const void *ptr) { - /* Borrowed from linux/linux-2.6/include/asm-arm/processor.h */ - __asm__ __volatile__( - "pld\t%0" - : - : "o" (*(char *)ptr) - : "cc"); + /* PLD instruction is not applicable in ARM 64. We don't care for now */ +#ifndef CONFIG_ARM64 + __asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc"); +#endif } int osl_arch_is_coherent(void) { -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) return 0; -#else - return arch_is_coherent(); -#endif } -#endif + + +inline int osl_acp_war_enab(void) +{ + return 0; +} + +#endif #if defined(BCMASSERT_LOG) void @@ -1679,12 +1811,28 @@ osl_assert(const char *exp, const char *file, int line) #ifdef BCMASSERT_LOG snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n", exp, basename, line); - printk("%s", tempbuf); #endif /* BCMASSERT_LOG */ +#if defined(BCMASSERT_LOG) + switch (g_assert_type) { + case 0: + panic("%s", tempbuf); + break; + case 1: + printk("%s", tempbuf); + break; + case 2: + printk("%s", tempbuf); + BUG(); + break; + default: + break; + } +#endif + } -#endif +#endif void osl_delay(uint usec) @@ -1718,7 +1866,11 @@ void * #ifdef BCMDBG_CTRACE osl_pktdup(osl_t *osh, void *skb, int line, char *file) #else +#ifdef BCM_OBJECT_TRACE +osl_pktdup(osl_t *osh, void *skb, int line, const char *caller) +#else osl_pktdup(osl_t *osh, void *skb) +#endif /* BCM_OBJECT_TRACE */ #endif /* BCMDBG_CTRACE */ { void * p; @@ -1767,6 +1919,10 @@ osl_pktdup(osl_t *osh, void *skb) /* Increment the packet counter */ 
atomic_inc(&osh->cmn->pktalloced); +#ifdef BCM_OBJECT_TRACE + bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line); +#endif /* BCM_OBJECT_TRACE */ + #ifdef BCMDBG_CTRACE ADD_CTRACE(osh, (struct sk_buff *)p, file, line); #endif @@ -1920,35 +2076,6 @@ osl_os_image_size(void *image) /* Linux Kernel: File Operations: end */ -#ifdef BCM47XX_ACP_WAR -inline void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size) -{ - uint32 flags; - int pci_access = 0; - - if (osh && BUSTYPE(osh->bustype) == PCI_BUS) - pci_access = 1; - - if (pci_access) - spin_lock_irqsave(&l2x0_reg_lock, flags); - switch (size) { - case sizeof(uint8): - *(uint8*)v = readb((volatile uint8*)(addr)); - break; - case sizeof(uint16): - *(uint16*)v = readw((volatile uint16*)(addr)); - break; - case sizeof(uint32): - *(uint32*)v = readl((volatile uint32*)(addr)); - break; - case sizeof(uint64): - *(uint64*)v = *((volatile uint64*)(addr)); - break; - } - if (pci_access) - spin_unlock_irqrestore(&l2x0_reg_lock, flags); -} -#endif /* BCM47XX_ACP_WAR */ /* APIs to set/get specific quirks in OSL layer */ void @@ -1962,6 +2089,7 @@ osl_is_flag_set(osl_t *osh, uint32 mask) { return (osh->flags & mask); } + #ifdef BCM_SECURE_DMA static void @@ -2060,13 +2188,6 @@ osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size) vunmap(contig_base_va); } -static void -osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base) -{ - if (sec_list_base) - kfree(sec_list_base); -} - static void osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list) { @@ -2102,6 +2223,13 @@ osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem } +static void +osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base) +{ + if (sec_list_base) + kfree(sec_list_base); +} + static sec_mem_elem_t * BCMFASTPATH osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction, struct sec_cma_info 
*ptr_cma_info, uint offset) @@ -2160,7 +2288,6 @@ osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem) printf("%s free failed size=%d \n", __FUNCTION__, sec_mem_elem->size); } - static sec_mem_elem_t * BCMFASTPATH osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle) { @@ -2353,7 +2480,6 @@ osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hndd (direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE)); return dma_handle; - } void BCMFASTPATH @@ -2487,3 +2613,42 @@ osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa) } #endif /* BCM_SECURE_DMA */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER) +#include +#include +void +osl_pkt_orphan_partial(struct sk_buff *skb, int tsq) +{ + uint32 fraction; + static void *p_tcp_wfree = NULL; + + if (tsq <= 0) + return; + + if (!skb->destructor || skb->destructor == sock_wfree) + return; + + if (unlikely(!p_tcp_wfree)) { + char sym[KSYM_SYMBOL_LEN]; + sprint_symbol(sym, (unsigned long)skb->destructor); + sym[9] = 0; + if (!strcmp(sym, "tcp_wfree")) + p_tcp_wfree = skb->destructor; + else + return; + } + + if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk)) + return; + + /* abstract a certain portion of skb truesize from the socket + * sk_wmem_alloc to allow more skb can be allocated for this + * socket for better cusion meeting WiFi device requirement + */ + fraction = skb->truesize * (tsq - 1) / tsq; + skb->truesize -= fraction; + atomic_sub(fraction, &skb->sk->sk_wmem_alloc); + skb_orphan(skb); +} +#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */ diff --git a/drivers/amlogic/wifi/bcmdhd/pcie_core.c b/drivers/amlogic/wifi/bcmdhd/pcie_core.c new file mode 100644 index 0000000000000..c36bc62ecdb4f --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/pcie_core.c @@ -0,0 +1,115 @@ +/** @file pcie_core.c + * + * Contains PCIe related functions that are shared between different driver models (e.g. 
firmware + * builds, DHD builds, BMAC builds), in order to avoid code duplication. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: pcie_core.c 591285 2015-10-07 11:56:29Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie_core.h" + +/* local prototypes */ + +/* local variables */ + +/* function definitions */ + +#ifdef BCMDRIVER + +void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs) +{ + uint32 val, i, lsc; + uint16 cfg_offset[] = {PCIECFGREG_STATUS_CMD, PCIECFGREG_PM_CSR, + PCIECFGREG_MSI_CAP, PCIECFGREG_MSI_ADDR_L, + PCIECFGREG_MSI_ADDR_H, PCIECFGREG_MSI_DATA, + PCIECFGREG_LINK_STATUS_CTRL2, PCIECFGREG_RBAR_CTRL, + PCIECFGREG_PML1_SUB_CTRL1, PCIECFGREG_REG_BAR2_CONFIG, + PCIECFGREG_REG_BAR3_CONFIG}; + sbpcieregs_t *pcie = NULL; + uint32 origidx = si_coreidx(sih); + + /* Switch to PCIE2 core */ + pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0); + BCM_REFERENCE(pcie); + ASSERT(pcie != NULL); + + /* Disable/restore ASPM Control to protect the watchdog reset */ + W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL); + lsc = R_REG(osh, &sbpcieregs->configdata); + val = lsc & (~PCIE_ASPM_ENAB); + W_REG(osh, &sbpcieregs->configdata, val); + + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 4); + OSL_DELAY(100000); + + W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL); + W_REG(osh, &sbpcieregs->configdata, lsc); + + if (sih->buscorerev <= 13) { + /* Write configuration registers back to the shadow registers + * cause shadow registers are cleared out after watchdog reset. 
+ */ + for (i = 0; i < ARRAYSIZE(cfg_offset); i++) { + W_REG(osh, &sbpcieregs->configaddr, cfg_offset[i]); + val = R_REG(osh, &sbpcieregs->configdata); + W_REG(osh, &sbpcieregs->configdata, val); + } + } + si_setcoreidx(sih, origidx); +} + + +/* CRWLPCIEGEN2-117 pcie_pipe_Iddq should be controlled + * by the L12 state from MAC to save power by putting the + * SerDes analog in IDDQ mode + */ +void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs) +{ + sbpcieregs_t *pcie = NULL; + uint crwlpciegen2_117_disable = 0; + uint32 origidx = si_coreidx(sih); + + crwlpciegen2_117_disable = PCIE_PipeIddqDisable0 | PCIE_PipeIddqDisable1; + /* Switch to PCIE2 core */ + pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0); + BCM_REFERENCE(pcie); + ASSERT(pcie != NULL); + + OR_REG(osh, &sbpcieregs->control, + crwlpciegen2_117_disable); + + si_setcoreidx(sih, origidx); +} +#endif /* BCMDRIVER */ diff --git a/drivers/net/wireless/bcmdhd/sbutils.c b/drivers/amlogic/wifi/bcmdhd/sbutils.c similarity index 95% rename from drivers/net/wireless/bcmdhd/sbutils.c rename to drivers/amlogic/wifi/bcmdhd/sbutils.c index a1f14b7d82f5a..0804ef4551350 100644 --- a/drivers/net/wireless/bcmdhd/sbutils.c +++ b/drivers/amlogic/wifi/bcmdhd/sbutils.c @@ -2,9 +2,30 @@ * Misc utility routines for accessing chip-specific features * of the SiliconBackplane-based Broadcom chips. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: sbutils.c 467150 2014-04-02 17:30:43Z $ + * + * <> + * + * $Id: sbutils.c 514727 2014-11-12 03:02:48Z $ */ #include @@ -1084,4 +1105,4 @@ sb_dumpregs(si_t *sih, struct bcmstrbuf *b) sb_setcoreidx(sih, origidx); INTR_RESTORE(sii, intr_val); } -#endif +#endif diff --git a/drivers/net/wireless/bcmdhd/siutils.c b/drivers/amlogic/wifi/bcmdhd/siutils.c similarity index 86% rename from drivers/net/wireless/bcmdhd/siutils.c rename to drivers/amlogic/wifi/bcmdhd/siutils.c index 29d6dc2f33346..e879cafd1bdf6 100644 --- a/drivers/net/wireless/bcmdhd/siutils.c +++ b/drivers/amlogic/wifi/bcmdhd/siutils.c @@ -2,9 +2,30 @@ * Misc utility routines for accessing chip-specific features * of the SiliconBackplane-based Broadcom chips. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: siutils.c 497460 2014-08-19 15:14:13Z $ + * + * <> + * + * $Id: siutils.c 552034 2015-04-24 19:00:35Z $ */ #include @@ -16,8 +37,12 @@ #include #include #include +#ifdef BCMPCIEDEV +#include +#endif /* BCMPCIEDEV */ #include #include +#include #include #ifdef BCMSDIO #include @@ -70,7 +95,6 @@ static void si_config_gcigpio(si_t *sih, uint32 gci_pos, uint8 gcigpio, #endif /* BCMLTECOEX */ - /* global variable to indicate reservation/release of gpio's */ static uint32 si_gpioreservation = 0; @@ -222,6 +246,34 @@ si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh) return TRUE; } +uint32 +si_get_pmu_reg_addr(si_t *sih, uint32 offset) +{ + si_info_t *sii = SI_INFO(sih); + uint32 pmuaddr = INVALID_ADDR; + uint origidx = 0; + + SI_MSG(("%s: pmu access, offset: %x\n", __FUNCTION__, offset)); + if (!(sii->pub.cccaps & CC_CAP_PMU)) { + goto done; + } + if (AOB_ENAB(&sii->pub)) { + uint pmucoreidx; + pmuregs_t *pmu; + SI_MSG(("%s: AOBENAB: %x\n", __FUNCTION__, offset)); + origidx = sii->curidx; + pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0); + pmu = si_setcoreidx(&sii->pub, pmucoreidx); + pmuaddr = (uint32)(uintptr)((volatile uint8*)pmu + offset); + si_setcoreidx(sih, origidx); + } else + pmuaddr = SI_ENUM_BASE + offset; + +done: + SI_MSG(("%s: pmuaddr: %x\n", __FUNCTION__, pmuaddr)); + return pmuaddr; +} + static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, uint *origidx, void *regs) @@ -231,6 +283,10 @@ si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, uint i; uint pciidx, pcieidx, pcirev, pcierev; + /* first, enable backplane timeouts */ + if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) + ai_enable_backplane_timeouts(&sii->pub); + cc = si_setcoreidx(&sii->pub, SI_CC_IDX); ASSERT((uintptr)cc); @@ -291,7 +347,9 @@ si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, /* now look at the chipstatus register to figure the pacakge */ /* for SDIO but 
downloaded on PCIE dev */ if (cid == PCIE2_CORE_ID) { - if ((CHIPID(sii->pub.chip) == BCM43602_CHIP_ID) || + if (BCM43602_CHIP(sii->pub.chip) || + (CHIPID(sii->pub.chip) == BCM4365_CHIP_ID) || + (CHIPID(sii->pub.chip) == BCM4366_CHIP_ID) || ((CHIPID(sii->pub.chip) == BCM4345_CHIP_ID || CHIPID(sii->pub.chip) == BCM43454_CHIP_ID) && CST4345_CHIPMODE_PCIE(sii->pub.chipst))) { @@ -338,9 +396,20 @@ si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, *origidx = i; } + #if defined(PCIE_FULL_DONGLE) - pci = FALSE; -#endif + if (pcie) { + if (pcie_gen2) + sii->pub.buscoretype = PCIE2_CORE_ID; + else + sii->pub.buscoretype = PCIE_CORE_ID; + sii->pub.buscorerev = pcierev; + sii->pub.buscoreidx = pcieidx; + } + BCM_REFERENCE(pci); + BCM_REFERENCE(pcirev); + BCM_REFERENCE(pciidx); +#else if (pci) { sii->pub.buscoretype = PCI_CORE_ID; sii->pub.buscorerev = pcirev; @@ -353,6 +422,7 @@ si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, sii->pub.buscorerev = pcierev; sii->pub.buscoreidx = pcieidx; } +#endif /* defined(PCIE_FULL_DONGLE) */ SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype, sii->pub.buscorerev)); @@ -397,8 +467,12 @@ si_chipid_fixup(si_t *sih) ASSERT(sii->chipnew == 0); switch (sih->chip) { - case BCM43570_CHIP_ID: + case BCM43567_CHIP_ID: + sii->chipnew = sih->chip; /* save it */ + sii->pub.chip = BCM43570_CHIP_ID; /* chip class */ + break; case BCM4358_CHIP_ID: + case BCM43566_CHIP_ID: sii->chipnew = sih->chip; /* save it */ sii->pub.chip = BCM43569_CHIP_ID; /* chip class */ break; @@ -407,7 +481,6 @@ si_chipid_fixup(si_t *sih) sii->pub.chip = BCM4354_CHIP_ID; /* chip class */ break; default: - ASSERT(0); break; } } @@ -428,7 +501,7 @@ si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, char *pvars = NULL; uint origidx; #if !defined(_CFEZ_) || defined(CFG_WL) -#endif +#endif ASSERT(GOODREGS(regs)); @@ -439,6 +512,7 @@ si_doattach(si_info_t *sii, uint devid, 
osl_t *osh, void *regs, sii->curmap = regs; sii->sdh = sdh; sii->osh = osh; + sii->second_bar0win = ~0x0; /* check to see if we are a si core mimic'ing a pci core */ @@ -467,12 +541,12 @@ si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, } sih->bustype = bustype; - if (bustype != BUSTYPE(bustype)) { +/* if (bustype != BUSTYPE(bustype)) { SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n", bustype, BUSTYPE(bustype))); return NULL; } - +*/ /* bus/core/clk setup for register access */ if (!si_buscore_prep(sii, bustype, devid, sdh)) { SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype)); @@ -488,18 +562,6 @@ si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__)); return NULL; } -#ifdef COSTOMER_HW4 -#ifdef CONFIG_MACH_UNIVERSAL5433 - /* old revision check */ - if (!check_rev()) { - /* abnormal link status */ - if (!check_pcie_link_status()) { - printk("%s : PCIE LINK is abnormal status\n", __FUNCTION__); - return NULL; - } - } -#endif /* CONFIG_MACH_UNIVERSAL5433 */ -#endif w = R_REG(osh, &cc->chipid); if ((w & 0xfffff) == 148277) w -= 65532; sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT; @@ -512,13 +574,9 @@ si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, dhd_conf_set_hw_oob_intr(sdh, sih->chip); #endif - if ((sih->chip == BCM4358_CHIP_ID) || - (sih->chip == BCM43570_CHIP_ID) || - (sih->chip == BCM4358_CHIP_ID)) { - si_chipid_fixup(sih); - } + si_chipid_fixup(sih); - if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) && + if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && CHIPREV(sih->chiprev == 0) && (sih->chippkg != BCM4329_289PIN_PKG_ID)) { sih->chippkg = BCM4329_182PIN_PKG_ID; } @@ -585,7 +643,7 @@ si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, if (bustype == PCI_BUS) { } -#endif +#endif #ifdef BCM_SDRBL /* 4360 rom bootloader in PCIE case, if the SDR is enabled, But preotection is * 
not turned on, then we want to hold arm in reset. @@ -624,6 +682,7 @@ si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, gpiopulldown |= 0x20500; } + W_REG(osh, &cc->gpiopullup, gpiopullup); W_REG(osh, &cc->gpiopulldown, gpiopulldown); si_setcoreidx(sih, origidx); @@ -1163,7 +1222,103 @@ si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val) return (ai_wrap_reg(sih, offset, mask, val)); return 0; } +/* si_backplane_access is used to read full backplane address from host for PCIE FD + * it uses secondary bar-0 window which lies at an offset of 16K from primary bar-0 + * Provides support for read/write of 1/2/4 bytes of backplane address + * Can be used to read/write + * 1. core regs + * 2. Wrapper regs + * 3. memory + * 4. BT area + * For accessing any 32 bit backplane address, [31 : 12] of backplane should be given in "region" + * [11 : 0] should be the "regoff" + * for reading 4 bytes from reg 0x200 of d11 core use it like below + * : si_backplane_access(sih, 0x18001000, 0x200, 4, 0, TRUE) + */ +static int si_backplane_addr_sane(uint addr, uint size) +{ + int bcmerror = BCME_OK; + /* For 2 byte access, address has to be 2 byte aligned */ + if (size == 2) { + if (addr & 0x1) { + bcmerror = BCME_ERROR; + } + } + /* For 4 byte access, address has to be 4 byte aligned */ + if (size == 4) { + if (addr & 0x3) { + bcmerror = BCME_ERROR; + } + } + + return bcmerror; +} +uint +si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read) +{ + uint32 *r = NULL; + uint32 region = 0; + si_info_t *sii = SI_INFO(sih); + + /* Valid only for pcie bus */ + if (BUSTYPE(sih->bustype) != PCI_BUS) { + SI_ERROR(("Valid only for pcie bus \n")); + return BCME_ERROR; + } + + /* Split adrr into region and address offset */ + region = (addr & (0xFFFFF << 12)); + addr = addr & 0xFFF; + + /* check for address and size sanity */ + if (si_backplane_addr_sane(addr, size) != BCME_OK) + return BCME_ERROR; + + /* Update window if required */ + if 
(sii->second_bar0win != region) { + OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, region); + sii->second_bar0win = region; + } + + /* Estimate effective address + * sii->curmap : bar-0 virtual address + * PCI_SECOND_BAR0_OFFSET : secondar bar-0 offset + * regoff : actual reg offset + */ + r = (uint32 *)((char *)sii->curmap + PCI_SECOND_BAR0_OFFSET + addr); + + SI_VMSG(("si curmap %p region %x regaddr %x effective addr %p READ %d\n", + (char*)sii->curmap, region, addr, r, read)); + + switch (size) { + case sizeof(uint8) : + if (read) + *val = R_REG(sii->osh, (uint8*)r); + else + W_REG(sii->osh, (uint8*)r, *val); + break; + case sizeof(uint16) : + if (read) + *val = R_REG(sii->osh, (uint16*)r); + else + W_REG(sii->osh, (uint16*)r, *val); + break; + case sizeof(uint32) : + if (read) + *val = R_REG(sii->osh, (uint32*)r); + else + W_REG(sii->osh, (uint32*)r, *val); + break; + + default : + SI_ERROR(("Invalid size %d \n", size)); + return (BCME_ERROR); + break; + } + + return (BCME_OK); +} uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) { @@ -1293,6 +1448,17 @@ factor6(uint32 x) } } +/* + * Divide the clock by the divisor with protection for + * a zero divisor. + */ +static uint32 +divide_clock(uint32 clock, uint32 div) +{ + return div ? 
clock / div : 0; +} + + /** calculate the speed the SI would run at given a set of clockcontrol values */ uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m) @@ -1350,10 +1516,10 @@ si_clock_rate(uint32 pll_type, uint32 n, uint32 m) switch (mc) { case CC_MC_BYPASS: return (clock); - case CC_MC_M1: return (clock / m1); - case CC_MC_M1M2: return (clock / (m1 * m2)); - case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3)); - case CC_MC_M1M3: return (clock / (m1 * m3)); + case CC_MC_M1: return divide_clock(clock, m1); + case CC_MC_M1M2: return divide_clock(clock, m1 * m2); + case CC_MC_M1M2M3: return divide_clock(clock, m1 * m2 * m3); + case CC_MC_M1M3: return divide_clock(clock, m1 * m3); default: return (0); } } else { @@ -1375,6 +1541,7 @@ si_clock_rate(uint32 pll_type, uint32 n, uint32 m) return (clock); } + return 0; } /** @@ -1388,7 +1555,10 @@ si_chip_hostif(si_t *sih) switch (CHIPID(sih->chip)) { - case BCM43602_CHIP_ID: + case BCM43012_CHIP_ID: + hosti = CHIP_HOSTIF_SDIOMODE; + break; + CASE_BCM43602_CHIP: hosti = CHIP_HOSTIF_PCIEMODE; break; @@ -1476,7 +1646,7 @@ si_watchdog(si_t *sih, uint ticks) si_core_disable(sih, 1); si_setcore(sih, CC_CORE_ID, 0); } -#endif +#endif nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 
32 : 24); /* The mips compiler uses the sllv instruction, @@ -1491,6 +1661,14 @@ si_watchdog(si_t *sih, uint ticks) ticks = 2; else if (ticks > maxt) ticks = maxt; + if (CHIPID(sih->chip) == BCM43012_CHIP_ID) { + PMU_REG_NEW(sih, min_res_mask, ~0, DEFAULT_43012_MIN_RES_MASK); + PMU_REG_NEW(sih, watchdog_res_mask, ~0, DEFAULT_43012_MIN_RES_MASK); + PMU_REG_NEW(sih, pmustatus, PST_WDRESET, PST_WDRESET); + PMU_REG_NEW(sih, pmucontrol_ext, PCTL_EXT_FASTLPO_SWENAB, 0); + SPINWAIT((PMU_REG(sih, pmustatus, 0, 0) & PST_ILPFASTLPO), + PMU_MAX_TRANSITION_DLY); + } pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, ~0, ticks); } else { @@ -1880,102 +2058,74 @@ si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val) return (si_corereg(sih, SI_CC_IDX, offs, mask, val)); } -void * -si_gpio_handler_register(si_t *sih, uint32 event, - bool level, gpio_handler_t cb, void *arg) +uint32 +si_gpio_int_enable(si_t *sih, bool enable) { - si_info_t *sii = SI_INFO(sih); - gpioh_item_t *gi; - - ASSERT(event); - ASSERT(cb != NULL); + uint offs; if (sih->ccrev < 11) - return NULL; + return 0xffffffff; - if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL) - return NULL; + offs = OFFSETOF(chipcregs_t, intmask); + return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? 
CI_GPIO : 0))); +} - bzero(gi, sizeof(gpioh_item_t)); - gi->event = event; - gi->handler = cb; - gi->arg = arg; - gi->level = level; +/** Return the size of the specified SYSMEM bank */ +static uint +sysmem_banksize(si_info_t *sii, sysmemregs_t *regs, uint8 idx, uint8 mem_type) +{ + uint banksize, bankinfo; + uint bankidx = idx | (mem_type << SYSMEM_BANKIDX_MEMTYPE_SHIFT); - gi->next = sii->gpioh_head; - sii->gpioh_head = gi; + ASSERT(mem_type <= SYSMEM_MEMTYPE_DEVRAM); - return (void *)(gi); + W_REG(sii->osh, ®s->bankidx, bankidx); + bankinfo = R_REG(sii->osh, ®s->bankinfo); + banksize = SYSMEM_BANKINFO_SZBASE * ((bankinfo & SYSMEM_BANKINFO_SZMASK) + 1); + return banksize; } -void -si_gpio_handler_unregister(si_t *sih, void *gpioh) +/** Return the RAM size of the SYSMEM core */ +uint32 +si_sysmem_size(si_t *sih) { si_info_t *sii = SI_INFO(sih); - gpioh_item_t *p, *n; + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; - if (sih->ccrev < 11) - return; + sysmemregs_t *regs; + bool wasup; + uint32 coreinfo; + uint memsize = 0; + uint8 i; + uint nb; - ASSERT(sii->gpioh_head != NULL); - if ((void*)sii->gpioh_head == gpioh) { - sii->gpioh_head = sii->gpioh_head->next; - MFREE(sii->osh, gpioh, sizeof(gpioh_item_t)); - return; - } else { - p = sii->gpioh_head; - n = p->next; - while (n) { - if ((void*)n == gpioh) { - p->next = n->next; - MFREE(sii->osh, gpioh, sizeof(gpioh_item_t)); - return; - } - p = n; - n = n->next; - } - } + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); - ASSERT(0); /* Not found in list */ -} + /* Switch to SYSMEM core */ + if (!(regs = si_setcore(sih, SYSMEM_CORE_ID, 0))) + goto done; -void -si_gpio_handler_process(si_t *sih) -{ - si_info_t *sii = SI_INFO(sih); - gpioh_item_t *h; - uint32 level = si_gpioin(sih); - uint32 levelp = si_gpiointpolarity(sih, 0, 0, 0); - uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0); - uint32 edgep = 
si_gpioevent(sih, GPIO_REGEVT_INTPOL, 0, 0); - - for (h = sii->gpioh_head; h != NULL; h = h->next) { - if (h->handler) { - uint32 status = (h->level ? level : edge) & h->event; - uint32 polarity = (h->level ? levelp : edgep) & h->event; - - /* polarity bitval is opposite of status bitval */ - if ((h->level && (status ^ polarity)) || (!h->level && status)) - h->handler(status, h->arg); - } - } + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + coreinfo = R_REG(sii->osh, ®s->coreinfo); - si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */ -} + nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + for (i = 0; i < nb; i++) + memsize += sysmem_banksize(sii, regs, i, SYSMEM_MEMTYPE_RAM); -uint32 -si_gpio_int_enable(si_t *sih, bool enable) -{ - uint offs; + si_setcoreidx(sih, origidx); - if (sih->ccrev < 11) - return 0xffffffff; +done: + INTR_RESTORE(sii, intr_val); - offs = OFFSETOF(chipcregs_t, intmask); - return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? 
CI_GPIO : 0))); + return memsize; } - /** Return the size of the specified SOCRAM bank */ static uint socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type) @@ -2320,7 +2470,13 @@ si_socram_size(si_t *sih) memsize += (1 << ((lss - 1) + SR_BSZ_BASE)); } else { uint8 i; - uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + uint nb; + /* length of SRAM Banks increased for corerev greater than 23 */ + if (corerev >= 23) { + nb = (coreinfo & (SRCI_SRNB_MASK | SRCI_SRNB_MASK_EXT)) >> SRCI_SRNB_SHIFT; + } else { + nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + } for (i = 0; i < nb; i++) memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM); } @@ -2521,7 +2677,10 @@ si_chipcontrl_btshd0_4331(si_t *sih, bool on) origidx = si_coreidx(sih); - cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } val = R_REG(sii->osh, &cc->chipcontrol); @@ -2548,7 +2707,10 @@ si_chipcontrl_restore(si_t *sih, uint32 val) chipcregs_t *cc; uint origidx = si_coreidx(sih); - cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } W_REG(sii->osh, &cc->chipcontrol, val); si_setcoreidx(sih, origidx); } @@ -2561,7 +2723,10 @@ si_chipcontrl_read(si_t *sih) uint origidx = si_coreidx(sih); uint32 val; - cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return -1; + } val = R_REG(sii->osh, &cc->chipcontrol); si_setcoreidx(sih, origidx); return val; @@ -2575,7 +2740,10 @@ si_chipcontrl_epa4331(si_t *sih, bool on) uint origidx = si_coreidx(sih); uint32 val; - cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + if ((cc = (chipcregs_t 
*)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } val = R_REG(sii->osh, &cc->chipcontrol); if (on) { @@ -2585,7 +2753,7 @@ si_chipcontrl_epa4331(si_t *sih, bool on) W_REG(sii->osh, &cc->chipcontrol, val); } else { /* Ext PA Controls for 4331 12x12 Package */ - if (sih->chiprev > 0) { + if (CHIPREV(sih->chiprev) > 0) { W_REG(sii->osh, &cc->chipcontrol, val | (CCTRL4331_EXTPA_EN) | (CCTRL4331_EXTPA_EN2)); } else { @@ -2609,7 +2777,10 @@ si_chipcontrl_srom4360(si_t *sih, bool on) uint origidx = si_coreidx(sih); uint32 val; - cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } val = R_REG(sii->osh, &cc->chipcontrol); if (on) { @@ -2626,6 +2797,40 @@ si_chipcontrl_srom4360(si_t *sih, bool on) si_setcoreidx(sih, origidx); } +void +si_clk_srom4365(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 val; + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + val = R_REG(sii->osh, &cc->clkdiv2); + W_REG(sii->osh, &cc->clkdiv2, ((val&~0xf) | 0x4)); + + si_setcoreidx(sih, origidx); +} + +void +si_d11rsdb_core1_alt_reg_clk_dis(si_t *sih) +{ +#if defined(WLRSDB) && !defined(WLRSDB_DISABLED) + ai_d11rsdb_core1_alt_reg_clk_dis(sih); +#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */ +} + +void +si_d11rsdb_core1_alt_reg_clk_en(si_t *sih) +{ +#if defined(WLRSDB) && !defined(WLRSDB_DISABLED) + ai_d11rsdb_core1_alt_reg_clk_en(sih); +#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */ +} + void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl) { @@ -2645,7 +2850,10 @@ si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl) sii = SI_INFO(sih); origidx = si_coreidx(sih); - cc = (chipcregs_t *)si_setcore(sih, 
CC_CORE_ID, 0); + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } val = R_REG(sii->osh, &cc->chipcontrol); @@ -2658,7 +2866,7 @@ si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl) } si_setcoreidx(sih, origidx); } -#endif +#endif uint si_pll_reset(si_t *sih) @@ -2676,7 +2884,10 @@ si_epa_4313war(si_t *sih) chipcregs_t *cc; uint origidx = si_coreidx(sih); - cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } /* EPA Fix */ W_REG(sii->osh, &cc->gpiocontrol, @@ -2690,6 +2901,11 @@ si_clk_pmu_htavail_set(si_t *sih, bool set_clear) { } +void +si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag) +{ +} + /** Re-enable synth_pwrsw resource in min_res_mask for 4313 */ void si_pmu_synth_pwrsw_4313_war(si_t *sih) @@ -2704,7 +2920,10 @@ si_btcombo_p250_4313_war(si_t *sih) chipcregs_t *cc; uint origidx = si_coreidx(sih); - cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } W_REG(sii->osh, &cc->gpiocontrol, R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_5_6_EN_MASK); @@ -2720,7 +2939,10 @@ si_btc_enable_chipcontrol(si_t *sih) chipcregs_t *cc; uint origidx = si_coreidx(sih); - cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } /* BT fix */ W_REG(sii->osh, &cc->chipcontrol, @@ -2735,7 +2957,10 @@ si_btcombo_43228_war(si_t *sih) chipcregs_t *cc; uint origidx = si_coreidx(sih); - cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", 
__FUNCTION__)); + return; + } W_REG(sii->osh, &cc->gpioouten, GPIO_CTRL_7_6_EN_MASK); W_REG(sii->osh, &cc->gpioout, GPIO_OUT_7_EN_MASK); @@ -2836,7 +3061,7 @@ si_is_sprom_available(si_t *sih) case BCM43570_CHIP_ID: case BCM4358_CHIP_ID: return (sih->chipst & CST4350_SPROM_PRESENT) != 0; - case BCM43602_CHIP_ID: + CASE_BCM43602_CHIP: return (sih->chipst & CST43602_SPROM_PRESENT) != 0; case BCM43131_CHIP_ID: case BCM43217_CHIP_ID: @@ -2844,6 +3069,8 @@ si_is_sprom_available(si_t *sih) case BCM43228_CHIP_ID: case BCM43428_CHIP_ID: return (sih->chipst & CST43228_OTP_PRESENT) != CST43228_OTP_PRESENT; + case BCM43012_CHIP_ID: + return FALSE; default: return TRUE; } @@ -2872,20 +3099,30 @@ int si_set_sromctl(si_t *sih, uint32 value) chipcregs_t *cc; uint origidx = si_coreidx(sih); osl_t *osh = si_osh(sih); + int ret = BCME_OK; cc = si_setcoreidx(sih, SI_CC_IDX); ASSERT((uintptr)cc); /* get chipcommon rev */ - if (si_corerev(sih) < 32) - return BCME_UNSUPPORTED; - - W_REG(osh, &cc->sromcontrol, value); + if (si_corerev(sih) >= 32) { + /* SpromCtrl is only accessible if CoreCapabilities.SpromSupported and + * SpromPresent is 1. 
+ */ + if ((R_REG(osh, &cc->capabilities) & CC_CAP_SROM) != 0 && + (R_REG(osh, &cc->sromcontrol) & SRC_PRESENT)) { + W_REG(osh, &cc->sromcontrol, value); + } else { + ret = BCME_NODEVICE; + } + } else { + ret = BCME_UNSUPPORTED; + } /* return to the original core */ si_setcoreidx(sih, origidx); - return BCME_OK; + return ret; } uint @@ -2953,7 +3190,6 @@ si_pcie_survive_perst(si_t *sih, uint32 mask, uint32 val) static void si_watchdog_reset(si_t *sih) { - si_info_t *sii = SI_INFO(sih); uint32 i; /* issue a watchdog reset */ @@ -3023,3 +3259,39 @@ void si_pcie_prep_D3(si_t *sih, bool enter_D3) { } + + + +void +si_pll_sr_reinit(si_t *sih) +{ +} + +void +si_pll_closeloop(si_t *sih) +{ +#if defined(SAVERESTORE) + uint32 data; + + /* disable PLL open loop operation */ + switch (CHIPID(sih->chip)) { +#ifdef SAVERESTORE + case BCM43430_CHIP_ID: + if (SR_ENAB() && sr_isenab(sih)) { + /* read back the pll openloop state */ + data = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, 0, 0); + /* current mode is openloop (possible POR) */ + if ((data & PMU1_PLLCTL8_OPENLOOP_MASK) != 0) { + si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, + PMU1_PLLCTL8_OPENLOOP_MASK, 0); + si_pmu_pllupd(sih); + } + } + break; +#endif /* SAVERESTORE */ + default: + /* any unsupported chip bail */ + return; + } +#endif +} diff --git a/drivers/net/wireless/bcmdhd/siutils_priv.h b/drivers/amlogic/wifi/bcmdhd/siutils_priv.h similarity index 80% rename from drivers/net/wireless/bcmdhd/siutils_priv.h rename to drivers/amlogic/wifi/bcmdhd/siutils_priv.h index d36a9167c5224..41f0d0d321b79 100644 --- a/drivers/net/wireless/bcmdhd/siutils_priv.h +++ b/drivers/amlogic/wifi/bcmdhd/siutils_priv.h @@ -1,9 +1,30 @@ /* * Include file private to the SOC Interconnect support files. 
* - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: siutils_priv.h 474902 2014-05-02 18:31:33Z $ + * + * <> + * + * $Id: siutils_priv.h 520760 2014-12-15 00:54:16Z $ */ #ifndef _siutils_priv_h_ @@ -25,14 +46,6 @@ typedef uint32 (*si_intrsoff_t)(void *intr_arg); typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg); typedef bool (*si_intrsenabled_t)(void *intr_arg); -typedef struct gpioh_item { - void *arg; - bool level; - gpio_handler_t handler; - uint32 event; - struct gpioh_item *next; -} gpioh_item_t; - #define SI_GPIO_MAX 16 @@ -58,6 +71,9 @@ typedef struct si_cores_info { void *wrappers[SI_MAXCORES]; /* other cores wrapper va */ uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */ + void *wrappers2[SI_MAXCORES]; /* other cores wrapper va */ + uint32 wrapba2[SI_MAXCORES]; /* address of controlling wrapper */ + uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */ uint32 cib[SI_MAXCORES]; /* erom cia entry for each core */ } si_cores_info_t; @@ -77,8 +93,6 @@ typedef struct si_info { void *pch; /* PCI/E core handle */ - gpioh_item_t *gpioh_head; /* GPIO event handlers list */ - bool memseg; /* flag to toggle MEM_SEG register */ char *vars; @@ -96,6 +110,10 @@ typedef struct si_info { void *cores_info; gci_gpio_item_t *gci_gpio_head; /* gci gpio interrupts head */ uint chipnew; /* new chip number */ + uint second_bar0win; /* Backplane region */ + uint num_br; /* # discovered bridges */ + uint32 br_wrapba[SI_MAXBR]; /* address of bridge controlling wrapper */ + uint32 xtalfreq; } si_info_t; @@ -153,10 +171,11 @@ typedef struct si_info { /* Force fast clock for 4360b0 */ #define PCI_FORCEHT(si) \ - (((PCIE_GEN1(si)) && (si->pub.chip == BCM4311_CHIP_ID) && ((si->pub.chiprev <= 1))) || \ - ((PCI(si) || PCIE_GEN1(si)) && (si->pub.chip == BCM4321_CHIP_ID)) || \ - (PCIE_GEN1(si) && (si->pub.chip == BCM4716_CHIP_ID)) || \ - (PCIE_GEN1(si) && (si->pub.chip == BCM4748_CHIP_ID))) + (((PCIE_GEN1(si)) && (CHIPID(si->pub.chip) == BCM4311_CHIP_ID) && \ + ((CHIPREV(si->pub.chiprev) <= 1))) || 
\ + ((PCI(si) || PCIE_GEN1(si)) && (CHIPID(si->pub.chip) == BCM4321_CHIP_ID)) || \ + (PCIE_GEN1(si) && (CHIPID(si->pub.chip) == BCM4716_CHIP_ID)) || \ + (PCIE_GEN1(si) && (CHIPID(si->pub.chip) == BCM4748_CHIP_ID))) /* GPIO Based LED powersave defines */ #define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */ @@ -196,7 +215,7 @@ extern bool sb_taclear(si_t *sih, bool details); #if defined(BCMDBG_PHYDUMP) extern void sb_dumpregs(si_t *sih, struct bcmstrbuf *b); -#endif +#endif /* Wake-on-wireless-LAN (WOWL) */ extern bool sb_pci_pmecap(si_t *sih); @@ -221,6 +240,7 @@ extern uint ai_corerev(si_t *sih); extern uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff); extern bool ai_iscoreup(si_t *sih); extern void *ai_setcoreidx(si_t *sih, uint coreidx); +extern void *ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx); extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val); extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val); @@ -228,6 +248,9 @@ extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits); extern void ai_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits, void *p, void *s); +extern void ai_d11rsdb_core1_alt_reg_clk_en(si_t *sih); +extern void ai_d11rsdb_core1_alt_reg_clk_dis(si_t *sih); + extern void ai_core_disable(si_t *sih, uint32 bits); extern void ai_d11rsdb_core_disable(const si_info_t *sii, uint32 bits, aidmp_t *pmacai, aidmp_t *smacai); @@ -236,10 +259,12 @@ extern uint32 ai_addrspace(si_t *sih, uint asidx); extern uint32 ai_addrspacesize(si_t *sih, uint asidx); extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size); extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val); +extern void ai_enable_backplane_timeouts(si_t *sih); +extern void ai_clear_backplane_to(si_t *sih); #if defined(BCMDBG_PHYDUMP) extern void 
ai_dumpregs(si_t *sih, struct bcmstrbuf *b); -#endif +#endif #define ub_scan(a, b, c) do {} while (0) diff --git a/drivers/net/wireless/bcmdhd/uamp_api.h b/drivers/amlogic/wifi/bcmdhd/uamp_api.h similarity index 79% rename from drivers/net/wireless/bcmdhd/uamp_api.h rename to drivers/amlogic/wifi/bcmdhd/uamp_api.h index e4f7e35a490c7..0d04a9d86037a 100644 --- a/drivers/net/wireless/bcmdhd/uamp_api.h +++ b/drivers/amlogic/wifi/bcmdhd/uamp_api.h @@ -3,9 +3,30 @@ * * Description: Universal AMP API * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: uamp_api.h 467328 2014-04-03 01:23:40Z $ + * + * <> + * + * $Id: uamp_api.h 514727 2014-11-12 03:02:48Z $ * */ diff --git a/drivers/net/wireless/bcmdhd/wl_android.c b/drivers/amlogic/wifi/bcmdhd/wl_android.c similarity index 54% rename from drivers/net/wireless/bcmdhd/wl_android.c rename to drivers/amlogic/wifi/bcmdhd/wl_android.c index b16347fc05c0d..1961e6a7795cf 100644 --- a/drivers/net/wireless/bcmdhd/wl_android.c +++ b/drivers/amlogic/wifi/bcmdhd/wl_android.c @@ -1,9 +1,30 @@ /* * Linux cfg80211 driver - Android related functions * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: wl_android.c 505064 2014-09-26 09:40:28Z $ + * + * <> + * + * $Id: wl_android.c 608788 2015-12-29 10:59:33Z $ */ #include @@ -35,6 +56,9 @@ #ifdef WL_NAN #include #endif /* WL_NAN */ +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ #ifndef WL_CFG80211 #define htod32(i) i @@ -45,35 +69,8 @@ #define dtohchanspec(i) i #endif -/* message levels */ -#define ANDROID_ERROR_LEVEL 0x0001 -#define ANDROID_TRACE_LEVEL 0x0002 -#define ANDROID_INFO_LEVEL 0x0004 - uint android_msg_level = ANDROID_ERROR_LEVEL; -#define ANDROID_ERROR(x) \ - do { \ - if (android_msg_level & ANDROID_ERROR_LEVEL) { \ - printk(KERN_ERR "ANDROID-ERROR) "); \ - printk x; \ - } \ - } while (0) -#define ANDROID_TRACE(x) \ - do { \ - if (android_msg_level & ANDROID_TRACE_LEVEL) { \ - printk(KERN_ERR "ANDROID-TRACE) "); \ - printk x; \ - } \ - } while (0) -#define ANDROID_INFO(x) \ - do { \ - if (android_msg_level & ANDROID_INFO_LEVEL) { \ - printk(KERN_ERR "ANDROID-INFO) "); \ - printk x; \ - } \ - } while (0) - /* * Android private command strings, PLEASE define new private commands here * so they can be updated easily in the future (if needed) @@ -85,16 +82,10 @@ uint android_msg_level = ANDROID_ERROR_LEVEL; #define CMD_SCAN_PASSIVE "SCAN-PASSIVE" #define CMD_RSSI "RSSI" #define CMD_LINKSPEED "LINKSPEED" -#ifdef PKT_FILTER_SUPPORT #define CMD_RXFILTER_START "RXFILTER-START" #define CMD_RXFILTER_STOP "RXFILTER-STOP" #define CMD_RXFILTER_ADD "RXFILTER-ADD" #define CMD_RXFILTER_REMOVE "RXFILTER-REMOVE" -#if defined(CUSTOM_PLATFORM_NV_TEGRA) -#define CMD_PKT_FILTER_MODE "PKT_FILTER_MODE" -#define CMD_PKT_FILTER_PORTS "PKT_FILTER_PORTS" -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ -#endif /* PKT_FILTER_SUPPORT */ #define CMD_BTCOEXSCAN_START "BTCOEXSCAN-START" #define CMD_BTCOEXSCAN_STOP "BTCOEXSCAN-STOP" #define CMD_BTCOEXMODE "BTCOEXMODE" @@ -110,38 +101,49 @@ uint android_msg_level = ANDROID_ERROR_LEVEL; #define CMD_P2P_GET_NOA "P2P_GET_NOA" #endif /* WL_ENABLE_P2P_IF */ 
#define CMD_P2P_SD_OFFLOAD "P2P_SD_" +#define CMD_P2P_LISTEN_OFFLOAD "P2P_LO_" #define CMD_P2P_SET_PS "P2P_SET_PS" +#define CMD_P2P_ECSA "P2P_ECSA" #define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE" #define CMD_SETROAMMODE "SETROAMMODE" #define CMD_SETIBSSBEACONOUIDATA "SETIBSSBEACONOUIDATA" #define CMD_MIRACAST "MIRACAST" +#ifdef WL_NAN #define CMD_NAN "NAN_" -#define CMD_GET_CHANNEL "GET_CHANNEL" -#define CMD_SET_ROAM "SET_ROAM_TRIGGER" -#define CMD_GET_ROAM "GET_ROAM_TRIGGER" -#define CMD_GET_KEEP_ALIVE "GET_KEEP_ALIVE" -#define CMD_GET_PM "GET_PM" -#define CMD_SET_PM "SET_PM" -#define CMD_MONITOR "MONITOR" +#endif /* WL_NAN */ +#define CMD_COUNTRY_DELIMITER "/" +#ifdef WL11ULB +#define CMD_ULB_MODE "ULB_MODE" +#define CMD_ULB_BW "ULB_BW" +#endif /* WL11ULB */ #if defined(WL_SUPPORT_AUTO_CHANNEL) #define CMD_GET_BEST_CHANNELS "GET_BEST_CHANNELS" #endif /* WL_SUPPORT_AUTO_CHANNEL */ -#if defined(CUSTOM_PLATFORM_NV_TEGRA) -#define CMD_SETMIRACAST "SETMIRACAST" -#define CMD_ASSOCRESPIE "ASSOCRESPIE" -#define CMD_RXRATESTATS "RXRATESTATS" -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ - +#define CMD_80211_MODE "MODE" /* 802.11 mode a/b/g/n/ac */ +#define CMD_CHANSPEC "CHANSPEC" +#define CMD_DATARATE "DATARATE" +#define CMD_ASSOC_CLIENTS "ASSOCLIST" +#define CMD_SET_CSA "SETCSA" +#ifdef WL_SUPPORT_AUTO_CHANNEL +#define CMD_SET_HAPD_AUTO_CHANNEL "HAPD_AUTO_CHANNEL" +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +#ifdef CUSTOMER_HW4_PRIVATE_CMD +#ifdef SUPPORT_SET_LPC +#define CMD_HAPD_LPC_ENABLED "HAPD_LPC_ENABLED" +#endif /* SUPPORT_SET_LPC */ +#ifdef SUPPORT_TRIGGER_HANG_EVENT +#define CMD_TEST_FORCE_HANG "TEST_FORCE_HANG" +#endif /* SUPPORT_TRIGGER_HANG_EVENT */ +#ifdef TEST_TX_POWER_CONTROL +#define CMD_TEST_SET_TX_POWER "TEST_SET_TX_POWER" +#define CMD_TEST_GET_TX_POWER "TEST_GET_TX_POWER" +#endif /* TEST_TX_POWER_CONTROL */ +#define CMD_SARLIMIT_TX_CONTROL "SET_TX_POWER_CALLING" +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ #define CMD_KEEP_ALIVE "KEEPALIVE" -/* CCX Private 
Commands */ -#ifdef BCMCCX -#define CMD_GETCCKM_RN "get cckm_rn" -#define CMD_SETCCKM_KRK "set cckm_krk" -#define CMD_GET_ASSOC_RES_IES "get assoc_res_ies" -#endif #ifdef PNO_SUPPORT #define CMD_PNOSSIDCLR_SET "PNOSSIDCLR" @@ -151,38 +153,82 @@ uint android_msg_level = ANDROID_ERROR_LEVEL; #define CMD_WLS_BATCHING "WLS_BATCHING" #endif /* PNO_SUPPORT */ -#define CMD_OKC_SET_PMK "SET_PMK" -#define CMD_OKC_ENABLE "OKC_ENABLE" - #define CMD_HAPD_MAC_FILTER "HAPD_MAC_FILTER" -#ifdef WLFBT -#define CMD_GET_FTKEY "GET_FTKEY" -#endif +#ifdef CUSTOMER_HW4_PRIVATE_CMD + + +#if defined(SUPPORT_RANDOM_MAC_SCAN) +#define ENABLE_RANDOM_MAC "ENABLE_RANDOM_MAC" +#define DISABLE_RANDOM_MAC "DISABLE_RANDOM_MAC" +#endif /* SUPPORT_RANDOM_MAC_SCAN */ + + +#define CMD_CHANGE_RL "CHANGE_RL" +#define CMD_RESTORE_RL "RESTORE_RL" + +#define CMD_SET_RMC_ENABLE "SETRMCENABLE" +#define CMD_SET_RMC_TXRATE "SETRMCTXRATE" +#define CMD_SET_RMC_ACTPERIOD "SETRMCACTIONPERIOD" +#define CMD_SET_RMC_IDLEPERIOD "SETRMCIDLEPERIOD" +#define CMD_SET_RMC_LEADER "SETRMCLEADER" +#define CMD_SET_RMC_EVENT "SETRMCEVENT" + +#define CMD_SET_SCSCAN "SETSINGLEANT" +#define CMD_GET_SCSCAN "GETSINGLEANT" + +/* FCC_PWR_LIMIT_2G */ +#define CUSTOMER_HW4_ENABLE 0 +#define CUSTOMER_HW4_DISABLE -1 +#define CUSTOMER_HW4_EN_CONVERT(i) (i += 1) + +#ifdef WLTDLS +#define CMD_TDLS_RESET "TDLS_RESET" +#endif /* WLTDLS */ + +#ifdef IPV6_NDO_SUPPORT +#define CMD_NDRA_LIMIT "NDRA_LIMIT" +#endif /* IPV6_NDO_SUPPORT */ + +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ -#ifdef WLAIBSS -#define CMD_SETIBSSTXFAILEVENT "SETIBSSTXFAILEVENT" -#define CMD_GET_IBSS_PEER_INFO "GETIBSSPEERINFO" -#define CMD_GET_IBSS_PEER_INFO_ALL "GETIBSSPEERINFOALL" -#define CMD_SETIBSSROUTETABLE "SETIBSSROUTETABLE" -#define CMD_SETIBSSAMPDU "SETIBSSAMPDU" -#define CMD_SETIBSSANTENNAMODE "SETIBSSANTENNAMODE" -#endif /* WLAIBSS */ #define CMD_ROAM_OFFLOAD "SETROAMOFFLOAD" -#define CMD_ROAM_OFFLOAD_APLIST "SETROAMOFFLAPLIST" -#define CMD_GET_LINK_STATUS 
"GETLINKSTATUS" +#define CMD_ROAM_OFFLOAD_APLIST "SETROAMOFFLAPLIST" +#define CMD_INTERFACE_CREATE "INTERFACE_CREATE" +#define CMD_INTERFACE_DELETE "INTERFACE_DELETE" + +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +#define CMD_GET_BSS_INFO "GETBSSINFO" +#define CMD_GET_ASSOC_REJECT_INFO "GETASSOCREJECTINFO" +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ #ifdef P2PRESP_WFDIE_SRC #define CMD_P2P_SET_WFDIE_RESP "P2P_SET_WFDIE_RESP" #define CMD_P2P_GET_WFDIE_RESP "P2P_GET_WFDIE_RESP" #endif /* P2PRESP_WFDIE_SRC */ -/* related with CMD_GET_LINK_STATUS */ -#define WL_ANDROID_LINK_VHT 0x01 -#define WL_ANDROID_LINK_MIMO 0x02 -#define WL_ANDROID_LINK_AP_VHT_SUPPORT 0x04 -#define WL_ANDROID_LINK_AP_MIMO_SUPPORT 0x08 +#define CMD_DFS_AP_MOVE "DFS_AP_MOVE" +#define CMD_WBTEXT_ENABLE "WBTEXT_ENABLE" +#define CMD_WBTEXT_PROFILE_CONFIG "WBTEXT_PROFILE_CONFIG" +#define CMD_WBTEXT_WEIGHT_CONFIG "WBTEXT_WEIGHT_CONFIG" +#define CMD_WBTEXT_TABLE_CONFIG "WBTEXT_TABLE_CONFIG" +#define CMD_WBTEXT_DELTA_CONFIG "WBTEXT_DELTA_CONFIG" + +#ifdef WLWFDS +#define CMD_ADD_WFDS_HASH "ADD_WFDS_HASH" +#define CMD_DEL_WFDS_HASH "DEL_WFDS_HASH" +#endif /* WLWFDS */ + +#ifdef SET_RPS_CPUS +#define CMD_RPSMODE "RPSMODE" +#endif /* SET_RPS_CPUS */ + +#ifdef BT_WIFI_HANDOVER +#define CMD_TBOW_TEARDOWN "TBOW_TEARDOWN" +#endif /* BT_WIFI_HANDOVER */ + +#define CMD_MURX_BFE_CAP "MURX_BFE_CAP" /* miracast related definition */ #define MIRACAST_MODE_OFF 0 @@ -226,6 +272,15 @@ static LIST_HEAD(miracast_resume_list); static u8 miracast_cur_mode; #endif +#ifdef DHD_LOG_DUMP +#define CMD_NEW_DEBUG_PRINT_DUMP "DEBUG_DUMP" +extern void dhd_schedule_log_dump(dhd_pub_t *dhdp); +extern int dhd_bus_mem_dump(dhd_pub_t *dhd); +#endif /* DHD_LOG_DUMP */ +#ifdef DHD_TRACE_WAKE_LOCK +extern void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp); +#endif /* DHD_TRACE_WAKE_LOCK */ + struct io_cfg { s8 *iovar; s32 param; @@ -263,63 +318,6 @@ typedef struct _compat_android_wifi_priv_cmd { (JOIN_PREF_WPA_TUPLE_SIZE * JOIN_PREF_MAX_WPA_TUPLES)) 
#endif /* BCMFW_ROAM_ENABLE */ -#ifdef WL_GENL -static s32 wl_genl_handle_msg(struct sk_buff *skb, struct genl_info *info); -static int wl_genl_init(void); -static int wl_genl_deinit(void); - -extern struct net init_net; -/* attribute policy: defines which attribute has which type (e.g int, char * etc) - * possible values defined in net/netlink.h - */ -static struct nla_policy wl_genl_policy[BCM_GENL_ATTR_MAX + 1] = { - [BCM_GENL_ATTR_STRING] = { .type = NLA_NUL_STRING }, - [BCM_GENL_ATTR_MSG] = { .type = NLA_BINARY }, -}; - -#define WL_GENL_VER 1 -/* family definition */ -static struct genl_family wl_genl_family = { - .id = GENL_ID_GENERATE, /* Genetlink would generate the ID */ - .hdrsize = 0, - .name = "bcm-genl", /* Netlink I/F for Android */ - .version = WL_GENL_VER, /* Version Number */ - .maxattr = BCM_GENL_ATTR_MAX, -}; - -/* commands: mapping between the command enumeration and the actual function */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) -struct genl_ops wl_genl_ops[] = { - { - .cmd = BCM_GENL_CMD_MSG, - .flags = 0, - .policy = wl_genl_policy, - .doit = wl_genl_handle_msg, - .dumpit = NULL, - }, -}; -#else -struct genl_ops wl_genl_ops = { - .cmd = BCM_GENL_CMD_MSG, - .flags = 0, - .policy = wl_genl_policy, - .doit = wl_genl_handle_msg, - .dumpit = NULL, - -}; -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */ - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) -static struct genl_multicast_group wl_genl_mcast[] = { - { .name = "bcm-genl-mcast", }, -}; -#else -static struct genl_multicast_group wl_genl_mcast = { - .id = GENL_ID_GENERATE, /* Genetlink would generate the ID */ - .name = "bcm-genl-mcast", -}; -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */ -#endif /* WL_GENL */ /** * Extern function declarations (TODO: move them to dhd_linux.h) @@ -338,6 +336,8 @@ int wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len) { return 0; } int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int 
len) { return 0; } +int wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len) +{ return 0; } #endif /* WL_CFG80211 */ @@ -363,6 +363,43 @@ int g_wifi_on = TRUE; /** * Local (static) function definitions */ + +#ifdef WLWFDS +static int wl_android_set_wfds_hash( + struct net_device *dev, char *command, int total_len, bool enable) +{ + int error = 0; + wl_p2p_wfds_hash_t *wfds_hash = NULL; + char *smbuf = NULL; + smbuf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL); + + if (smbuf == NULL) { + ANDROID_ERROR(("%s: failed to allocated memory %d bytes\n", + __FUNCTION__, WLC_IOCTL_MAXLEN)); + return -ENOMEM; + } + + if (enable) { + wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_ADD_WFDS_HASH) + 1); + error = wldev_iovar_setbuf(dev, "p2p_add_wfds_hash", wfds_hash, + sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL); + } + else { + wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_DEL_WFDS_HASH) + 1); + error = wldev_iovar_setbuf(dev, "p2p_del_wfds_hash", wfds_hash, + sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL); + } + + if (error) { + ANDROID_ERROR(("%s: failed to %s, error=%d\n", __FUNCTION__, command, error)); + } + + if (smbuf) + kfree(smbuf); + return error; +} +#endif /* WLWFDS */ + static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len) { int link_speed; @@ -383,15 +420,42 @@ static int wl_android_get_link_speed(struct net_device *net, char *command, int static int wl_android_get_rssi(struct net_device *net, char *command, int total_len) { wlc_ssid_t ssid = {0}; - int rssi; int bytes_written = 0; - int error; + int error = 0; + scb_val_t scbval; + char *delim = NULL; + + delim = strchr(command, ' '); + /* For Ap mode rssi command would be + * driver rssi + * for STA/GC mode + * driver rssi + */ + if (delim) { + /* Ap/GO mode + * driver rssi + */ + ANDROID_TRACE(("%s: cmd:%s\n", __FUNCTION__, delim)); + /* skip space from delim after finding char */ + delim++; + if 
(!(bcm_ether_atoe((delim), &scbval.ea))) + { + ANDROID_ERROR(("%s:address err\n", __FUNCTION__)); + return -1; + } + scbval.val = htod32(0); + ANDROID_TRACE(("%s: address:"MACDBG, __FUNCTION__, MAC2STRDBG(scbval.ea.octet))); + } + else { + /* STA/GC mode */ + memset(&scbval, 0, sizeof(scb_val_t)); + } - error = wldev_get_rssi(net, &rssi); + error = wldev_get_rssi(net, &scbval); if (error) return -1; #if defined(RSSIOFFSET) - rssi = wl_update_rssi_offset(net, rssi); + scbval.val = wl_update_rssi_offset(net, scbval.val); #endif error = wldev_get_ssid(net, &ssid); @@ -403,8 +467,8 @@ static int wl_android_get_rssi(struct net_device *net, char *command, int total_ memcpy(command, ssid.SSID, ssid.SSID_len); bytes_written = ssid.SSID_len; } - bytes_written += snprintf(&command[bytes_written], total_len, " rssi %d", rssi); - ANDROID_INFO(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written)); + bytes_written += snprintf(&command[bytes_written], total_len, " rssi %d", scbval.val); + ANDROID_TRACE(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written)); return bytes_written; } @@ -416,17 +480,20 @@ static int wl_android_set_suspendopt(struct net_device *dev, char *command, int suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0'; - if (suspend_flag != 0) + if (suspend_flag != 0) { suspend_flag = 1; + } ret_now = net_os_set_suspend_disable(dev, suspend_flag); if (ret_now != suspend_flag) { - if (!(ret = net_os_set_suspend(dev, ret_now, 1))) + if (!(ret = net_os_set_suspend(dev, ret_now, 1))) { ANDROID_INFO(("%s: Suspend Flag %d -> %d\n", __FUNCTION__, ret_now, suspend_flag)); - else + } else { ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); + } } + return ret; } @@ -450,6 +517,213 @@ static int wl_android_set_suspendmode(struct net_device *dev, char *command, int return ret; } +#ifdef WL_CFG80211 +int wl_android_get_80211_mode(struct net_device *dev, char *command, int total_len) +{ + uint8 mode[4]; + int error = 0; + 
int bytes_written = 0; + + error = wldev_get_mode(dev, mode); + if (error) + return -1; + + ANDROID_INFO(("%s: mode:%s\n", __FUNCTION__, mode)); + bytes_written = snprintf(command, total_len, "%s %s", CMD_80211_MODE, mode); + ANDROID_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command)); + return bytes_written; + +} + +extern chanspec_t +wl_chspec_driver_to_host(chanspec_t chanspec); +int wl_android_get_chanspec(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int bytes_written = 0; + int chsp = {0}; + uint16 band = 0; + uint16 bw = 0; + uint16 channel = 0; + u32 sb = 0; + chanspec_t chanspec; + + /* command is + * driver chanspec + */ + error = wldev_iovar_getint(dev, "chanspec", &chsp); + if (error) + return -1; + + chanspec = wl_chspec_driver_to_host(chsp); + ANDROID_INFO(("%s:return value of chanspec:%x\n", __FUNCTION__, chanspec)); + + channel = chanspec & WL_CHANSPEC_CHAN_MASK; + band = chanspec & WL_CHANSPEC_BAND_MASK; + bw = chanspec & WL_CHANSPEC_BW_MASK; + + ANDROID_INFO(("%s:channel:%d band:%d bandwidth:%d\n", __FUNCTION__, channel, band, bw)); + + if (bw == WL_CHANSPEC_BW_80) + bw = WL_CH_BANDWIDTH_80MHZ; + else if (bw == WL_CHANSPEC_BW_40) + bw = WL_CH_BANDWIDTH_40MHZ; + else if (bw == WL_CHANSPEC_BW_20) + bw = WL_CH_BANDWIDTH_20MHZ; + else + bw = WL_CH_BANDWIDTH_20MHZ; + + if (bw == WL_CH_BANDWIDTH_40MHZ) { + if (CHSPEC_SB_UPPER(chanspec)) { + channel += CH_10MHZ_APART; + } else { + channel -= CH_10MHZ_APART; + } + } + else if (bw == WL_CH_BANDWIDTH_80MHZ) { + sb = chanspec & WL_CHANSPEC_CTL_SB_MASK; + if (sb == WL_CHANSPEC_CTL_SB_LL) { + channel -= (CH_10MHZ_APART + CH_20MHZ_APART); + } else if (sb == WL_CHANSPEC_CTL_SB_LU) { + channel -= CH_10MHZ_APART; + } else if (sb == WL_CHANSPEC_CTL_SB_UL) { + channel += CH_10MHZ_APART; + } else { + /* WL_CHANSPEC_CTL_SB_UU */ + channel += (CH_10MHZ_APART + CH_20MHZ_APART); + } + } + bytes_written = snprintf(command, total_len, "%s channel %d band %s bw %d", CMD_CHANSPEC, + channel, 
band == WL_CHANSPEC_BAND_5G ? "5G":"2G", bw); + + ANDROID_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command)); + return bytes_written; + +} +#endif + +/* returns current datarate datarate returned from firmware are in 500kbps */ +int wl_android_get_datarate(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int datarate = 0; + int bytes_written = 0; + + error = wldev_get_datarate(dev, &datarate); + if (error) + return -1; + + ANDROID_INFO(("%s:datarate:%d\n", __FUNCTION__, datarate)); + + bytes_written = snprintf(command, total_len, "%s %d", CMD_DATARATE, (datarate/2)); + return bytes_written; +} +int wl_android_get_assoclist(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int bytes_written = 0; + uint i; + char mac_buf[MAX_NUM_OF_ASSOCLIST * + sizeof(struct ether_addr) + sizeof(uint)] = {0}; + struct maclist *assoc_maclist = (struct maclist *)mac_buf; + + ANDROID_TRACE(("%s: ENTER\n", __FUNCTION__)); + + assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST); + + error = wldev_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf), false); + if (error) + return -1; + + assoc_maclist->count = dtoh32(assoc_maclist->count); + bytes_written = snprintf(command, total_len, "%s listcount: %d Stations:", + CMD_ASSOC_CLIENTS, assoc_maclist->count); + + for (i = 0; i < assoc_maclist->count; i++) { + bytes_written += snprintf(command + bytes_written, total_len, " " MACDBG, + MAC2STRDBG(assoc_maclist->ea[i].octet)); + } + return bytes_written; + +} + +#ifdef WL_CFG80211 +extern chanspec_t +wl_chspec_host_to_driver(chanspec_t chanspec); +static int wl_android_set_csa(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + char smbuf[WLC_IOCTL_SMLEN]; + wl_chan_switch_t csa_arg; + u32 chnsp = 0; + int err = 0; + + ANDROID_INFO(("%s: command:%s\n", __FUNCTION__, command)); + + command = (command + strlen(CMD_SET_CSA)); + /* Order is mode, count channel */ + if (!*++command) { + 
ANDROID_ERROR(("%s:error missing arguments\n", __FUNCTION__)); + return -1; + } + csa_arg.mode = bcm_atoi(command); + + if (csa_arg.mode != 0 && csa_arg.mode != 1) { + ANDROID_ERROR(("Invalid mode\n")); + return -1; + } + + if (!*++command) { + ANDROID_ERROR(("%s:error missing count\n", __FUNCTION__)); + return -1; + } + command++; + csa_arg.count = bcm_atoi(command); + + csa_arg.reg = 0; + csa_arg.chspec = 0; + command += 2; + if (!*command) { + ANDROID_ERROR(("%s:error missing channel\n", __FUNCTION__)); + return -1; + } + + chnsp = wf_chspec_aton(command); + if (chnsp == 0) { + ANDROID_ERROR(("%s:chsp is not correct\n", __FUNCTION__)); + return -1; + } + chnsp = wl_chspec_host_to_driver(chnsp); + csa_arg.chspec = chnsp; + + if (chnsp & WL_CHANSPEC_BAND_5G) { + u32 chanspec = chnsp; + err = wldev_iovar_getint(dev, "per_chan_info", &chanspec); + if (!err) { + if ((chanspec & WL_CHAN_RADAR) || (chanspec & WL_CHAN_PASSIVE)) { + ANDROID_ERROR(("Channel is radar sensitive\n")); + return -1; + } + if (chanspec == 0) { + ANDROID_ERROR(("Invalid hw channel\n")); + return -1; + } + } else { + ANDROID_ERROR(("does not support per_chan_info\n")); + return -1; + } + ANDROID_INFO(("non radar sensitivity\n")); + } + error = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg), + smbuf, sizeof(smbuf), NULL); + if (error) { + ANDROID_ERROR(("%s:set csa failed:%d\n", __FUNCTION__, error)); + return -1; + } + return 0; +} +#endif + static int wl_android_get_band(struct net_device *dev, char *command, int total_len) { uint band; @@ -463,69 +737,285 @@ static int wl_android_get_band(struct net_device *dev, char *command, int total_ return bytes_written; } +#ifdef CUSTOMER_HW4_PRIVATE_CMD -#ifdef PNO_SUPPORT -#define PNO_PARAM_SIZE 50 -#define VALUE_SIZE 50 -#define LIMIT_STR_FMT ("%50s %50s") -static int -wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) +#ifdef FCC_PWR_LIMIT_2G +int +wl_android_set_fcc_pwr_limit_2g(struct net_device *dev, char 
*command, int total_len) { - int err = BCME_OK; - uint i, tokens; - char *pos, *pos2, *token, *token2, *delim; - char param[PNO_PARAM_SIZE], value[VALUE_SIZE]; - struct dhd_pno_batch_params batch_params; - ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); - if (total_len < strlen(CMD_WLS_BATCHING)) { - ANDROID_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len)); - err = BCME_ERROR; - goto exit; + int error = 0; + int enable = 0; + + sscanf(command+sizeof("SET_FCC_CHANNEL"), "%d", &enable); + + if ((enable != CUSTOMER_HW4_ENABLE) && (enable != CUSTOMER_HW4_DISABLE)) { + ANDROID_ERROR(("%s: Invalid data\n", __FUNCTION__)); + return BCME_ERROR; } - pos = command + strlen(CMD_WLS_BATCHING) + 1; - memset(&batch_params, 0, sizeof(struct dhd_pno_batch_params)); - if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) { - pos += strlen(PNO_BATCHING_SET) + 1; - while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) { + CUSTOMER_HW4_EN_CONVERT(enable); + + ANDROID_ERROR(("%s: fccpwrlimit2g set (%d)\n", __FUNCTION__, enable)); + error = wldev_iovar_setint(dev, "fccpwrlimit2g", enable); + if (error) { + ANDROID_ERROR(("%s: fccpwrlimit2g set returned (%d)\n", __FUNCTION__, error)); + return BCME_ERROR; + } + + return error; +} + +int +wl_android_get_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int enable = 0; + int bytes_written = 0; + + error = wldev_iovar_getint(dev, "fccpwrlimit2g", &enable); + if (error) { + ANDROID_ERROR(("%s: fccpwrlimit2g get error (%d)\n", __FUNCTION__, error)); + return BCME_ERROR; + } + ANDROID_ERROR(("%s: fccpwrlimit2g get (%d)\n", __FUNCTION__, enable)); + + bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_FCC_PWR_LIMIT_2G, enable); + + return bytes_written; +} +#endif /* FCC_PWR_LIMIT_2G */ + +#ifdef IPV6_NDO_SUPPORT +int +wl_android_nd_ra_limit(struct net_device *dev, char *command, int total_len) +{ + int err = 0; + int 
bytes_written = 0; + uint tokens; + char *pos, *token, *delim; + char smbuf[WLC_IOCTL_SMLEN]; + char param[ND_PARAM_SIZE+1], value[ND_VALUE_SIZE+1]; + uint16 type = 0xff, min = 0, per = 0, hold = 0; + nd_ra_ol_limits_t ra_ol_limit; + + WL_TRACE(("command=%s, len=%d\n", command, total_len)); + pos = command + strlen(CMD_NDRA_LIMIT) + 1; + memset(&ra_ol_limit, 0, sizeof(nd_ra_ol_limits_t)); + + if (!strncmp(pos, ND_RA_OL_SET, strlen(ND_RA_OL_SET))) { + WL_TRACE(("SET NDRA_LIMIT\n")); + pos += strlen(ND_RA_OL_SET) + 1; + while ((token = strsep(&pos, ND_PARAMS_DELIMETER)) != NULL) { memset(param, 0, sizeof(param)); memset(value, 0, sizeof(value)); - if (token == NULL || !*token) - break; - if (*token == '\0') - continue; - delim = strchr(token, PNO_PARAM_VALUE_DELLIMETER); + + delim = strchr(token, ND_PARAM_VALUE_DELLIMETER); if (delim != NULL) *delim = ' '; - tokens = sscanf(token, LIMIT_STR_FMT, param, value); - if (!strncmp(param, PNO_PARAM_SCANFREQ, strlen(PNO_PARAM_SCANFREQ))) { - batch_params.scan_fr = simple_strtol(value, NULL, 0); - ANDROID_INFO(("scan_freq : %d\n", batch_params.scan_fr)); - } else if (!strncmp(param, PNO_PARAM_BESTN, strlen(PNO_PARAM_BESTN))) { - batch_params.bestn = simple_strtol(value, NULL, 0); - ANDROID_INFO(("bestn : %d\n", batch_params.bestn)); - } else if (!strncmp(param, PNO_PARAM_MSCAN, strlen(PNO_PARAM_MSCAN))) { - batch_params.mscan = simple_strtol(value, NULL, 0); - ANDROID_INFO(("mscan : %d\n", batch_params.mscan)); - } else if (!strncmp(param, PNO_PARAM_CHANNEL, strlen(PNO_PARAM_CHANNEL))) { - i = 0; - pos2 = value; - tokens = sscanf(value, "<%s>", value); - if (tokens != 1) { - err = BCME_ERROR; - ANDROID_ERROR(("%s : invalid format for channel" - " <> params\n", __FUNCTION__)); + tokens = sscanf(token, ND_LIMIT_STR_FMT, param, value); + if (!strncmp(param, ND_RA_TYPE, strlen(ND_RA_TYPE))) { + type = simple_strtol(value, NULL, 0); + } else if (!strncmp(param, ND_RA_MIN_TIME, strlen(ND_RA_MIN_TIME))) { + min = 
simple_strtol(value, NULL, 0); + } else if (!strncmp(param, ND_RA_PER, strlen(ND_RA_PER))) { + per = simple_strtol(value, NULL, 0); + if (per > 100) { + ANDROID_ERROR(("Invalid PERCENT %d\n", per)); + err = BCME_BADARG; goto exit; } - while ((token2 = strsep(&pos2, - PNO_PARAM_CHANNEL_DELIMETER)) != NULL) { - if (token2 == NULL || !*token2) - break; - if (*token2 == '\0') - continue; - if (*token2 == 'A' || *token2 == 'B') { - batch_params.band = (*token2 == 'A')? + } else if (!strncmp(param, ND_RA_HOLD, strlen(ND_RA_HOLD))) { + hold = simple_strtol(value, NULL, 0); + } + } + + ra_ol_limit.version = htod32(ND_RA_OL_LIMITS_VER); + ra_ol_limit.type = htod32(type); + if (type == ND_RA_OL_LIMITS_REL_TYPE) { + if ((min == 0) || (per == 0)) { + ANDROID_ERROR(("Invalid min_time %d, percent %d\n", min, per)); + err = BCME_BADARG; + goto exit; + } + ra_ol_limit.length = htod32(ND_RA_OL_LIMITS_REL_TYPE_LEN); + ra_ol_limit.limits.lifetime_relative.min_time = htod32(min); + ra_ol_limit.limits.lifetime_relative.lifetime_percent = htod32(per); + } else if (type == ND_RA_OL_LIMITS_FIXED_TYPE) { + if (hold == 0) { + ANDROID_ERROR(("Invalid hold_time %d\n", hold)); + err = BCME_BADARG; + goto exit; + } + ra_ol_limit.length = htod32(ND_RA_OL_LIMITS_FIXED_TYPE_LEN); + ra_ol_limit.limits.fixed.hold_time = htod32(hold); + } else { + ANDROID_ERROR(("unknown TYPE %d\n", type)); + err = BCME_BADARG; + goto exit; + } + + err = wldev_iovar_setbuf(dev, "nd_ra_limit_intv", &ra_ol_limit, + sizeof(nd_ra_ol_limits_t), smbuf, sizeof(smbuf), NULL); + if (err) { + ANDROID_ERROR(("Failed to set nd_ra_limit_intv, error = %d\n", err)); + goto exit; + } + + WL_TRACE(("TYPE %d, MIN %d, PER %d, HOLD %d\n", type, min, per, hold)); + } else if (!strncmp(pos, ND_RA_OL_GET, strlen(ND_RA_OL_GET))) { + WL_TRACE(("GET NDRA_LIMIT\n")); + err = wldev_iovar_getbuf(dev, "nd_ra_limit_intv", NULL, 0, + smbuf, sizeof(smbuf), NULL); + if (err) { + ANDROID_ERROR(("Failed to get nd_ra_limit_intv, error = %d\n", err)); + 
goto exit; + } + + memcpy(&ra_ol_limit, (uint8 *)smbuf, sizeof(nd_ra_ol_limits_t)); + type = ra_ol_limit.type; + if (ra_ol_limit.version != ND_RA_OL_LIMITS_VER) { + ANDROID_ERROR(("Invalid Version %d\n", ra_ol_limit.version)); + err = BCME_VERSION; + goto exit; + } + + if (ra_ol_limit.type == ND_RA_OL_LIMITS_REL_TYPE) { + min = ra_ol_limit.limits.lifetime_relative.min_time; + per = ra_ol_limit.limits.lifetime_relative.lifetime_percent; + ANDROID_ERROR(("TYPE %d, MIN %d, PER %d\n", type, min, per)); + bytes_written = snprintf(command, total_len, + "%s GET TYPE %d, MIN %d, PER %d", CMD_NDRA_LIMIT, type, min, per); + } else if (ra_ol_limit.type == ND_RA_OL_LIMITS_FIXED_TYPE) { + hold = ra_ol_limit.limits.fixed.hold_time; + ANDROID_ERROR(("TYPE %d, HOLD %d\n", type, hold)); + bytes_written = snprintf(command, total_len, + "%s GET TYPE %d, HOLD %d", CMD_NDRA_LIMIT, type, hold); + } else { + ANDROID_ERROR(("unknown TYPE %d\n", type)); + err = BCME_ERROR; + goto exit; + } + + return bytes_written; + } else { + ANDROID_ERROR(("unknown command\n")); + err = BCME_ERROR; + goto exit; + } + +exit: + return err; +} +#endif /* IPV6_NDO_SUPPORT */ +#ifdef WLTDLS +int wl_android_tdls_reset(struct net_device *dev) +{ + int ret = 0; + ret = dhd_tdls_enable(dev, false, false, NULL); + if (ret < 0) { + ANDROID_ERROR(("Disable tdls failed. %d\n", ret)); + return ret; + } + ret = dhd_tdls_enable(dev, true, true, NULL); + if (ret < 0) { + ANDROID_ERROR(("enable tdls failed. 
%d\n", ret)); + return ret; + } + return 0; +} +#endif /* WLTDLS */ +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ +static int wl_android_wbtext(struct net_device *dev, char *command, int total_len) +{ + int error = 0, argc = 0; + int data, bytes_written; + + argc = sscanf(command+sizeof("WBTEXT_ENABLE"), "%d", &data); + if (!argc) { + error = wldev_iovar_getint(dev, "wnm_bsstrans_resp", &data); + if (error) { + ANDROID_ERROR(("%s: Failed to set wbtext error = %d\n", + __FUNCTION__, error)); + } + bytes_written = snprintf(command, total_len, "WBTEXT %s\n", + (data == WL_BSSTRANS_POLICY_PRODUCT)? "ENABLED" : "DISABLED"); + return bytes_written; + } else { + if (data) + data = WL_BSSTRANS_POLICY_PRODUCT; + + error = wldev_iovar_setint(dev, "wnm_bsstrans_resp", data); + if (error) { + ANDROID_ERROR(("%s: Failed to set wbtext error = %d\n", + __FUNCTION__, error)); + } + } + return error; +} + +#ifdef PNO_SUPPORT +#define PNO_PARAM_SIZE 50 +#define VALUE_SIZE 50 +#define LIMIT_STR_FMT ("%50s %50s") +static int +wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) +{ + int err = BCME_OK; + uint i, tokens; + char *pos, *pos2, *token, *token2, *delim; + char param[PNO_PARAM_SIZE], value[VALUE_SIZE]; + struct dhd_pno_batch_params batch_params; + ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + if (total_len < strlen(CMD_WLS_BATCHING)) { + ANDROID_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len)); + err = BCME_ERROR; + goto exit; + } + pos = command + strlen(CMD_WLS_BATCHING) + 1; + memset(&batch_params, 0, sizeof(struct dhd_pno_batch_params)); + + if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) { + pos += strlen(PNO_BATCHING_SET) + 1; + while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) { + memset(param, 0, sizeof(param)); + memset(value, 0, sizeof(value)); + if (token == NULL || !*token) + break; + if (*token == '\0') + continue; + delim = strchr(token, PNO_PARAM_VALUE_DELLIMETER); + 
if (delim != NULL) + *delim = ' '; + + tokens = sscanf(token, LIMIT_STR_FMT, param, value); + if (!strncmp(param, PNO_PARAM_SCANFREQ, strlen(PNO_PARAM_SCANFREQ))) { + batch_params.scan_fr = simple_strtol(value, NULL, 0); + ANDROID_INFO(("scan_freq : %d\n", batch_params.scan_fr)); + } else if (!strncmp(param, PNO_PARAM_BESTN, strlen(PNO_PARAM_BESTN))) { + batch_params.bestn = simple_strtol(value, NULL, 0); + ANDROID_INFO(("bestn : %d\n", batch_params.bestn)); + } else if (!strncmp(param, PNO_PARAM_MSCAN, strlen(PNO_PARAM_MSCAN))) { + batch_params.mscan = simple_strtol(value, NULL, 0); + ANDROID_INFO(("mscan : %d\n", batch_params.mscan)); + } else if (!strncmp(param, PNO_PARAM_CHANNEL, strlen(PNO_PARAM_CHANNEL))) { + i = 0; + pos2 = value; + tokens = sscanf(value, "<%s>", value); + if (tokens != 1) { + err = BCME_ERROR; + ANDROID_ERROR(("%s : invalid format for channel" + " <> params\n", __FUNCTION__)); + goto exit; + } + while ((token2 = strsep(&pos2, + PNO_PARAM_CHANNEL_DELIMETER)) != NULL) { + if (token2 == NULL || !*token2) + break; + if (*token2 == '\0') + continue; + if (*token2 == 'A' || *token2 == 'B') { + batch_params.band = (*token2 == 'A')? WLC_BAND_5G : WLC_BAND_2G; ANDROID_INFO(("band : %s\n", (*token2 == 'A')? 
"A" : "B")); @@ -551,7 +1041,7 @@ wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) ANDROID_ERROR(("failed to configure batch scan\n")); } else { memset(command, 0, total_len); - err = sprintf(command, "%d", err); + err = snprintf(command, total_len, "%d", err); } } else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) { err = dhd_dev_pno_get_for_batch(dev, command, total_len); @@ -566,7 +1056,7 @@ wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) ANDROID_ERROR(("failed to stop batching scan\n")); } else { memset(command, 0, total_len); - err = sprintf(command, "OK"); + err = snprintf(command, total_len, "OK"); } } else { ANDROID_ERROR(("%s : unknown command\n", __FUNCTION__)); @@ -579,7 +1069,7 @@ wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) #ifndef WL_SCHED_SCAN static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len) { - wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT]; + wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT]; int res = -1; int nssid = 0; cmd_tlv_t *cmd_tlv_temp; @@ -691,89 +1181,6 @@ static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, i return bytes_written; } -#ifdef BCMCCX -static int wl_android_get_cckm_rn(struct net_device *dev, char *command) -{ - int error, rn; - - ANDROID_TRACE(("%s:wl_android_get_cckm_rn\n", dev->name)); - - error = wldev_iovar_getint(dev, "cckm_rn", &rn); - if (unlikely(error)) { - ANDROID_ERROR(("wl_android_get_cckm_rn error (%d)\n", error)); - return -1; - } - memcpy(command, &rn, sizeof(int)); - - return sizeof(int); -} - -static int wl_android_set_cckm_krk(struct net_device *dev, char *command) -{ - int error; - unsigned char key[16]; - static char iovar_buf[WLC_IOCTL_MEDLEN]; - - ANDROID_TRACE(("%s: wl_iw_set_cckm_krk\n", dev->name)); - - memset(iovar_buf, 0, sizeof(iovar_buf)); - memcpy(key, command+strlen("set cckm_krk")+1, 16); - - error = wldev_iovar_setbuf(dev, 
"cckm_krk", key, sizeof(key), - iovar_buf, WLC_IOCTL_MEDLEN, NULL); - if (unlikely(error)) - { - ANDROID_ERROR((" cckm_krk set error (%d)\n", error)); - return -1; - } - return 0; -} - -static int wl_android_get_assoc_res_ies(struct net_device *dev, char *command) -{ - int error; - u8 buf[WL_ASSOC_INFO_MAX]; - wl_assoc_info_t assoc_info; - u32 resp_ies_len = 0; - int bytes_written = 0; - - ANDROID_TRACE(("%s: wl_iw_get_assoc_res_ies\n", dev->name)); - - error = wldev_iovar_getbuf(dev, "assoc_info", NULL, 0, buf, WL_ASSOC_INFO_MAX, NULL); - if (unlikely(error)) { - ANDROID_ERROR(("could not get assoc info (%d)\n", error)); - return -1; - } - - memcpy(&assoc_info, buf, sizeof(wl_assoc_info_t)); - assoc_info.req_len = htod32(assoc_info.req_len); - assoc_info.resp_len = htod32(assoc_info.resp_len); - assoc_info.flags = htod32(assoc_info.flags); - - if (assoc_info.resp_len) { - resp_ies_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp); - } - - /* first 4 bytes are ie len */ - memcpy(command, &resp_ies_len, sizeof(u32)); - bytes_written = sizeof(u32); - - /* get the association resp IE's if there are any */ - if (resp_ies_len) { - error = wldev_iovar_getbuf(dev, "assoc_resp_ies", NULL, 0, - buf, WL_ASSOC_INFO_MAX, NULL); - if (unlikely(error)) { - ANDROID_ERROR(("could not get assoc resp_ies (%d)\n", error)); - return -1; - } - - memcpy(command+sizeof(u32), buf, resp_ies_len); - bytes_written += resp_ies_len; - } - return bytes_written; -} - -#endif /* BCMCCX */ int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist) @@ -853,19 +1260,28 @@ wl_android_set_mac_address_filter(struct net_device *dev, const char* str) int macmode = MACLIST_MODE_DISABLED; struct maclist *list; char eabuf[ETHER_ADDR_STR_LEN]; + char *token; /* string should look like below (macmode/macnum/maclist) */ /* 1 2 00:11:22:33:44:55 00:11:22:33:44:ff */ /* get the MAC filter mode */ - macmode = bcm_atoi(strsep((char**)&str, " ")); + token = 
strsep((char**)&str, " "); + if (!token) { + return -1; + } + macmode = bcm_atoi(token); if (macmode < MACLIST_MODE_DISABLED || macmode > MACLIST_MODE_ALLOW) { ANDROID_ERROR(("%s : invalid macmode %d\n", __FUNCTION__, macmode)); return -1; } - macnum = bcm_atoi(strsep((char**)&str, " ")); + token = strsep((char**)&str, " "); + if (!token) { + return -1; + } + macnum = bcm_atoi(token); if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) { ANDROID_ERROR(("%s : invalid number of MAC address entries %d\n", __FUNCTION__, macnum)); @@ -907,17 +1323,11 @@ wl_android_set_mac_address_filter(struct net_device *dev, const char* str) int wl_android_wifi_on(struct net_device *dev) { int ret = 0; -#ifdef CONFIG_MACH_UNIVERSAL5433 - int retry; - /* Do not retry old revision Helsinki Prime */ - if (!check_rev()) { - retry = 1; - } else { - retry = POWERUP_MAX_RETRY; - } -#else int retry = POWERUP_MAX_RETRY; -#endif /* CONFIG_MACH_UNIVERSAL5433 */ +#ifdef IAPSTA_PREINIT + int bytes_written = 0; + struct dhd_conf *conf; +#endif if (!dev) { ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__)); @@ -936,8 +1346,9 @@ int wl_android_wifi_on(struct net_device *dev) #ifdef BCMPCIE ret = dhd_net_bus_devreset(dev, FALSE); #endif /* BCMPCIE */ - if (ret == 0) + if (ret == 0) { break; + } ANDROID_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n", retry)); #ifdef BCMPCIE @@ -964,6 +1375,15 @@ int wl_android_wifi_on(struct net_device *dev) } } #endif /* !BCMPCIE */ + +#ifdef IAPSTA_PREINIT + conf = dhd_get_conf(dev); + if (conf) { + wl_android_ext_priv_cmd(dev, conf->iapsta_init, 0, &bytes_written); + wl_android_ext_priv_cmd(dev, conf->iapsta_config, 0, &bytes_written); + wl_android_ext_priv_cmd(dev, conf->iapsta_enable, 0, &bytes_written); + } +#endif g_wifi_on = TRUE; } @@ -983,7 +1403,7 @@ int wl_android_wifi_on(struct net_device *dev) #endif } -int wl_android_wifi_off(struct net_device *dev) +int wl_android_wifi_off(struct net_device *dev, bool on_failure) { int ret = 0; @@ -994,8 
+1414,8 @@ int wl_android_wifi_off(struct net_device *dev) printf("%s in 1\n", __FUNCTION__); dhd_net_if_lock(dev); - printf("%s in 2: g_wifi_on=%d\n", __FUNCTION__, g_wifi_on); - if (g_wifi_on) { + printf("%s in 2: g_wifi_on=%d, on_failure=%d\n", __FUNCTION__, g_wifi_on, on_failure); + if (g_wifi_on || on_failure) { #if defined(BCMSDIO) || defined(BCMPCIE) ret = dhd_net_bus_devreset(dev, TRUE); #ifdef BCMSDIO @@ -1078,10 +1498,14 @@ static int wl_android_get_connection_stats(struct net_device *dev, char *command, int total_len) { wl_cnt_t* cnt = NULL; +#ifndef DISABLE_IF_COUNTERS + wl_if_stats_t* if_stats = NULL; +#endif /* DISABLE_IF_COUNTERS */ + int link_speed = 0; struct connection_stats *output; unsigned int bufsize = 0; - int bytes_written = 0; + int bytes_written = -1; int ret = 0; ANDROID_INFO(("%s: enter Get Connection Stats\n", __FUNCTION__)); @@ -1093,30 +1517,83 @@ wl_android_get_connection_stats(struct net_device *dev, char *command, int total bufsize = total_len; if (bufsize < sizeof(struct connection_stats)) { - ANDROID_ERROR(("%s: not enough buffer size, provided=%u, requires=%u\n", + ANDROID_ERROR(("%s: not enough buffer size, provided=%u, requires=%zu\n", __FUNCTION__, bufsize, sizeof(struct connection_stats))); goto error; } - if ((cnt = kmalloc(sizeof(*cnt), GFP_KERNEL)) == NULL) { - ANDROID_ERROR(("kmalloc failed\n")); - return -1; + output = (struct connection_stats *)command; + +#ifndef DISABLE_IF_COUNTERS + if ((if_stats = kmalloc(sizeof(*if_stats), GFP_KERNEL)) == NULL) { + ANDROID_ERROR(("%s(%d): kmalloc failed\n", __FUNCTION__, __LINE__)); + goto error; } - memset(cnt, 0, sizeof(*cnt)); + memset(if_stats, 0, sizeof(*if_stats)); - ret = wldev_iovar_getbuf(dev, "counters", NULL, 0, (char *)cnt, sizeof(wl_cnt_t), NULL); + ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0, + (char *)if_stats, sizeof(*if_stats), NULL); if (ret) { - ANDROID_ERROR(("%s: wldev_iovar_getbuf() failed, ret=%d\n", + ANDROID_ERROR(("%s: if_counters not supported 
ret=%d\n", __FUNCTION__, ret)); - goto error; - } - if (dtoh16(cnt->version) > WL_CNT_T_VERSION) { - ANDROID_ERROR(("%s: incorrect version of wl_cnt_t, expected=%u got=%u\n", - __FUNCTION__, WL_CNT_T_VERSION, cnt->version)); - goto error; + /* In case if_stats IOVAR is not supported, get information from counters. */ +#endif /* DISABLE_IF_COUNTERS */ + if ((cnt = kmalloc(sizeof(*cnt), GFP_KERNEL)) == NULL) { + ANDROID_ERROR(("%s(%d): kmalloc failed\n", __FUNCTION__, __LINE__)); + goto error; + } + memset(cnt, 0, sizeof(*cnt)); + + ret = wldev_iovar_getbuf(dev, "counters", NULL, 0, + (char *)cnt, sizeof(wl_cnt_t), NULL); + if (ret) { + ANDROID_ERROR(("%s: wldev_iovar_getbuf() failed, ret=%d\n", + __FUNCTION__, ret)); + goto error; + } + + if (dtoh16(cnt->version) > WL_CNT_T_VERSION) { + ANDROID_ERROR(("%s: incorrect version of wl_cnt_t, expected=%u got=%u\n", + __FUNCTION__, WL_CNT_T_VERSION, cnt->version)); + goto error; + } + + output->txframe = dtoh32(cnt->txframe); + output->txbyte = dtoh32(cnt->txbyte); + output->txerror = dtoh32(cnt->txerror); + output->rxframe = dtoh32(cnt->rxframe); + output->rxbyte = dtoh32(cnt->rxbyte); + output->txfail = dtoh32(cnt->txfail); + output->txretry = dtoh32(cnt->txretry); + output->txretrie = dtoh32(cnt->txretrie); + output->txrts = dtoh32(cnt->txrts); + output->txnocts = dtoh32(cnt->txnocts); + output->txexptime = dtoh32(cnt->txexptime); +#ifndef DISABLE_IF_COUNTERS + } else { + /* Populate from if_stats. 
*/ + if (dtoh16(if_stats->version) > WL_IF_STATS_T_VERSION) { + ANDROID_ERROR(("%s: incorrect version of wl_if_stats_t, expected=%u got=%u\n", + __FUNCTION__, WL_IF_STATS_T_VERSION, if_stats->version)); + goto error; + } + + output->txframe = (uint32)dtoh64(if_stats->txframe); + output->txbyte = (uint32)dtoh64(if_stats->txbyte); + output->txerror = (uint32)dtoh64(if_stats->txerror); + output->rxframe = (uint32)dtoh64(if_stats->rxframe); + output->rxbyte = (uint32)dtoh64(if_stats->rxbyte); + output->txfail = (uint32)dtoh64(if_stats->txfail); + output->txretry = (uint32)dtoh64(if_stats->txretry); + output->txretrie = (uint32)dtoh64(if_stats->txretrie); + /* Unavailable */ + output->txrts = 0; + output->txnocts = 0; + output->txexptime = 0; } +#endif /* DISABLE_IF_COUNTERS */ /* link_speed is in kbps */ ret = wldev_get_link_speed(dev, &link_speed); @@ -1126,18 +1603,6 @@ wl_android_get_connection_stats(struct net_device *dev, char *command, int total goto error; } - output = (struct connection_stats *)command; - output->txframe = dtoh32(cnt->txframe); - output->txbyte = dtoh32(cnt->txbyte); - output->txerror = dtoh32(cnt->txerror); - output->rxframe = dtoh32(cnt->rxframe); - output->rxbyte = dtoh32(cnt->rxbyte); - output->txfail = dtoh32(cnt->txfail); - output->txretry = dtoh32(cnt->txretry); - output->txretrie = dtoh32(cnt->txretrie); - output->txrts = dtoh32(cnt->txrts); - output->txnocts = dtoh32(cnt->txnocts); - output->txexptime = dtoh32(cnt->txexptime); output->txrate = link_speed; /* Channel idle ratio. 
*/ @@ -1145,77 +1610,452 @@ wl_android_get_connection_stats(struct net_device *dev, char *command, int total output->chan_idle = 0; }; - kfree(cnt); - bytes_written = sizeof(struct connection_stats); - return bytes_written; error: +#ifndef DISABLE_IF_COUNTERS + if (if_stats) { + kfree(if_stats); + } +#endif /* DISABLE_IF_COUNTERS */ if (cnt) { kfree(cnt); } - return -1; + + return bytes_written; } #endif /* CONNECTION_STATISTICS */ + +#ifdef CUSTOMER_HW4_PRIVATE_CMD +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ + +/* SoftAP feature */ +#define APCS_BAND_2G_LEGACY1 20 +#define APCS_BAND_2G_LEGACY2 0 +#define APCS_BAND_AUTO "band=auto" +#define APCS_BAND_2G "band=2g" +#define APCS_BAND_5G "band=5g" +#define APCS_MAX_2G_CHANNELS 11 +#define APCS_MAX_RETRY 10 +#define APCS_DEFAULT_2G_CH 1 +#define APCS_DEFAULT_5G_CH 149 +#if defined(WL_SUPPORT_AUTO_CHANNEL) static int -wl_android_set_pmk(struct net_device *dev, char *command, int total_len) +wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, + char* command, int total_len) { - uchar pmk[33]; - int error = 0; - char smbuf[WLC_IOCTL_SMLEN]; -#ifdef OKC_DEBUG - int i = 0; -#endif + int channel = 0; + int chosen = 0; + int retry = 0; + int ret = 0; + int spect = 0; + u8 *reqbuf = NULL; + uint32 band = WLC_BAND_2G; + uint32 buf_size; + + if (cmd_str) { + ANDROID_INFO(("Command: %s len:%d \n", cmd_str, (int)strlen(cmd_str))); + if (strncmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) { + band = WLC_BAND_AUTO; + } else if (strncmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) { + band = WLC_BAND_5G; + } else if (strncmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) { + band = WLC_BAND_2G; + } else { + /* + * For backward compatibility: Some platforms used to issue argument 20 or 0 + * to enforce the 2G channel selection + */ + channel = bcm_atoi(cmd_str); + if ((channel == APCS_BAND_2G_LEGACY1) || + (channel == APCS_BAND_2G_LEGACY2)) { + band = WLC_BAND_2G; + } else { + 
ANDROID_ERROR(("Invalid argument\n")); + return -EINVAL; + } + } + } else { + /* If no argument is provided, default to 2G */ + ANDROID_ERROR(("No argument given default to 2.4G scan\n")); + band = WLC_BAND_2G; + } + ANDROID_INFO(("HAPD_AUTO_CHANNEL = %d, band=%d \n", channel, band)); - bzero(pmk, sizeof(pmk)); - memcpy((char *)pmk, command + strlen("SET_PMK "), 32); - error = wldev_iovar_setbuf(dev, "okc_info_pmk", pmk, 32, smbuf, sizeof(smbuf), NULL); - if (error) { - ANDROID_ERROR(("Failed to set PMK for OKC, error = %d\n", error)); + if ((ret = wldev_ioctl(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect), false)) < 0) { + ANDROID_ERROR(("ACS: error getting the spect\n")); + goto done; } -#ifdef OKC_DEBUG - ANDROID_ERROR(("PMK is ")); - for (i = 0; i < 32; i++) - ANDROID_ERROR(("%02X ", pmk[i])); - ANDROID_ERROR(("\n")); -#endif - return error; -} + if (spect > 0) { + /* If STA is connected, return is STA channel, else ACS can be issued, + * set spect to 0 and proceed with ACS + */ + channel = wl_cfg80211_get_sta_channel(); + if (channel) { + channel = (channel <= CH_MAX_2G_CHANNEL) ? channel : APCS_DEFAULT_2G_CH; + goto done2; + } -static int -wl_android_okc_enable(struct net_device *dev, char *command, int total_len) -{ - int error = 0; - char okc_enable = 0; + if ((ret = wl_cfg80211_set_spect(dev, 0) < 0)) { + ANDROID_ERROR(("ACS: error while setting spect\n")); + goto done; + } + } - okc_enable = command[strlen(CMD_OKC_ENABLE) + 1] - '0'; - error = wldev_iovar_setint(dev, "okc_enable", okc_enable); - if (error) { - ANDROID_ERROR(("Failed to %s OKC, error = %d\n", - okc_enable ? 
"enable" : "disable", error)); + reqbuf = kzalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL); + if (reqbuf == NULL) { + ANDROID_ERROR(("failed to allocate chanspec buffer\n")); + return -ENOMEM; } - wldev_iovar_setint(dev, "ccx_enable", 0); + if (band == WLC_BAND_AUTO) { + ANDROID_INFO(("ACS full channel scan \n")); + reqbuf[0] = htod32(0); + } else if (band == WLC_BAND_5G) { + ANDROID_INFO(("ACS 5G band scan \n")); + if ((ret = wl_cfg80211_get_chanspecs_5g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) { + ANDROID_ERROR(("ACS 5g chanspec retreival failed! \n")); + goto done; + } + } else if (band == WLC_BAND_2G) { + /* + * If channel argument is not provided/ argument 20 is provided, + * Restrict channel to 2GHz, 20MHz BW, No SB + */ + ANDROID_INFO(("ACS 2G band scan \n")); + if ((ret = wl_cfg80211_get_chanspecs_2g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) { + ANDROID_ERROR(("ACS 2g chanspec retreival failed! \n")); + goto done; + } + } else { + ANDROID_ERROR(("ACS: No band chosen\n")); + goto done2; + } - return error; -} + buf_size = (band == WLC_BAND_AUTO) ? sizeof(int) : CHANSPEC_BUF_SIZE; + ret = wldev_ioctl(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf, + buf_size, true); + if (ret < 0) { + ANDROID_ERROR(("can't start auto channel scan, err = %d\n", ret)); + channel = 0; + goto done; + } + /* Wait for auto channel selection, max 3000 ms */ + if ((band == WLC_BAND_2G) || (band == WLC_BAND_5G)) { + OSL_SLEEP(500); + } else { + /* + * Full channel scan at the minimum takes 1.2secs + * even with parallel scan. 
max wait time: 3500ms + */ + OSL_SLEEP(1000); + } + retry = APCS_MAX_RETRY; + while (retry--) { + ret = wldev_ioctl(dev, WLC_GET_CHANNEL_SEL, &chosen, + sizeof(chosen), false); + if (ret < 0) { + chosen = 0; + } else { + chosen = dtoh32(chosen); + } -int wl_android_set_roam_mode(struct net_device *dev, char *command, int total_len) -{ - int error = 0; - int mode = 0; + if (chosen) { + int chosen_band; + int apcs_band; +#ifdef D11AC_IOTYPES + if (wl_cfg80211_get_ioctl_version() == 1) { + channel = LCHSPEC_CHANNEL((chanspec_t)chosen); + } else { + channel = CHSPEC_CHANNEL((chanspec_t)chosen); + } +#else + channel = CHSPEC_CHANNEL((chanspec_t)chosen); +#endif /* D11AC_IOTYPES */ + apcs_band = (band == WLC_BAND_AUTO) ? WLC_BAND_2G : band; + chosen_band = (channel <= CH_MAX_2G_CHANNEL) ? WLC_BAND_2G : WLC_BAND_5G; + if (apcs_band == chosen_band) { + ANDROID_ERROR(("selected channel = %d\n", channel)); + break; + } + } + ANDROID_INFO(("%d tried, ret = %d, chosen = 0x%x\n", + (APCS_MAX_RETRY - retry), ret, chosen)); + OSL_SLEEP(250); + } - if (sscanf(command, "%*s %d", &mode) != 1) { - ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); - return -1; +done: + if ((retry == 0) || (ret < 0)) { + /* On failure, fallback to a default channel */ + if ((band == WLC_BAND_5G)) { + channel = APCS_DEFAULT_5G_CH; + } else { + channel = APCS_DEFAULT_2G_CH; + } + ANDROID_ERROR(("ACS failed. 
Fall back to default channel (%d) \n", channel)); + } +done2: + if (spect > 0) { + if ((ret = wl_cfg80211_set_spect(dev, spect) < 0)) { + ANDROID_ERROR(("ACS: error while setting spect\n")); + } } - error = wldev_iovar_setint(dev, "roam_off", mode); - if (error) { + if (reqbuf) { + kfree(reqbuf); + } + + if (channel) { + snprintf(command, 4, "%d", channel); + ANDROID_INFO(("command result is %s \n", command)); + return strlen(command); + } else { + return ret; + } +} +#endif /* WL_SUPPORT_AUTO_CHANNEL */ + +#ifdef CUSTOMER_HW4_PRIVATE_CMD + + +#ifdef SUPPORT_SET_LPC +static int +wl_android_set_lpc(struct net_device *dev, const char* string_num) +{ + int lpc_enabled, ret; + s32 val = 1; + + lpc_enabled = bcm_atoi(string_num); + ANDROID_INFO(("%s : HAPD_LPC_ENABLED = %d\n", __FUNCTION__, lpc_enabled)); + + ret = wldev_ioctl(dev, WLC_DOWN, &val, sizeof(s32), true); + if (ret < 0) + ANDROID_ERROR(("WLC_DOWN error %d\n", ret)); + + wldev_iovar_setint(dev, "lpc", lpc_enabled); + + ret = wldev_ioctl(dev, WLC_UP, &val, sizeof(s32), true); + if (ret < 0) + ANDROID_ERROR(("WLC_UP error %d\n", ret)); + + return 1; +} +#endif /* SUPPORT_SET_LPC */ + +static int +wl_android_ch_res_rl(struct net_device *dev, bool change) +{ + int error = 0; + s32 srl = 7; + s32 lrl = 4; + printk("%s enter\n", __FUNCTION__); + if (change) { + srl = 4; + lrl = 2; + } + error = wldev_ioctl(dev, WLC_SET_SRL, &srl, sizeof(s32), true); + if (error) { + ANDROID_ERROR(("Failed to set SRL, error = %d\n", error)); + } + error = wldev_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(s32), true); + if (error) { + ANDROID_ERROR(("Failed to set LRL, error = %d\n", error)); + } + return error; +} + + +static int +wl_android_rmc_enable(struct net_device *net, int rmc_enable) +{ + int err; + + err = wldev_iovar_setint(net, "rmc_ackreq", rmc_enable); + return err; +} + +static int +wl_android_rmc_set_leader(struct net_device *dev, const char* straddr) +{ + int error = BCME_OK; + char smbuf[WLC_IOCTL_SMLEN]; + wl_rmc_entry_t 
rmc_entry; + ANDROID_INFO(("%s: Set new RMC leader %s\n", __FUNCTION__, straddr)); + + memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t)); + if (!bcm_ether_atoe(straddr, &rmc_entry.addr)) { + if (strlen(straddr) == 1 && bcm_atoi(straddr) == 0) { + ANDROID_INFO(("%s: Set auto leader selection mode\n", __FUNCTION__)); + memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t)); + } else { + ANDROID_ERROR(("%s: No valid mac address provided\n", + __FUNCTION__)); + return BCME_ERROR; + } + } + + error = wldev_iovar_setbuf(dev, "rmc_ar", &rmc_entry, sizeof(wl_rmc_entry_t), + smbuf, sizeof(smbuf), NULL); + + if (error != BCME_OK) { + ANDROID_ERROR(("%s: Unable to set RMC leader, error = %d\n", + __FUNCTION__, error)); + } + + return error; +} + +static int wl_android_set_rmc_event(struct net_device *dev, char *command, int total_len) +{ + int err = 0; + int pid = 0; + + if (sscanf(command, CMD_SET_RMC_EVENT " %d", &pid) <= 0) { + ANDROID_ERROR(("Failed to get Parameter from : %s\n", command)); + return -1; + } + + /* set pid, and if the event was happened, let's send a notification through netlink */ + wl_cfg80211_set_rmc_pid(pid); + + ANDROID_TRACE(("RMC pid=%d\n", pid)); + + return err; +} + +int wl_android_get_singlecore_scan(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int bytes_written = 0; + int mode = 0; + + error = wldev_iovar_getint(dev, "scan_ps", &mode); + if (error) { + ANDROID_ERROR(("%s: Failed to get single core scan Mode, error = %d\n", + __FUNCTION__, error)); + return -1; + } + + bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_SCSCAN, mode); + + return bytes_written; +} + +int wl_android_set_singlecore_scan(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int mode = 0; + + if (sscanf(command, "%*s %d", &mode) != 1) { + ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); + return -1; + } + + error = wldev_iovar_setint(dev, "scan_ps", mode); + if (error) { + ANDROID_ERROR(("%s[1]: Failed 
to set Mode %d, error = %d\n", + __FUNCTION__, mode, error)); + return -1; + } + + return error; +} +#ifdef TEST_TX_POWER_CONTROL +static int +wl_android_set_tx_power(struct net_device *dev, const char* string_num) +{ + int err = 0; + s32 dbm; + enum nl80211_tx_power_setting type; + + dbm = bcm_atoi(string_num); + + if (dbm < -1) { + ANDROID_ERROR(("%s: dbm is negative...\n", __FUNCTION__)); + return -EINVAL; + } + + if (dbm == -1) + type = NL80211_TX_POWER_AUTOMATIC; + else + type = NL80211_TX_POWER_FIXED; + + err = wl_set_tx_power(dev, type, dbm); + if (unlikely(err)) { + ANDROID_ERROR(("%s: error (%d)\n", __FUNCTION__, err)); + return err; + } + + return 1; +} + +static int +wl_android_get_tx_power(struct net_device *dev, char *command, int total_len) +{ + int err; + int bytes_written; + s32 dbm = 0; + + err = wl_get_tx_power(dev, &dbm); + if (unlikely(err)) { + ANDROID_ERROR(("%s: error (%d)\n", __FUNCTION__, err)); + return err; + } + + bytes_written = snprintf(command, total_len, "%s %d", + CMD_TEST_GET_TX_POWER, dbm); + + ANDROID_ERROR(("%s: GET_TX_POWER: dBm=%d\n", __FUNCTION__, dbm)); + + return bytes_written; +} +#endif /* TEST_TX_POWER_CONTROL */ + +static int +wl_android_set_sarlimit_txctrl(struct net_device *dev, const char* string_num) +{ + int err = 0; + int setval = 0; + s32 mode = bcm_atoi(string_num); + + /* As Samsung specific and their requirement, '0' means activate sarlimit + * and '-1' means back to normal state (deactivate sarlimit) + */ + if (mode == 0) { + ANDROID_INFO(("%s: SAR limit control activated\n", __FUNCTION__)); + setval = 1; + } else if (mode == -1) { + ANDROID_INFO(("%s: SAR limit control deactivated\n", __FUNCTION__)); + setval = 0; + } else { + return -EINVAL; + } + + err = wldev_iovar_setint(dev, "sar_enable", setval); + if (unlikely(err)) { + ANDROID_ERROR(("%s: error (%d)\n", __FUNCTION__, err)); + return err; + } + return 1; +} +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ + +int wl_android_set_roam_mode(struct net_device *dev, 
char *command, int total_len) +{ + int error = 0; + int mode = 0; + + if (sscanf(command, "%*s %d", &mode) != 1) { + ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); + return -1; + } + + error = wldev_iovar_setint(dev, "roam_off", mode); + if (error) { ANDROID_ERROR(("%s: Failed to set roaming Mode %d, error = %d\n", __FUNCTION__, mode, error)); return -1; @@ -1534,7 +2374,45 @@ wl_android_iolist_resume(struct net_device *dev, struct list_head *head) kfree(config); } } +#ifdef WL11ULB +static int +wl_android_set_ulb_mode(struct net_device *dev, char *command, int total_len) +{ + int mode = 0; + + ANDROID_INFO(("set ulb mode (%s) \n", command)); + if (sscanf(command, "%*s %d", &mode) != 1) { + ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); + return -1; + } + return wl_cfg80211_set_ulb_mode(dev, mode); +} +static int +wl_android_set_ulb_bw(struct net_device *dev, char *command, int total_len) +{ + int bw = 0; + u8 *pos; + char *ifname = NULL; + ANDROID_INFO(("set ulb bw (%s) \n", command)); + /* + * For sta/ap: IFNAME= DRIVER ULB_BW ifname + * For p2p: IFNAME=wlan0 DRIVER ULB_BW p2p-dev-wlan0 + */ + if (total_len < strlen(CMD_ULB_BW) + 2) + return -EINVAL; + + pos = command + strlen(CMD_ULB_BW) + 1; + bw = bcm_atoi(pos); + + if ((strlen(pos) >= 5)) { + ifname = pos + 2; + } + + ANDROID_INFO(("[ULB] ifname:%s ulb_bw:%d \n", ifname, bw)); + return wl_cfg80211_set_ulb_bw(dev, bw, ifname); +} +#endif /* WL11ULB */ static int wl_android_set_miracast(struct net_device *dev, char *command, int total_len) { @@ -1549,8 +2427,9 @@ wl_android_set_miracast(struct net_device *dev, char *command, int total_len) ANDROID_INFO(("%s: enter miracast mode %d\n", __FUNCTION__, mode)); - if (miracast_cur_mode == mode) + if (miracast_cur_mode == mode) { return 0; + } wl_android_iolist_resume(dev, &miracast_resume_list); miracast_cur_mode = MIRACAST_MODE_OFF; @@ -1566,27 +2445,29 @@ wl_android_set_miracast(struct net_device *dev, char *command, int total_len) 
ANDROID_ERROR(("%s: Connected station's beacon interval: " "%d and set mchan_algo to %d \n", __FUNCTION__, val, config.param)); - } - else { + } else { config.param = MIRACAST_MCHAN_ALGO; } ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); - if (ret) + if (ret) { goto resume; + } /* setting mchan_bw to platform specific value */ config.iovar = "mchan_bw"; config.param = MIRACAST_MCHAN_BW; ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); - if (ret) + if (ret) { goto resume; + } /* setting apmdu to platform specific value */ config.iovar = "ampdu_mpdu"; config.param = MIRACAST_AMPDU_SIZE; ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); - if (ret) + if (ret) { goto resume; + } /* FALLTROUGH */ /* Source mode shares most configurations with sink mode. * Fall through here to avoid code duplication @@ -1596,18 +2477,27 @@ wl_android_set_miracast(struct net_device *dev, char *command, int total_len) config.iovar = "roam_off"; config.param = 1; ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); - if (ret) + if (ret) { goto resume; + } + /* tunr off pm */ - val = 0; - config.iovar = NULL; - config.ioctl = WLC_GET_PM; - config.arg = &val; - config.len = sizeof(int); - ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); - if (ret) + ret = wldev_ioctl(dev, WLC_GET_PM, &val, sizeof(val), false); + if (ret) { goto resume; + } + if (val != PM_OFF) { + val = PM_OFF; + config.iovar = NULL; + config.ioctl = WLC_GET_PM; + config.arg = &val; + config.len = sizeof(int); + ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); + if (ret) { + goto resume; + } + } break; case MIRACAST_MODE_OFF: default: @@ -1710,359 +2600,65 @@ wl_netlink_send_msg(int pid, int type, int seq, void *data, size_t size) return ret; } -#ifdef WLAIBSS -static int wl_android_set_ibss_txfail_event(struct net_device *dev, char *command, int total_len) + +int wl_keep_alive_set(struct net_device *dev, char* extra, int 
total_len) { - int err = 0; - int retry = 0; - int pid = 0; - aibss_txfail_config_t txfail_config = {0, 0, 0, 0}; - char smbuf[WLC_IOCTL_SMLEN]; + char buf[256]; + const char *str; + wl_mkeep_alive_pkt_t mkeep_alive_pkt; + wl_mkeep_alive_pkt_t *mkeep_alive_pktp; + int buf_len; + int str_len; + int res = -1; + uint period_msec = 0; - if (sscanf(command, CMD_SETIBSSTXFAILEVENT " %d %d", &retry, &pid) <= 0) { - ANDROID_ERROR(("Failed to get Parameter from : %s\n", command)); - return -1; + if (extra == NULL) + { + ANDROID_ERROR(("%s: extra is NULL\n", __FUNCTION__)); + return -1; } - - /* set pid, and if the event was happened, let's send a notification through netlink */ - wl_cfg80211_set_txfail_pid(pid); - - /* If retry value is 0, it disables the functionality for TX Fail. */ - if (retry > 0) { - txfail_config.max_tx_retry = retry; - txfail_config.bcn_timeout = 0; /* 0 : disable tx fail from beacon */ + if (sscanf(extra, "%d", &period_msec) != 1) + { + ANDROID_ERROR(("%s: sscanf error. check period_msec value\n", __FUNCTION__)); + return -EINVAL; } - txfail_config.version = AIBSS_TXFAIL_CONFIG_VER_0; - txfail_config.len = sizeof(txfail_config); - - err = wldev_iovar_setbuf(dev, "aibss_txfail_config", (void *) &txfail_config, - sizeof(aibss_txfail_config_t), smbuf, WLC_IOCTL_SMLEN, NULL); - ANDROID_TRACE(("retry=%d, pid=%d, err=%d\n", retry, pid, err)); + ANDROID_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec)); - return ((err == 0)?total_len:err); -} + memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); -static int wl_android_get_ibss_peer_info(struct net_device *dev, char *command, - int total_len, bool bAll) -{ - int error; - int bytes_written = 0; - void *buf = NULL; - bss_peer_list_info_t peer_list_info; - bss_peer_info_t *peer_info; - int i; - bool found = false; - struct ether_addr mac_ea; + str = "mkeep_alive"; + str_len = strlen(str); + strncpy(buf, str, str_len); + buf[ str_len ] = '\0'; + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) 
(buf + str_len + 1); + mkeep_alive_pkt.period_msec = period_msec; + buf_len = str_len + 1; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); - ANDROID_TRACE(("get ibss peer info(%s)\n", bAll?"true":"false")); + /* Setup keep alive zero for null packet generation */ + mkeep_alive_pkt.keep_alive_id = 0; + mkeep_alive_pkt.len_bytes = 0; + buf_len += WL_MKEEP_ALIVE_FIXED_LEN; + /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and + * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no + * guarantee that the buffer is properly aligned. + */ + memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN); - if (!bAll) { - if (sscanf (command, "GETIBSSPEERINFO %02x:%02x:%02x:%02x:%02x:%02x", - (unsigned int *)&mac_ea.octet[0], (unsigned int *)&mac_ea.octet[1], - (unsigned int *)&mac_ea.octet[2], (unsigned int *)&mac_ea.octet[3], - (unsigned int *)&mac_ea.octet[4], (unsigned int *)&mac_ea.octet[5]) != 6) { - ANDROID_TRACE(("invalid MAC address\n")); - return -1; - } + if ((res = wldev_ioctl(dev, WLC_SET_VAR, buf, buf_len, TRUE)) < 0) + { + ANDROID_ERROR(("%s:keep_alive set failed. res[%d]\n", __FUNCTION__, res)); } - - if ((buf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL)) == NULL) { - ANDROID_ERROR(("kmalloc failed\n")); - return -1; + else + { + ANDROID_ERROR(("%s:keep_alive set ok. 
res[%d]\n", __FUNCTION__, res)); } - error = wldev_iovar_getbuf(dev, "bss_peer_info", NULL, 0, buf, WLC_IOCTL_MAXLEN, NULL); - if (unlikely(error)) { - ANDROID_ERROR(("could not get ibss peer info (%d)\n", error)); - kfree(buf); - return -1; - } - - memcpy(&peer_list_info, buf, sizeof(peer_list_info)); - peer_list_info.version = htod16(peer_list_info.version); - peer_list_info.bss_peer_info_len = htod16(peer_list_info.bss_peer_info_len); - peer_list_info.count = htod32(peer_list_info.count); - - ANDROID_TRACE(("ver:%d, len:%d, count:%d\n", peer_list_info.version, - peer_list_info.bss_peer_info_len, peer_list_info.count)); - - if (peer_list_info.count > 0) { - if (bAll) - bytes_written += sprintf(&command[bytes_written], "%u ", - peer_list_info.count); - - peer_info = (bss_peer_info_t *) ((void *)buf + BSS_PEER_LIST_INFO_FIXED_LEN); - - - for (i = 0; i < peer_list_info.count; i++) { - - ANDROID_TRACE(("index:%d rssi:%d, tx:%u, rx:%u\n", i, peer_info->rssi, - peer_info->tx_rate, peer_info->rx_rate)); - - if (!bAll && - memcmp(&mac_ea, &peer_info->ea, sizeof(struct ether_addr)) == 0) { - found = true; - } - - if (bAll || found) { - bytes_written += sprintf(&command[bytes_written], MACF, - ETHER_TO_MACF(peer_info->ea)); - bytes_written += sprintf(&command[bytes_written], " %u %d ", - peer_info->tx_rate/1000, peer_info->rssi); - } - - if (found) - break; - - peer_info = (bss_peer_info_t *)((void *)peer_info+sizeof(bss_peer_info_t)); - } - } - else { - ANDROID_ERROR(("could not get ibss peer info : no item\n")); - } - bytes_written += sprintf(&command[bytes_written], "%s", "\0"); - - ANDROID_TRACE(("command(%u):%s\n", total_len, command)); - ANDROID_TRACE(("bytes_written:%d\n", bytes_written)); - - kfree(buf); - return bytes_written; -} - -int wl_android_set_ibss_routetable(struct net_device *dev, char *command, int total_len) -{ - - char *pcmd = command; - char *str = NULL; - - ibss_route_tbl_t *route_tbl = NULL; - char *ioctl_buf = NULL; - u16 kflags = in_atomic() ? 
GFP_ATOMIC : GFP_KERNEL; - s32 err = BCME_OK; - uint32 route_tbl_len; - uint32 entries; - char *endptr; - uint32 i = 0; - struct ipv4_addr dipaddr; - struct ether_addr ea; - - route_tbl_len = sizeof(ibss_route_tbl_t) + - (MAX_IBSS_ROUTE_TBL_ENTRY - 1) * sizeof(ibss_route_entry_t); - route_tbl = (ibss_route_tbl_t *)kzalloc(route_tbl_len, kflags); - if (!route_tbl) { - ANDROID_ERROR(("Route TBL alloc failed\n")); - return -ENOMEM; - } - ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL); - if (!ioctl_buf) { - ANDROID_ERROR(("ioctl memory alloc failed\n")); - if (route_tbl) { - kfree(route_tbl); - } - return -ENOMEM; - } - memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN); - - /* drop command */ - str = bcmstrtok(&pcmd, " ", NULL); - - /* get count */ - str = bcmstrtok(&pcmd, " ", NULL); - if (!str) { - ANDROID_ERROR(("Invalid number parameter %s\n", str)); - err = -EINVAL; - goto exit; - } - entries = bcm_strtoul(str, &endptr, 0); - if (*endptr != '\0') { - ANDROID_ERROR(("Invalid number parameter %s\n", str)); - err = -EINVAL; - goto exit; - } - ANDROID_INFO(("Routing table count:%d\n", entries)); - route_tbl->num_entry = entries; - - for (i = 0; i < entries; i++) { - str = bcmstrtok(&pcmd, " ", NULL); - if (!str || !bcm_atoipv4(str, &dipaddr)) { - ANDROID_ERROR(("Invalid ip string %s\n", str)); - err = -EINVAL; - goto exit; - } - - - str = bcmstrtok(&pcmd, " ", NULL); - if (!str || !bcm_ether_atoe(str, &ea)) { - ANDROID_ERROR(("Invalid ethernet string %s\n", str)); - err = -EINVAL; - goto exit; - } - bcopy(&dipaddr, &route_tbl->route_entry[i].ipv4_addr, IPV4_ADDR_LEN); - bcopy(&ea, &route_tbl->route_entry[i].nexthop, ETHER_ADDR_LEN); - } - - route_tbl_len = sizeof(ibss_route_tbl_t) + - ((!entries?0:(entries - 1)) * sizeof(ibss_route_entry_t)); - err = wldev_iovar_setbuf(dev, "ibss_route_tbl", - route_tbl, route_tbl_len, ioctl_buf, WLC_IOCTL_MEDLEN, NULL); - if (err != BCME_OK) { - ANDROID_ERROR(("Fail to set iovar %d\n", err)); - err = -EINVAL; - } - -exit: - if (route_tbl) - 
kfree(route_tbl); - if (ioctl_buf) - kfree(ioctl_buf); - return err; - -} - -int -wl_android_set_ibss_ampdu(struct net_device *dev, char *command, int total_len) -{ - char *pcmd = command; - char *str = NULL, *endptr = NULL; - struct ampdu_aggr aggr; - char smbuf[WLC_IOCTL_SMLEN]; - int idx; - int err = 0; - int wme_AC2PRIO[AC_COUNT][2] = { - {PRIO_8021D_VO, PRIO_8021D_NC}, /* AC_VO - 3 */ - {PRIO_8021D_CL, PRIO_8021D_VI}, /* AC_VI - 2 */ - {PRIO_8021D_BK, PRIO_8021D_NONE}, /* AC_BK - 1 */ - {PRIO_8021D_BE, PRIO_8021D_EE}}; /* AC_BE - 0 */ - - ANDROID_TRACE(("set ibss ampdu:%s\n", command)); - - memset(&aggr, 0, sizeof(aggr)); - /* Cofigure all priorities */ - aggr.conf_TID_bmap = NBITMASK(NUMPRIO); - - /* acquire parameters */ - /* drop command */ - str = bcmstrtok(&pcmd, " ", NULL); - - for (idx = 0; idx < AC_COUNT; idx++) { - bool on; - str = bcmstrtok(&pcmd, " ", NULL); - if (!str) { - ANDROID_ERROR(("Invalid parameter : %s\n", pcmd)); - return -EINVAL; - } - on = bcm_strtoul(str, &endptr, 0) ? TRUE : FALSE; - if (*endptr != '\0') { - ANDROID_ERROR(("Invalid number format %s\n", str)); - return -EINVAL; - } - if (on) { - setbit(&aggr.enab_TID_bmap, wme_AC2PRIO[idx][0]); - setbit(&aggr.enab_TID_bmap, wme_AC2PRIO[idx][1]); - } - } - - err = wldev_iovar_setbuf(dev, "ampdu_txaggr", (void *)&aggr, - sizeof(aggr), smbuf, WLC_IOCTL_SMLEN, NULL); - - return ((err == 0) ? 
total_len : err); -} - -int wl_android_set_ibss_antenna(struct net_device *dev, char *command, int total_len) -{ - char *pcmd = command; - char *str = NULL; - int txchain, rxchain; - int err = 0; - - ANDROID_TRACE(("set ibss antenna:%s\n", command)); - - /* acquire parameters */ - /* drop command */ - str = bcmstrtok(&pcmd, " ", NULL); - - /* TX chain */ - str = bcmstrtok(&pcmd, " ", NULL); - if (!str) { - ANDROID_ERROR(("Invalid parameter : %s\n", pcmd)); - return -EINVAL; - } - txchain = bcm_atoi(str); - - /* RX chain */ - str = bcmstrtok(&pcmd, " ", NULL); - if (!str) { - ANDROID_ERROR(("Invalid parameter : %s\n", pcmd)); - return -EINVAL; - } - rxchain = bcm_atoi(str); - - err = wldev_iovar_setint(dev, "txchain", txchain); - if (err != 0) - return err; - err = wldev_iovar_setint(dev, "rxchain", rxchain); - return ((err == 0)?total_len:err); -} -#endif /* WLAIBSS */ - -int wl_keep_alive_set(struct net_device *dev, char* extra, int total_len) -{ - char buf[256]; - const char *str; - wl_mkeep_alive_pkt_t mkeep_alive_pkt; - wl_mkeep_alive_pkt_t *mkeep_alive_pktp; - int buf_len; - int str_len; - int res = -1; - uint period_msec = 0; - - if (extra == NULL) - { - ANDROID_ERROR(("%s: extra is NULL\n", __FUNCTION__)); - return -1; - } - if (sscanf(extra, "%d", &period_msec) != 1) - { - ANDROID_ERROR(("%s: sscanf error. 
check period_msec value\n", __FUNCTION__)); - return -EINVAL; - } - ANDROID_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec)); - - memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); - - str = "mkeep_alive"; - str_len = strlen(str); - strncpy(buf, str, str_len); - buf[ str_len ] = '\0'; - mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1); - mkeep_alive_pkt.period_msec = period_msec; - buf_len = str_len + 1; - mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); - mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); - - /* Setup keep alive zero for null packet generation */ - mkeep_alive_pkt.keep_alive_id = 0; - mkeep_alive_pkt.len_bytes = 0; - buf_len += WL_MKEEP_ALIVE_FIXED_LEN; - /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and - * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no - * guarantee that the buffer is properly aligned. - */ - memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN); - - if ((res = wldev_ioctl(dev, WLC_SET_VAR, buf, buf_len, TRUE)) < 0) - { - ANDROID_ERROR(("%s:keep_alive set failed. res[%d]\n", __FUNCTION__, res)); - } - else - { - ANDROID_ERROR(("%s:keep_alive set ok. 
res[%d]\n", __FUNCTION__, res)); - } - - return res; -} - + return res; +} +#ifdef WL_CFG80211 static const char * get_string_by_separator(char *result, int result_len, const char *src, char separator) { @@ -2071,8 +2667,9 @@ get_string_by_separator(char *result, int result_len, const char *src, char sepa *result++ = *src++; } *result = 0; - if (*src == separator) + if (*src == separator) { ++src; + } return src; } @@ -2084,11 +2681,20 @@ wl_android_set_roam_offload_bssid_list(struct net_device *dev, const char *cmd) roamoffl_bssid_list_t *bssid_list; const char *str = cmd; char *ioctl_buf; + dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(); str = get_string_by_separator(sbuf, 32, str, ','); cnt = bcm_atoi(sbuf); cnt = MIN(cnt, MAX_ROAMOFFL_BSSID_NUM); - size = sizeof(int) + sizeof(struct ether_addr) * cnt; + + if ((cnt > 0) && + (((dhdp->op_mode & DHD_FLAG_STA_MODE) && (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) || + FALSE)) { + ANDROID_ERROR(("Can't set ROAMOFFL_BSSID when enabled STA-SoftAP or WES\n")); + return -EINVAL; + } + + size = sizeof(int32) + sizeof(struct ether_addr) * cnt; ANDROID_ERROR(("ROAM OFFLOAD BSSID LIST %d BSSIDs, size %d\n", cnt, size)); bssid_list = kmalloc(size, GFP_KERNEL); if (bssid_list == NULL) { @@ -2107,22 +2713,18 @@ wl_android_set_roam_offload_bssid_list(struct net_device *dev, const char *cmd) for (i = 0; i < cnt; i++) { str = get_string_by_separator(sbuf, 32, str, ','); - if (bcm_ether_atoe(sbuf, &bssid_list->bssid[i]) == 0) { - ANDROID_ERROR(("%s: Invalid station MAC Address!!!\n", __FUNCTION__)); - kfree(bssid_list); - kfree(ioctl_buf); - return -1; - } + bcm_ether_atoe(sbuf, &bssid_list->bssid[i]); } - bssid_list->cnt = cnt; + bssid_list->cnt = (int32)cnt; err = wldev_iovar_setbuf(dev, "roamoffl_bssid_list", - bssid_list, size, ioctl_buf, ioctl_buf_len, NULL); + bssid_list, size, ioctl_buf, ioctl_buf_len, NULL); kfree(bssid_list); kfree(ioctl_buf); return err; } +#endif #ifdef P2PRESP_WFDIE_SRC static int wl_android_get_wfdie_resp(struct 
net_device *dev, char *command, int total_len) @@ -2159,314 +2761,116 @@ static int wl_android_set_wfdie_resp(struct net_device *dev, int only_resp_wfdsr } #endif /* P2PRESP_WFDIE_SRC */ -static int wl_android_get_link_status(struct net_device *dev, char *command, - int total_len) +#ifdef BT_WIFI_HANDOVER +static int +wl_tbow_teardown(struct net_device *dev, char *command, int total_len) { - int bytes_written, error, result = 0, single_stream, stf = -1, i, nss = 0, mcs_map; - uint32 rspec; - uint encode, rate, txexp; - struct wl_bss_info *bi; - int datalen = sizeof(uint32) + sizeof(wl_bss_info_t); - char buf[datalen]; - - /* get BSS information */ - *(u32 *) buf = htod32(datalen); - error = wldev_ioctl(dev, WLC_GET_BSS_INFO, (void *)buf, datalen, false); - if (unlikely(error)) { - ANDROID_ERROR(("Could not get bss info %d\n", error)); - return -1; - } - - bi = (struct wl_bss_info *) (buf + sizeof(uint32)); - - for (i = 0; i < ETHER_ADDR_LEN; i++) { - if (bi->BSSID.octet[i] > 0) { - break; - } - } - - if (i == ETHER_ADDR_LEN) { - ANDROID_TRACE(("No BSSID\n")); - return -1; - } - - /* check VHT capability at beacon */ - if (bi->vht_cap) { - if (CHSPEC_IS5G(bi->chanspec)) { - result |= WL_ANDROID_LINK_AP_VHT_SUPPORT; - } - } - - /* get a rspec (radio spectrum) rate */ - error = wldev_iovar_getint(dev, "nrate", &rspec); - if (unlikely(error) || rspec == 0) { - ANDROID_ERROR(("get link status error (%d)\n", error)); - return -1; - } - - encode = (rspec & WL_RSPEC_ENCODING_MASK); - rate = (rspec & WL_RSPEC_RATE_MASK); - txexp = (rspec & WL_RSPEC_TXEXP_MASK) >> WL_RSPEC_TXEXP_SHIFT; + int err = BCME_OK; + char buf[WLC_IOCTL_SMLEN]; + tbow_setup_netinfo_t netinfo; + memset(&netinfo, 0, sizeof(netinfo)); + netinfo.opmode = TBOW_HO_MODE_TEARDOWN; - switch (encode) { - case WL_RSPEC_ENCODE_HT: - /* check Rx MCS Map for HT */ - for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) { - int8 bitmap = 0xFF; - if (i == MAX_STREAMS_SUPPORTED-1) { - bitmap = 0x7F; - } - if (bi->basic_mcs[i] & 
bitmap) { - nss++; - } - } - break; - case WL_RSPEC_ENCODE_VHT: - /* check Rx MCS Map for VHT */ - for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) { - mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap)); - if (mcs_map != VHT_CAP_MCS_MAP_NONE) { - nss++; - } - } - break; + err = wldev_iovar_setbuf_bsscfg(dev, "tbow_doho", &netinfo, + sizeof(tbow_setup_netinfo_t), buf, WLC_IOCTL_SMLEN, 0, NULL); + if (err < 0) { + ANDROID_ERROR(("tbow_doho iovar error %d\n", err)); + return err; } + return err; +} +#endif /* BT_WIFI_HANOVER */ - /* check MIMO capability with nss in beacon */ - if (nss > 1) { - result |= WL_ANDROID_LINK_AP_MIMO_SUPPORT; - } +#ifdef SET_RPS_CPUS +static int +wl_android_set_rps_cpus(struct net_device *dev, char *command, int total_len) +{ + int error, enable; - single_stream = (encode == WL_RSPEC_ENCODE_RATE) || - ((encode == WL_RSPEC_ENCODE_HT) && rate < 8) || - ((encode == WL_RSPEC_ENCODE_VHT) && - ((rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT) == 1); + enable = command[strlen(CMD_RPSMODE) + 1] - '0'; + error = dhd_rps_cpus_enable(dev, enable); - if (txexp == 0) { - if ((rspec & WL_RSPEC_STBC) && single_stream) { - stf = OLD_NRATE_STF_STBC; +#if defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE) && defined(WL_CFG80211) + if (!error) { + void *dhdp = wl_cfg80211_get_dhdp(); + if (enable) { + ANDROID_TRACE(("%s : set ack suppress. TCPACK_SUP_HOLD.\n", __FUNCTION__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD); } else { - stf = (single_stream) ? 
OLD_NRATE_STF_SISO : OLD_NRATE_STF_SDM; + ANDROID_TRACE(("%s : clear ack suppress.\n", __FUNCTION__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); } - } else if (txexp == 1 && single_stream) { - stf = OLD_NRATE_STF_CDD; } +#endif /* DHDTCPACK_SUPPRESS && BCMPCIE && WL_CFG80211 */ - /* check 11ac (VHT) */ - if (encode == WL_RSPEC_ENCODE_VHT) { - if (CHSPEC_IS5G(bi->chanspec)) { - result |= WL_ANDROID_LINK_VHT; - } - } - - /* check MIMO */ - if (result & WL_ANDROID_LINK_AP_MIMO_SUPPORT) { - switch (stf) { - case OLD_NRATE_STF_SISO: - break; - case OLD_NRATE_STF_CDD: - case OLD_NRATE_STF_STBC: - result |= WL_ANDROID_LINK_MIMO; - break; - case OLD_NRATE_STF_SDM: - if (!single_stream) { - result |= WL_ANDROID_LINK_MIMO; - } - break; - } - } - - ANDROID_TRACE(("%s:result=%d, stf=%d, single_stream=%d, mcs map=%d\n", - __FUNCTION__, result, stf, single_stream, nss)); - - bytes_written = sprintf(command, "%s %d", CMD_GET_LINK_STATUS, result); - - return bytes_written; -} - -int -wl_android_get_channel( -struct net_device *dev, char* command, int total_len) -{ - int ret; - channel_info_t ci; - int bytes_written = 0; - - if (!(ret = wldev_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t), FALSE))) { - ANDROID_TRACE(("hw_channel %d\n", ci.hw_channel)); - ANDROID_TRACE(("target_channel %d\n", ci.target_channel)); - ANDROID_TRACE(("scan_channel %d\n", ci.scan_channel)); - bytes_written = snprintf(command, sizeof(channel_info_t)+2, "channel %d", ci.hw_channel); - ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command)); - } - - return bytes_written; + return error; } - -int -wl_android_set_roam_trigger( -struct net_device *dev, char* command, int total_len) +#endif /* SET_RPS_CPUS */ +#ifdef P2P_LISTEN_OFFLOADING +s32 +wl_cfg80211_p2plo_offload(struct net_device *dev, char *cmd, char* buf, int len) { int ret = 0; - int roam_trigger[2]; - - sscanf(command, "%*s %10d", &roam_trigger[0]); - roam_trigger[1] = WLC_BAND_ALL; - ret = wldev_ioctl(dev, 
WLC_SET_ROAM_TRIGGER, roam_trigger, sizeof(roam_trigger), 1); - if (ret) - ANDROID_ERROR(("WLC_SET_ROAM_TRIGGER ERROR %d ret=%d\n", roam_trigger[0], ret)); + ANDROID_ERROR(("Entry cmd:%s arg_len:%d \n", cmd, len)); + if (strncmp(cmd, "P2P_LO_START", strlen("P2P_LO_START")) == 0) { + ret = wl_cfg80211_p2plo_listen_start(dev, buf, len); + } else if (strncmp(cmd, "P2P_LO_STOP", strlen("P2P_LO_STOP")) == 0) { + ret = wl_cfg80211_p2plo_listen_stop(dev); + } else { + ANDROID_ERROR(("Request for Unsupported CMD:%s \n", buf)); + ret = -EINVAL; + } return ret; } +#endif /* P2P_LISTEN_OFFLOADING */ +#ifdef WL_CFG80211 int -wl_android_get_roam_trigger( -struct net_device *dev, char *command, int total_len) +wl_android_murx_bfe_cap(struct net_device *dev, int val) { - int ret; - int bytes_written; - int roam_trigger[2] = {0, 0}; - int trigger[2]= {0, 0}; - - roam_trigger[1] = WLC_BAND_2G; - ret = wldev_ioctl(dev, WLC_GET_ROAM_TRIGGER, roam_trigger, sizeof(roam_trigger), 0); - if (!ret) - trigger[0] = roam_trigger[0]; - else - ANDROID_ERROR(("2G WLC_GET_ROAM_TRIGGER ERROR %d ret=%d\n", roam_trigger[0], ret)); - - roam_trigger[1] = WLC_BAND_5G; - ret = wldev_ioctl(dev, WLC_GET_ROAM_TRIGGER, roam_trigger, sizeof(roam_trigger), 0); - if (!ret) - trigger[1] = roam_trigger[0]; - else - ANDROID_ERROR(("5G WLC_GET_ROAM_TRIGGER ERROR %d ret=%d\n", roam_trigger[0], ret)); - - ANDROID_TRACE(("roam_trigger %d %d\n", trigger[0], trigger[1])); - bytes_written = snprintf(command, total_len, "%d %d", trigger[0], trigger[1]); + int err = BCME_OK; + int iface_count = wl_cfg80211_iface_count(); - return bytes_written; + if (iface_count > 1) { + ANDROID_ERROR(("%s: murx_bfe_cap change is not allowed when " + "there are multiple interfaces\n", __FUNCTION__)); + return -EINVAL; + } + /* Now there is only single interface */ + err = wldev_iovar_setint(dev, "murx_bfe_cap", val); + if (err) { + ANDROID_ERROR(("%s: Failed to set murx_bfe_cap IOVAR to %d," + "error %d\n", __FUNCTION__, val, err)); + err 
= -EINVAL; + } + return err; } +#endif -s32 -wl_android_get_keep_alive(struct net_device *dev, char *command, int total_len) { - - wl_mkeep_alive_pkt_t *mkeep_alive_pktp; - int bytes_written = -1; - int res = -1, len, i = 0; - char* str = "mkeep_alive"; - - ANDROID_TRACE(("%s: command = %s\n", __FUNCTION__, command)); +int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) +{ +#define PRIVATE_COMMAND_MAX_LEN 8192 + int ret = 0; + char *command = NULL; + int bytes_written = 0; + android_wifi_priv_cmd priv_cmd; - len = WLC_IOCTL_MEDLEN; - mkeep_alive_pktp = kmalloc(len, GFP_KERNEL); - memset(mkeep_alive_pktp, 0, len); - strcpy((char*)mkeep_alive_pktp, str); + net_os_wake_lock(net); - if ((res = wldev_ioctl(dev, WLC_GET_VAR, mkeep_alive_pktp, len, FALSE))<0) { - ANDROID_ERROR(("%s: GET mkeep_alive ERROR %d\n", __FUNCTION__, res)); - goto exit; - } else { - printf("Id :%d\n" - "Period (msec) :%d\n" - "Length :%d\n" - "Packet :0x", - mkeep_alive_pktp->keep_alive_id, - dtoh32(mkeep_alive_pktp->period_msec), - dtoh16(mkeep_alive_pktp->len_bytes)); - for (i=0; ilen_bytes; i++) { - printf("%02x", mkeep_alive_pktp->data[i]); - } - printf("\n"); - } - bytes_written = snprintf(command, total_len, "mkeep_alive_period_msec %d ", dtoh32(mkeep_alive_pktp->period_msec)); - bytes_written += snprintf(command+bytes_written, total_len, "0x"); - for (i=0; ilen_bytes; i++) { - bytes_written += snprintf(command+bytes_written, total_len, "%x", mkeep_alive_pktp->data[i]); - } - ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command)); - -exit: - kfree(mkeep_alive_pktp); - return bytes_written; -} - -int -wl_android_set_pm(struct net_device *dev,char *command, int total_len) -{ - int pm, ret = -1; - - ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command)); - - sscanf(command, "%*s %d", &pm); - - ret = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), FALSE); - if (ret) - ANDROID_ERROR(("WLC_SET_PM ERROR %d ret=%d\n", pm, ret)); - - return ret; -} - -int 
-wl_android_get_pm(struct net_device *dev,char *command, int total_len) -{ - - int ret = 0; - int pm_local; - char *pm; - int bytes_written=-1; - - ret = wldev_ioctl(dev, WLC_GET_PM, &pm_local, sizeof(pm_local),FALSE); - if (!ret) { - ANDROID_TRACE(("%s: PM = %d\n", __func__, pm_local)); - if (pm_local == PM_OFF) - pm = "PM_OFF"; - else if(pm_local == PM_MAX) - pm = "PM_MAX"; - else if(pm_local == PM_FAST) - pm = "PM_FAST"; - else { - pm_local = 0; - pm = "Invalid"; - } - bytes_written = snprintf(command, total_len, "PM %s", pm); - ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command)); - } - return bytes_written; -} - -static int -wl_android_set_monitor(struct net_device *dev, char *command, int total_len) -{ - int val; - int ret = 0; - int bytes_written; - - sscanf(command, "%*s %d", &val); - bytes_written = wldev_ioctl(dev, WLC_SET_MONITOR, &val, sizeof(int), 1); - if (bytes_written) - ANDROID_ERROR(("WLC_SET_MONITOR ERROR %d ret=%d\n", val, ret)); - return bytes_written; -} - -int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) -{ -#define PRIVATE_COMMAND_MAX_LEN 8192 - int ret = 0; - char *command = NULL; - int bytes_written = 0; - android_wifi_priv_cmd priv_cmd; - - net_os_wake_lock(net); - - if (!ifr->ifr_data) { - ret = -EINVAL; + if (!ifr->ifr_data) { + ret = -EINVAL; goto exit; } #ifdef CONFIG_COMPAT - if (is_compat_task()) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)) + if (in_compat_syscall()) +#else + if (is_compat_task()) +#endif + { compat_android_wifi_priv_cmd compat_priv_cmd; if (copy_from_user(&compat_priv_cmd, ifr->ifr_data, sizeof(compat_android_wifi_priv_cmd))) { @@ -2521,14 +2925,16 @@ int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) } if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) { - bytes_written = wl_android_wifi_off(net); + bytes_written = wl_android_wifi_off(net, FALSE); } +#ifdef WL_CFG80211 else if (strnicmp(command, CMD_SCAN_ACTIVE, 
strlen(CMD_SCAN_ACTIVE)) == 0) { - /* TBD: SCAN-ACTIVE */ + wl_cfg80211_set_passive_scan(net, command); } else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) { - /* TBD: SCAN-PASSIVE */ + wl_cfg80211_set_passive_scan(net, command); } +#endif else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) { bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len); } @@ -2550,15 +2956,6 @@ int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0'; bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num); } -#if defined(CUSTOM_PLATFORM_NV_TEGRA) - else if (strnicmp(command, CMD_PKT_FILTER_MODE, strlen(CMD_PKT_FILTER_MODE)) == 0) { - dhd_set_packet_filter_mode(net, &command[strlen(CMD_PKT_FILTER_MODE) + 1]); - } else if (strnicmp(command, CMD_PKT_FILTER_PORTS, strlen(CMD_PKT_FILTER_PORTS)) == 0) { - bytes_written = dhd_set_packet_filter_ports(net, - &command[strlen(CMD_PKT_FILTER_PORTS) + 1]); - ret = bytes_written; - } -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ #endif /* PKT_FILTER_SUPPORT */ else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) { /* TBD: BTCOEXSCAN-START */ @@ -2589,29 +2986,11 @@ int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) } else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) { uint band = *(command + strlen(CMD_SETBAND) + 1) - '0'; - if (dhd_conf_get_band(dhd_get_pub(net)) != WLC_BAND_AUTO) { printf("%s: Band is fixed in config.txt\n", __FUNCTION__); goto exit; } - -#ifdef WL_HOST_BAND_MGMT - s32 ret = 0; - if ((ret = wl_cfg80211_set_band(net, band)) < 0) { - if (ret == BCME_UNSUPPORTED) { - /* If roam_var is unsupported, fallback to the original method */ - ANDROID_ERROR(("WL_HOST_BAND_MGMT defined, " - "but roam_band iovar unsupported in the firmware\n")); - } else { - bytes_written = -1; - goto exit; - } - } - if ((band == 
WLC_BAND_AUTO) || (ret == BCME_UNSUPPORTED)) - bytes_written = wldev_set_band(net, band); -#else bytes_written = wldev_set_band(net, band); -#endif /* WL_HOST_BAND_MGMT */ } else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) { bytes_written = wl_android_get_band(net, command, priv_cmd.total_len); @@ -2619,16 +2998,50 @@ int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) #ifdef WL_CFG80211 /* CUSTOMER_SET_COUNTRY feature is define for only GGSM model */ else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) { + /* + * Usage examples: + * DRIVER COUNTRY US + * DRIVER COUNTRY US/7 + */ char *country_code = command + strlen(CMD_COUNTRY) + 1; -#ifdef CUSTOMER_HW5 - /* Customer_hw5 want to keep connections */ - bytes_written = wldev_set_country(net, country_code, true, false); -#else - bytes_written = wldev_set_country(net, country_code, true, true); -#endif + char *rev_info_delim = country_code + 2; /* 2 bytes of country code */ + int revinfo = -1; + if ((rev_info_delim) && + (strnicmp(rev_info_delim, CMD_COUNTRY_DELIMITER, + strlen(CMD_COUNTRY_DELIMITER)) == 0) && + (rev_info_delim + 1)) { + revinfo = bcm_atoi(rev_info_delim + 1); + } + bytes_written = wldev_set_country(net, country_code, true, true, revinfo); +#ifdef FCC_PWR_LIMIT_2G + if (wldev_iovar_setint(net, "fccpwrlimit2g", FALSE)) { + ANDROID_ERROR(("%s: fccpwrlimit2g deactivation is failed\n", __FUNCTION__)); + } else { + ANDROID_ERROR(("%s: fccpwrlimit2g is deactivated\n", __FUNCTION__)); + } +#endif /* FCC_PWR_LIMIT_2G */ + } + else if (strnicmp(command, CMD_SET_CSA, strlen(CMD_SET_CSA)) == 0) { + bytes_written = wl_android_set_csa(net, command, priv_cmd.total_len); + } else if (strnicmp(command, CMD_80211_MODE, strlen(CMD_80211_MODE)) == 0) { + bytes_written = wl_android_get_80211_mode(net, command, priv_cmd.total_len); + } else if (strnicmp(command, CMD_CHANSPEC, strlen(CMD_CHANSPEC)) == 0) { + bytes_written = wl_android_get_chanspec(net, command, 
priv_cmd.total_len); } #endif /* WL_CFG80211 */ + else if (strnicmp(command, CMD_DATARATE, strlen(CMD_DATARATE)) == 0) { + bytes_written = wl_android_get_datarate(net, command, priv_cmd.total_len); + } else if (strnicmp(command, CMD_ASSOC_CLIENTS, strlen(CMD_ASSOC_CLIENTS)) == 0) { + bytes_written = wl_android_get_assoclist(net, command, priv_cmd.total_len); + } +#ifdef CUSTOMER_HW4_PRIVATE_CMD +#ifdef WLTDLS + else if (strnicmp(command, CMD_TDLS_RESET, strlen(CMD_TDLS_RESET)) == 0) { + bytes_written = wl_android_tdls_reset(net); + } +#endif /* WLTDLS */ +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ #ifdef PNO_SUPPORT else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) { @@ -2655,22 +3068,13 @@ int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip, priv_cmd.total_len - skip); } -#ifdef WL_SDO - else if (strnicmp(command, CMD_P2P_SD_OFFLOAD, strlen(CMD_P2P_SD_OFFLOAD)) == 0) { - u8 *buf = command; - u8 *cmd_id = NULL; - int len; - - cmd_id = strsep((char **)&buf, " "); - /* if buf == NULL, means no arg */ - if (buf == NULL) - len = 0; - else - len = strlen(buf); - - bytes_written = wl_cfg80211_sd_offload(net, cmd_id, buf, len); +#ifdef P2P_LISTEN_OFFLOADING + else if (strnicmp(command, CMD_P2P_LISTEN_OFFLOAD, strlen(CMD_P2P_LISTEN_OFFLOAD)) == 0) { + u8 *sub_command = strchr(command, ' '); + bytes_written = wl_cfg80211_p2plo_offload(net, command, sub_command, + sub_command ? 
strlen(sub_command) : 0); } -#endif /* WL_SDO */ +#endif /* P2P_LISTEN_OFFLOADING */ #ifdef WL_NAN else if (strnicmp(command, CMD_NAN, strlen(CMD_NAN)) == 0) { bytes_written = wl_cfg80211_nan_cmd_handler(net, command, @@ -2687,6 +3091,11 @@ int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip, priv_cmd.total_len - skip); } + else if (strnicmp(command, CMD_P2P_ECSA, strlen(CMD_P2P_ECSA)) == 0) { + int skip = strlen(CMD_P2P_ECSA) + 1; + bytes_written = wl_cfg80211_set_p2p_ecsa(net, command + skip, + priv_cmd.total_len - skip); + } #ifdef WL_CFG80211 else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE, strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) { @@ -2694,28 +3103,7 @@ int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip, priv_cmd.total_len - skip, *(command + skip - 2) - '0'); } -#ifdef WLFBT - else if (strnicmp(command, CMD_GET_FTKEY, strlen(CMD_GET_FTKEY)) == 0) { - wl_cfg80211_get_fbt_key(command); - bytes_written = FBT_KEYLEN; - } -#endif /* WLFBT */ #endif /* WL_CFG80211 */ - else if (strnicmp(command, CMD_OKC_SET_PMK, strlen(CMD_OKC_SET_PMK)) == 0) - bytes_written = wl_android_set_pmk(net, command, priv_cmd.total_len); - else if (strnicmp(command, CMD_OKC_ENABLE, strlen(CMD_OKC_ENABLE)) == 0) - bytes_written = wl_android_okc_enable(net, command, priv_cmd.total_len); -#ifdef BCMCCX - else if (strnicmp(command, CMD_GETCCKM_RN, strlen(CMD_GETCCKM_RN)) == 0) { - bytes_written = wl_android_get_cckm_rn(net, command); - } - else if (strnicmp(command, CMD_SETCCKM_KRK, strlen(CMD_SETCCKM_KRK)) == 0) { - bytes_written = wl_android_set_cckm_krk(net, command); - } - else if (strnicmp(command, CMD_GET_ASSOC_RES_IES, strlen(CMD_GET_ASSOC_RES_IES)) == 0) { - bytes_written = wl_android_get_assoc_res_ies(net, command); - } -#endif /* BCMCCX */ #if defined(WL_SUPPORT_AUTO_CHANNEL) else if (strnicmp(command, 
CMD_GET_BEST_CHANNELS, strlen(CMD_GET_BEST_CHANNELS)) == 0) { @@ -2723,6 +3111,88 @@ int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) priv_cmd.total_len); } #endif /* WL_SUPPORT_AUTO_CHANNEL */ +#if defined(WL_SUPPORT_AUTO_CHANNEL) + else if (strnicmp(command, CMD_SET_HAPD_AUTO_CHANNEL, + strlen(CMD_SET_HAPD_AUTO_CHANNEL)) == 0) { + int skip = strlen(CMD_SET_HAPD_AUTO_CHANNEL) + 1; + bytes_written = wl_android_set_auto_channel(net, (const char*)command+skip, command, + priv_cmd.total_len); + } +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +#ifdef CUSTOMER_HW4_PRIVATE_CMD +#ifdef SUPPORT_SET_LPC + else if (strnicmp(command, CMD_HAPD_LPC_ENABLED, + strlen(CMD_HAPD_LPC_ENABLED)) == 0) { + int skip = strlen(CMD_HAPD_LPC_ENABLED) + 3; + wl_android_set_lpc(net, (const char*)command+skip); + } +#endif /* SUPPORT_SET_LPC */ +#ifdef SUPPORT_TRIGGER_HANG_EVENT + else if (strnicmp(command, CMD_TEST_FORCE_HANG, + strlen(CMD_TEST_FORCE_HANG)) == 0) { + int skip = strlen(CMD_TEST_FORCE_HANG) + 1; + net_os_send_hang_message_reason(net, (const char*)command+skip); + } +#endif /* SUPPORT_TRIGGER_HANG_EVENT */ + else if (strnicmp(command, CMD_CHANGE_RL, strlen(CMD_CHANGE_RL)) == 0) + bytes_written = wl_android_ch_res_rl(net, true); + else if (strnicmp(command, CMD_RESTORE_RL, strlen(CMD_RESTORE_RL)) == 0) + bytes_written = wl_android_ch_res_rl(net, false); + else if (strnicmp(command, CMD_SET_RMC_ENABLE, strlen(CMD_SET_RMC_ENABLE)) == 0) { + int rmc_enable = *(command + strlen(CMD_SET_RMC_ENABLE) + 1) - '0'; + bytes_written = wl_android_rmc_enable(net, rmc_enable); + } + else if (strnicmp(command, CMD_SET_RMC_TXRATE, strlen(CMD_SET_RMC_TXRATE)) == 0) { + int rmc_txrate; + sscanf(command, "%*s %10d", &rmc_txrate); + bytes_written = wldev_iovar_setint(net, "rmc_txrate", rmc_txrate * 2); + } + else if (strnicmp(command, CMD_SET_RMC_ACTPERIOD, strlen(CMD_SET_RMC_ACTPERIOD)) == 0) { + int actperiod; + sscanf(command, "%*s %10d", &actperiod); + bytes_written = 
wldev_iovar_setint(net, "rmc_actf_time", actperiod); + } + else if (strnicmp(command, CMD_SET_RMC_IDLEPERIOD, strlen(CMD_SET_RMC_IDLEPERIOD)) == 0) { + int acktimeout; + sscanf(command, "%*s %10d", &acktimeout); + acktimeout *= 1000; + bytes_written = wldev_iovar_setint(net, "rmc_acktmo", acktimeout); + } + else if (strnicmp(command, CMD_SET_RMC_LEADER, strlen(CMD_SET_RMC_LEADER)) == 0) { + int skip = strlen(CMD_SET_RMC_LEADER) + 1; + bytes_written = wl_android_rmc_set_leader(net, (const char*)command+skip); + } + else if (strnicmp(command, CMD_SET_RMC_EVENT, + strlen(CMD_SET_RMC_EVENT)) == 0) + bytes_written = wl_android_set_rmc_event(net, command, priv_cmd.total_len); + else if (strnicmp(command, CMD_GET_SCSCAN, strlen(CMD_GET_SCSCAN)) == 0) { + bytes_written = wl_android_get_singlecore_scan(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_SET_SCSCAN, strlen(CMD_SET_SCSCAN)) == 0) { + bytes_written = wl_android_set_singlecore_scan(net, command, priv_cmd.total_len); + } +#ifdef TEST_TX_POWER_CONTROL + else if (strnicmp(command, CMD_TEST_SET_TX_POWER, + strlen(CMD_TEST_SET_TX_POWER)) == 0) { + int skip = strlen(CMD_TEST_SET_TX_POWER) + 1; + wl_android_set_tx_power(net, (const char*)command+skip); + } + else if (strnicmp(command, CMD_TEST_GET_TX_POWER, + strlen(CMD_TEST_GET_TX_POWER)) == 0) { + wl_android_get_tx_power(net, command, priv_cmd.total_len); + } +#endif /* TEST_TX_POWER_CONTROL */ + else if (strnicmp(command, CMD_SARLIMIT_TX_CONTROL, + strlen(CMD_SARLIMIT_TX_CONTROL)) == 0) { + int skip = strlen(CMD_SARLIMIT_TX_CONTROL) + 1; + wl_android_set_sarlimit_txctrl(net, (const char*)command+skip); + } +#ifdef IPV6_NDO_SUPPORT + else if (strnicmp(command, CMD_NDRA_LIMIT, strlen(CMD_NDRA_LIMIT)) == 0) { + bytes_written = wl_android_nd_ra_limit(net, command, priv_cmd.total_len); + } +#endif /* IPV6_NDO_SUPPORT */ +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ else if (strnicmp(command, CMD_HAPD_MAC_FILTER, strlen(CMD_HAPD_MAC_FILTER)) == 0) { int skip 
= strlen(CMD_HAPD_MAC_FILTER) + 1; wl_android_set_mac_address_filter(net, (const char*)command+skip); @@ -2737,39 +3207,16 @@ int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) #ifdef WL_CFG80211 else if (strnicmp(command, CMD_MIRACAST, strlen(CMD_MIRACAST)) == 0) bytes_written = wl_android_set_miracast(net, command, priv_cmd.total_len); -#if defined(CUSTOM_PLATFORM_NV_TEGRA) - else if (strnicmp(command, CMD_SETMIRACAST, strlen(CMD_SETMIRACAST)) == 0) - bytes_written = wldev_miracast_tuning(net, command, priv_cmd.total_len); - else if (strnicmp(command, CMD_ASSOCRESPIE, strlen(CMD_ASSOCRESPIE)) == 0) - bytes_written = wldev_get_assoc_resp_ie(net, command, priv_cmd.total_len); - else if (strnicmp(command, CMD_RXRATESTATS, strlen(CMD_RXRATESTATS)) == 0) - bytes_written = wldev_get_rx_rate_stats(net, command, priv_cmd.total_len); -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ +#ifdef WL11ULB + else if (strnicmp(command, CMD_ULB_MODE, strlen(CMD_ULB_MODE)) == 0) + bytes_written = wl_android_set_ulb_mode(net, command, priv_cmd.total_len); + else if (strnicmp(command, CMD_ULB_BW, strlen(CMD_ULB_BW)) == 0) + bytes_written = wl_android_set_ulb_bw(net, command, priv_cmd.total_len); +#endif /* WL11ULB */ else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA, strlen(CMD_SETIBSSBEACONOUIDATA)) == 0) bytes_written = wl_android_set_ibss_beacon_ouidata(net, command, priv_cmd.total_len); #endif -#ifdef WLAIBSS - else if (strnicmp(command, CMD_SETIBSSTXFAILEVENT, - strlen(CMD_SETIBSSTXFAILEVENT)) == 0) - bytes_written = wl_android_set_ibss_txfail_event(net, command, priv_cmd.total_len); - else if (strnicmp(command, CMD_GET_IBSS_PEER_INFO_ALL, - strlen(CMD_GET_IBSS_PEER_INFO_ALL)) == 0) - bytes_written = wl_android_get_ibss_peer_info(net, command, priv_cmd.total_len, - TRUE); - else if (strnicmp(command, CMD_GET_IBSS_PEER_INFO, - strlen(CMD_GET_IBSS_PEER_INFO)) == 0) - bytes_written = wl_android_get_ibss_peer_info(net, command, priv_cmd.total_len, - FALSE); - 
else if (strnicmp(command, CMD_SETIBSSROUTETABLE, - strlen(CMD_SETIBSSROUTETABLE)) == 0) - bytes_written = wl_android_set_ibss_routetable(net, command, - priv_cmd.total_len); - else if (strnicmp(command, CMD_SETIBSSAMPDU, strlen(CMD_SETIBSSAMPDU)) == 0) - bytes_written = wl_android_set_ibss_ampdu(net, command, priv_cmd.total_len); - else if (strnicmp(command, CMD_SETIBSSANTENNAMODE, strlen(CMD_SETIBSSANTENNAMODE)) == 0) - bytes_written = wl_android_set_ibss_antenna(net, command, priv_cmd.total_len); -#endif /* WLAIBSS */ else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) { int skip = strlen(CMD_KEEP_ALIVE) + 1; bytes_written = wl_keep_alive_set(net, command + skip, priv_cmd.total_len - skip); @@ -2784,6 +3231,18 @@ int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) command + strlen(CMD_ROAM_OFFLOAD_APLIST) + 1); } #endif +#if defined(WL_VIRTUAL_APSTA) + else if (strnicmp(command, CMD_INTERFACE_CREATE, strlen(CMD_INTERFACE_CREATE)) == 0) { + char *name = (command + strlen(CMD_INTERFACE_CREATE) +1); + ANDROID_INFO(("Creating %s interface\n", name)); + bytes_written = wl_cfg80211_interface_create(net, name); + } + else if (strnicmp(command, CMD_INTERFACE_DELETE, strlen(CMD_INTERFACE_DELETE)) == 0) { + char *name = (command + strlen(CMD_INTERFACE_DELETE) +1); + ANDROID_INFO(("Deleteing %s interface\n", name)); + bytes_written = wl_cfg80211_interface_delete(net, name); + } +#endif /* defined (WL_VIRTUAL_APSTA) */ #ifdef P2PRESP_WFDIE_SRC else if (strnicmp(command, CMD_P2P_SET_WFDIE_RESP, strlen(CMD_P2P_SET_WFDIE_RESP)) == 0) { @@ -2794,38 +3253,109 @@ int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) bytes_written = wl_android_get_wfdie_resp(net, command, priv_cmd.total_len); } #endif /* P2PRESP_WFDIE_SRC */ - else if (strnicmp(command, CMD_GET_LINK_STATUS, strlen(CMD_GET_LINK_STATUS)) == 0) { - bytes_written = wl_android_get_link_status(net, command, priv_cmd.total_len); +#ifdef WL_CFG80211 + else 
if (strnicmp(command, CMD_DFS_AP_MOVE, strlen(CMD_DFS_AP_MOVE)) == 0) { + char *data = (command + strlen(CMD_DFS_AP_MOVE) +1); + bytes_written = wl_cfg80211_dfs_ap_move(net, data, command, priv_cmd.total_len); } -#ifdef CONNECTION_STATISTICS - else if (strnicmp(command, CMD_GET_CONNECTION_STATS, - strlen(CMD_GET_CONNECTION_STATS)) == 0) { - bytes_written = wl_android_get_connection_stats(net, command, - priv_cmd.total_len); +#endif + else if (strnicmp(command, CMD_WBTEXT_ENABLE, strlen(CMD_WBTEXT_ENABLE)) == 0) { + bytes_written = wl_android_wbtext(net, command, priv_cmd.total_len); + } +#ifdef WL_CFG80211 + else if (strnicmp(command, CMD_WBTEXT_PROFILE_CONFIG, + strlen(CMD_WBTEXT_PROFILE_CONFIG)) == 0) { + char *data = (command + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1); + bytes_written = wl_cfg80211_wbtext_config(net, data, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_WBTEXT_WEIGHT_CONFIG, + strlen(CMD_WBTEXT_WEIGHT_CONFIG)) == 0) { + char *data = (command + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1); + bytes_written = wl_cfg80211_wbtext_weight_config(net, data, + command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_WBTEXT_TABLE_CONFIG, + strlen(CMD_WBTEXT_TABLE_CONFIG)) == 0) { + char *data = (command + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1); + bytes_written = wl_cfg80211_wbtext_table_config(net, data, + command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_WBTEXT_DELTA_CONFIG, + strlen(CMD_WBTEXT_DELTA_CONFIG)) == 0) { + char *data = (command + strlen(CMD_WBTEXT_DELTA_CONFIG) + 1); + bytes_written = wl_cfg80211_wbtext_delta_config(net, data, + command, priv_cmd.total_len); } #endif - else if(strnicmp(command, CMD_GET_CHANNEL, strlen(CMD_GET_CHANNEL)) == 0) { - bytes_written = wl_android_get_channel(net, command, priv_cmd.total_len); +#ifdef SET_RPS_CPUS + else if (strnicmp(command, CMD_RPSMODE, strlen(CMD_RPSMODE)) == 0) { + bytes_written = wl_android_set_rps_cpus(net, command, priv_cmd.total_len); } - else if 
(strnicmp(command, CMD_SET_ROAM, strlen(CMD_SET_ROAM)) == 0) { - bytes_written = wl_android_set_roam_trigger(net, command, priv_cmd.total_len); +#endif /* SET_RPS_CPUS */ +#ifdef WLWFDS + else if (strnicmp(command, CMD_ADD_WFDS_HASH, strlen(CMD_ADD_WFDS_HASH)) == 0) { + bytes_written = wl_android_set_wfds_hash(net, command, priv_cmd.total_len, 1); } - else if (strnicmp(command, CMD_GET_ROAM, strlen(CMD_GET_ROAM)) == 0) { - bytes_written = wl_android_get_roam_trigger(net, command, priv_cmd.total_len); + else if (strnicmp(command, CMD_DEL_WFDS_HASH, strlen(CMD_DEL_WFDS_HASH)) == 0) { + bytes_written = wl_android_set_wfds_hash(net, command, priv_cmd.total_len, 0); } - else if (strnicmp(command, CMD_GET_KEEP_ALIVE, strlen(CMD_GET_KEEP_ALIVE)) == 0) { - int skip = strlen(CMD_GET_KEEP_ALIVE) + 1; - bytes_written = wl_android_get_keep_alive(net, command+skip, priv_cmd.total_len-skip); +#endif /* WLWFDS */ +#ifdef BT_WIFI_HANDOVER + else if (strnicmp(command, CMD_TBOW_TEARDOWN, strlen(CMD_TBOW_TEARDOWN)) == 0) { + ret = wl_tbow_teardown(net, command, priv_cmd.total_len); } - else if (strnicmp(command, CMD_GET_PM, strlen(CMD_GET_PM)) == 0) { - bytes_written = wl_android_get_pm(net, command, priv_cmd.total_len); +#endif /* BT_WIFI_HANDOVER */ +#ifdef FCC_PWR_LIMIT_2G + else if (strnicmp(command, CMD_GET_FCC_PWR_LIMIT_2G, + strlen(CMD_GET_FCC_PWR_LIMIT_2G)) == 0) { + bytes_written = wl_android_get_fcc_pwr_limit_2g(net, command, priv_cmd.total_len); } - else if (strnicmp(command, CMD_SET_PM, strlen(CMD_SET_PM)) == 0) { - bytes_written = wl_android_set_pm(net, command, priv_cmd.total_len); + else if (strnicmp(command, CMD_SET_FCC_PWR_LIMIT_2G, + strlen(CMD_SET_FCC_PWR_LIMIT_2G)) == 0) { + bytes_written = wl_android_set_fcc_pwr_limit_2g(net, command, priv_cmd.total_len); } - else if (strnicmp(command, CMD_MONITOR, strlen(CMD_MONITOR)) == 0) { - bytes_written = wl_android_set_monitor(net, command, priv_cmd.total_len); - } else { +#endif /* FCC_PWR_LIMIT_2G */ +#ifdef WL_CFG80211 
+ else if (strnicmp(command, CMD_MURX_BFE_CAP, + strlen(CMD_MURX_BFE_CAP)) == 0) { + uint val = *(command + strlen(CMD_MURX_BFE_CAP) + 1) - '0'; + bytes_written = wl_android_murx_bfe_cap(net, val); + } +#endif +#if defined(DHD_ENABLE_BIGDATA_LOGGING) + else if (strnicmp(command, CMD_GET_BSS_INFO, strlen(CMD_GET_BSS_INFO)) == 0) { + bytes_written = wl_cfg80211_get_bss_info(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_GET_ASSOC_REJECT_INFO, strlen(CMD_GET_ASSOC_REJECT_INFO)) + == 0) { + bytes_written = wl_cfg80211_get_connect_failed_status(net, command, + priv_cmd.total_len); + } +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ +#if defined(SUPPORT_RANDOM_MAC_SCAN) + else if (strnicmp(command, ENABLE_RANDOM_MAC, strlen(ENABLE_RANDOM_MAC)) == 0) { + bytes_written = wl_cfg80211_set_random_mac(net, TRUE); + } else if (strnicmp(command, DISABLE_RANDOM_MAC, strlen(DISABLE_RANDOM_MAC)) == 0) { + bytes_written = wl_cfg80211_set_random_mac(net, FALSE); + } +#endif /* SUPPORT_RANDOM_MAC_SCAN */ +#ifdef DHD_LOG_DUMP + else if (strnicmp(command, CMD_NEW_DEBUG_PRINT_DUMP, + strlen(CMD_NEW_DEBUG_PRINT_DUMP)) == 0) { + dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(); +#ifdef DHD_TRACE_WAKE_LOCK + dhd_wk_lock_stats_dump(dhdp); +#endif /* DHD_TRACE_WAKE_LOCK */ + dhd_schedule_log_dump(dhdp); +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP; + dhd_bus_mem_dump(dhdp); +#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */ + } +#endif /* DHD_LOG_DUMP */ + else if (wl_android_ext_priv_cmd(net, command, priv_cmd.total_len, &bytes_written) == 0) { + } + else { ANDROID_ERROR(("Unknown PRIVATE command %s - ignored\n", command)); snprintf(command, 3, "OK"); bytes_written = strlen("OK"); @@ -2846,6 +3376,13 @@ int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) ret = -EFAULT; } } +#ifdef CONNECTION_STATISTICS + else if (strnicmp(command, CMD_GET_CONNECTION_STATS, + 
strlen(CMD_GET_CONNECTION_STATS)) == 0) { + bytes_written = wl_android_get_connection_stats(net, command, + priv_cmd.total_len); + } +#endif else { ret = bytes_written; } @@ -2871,9 +3408,6 @@ int wl_android_init(void) bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ); } -#ifdef WL_GENL - wl_genl_init(); -#endif wl_netlink_init(); return ret; @@ -2884,9 +3418,6 @@ int wl_android_exit(void) int ret = 0; struct io_cfg *cur, *q; -#ifdef WL_GENL - wl_genl_deinit(); -#endif /* WL_GENL */ wl_netlink_deinit(); list_for_each_entry_safe(cur, q, &miracast_resume_list, list) { @@ -2908,830 +3439,3 @@ void wl_android_post_init(void) if (!dhd_download_fw_on_driverload) g_wifi_on = FALSE; } - -#ifdef WL_GENL -/* Generic Netlink Initializaiton */ -static int wl_genl_init(void) -{ - int ret; - - ANDROID_TRACE(("GEN Netlink Init\n\n")); - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) - /* register new family */ - ret = genl_register_family(&wl_genl_family); - if (ret != 0) - goto failure; - - /* register functions (commands) of the new family */ - ret = genl_register_ops(&wl_genl_family, &wl_genl_ops); - if (ret != 0) { - ANDROID_ERROR(("register ops failed: %i\n", ret)); - genl_unregister_family(&wl_genl_family); - goto failure; - } - - ret = genl_register_mc_group(&wl_genl_family, &wl_genl_mcast); -#else - ret = genl_register_family_with_ops_groups(&wl_genl_family, wl_genl_ops, wl_genl_mcast); -#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */ - if (ret != 0) { - ANDROID_ERROR(("register mc_group failed: %i\n", ret)); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) - genl_unregister_ops(&wl_genl_family, &wl_genl_ops); -#endif - genl_unregister_family(&wl_genl_family); - goto failure; - } - - return 0; - -failure: - ANDROID_ERROR(("Registering Netlink failed!!\n")); - return -1; -} - -/* Generic netlink deinit */ -static int wl_genl_deinit(void) -{ - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) - if (genl_unregister_ops(&wl_genl_family, 
&wl_genl_ops) < 0) - ANDROID_ERROR(("Unregister wl_genl_ops failed\n")); -#endif - if (genl_unregister_family(&wl_genl_family) < 0) - ANDROID_ERROR(("Unregister wl_genl_ops failed\n")); - - return 0; -} - -s32 wl_event_to_bcm_event(u16 event_type) -{ - u16 event = -1; - - switch (event_type) { - case WLC_E_SERVICE_FOUND: - event = BCM_E_SVC_FOUND; - break; - case WLC_E_P2PO_ADD_DEVICE: - event = BCM_E_DEV_FOUND; - break; - case WLC_E_P2PO_DEL_DEVICE: - event = BCM_E_DEV_LOST; - break; - /* Above events are supported from BCM Supp ver 47 Onwards */ -#ifdef BT_WIFI_HANDOVER - case WLC_E_BT_WIFI_HANDOVER_REQ: - event = BCM_E_DEV_BT_WIFI_HO_REQ; - break; -#endif /* BT_WIFI_HANDOVER */ - - default: - ANDROID_ERROR(("Event not supported\n")); - } - - return event; -} - -s32 -wl_genl_send_msg( - struct net_device *ndev, - u32 event_type, - u8 *buf, - u16 len, - u8 *subhdr, - u16 subhdr_len) -{ - int ret = 0; - struct sk_buff *skb = NULL; - void *msg; - u32 attr_type = 0; - bcm_event_hdr_t *hdr = NULL; - int mcast = 1; /* By default sent as mutlicast type */ - int pid = 0; - u8 *ptr = NULL, *p = NULL; - u32 tot_len = sizeof(bcm_event_hdr_t) + subhdr_len + len; - u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; - - - ANDROID_TRACE(("Enter \n")); - - /* Decide between STRING event and Data event */ - if (event_type == 0) - attr_type = BCM_GENL_ATTR_STRING; - else - attr_type = BCM_GENL_ATTR_MSG; - - skb = genlmsg_new(NLMSG_GOODSIZE, kflags); - if (skb == NULL) { - ret = -ENOMEM; - goto out; - } - - msg = genlmsg_put(skb, 0, 0, &wl_genl_family, 0, BCM_GENL_CMD_MSG); - if (msg == NULL) { - ret = -ENOMEM; - goto out; - } - - - if (attr_type == BCM_GENL_ATTR_STRING) { - /* Add a BCM_GENL_MSG attribute. Since it is specified as a string. 
- * make sure it is null terminated - */ - if (subhdr || subhdr_len) { - ANDROID_ERROR(("No sub hdr support for the ATTR STRING type \n")); - ret = -EINVAL; - goto out; - } - - ret = nla_put_string(skb, BCM_GENL_ATTR_STRING, buf); - if (ret != 0) { - ANDROID_ERROR(("nla_put_string failed\n")); - goto out; - } - } else { - /* ATTR_MSG */ - - /* Create a single buffer for all */ - p = ptr = kzalloc(tot_len, kflags); - if (!ptr) { - ret = -ENOMEM; - ANDROID_ERROR(("ENOMEM!!\n")); - goto out; - } - - /* Include the bcm event header */ - hdr = (bcm_event_hdr_t *)ptr; - hdr->event_type = wl_event_to_bcm_event(event_type); - hdr->len = len + subhdr_len; - ptr += sizeof(bcm_event_hdr_t); - - /* Copy subhdr (if any) */ - if (subhdr && subhdr_len) { - memcpy(ptr, subhdr, subhdr_len); - ptr += subhdr_len; - } - - /* Copy the data */ - if (buf && len) { - memcpy(ptr, buf, len); - } - - ret = nla_put(skb, BCM_GENL_ATTR_MSG, tot_len, p); - if (ret != 0) { - ANDROID_ERROR(("nla_put_string failed\n")); - goto out; - } - } - - if (mcast) { - int err = 0; - /* finalize the message */ - genlmsg_end(skb, msg); - /* NETLINK_CB(skb).dst_group = 1; */ - -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) - if ((err = genlmsg_multicast(skb, 0, wl_genl_mcast.id, GFP_ATOMIC)) < 0) -#else - if ((err = genlmsg_multicast(&wl_genl_family, skb, 0, 0, GFP_ATOMIC)) < 0) -#endif - ANDROID_ERROR(("genlmsg_multicast for attr(%d) failed. Error:%d \n", - attr_type, err)); - else - ANDROID_TRACE(("Multicast msg sent successfully. 
attr_type:%d len:%d \n", - attr_type, tot_len)); - } else { - NETLINK_CB(skb).dst_group = 0; /* Not in multicast group */ - - /* finalize the message */ - genlmsg_end(skb, msg); - - /* send the message back */ - if (genlmsg_unicast(&init_net, skb, pid) < 0) - ANDROID_ERROR(("genlmsg_unicast failed\n")); - } - -out: - if (p) - kfree(p); - if (ret) - nlmsg_free(skb); - - return ret; -} - -static s32 -wl_genl_handle_msg( - struct sk_buff *skb, - struct genl_info *info) -{ - struct nlattr *na; - u8 *data = NULL; - - ANDROID_TRACE(("Enter \n")); - - if (info == NULL) { - return -EINVAL; - } - - na = info->attrs[BCM_GENL_ATTR_MSG]; - if (!na) { - ANDROID_ERROR(("nlattribute NULL\n")); - return -EINVAL; - } - - data = (char *)nla_data(na); - if (!data) { - ANDROID_ERROR(("Invalid data\n")); - return -EINVAL; - } else { - /* Handle the data */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) || defined(WL_COMPAT_WIRELESS) - ANDROID_TRACE(("%s: Data received from pid (%d) \n", __func__, - info->snd_pid)); -#else - ANDROID_TRACE(("%s: Data received from pid (%d) \n", __func__, - info->snd_portid)); -#endif /* (LINUX_VERSION < VERSION(3, 7, 0) || WL_COMPAT_WIRELESS */ - } - - return 0; -} -#endif /* WL_GENL */ - - -#if defined(RSSIAVG) -void -wl_free_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl) -{ - wl_rssi_cache_t *node, *cur, **rssi_head; - int i=0; - - rssi_head = &rssi_cache_ctrl->m_cache_head; - node = *rssi_head; - - for (;node;) { - ANDROID_INFO(("%s: Free %d with BSSID %pM\n", - __FUNCTION__, i, &node->BSSID)); - cur = node; - node = cur->next; - kfree(cur); - i++; - } - *rssi_head = NULL; -} - -void -wl_delete_dirty_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl) -{ - wl_rssi_cache_t *node, *prev, **rssi_head; - int i = -1, tmp = 0; - struct timeval now; - - do_gettimeofday(&now); - - rssi_head = &rssi_cache_ctrl->m_cache_head; - node = *rssi_head; - prev = node; - for (;node;) { - i++; - if (now.tv_sec > node->tv.tv_sec) { - if (node == *rssi_head) { - tmp 
= 1; - *rssi_head = node->next; - } else { - tmp = 0; - prev->next = node->next; - } - ANDROID_INFO(("%s: Del %d with BSSID %pM\n", - __FUNCTION__, i, &node->BSSID)); - kfree(node); - if (tmp == 1) { - node = *rssi_head; - prev = node; - } else { - node = prev->next; - } - continue; - } - prev = node; - node = node->next; - } -} - -void -wl_delete_disconnected_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, u8 *bssid) -{ - wl_rssi_cache_t *node, *prev, **rssi_head; - int i = -1, tmp = 0; - - rssi_head = &rssi_cache_ctrl->m_cache_head; - node = *rssi_head; - prev = node; - for (;node;) { - i++; - if (!memcmp(&node->BSSID, bssid, ETHER_ADDR_LEN)) { - if (node == *rssi_head) { - tmp = 1; - *rssi_head = node->next; - } else { - tmp = 0; - prev->next = node->next; - } - ANDROID_INFO(("%s: Del %d with BSSID %pM\n", - __FUNCTION__, i, &node->BSSID)); - kfree(node); - if (tmp == 1) { - node = *rssi_head; - prev = node; - } else { - node = prev->next; - } - continue; - } - prev = node; - node = node->next; - } -} - -void -wl_reset_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl) -{ - wl_rssi_cache_t *node, **rssi_head; - - rssi_head = &rssi_cache_ctrl->m_cache_head; - - /* reset dirty */ - node = *rssi_head; - for (;node;) { - node->dirty += 1; - node = node->next; - } -} - -int -wl_update_connected_rssi_cache(struct net_device *net, wl_rssi_cache_ctrl_t *rssi_cache_ctrl, int *rssi_avg) -{ - wl_rssi_cache_t *node, *prev, *leaf, **rssi_head; - int j, k=0; - int rssi, error=0; - struct ether_addr bssid; - struct timeval now, timeout; - - if (!g_wifi_on) - return 0; - - error = wldev_ioctl(net, WLC_GET_BSSID, &bssid, sizeof(bssid), false); - if (error == BCME_NOTASSOCIATED) { - ANDROID_INFO(("%s: Not Associated! 
res:%d\n", __FUNCTION__, error)); - return 0; - } - if (error) { - ANDROID_ERROR(("Could not get bssid (%d)\n", error)); - } - error = wldev_get_rssi(net, &rssi); - if (error) { - ANDROID_ERROR(("Could not get rssi (%d)\n", error)); - return error; - } - - do_gettimeofday(&now); - timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT; - if (timeout.tv_sec < now.tv_sec) { - /* - * Integer overflow - assume long enough timeout to be assumed - * to be infinite, i.e., the timeout would never happen. - */ - ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu", - __FUNCTION__, RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec)); - } - - /* update RSSI */ - rssi_head = &rssi_cache_ctrl->m_cache_head; - node = *rssi_head; - prev = NULL; - for (;node;) { - if (!memcmp(&node->BSSID, &bssid, ETHER_ADDR_LEN)) { - ANDROID_INFO(("%s: Update %d with BSSID %pM, RSSI=%d\n", - __FUNCTION__, k, &bssid, rssi)); - for (j = 0; j < RSSIAVG_LEN-1; j++) - node->RSSI[j] = node->RSSI[j+1]; - node->RSSI[j] = rssi; - node->dirty = 0; - node->tv = timeout; - goto exit; - } - prev = node; - node = node->next; - k++; - } - - leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL); - if (!leaf) { - ANDROID_ERROR(("%s: Memory alloc failure %d\n", - __FUNCTION__, (int)sizeof(wl_rssi_cache_t))); - return 0; - } - ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%3d in the leaf\n", - __FUNCTION__, k, &bssid, rssi)); - - leaf->next = NULL; - leaf->dirty = 0; - leaf->tv = timeout; - memcpy(&leaf->BSSID, &bssid, ETHER_ADDR_LEN); - for (j=0; jRSSI[j] = rssi; - - if (!prev) - *rssi_head = leaf; - else - prev->next = leaf; - -exit: - *rssi_avg = (int)wl_get_avg_rssi(rssi_cache_ctrl, &bssid); - - return error; -} - -void -wl_update_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, wl_scan_results_t *ss_list) -{ - wl_rssi_cache_t *node, *prev, *leaf, **rssi_head; - wl_bss_info_t *bi = NULL; - int i, j, k; - struct timeval now, timeout; - - if (!ss_list->count) - return; - - 
do_gettimeofday(&now); - timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT; - if (timeout.tv_sec < now.tv_sec) { - /* - * Integer overflow - assume long enough timeout to be assumed - * to be infinite, i.e., the timeout would never happen. - */ - ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu", - __FUNCTION__, RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec)); - } - - rssi_head = &rssi_cache_ctrl->m_cache_head; - - /* update RSSI */ - for (i = 0; i < ss_list->count; i++) { - node = *rssi_head; - prev = NULL; - k = 0; - bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info; - for (;node;) { - if (!memcmp(&node->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) { - ANDROID_INFO(("%s: Update %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n", - __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID)); - for (j = 0; j < RSSIAVG_LEN-1; j++) - node->RSSI[j] = node->RSSI[j+1]; - node->RSSI[j] = dtoh16(bi->RSSI); - node->dirty = 0; - node->tv = timeout; - break; - } - prev = node; - node = node->next; - k++; - } - - if (node) - continue; - - leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL); - if (!leaf) { - ANDROID_ERROR(("%s: Memory alloc failure %d\n", - __FUNCTION__, (int)sizeof(wl_rssi_cache_t))); - return; - } - ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\" in the leaf\n", - __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID)); - - leaf->next = NULL; - leaf->dirty = 0; - leaf->tv = timeout; - memcpy(&leaf->BSSID, &bi->BSSID, ETHER_ADDR_LEN); - for (j=0; jRSSI[j] = dtoh16(bi->RSSI); - - if (!prev) - *rssi_head = leaf; - else - prev->next = leaf; - } -} - -int16 -wl_get_avg_rssi(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, void *addr) -{ - wl_rssi_cache_t *node, **rssi_head; - int j, rssi_sum, rssi=RSSI_MINVAL; - - rssi_head = &rssi_cache_ctrl->m_cache_head; - - node = *rssi_head; - for (;node;) { - if (!memcmp(&node->BSSID, addr, ETHER_ADDR_LEN)) { - rssi_sum = 0; - rssi = 0; - for (j=0; 
jRSSI[RSSIAVG_LEN-j-1]; - rssi = rssi_sum / j; - break; - } - node = node->next; - } - rssi = MIN(rssi, RSSI_MAXVAL); - if (rssi == RSSI_MINVAL) { - ANDROID_ERROR(("%s: BSSID %pM does not in RSSI cache\n", - __FUNCTION__, addr)); - } - return (int16)rssi; -} -#endif - -#if defined(RSSIOFFSET) -int -wl_update_rssi_offset(struct net_device *net, int rssi) -{ -#if defined(RSSIOFFSET_NEW) - int j; -#endif - - if (!g_wifi_on) - return rssi; - -#if defined(RSSIOFFSET_NEW) - for (j=0; jm_cache_head; - node = *bss_head; - - for (;node;) { - ANDROID_TRACE(("%s: Free %d with BSSID %pM\n", - __FUNCTION__, i, &node->results.bss_info->BSSID)); - cur = node; - node = cur->next; - kfree(cur); - i++; - } - *bss_head = NULL; -} - -void -wl_delete_dirty_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl) -{ - wl_bss_cache_t *node, *prev, **bss_head; - int i = -1, tmp = 0; - struct timeval now; - - do_gettimeofday(&now); - - bss_head = &bss_cache_ctrl->m_cache_head; - node = *bss_head; - prev = node; - for (;node;) { - i++; - if (now.tv_sec > node->tv.tv_sec) { - if (node == *bss_head) { - tmp = 1; - *bss_head = node->next; - } else { - tmp = 0; - prev->next = node->next; - } - ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n", - __FUNCTION__, i, &node->results.bss_info->BSSID, - dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID)); - kfree(node); - if (tmp == 1) { - node = *bss_head; - prev = node; - } else { - node = prev->next; - } - continue; - } - prev = node; - node = node->next; - } -} - -void -wl_delete_disconnected_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, u8 *bssid) -{ - wl_bss_cache_t *node, *prev, **bss_head; - int i = -1, tmp = 0; - - bss_head = &bss_cache_ctrl->m_cache_head; - node = *bss_head; - prev = node; - for (;node;) { - i++; - if (!memcmp(&node->results.bss_info->BSSID, bssid, ETHER_ADDR_LEN)) { - if (node == *bss_head) { - tmp = 1; - *bss_head = node->next; - } else { - tmp = 0; - prev->next = node->next; - } - 
ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n", - __FUNCTION__, i, &node->results.bss_info->BSSID, - dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID)); - kfree(node); - if (tmp == 1) { - node = *bss_head; - prev = node; - } else { - node = prev->next; - } - continue; - } - prev = node; - node = node->next; - } -} - -void -wl_reset_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl) -{ - wl_bss_cache_t *node, **bss_head; - - bss_head = &bss_cache_ctrl->m_cache_head; - - /* reset dirty */ - node = *bss_head; - for (;node;) { - node->dirty += 1; - node = node->next; - } -} - -void dump_bss_cache( -#if defined(RSSIAVG) - wl_rssi_cache_ctrl_t *rssi_cache_ctrl, -#endif - wl_bss_cache_t *node) -{ - int k = 0; - int16 rssi; - - for (;node;) { -#if defined(RSSIAVG) - rssi = wl_get_avg_rssi(rssi_cache_ctrl, &node->results.bss_info->BSSID); -#else - rssi = dtoh16(node->results.bss_info->RSSI); -#endif - ANDROID_TRACE(("%s: dump %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n", - __FUNCTION__, k, &node->results.bss_info->BSSID, rssi, node->results.bss_info->SSID)); - k++; - node = node->next; - } -} - -void -wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, -#if defined(RSSIAVG) - wl_rssi_cache_ctrl_t *rssi_cache_ctrl, -#endif - wl_scan_results_t *ss_list) -{ - wl_bss_cache_t *node, *prev, *leaf, **bss_head; - wl_bss_info_t *bi = NULL; - int i, k=0; -#if defined(SORT_BSS_BY_RSSI) - int16 rssi, rssi_node; -#endif - struct timeval now, timeout; - - if (!ss_list->count) - return; - - do_gettimeofday(&now); - timeout.tv_sec = now.tv_sec + BSSCACHE_TIMEOUT; - if (timeout.tv_sec < now.tv_sec) { - /* - * Integer overflow - assume long enough timeout to be assumed - * to be infinite, i.e., the timeout would never happen. 
- */ - ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu", - __FUNCTION__, BSSCACHE_TIMEOUT, now.tv_sec, timeout.tv_sec)); - } - - bss_head = &bss_cache_ctrl->m_cache_head; - - for (i=0; i < ss_list->count; i++) { - node = *bss_head; - prev = NULL; - bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info; - - for (;node;) { - if (!memcmp(&node->results.bss_info->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) { - if (node == *bss_head) - *bss_head = node->next; - else { - prev->next = node->next; - } - break; - } - prev = node; - node = node->next; - } - - leaf = kmalloc(dtoh32(bi->length) + sizeof(wl_bss_cache_t), GFP_KERNEL); - if (!leaf) { - ANDROID_ERROR(("%s: Memory alloc failure %d\n", __FUNCTION__, - dtoh32(bi->length) + (int)sizeof(wl_bss_cache_t))); - return; - } - if (node) { - kfree(node); - node = NULL; - ANDROID_TRACE(("%s: Update %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n", - __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID)); - } else - ANDROID_TRACE(("%s: Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n", - __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID)); - - memcpy(leaf->results.bss_info, bi, dtoh32(bi->length)); - leaf->next = NULL; - leaf->dirty = 0; - leaf->tv = timeout; - leaf->results.count = 1; - leaf->results.version = ss_list->version; - k++; - - if (*bss_head == NULL) - *bss_head = leaf; - else { -#if defined(SORT_BSS_BY_RSSI) - node = *bss_head; -#if defined(RSSIAVG) - rssi = wl_get_avg_rssi(rssi_cache_ctrl, &leaf->results.bss_info->BSSID); -#else - rssi = dtoh16(leaf->results.bss_info->RSSI); -#endif - for (;node;) { -#if defined(RSSIAVG) - rssi_node = wl_get_avg_rssi(rssi_cache_ctrl, &node->results.bss_info->BSSID); -#else - rssi_node = dtoh16(node->results.bss_info->RSSI); -#endif - if (rssi > rssi_node) { - leaf->next = node; - if (node == *bss_head) - *bss_head = leaf; - else - prev->next = leaf; - break; - } - prev = node; - node = node->next; - } - if 
(node == NULL) - prev->next = leaf; -#else - leaf->next = *bss_head; - *bss_head = leaf; -#endif - } - } - dump_bss_cache( -#if defined(RSSIAVG) - rssi_cache_ctrl, -#endif - *bss_head); -} - -void -wl_release_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl) -{ - ANDROID_TRACE(("%s:\n", __FUNCTION__)); - wl_free_bss_cache(bss_cache_ctrl); -} -#endif diff --git a/drivers/net/wireless/bcmdhd/wl_android.h b/drivers/amlogic/wifi/bcmdhd/wl_android.h similarity index 51% rename from drivers/net/wireless/bcmdhd/wl_android.h rename to drivers/amlogic/wifi/bcmdhd/wl_android.h index 1f3ba019bb2b2..aa6f5552ae597 100644 --- a/drivers/net/wireless/bcmdhd/wl_android.h +++ b/drivers/amlogic/wifi/bcmdhd/wl_android.h @@ -1,9 +1,30 @@ /* * Linux cfg80211 driver - Android related functions * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: wl_android.h 487838 2014-06-27 05:51:44Z $ + * + * <> + * + * $Id: wl_android.h 608194 2015-12-24 04:34:35Z $ */ #ifndef _wl_android_ @@ -16,14 +37,11 @@ /* If any feature uses the Generic Netlink Interface, put it here to enable WL_GENL * automatically */ -#if defined(WL_SDO) || defined(BT_WIFI_HANDOVER) || defined(WL_NAN) +#if defined(BT_WIFI_HANDOVER) || defined(WL_NAN) #define WL_GENL #endif -#ifdef WL_GENL -#include -#endif /** * Android platform dependent functions, feel free to add Android specific functions here @@ -31,6 +49,33 @@ * or cfg, define them as static in wl_android.c */ +/* message levels */ +#define ANDROID_ERROR_LEVEL 0x0001 +#define ANDROID_TRACE_LEVEL 0x0002 +#define ANDROID_INFO_LEVEL 0x0004 + +#define ANDROID_ERROR(x) \ + do { \ + if (android_msg_level & ANDROID_ERROR_LEVEL) { \ + printk(KERN_ERR "ANDROID-ERROR) "); \ + printk x; \ + } \ + } while (0) +#define ANDROID_TRACE(x) \ + do { \ + if (android_msg_level & ANDROID_TRACE_LEVEL) { \ + printk(KERN_ERR "ANDROID-TRACE) "); \ + printk x; \ + } \ + } while (0) +#define ANDROID_INFO(x) \ + do { \ + if (android_msg_level & ANDROID_INFO_LEVEL) { \ + printk(KERN_ERR "ANDROID-INFO) "); \ + printk x; \ + } \ + } while (0) + /** * wl_android_init will be called from module init function (dhd_module_init now), similarly * wl_android_exit will be called from module exit function (dhd_module_cleanup now) @@ -39,50 +84,89 @@ int wl_android_init(void); int wl_android_exit(void); void wl_android_post_init(void); int wl_android_wifi_on(struct net_device *dev); -int wl_android_wifi_off(struct net_device *dev); +int wl_android_wifi_off(struct net_device *dev, bool on_failure); int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd); +#ifdef WL_EXT_IAPSTA +int wl_android_ext_attach_netdev(struct net_device *net, uint8 bssidx); +int wl_android_ext_dettach_netdev(void); +void wl_android_ext_iapsta_disconnect_sta(struct net_device *dev, u32 channel); +#endif +int 
wl_android_ext_priv_cmd(struct net_device *net, char *command, int total_len, + int *bytes_written); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) +#define strnicmp(str1, str2, len) strncasecmp((str1), (str2), (len)) +#endif -#ifdef WL_GENL -typedef struct bcm_event_hdr { - u16 event_type; - u16 len; -} bcm_event_hdr_t; +typedef enum IF_STATE { + IF_STATE_INIT = 1, + IF_STATE_DISALBE, + IF_STATE_ENABLE +} if_state_t; + +typedef enum APSTAMODE { + ISTAONLY_MODE = 1, + IAPONLY_MODE, + IAPSTA_MODE, + IDUALAP_MODE, + IGOSTA_MODE, + IGCSTA_MODE +} apstamode_t; + +typedef enum IFMODE { + ISTA_MODE = 1, + IAP_MODE +} ifmode_t; + +typedef enum BGNMODE { + IEEE80211B = 1, + IEEE80211G, + IEEE80211BG, + IEEE80211BGN, + IEEE80211BGNAC +} bgnmode_t; + +typedef enum AUTHMODE { + AUTH_OPEN, + AUTH_SHARED, + AUTH_WPAPSK, + AUTH_WPA2PSK, + AUTH_WPAWPA2PSK +} authmode_t; + +typedef enum ENCMODE { + ENC_NONE, + ENC_WEP, + ENC_TKIP, + ENC_AES, + ENC_TKIPAES +} encmode_t; + +/* i/f query */ +typedef struct wl_if_info { + struct net_device *dev; + if_state_t ifstate; + ifmode_t ifmode; + uint bssidx; + char ifname[IFNAMSIZ+1]; + char ssid[DOT11_MAX_SSID_LEN]; + struct ether_addr bssid; + bgnmode_t bgnmode; + int hidden; + int maxassoc; + uint16 channel; + authmode_t amode; + encmode_t emode; + char key[100]; +} wl_apsta_if_t; + +typedef struct wl_apsta_params { + struct wl_if_info pif; // primary device + struct wl_if_info vif; // virtual device + int ioctl_ver; + bool init; + apstamode_t apstamode; +} wl_apsta_params_t; -/* attributes (variables): the index in this enum is used as a reference for the type, - * userspace application has to indicate the corresponding type - * the policy is used for security considerations - */ -enum { - BCM_GENL_ATTR_UNSPEC, - BCM_GENL_ATTR_STRING, - BCM_GENL_ATTR_MSG, - __BCM_GENL_ATTR_MAX -}; -#define BCM_GENL_ATTR_MAX (__BCM_GENL_ATTR_MAX - 1) - -/* commands: enumeration of all commands (functions), - * used by userspace application to 
identify command to be ececuted - */ -enum { - BCM_GENL_CMD_UNSPEC, - BCM_GENL_CMD_MSG, - __BCM_GENL_CMD_MAX -}; -#define BCM_GENL_CMD_MAX (__BCM_GENL_CMD_MAX - 1) - -/* Enum values used by the BCM supplicant to identify the events */ -enum { - BCM_E_UNSPEC, - BCM_E_SVC_FOUND, - BCM_E_DEV_FOUND, - BCM_E_DEV_LOST, - BCM_E_DEV_BT_WIFI_HO_REQ, - BCM_E_MAX -}; - -s32 wl_genl_send_msg(struct net_device *ndev, u32 event_type, - u8 *string, u16 len, u8 *hdr, u16 hdrlen); -#endif /* WL_GENL */ s32 wl_netlink_send_msg(int pid, int type, int seq, void *data, size_t size); /* hostap mac mode */ @@ -93,12 +177,17 @@ s32 wl_netlink_send_msg(int pid, int type, int seq, void *data, size_t size); /* max number of assoc list */ #define MAX_NUM_OF_ASSOCLIST 64 +/* Bandwidth */ +#define WL_CH_BANDWIDTH_20MHZ 20 +#define WL_CH_BANDWIDTH_40MHZ 40 +#define WL_CH_BANDWIDTH_80MHZ 80 /* max number of mac filter list * restrict max number to 10 as maximum cmd string size is 255 */ #define MAX_NUM_MAC_FILT 10 int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist); +int wl_android_set_roam_offload_bssid_list(struct net_device *dev, const char *cmd); /* terence: * BSSCACHE: Cache bss list @@ -176,7 +265,7 @@ void wl_free_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl); void wl_delete_dirty_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl); void wl_delete_disconnected_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, u8 *bssid); void wl_reset_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl); -void wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, +void wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, #if defined(RSSIAVG) wl_rssi_cache_ctrl_t *rssi_cache_ctrl, #endif diff --git a/drivers/amlogic/wifi/bcmdhd/wl_android_ext.c b/drivers/amlogic/wifi/bcmdhd/wl_android_ext.c new file mode 100644 index 0000000000000..cf40743f00967 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/wl_android_ext.c @@ -0,0 +1,2178 @@ + + +#include +#include +#include + +#include 
+#include +#include +#include +#include +#include +#include +#include +#include + +#define htod32(i) i +#define htod16(i) i +#define dtoh32(i) i +#define dtoh16(i) i +#define htodchanspec(i) i +#define dtohchanspec(i) i +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) + +#define CMD_CHANNEL "CHANNEL" +#define CMD_CHANNELS "CHANNELS" +#define CMD_ROAM_TRIGGER "ROAM_TRIGGER" +#define CMD_KEEP_ALIVE "KEEP_ALIVE" +#define CMD_PM "PM" +#define CMD_MONITOR "MONITOR" +#define CMD_SET_SUSPEND_BCN_LI_DTIM "SET_SUSPEND_BCN_LI_DTIM" + +#ifdef WL_EXT_IAPSTA +#define CMD_IAPSTA_INIT "IAPSTA_INIT" +#define CMD_IAPSTA_CONFIG "IAPSTA_CONFIG" +#define CMD_IAPSTA_ENABLE "IAPSTA_ENABLE" +#define CMD_IAPSTA_DISABLE "IAPSTA_DISABLE" +#ifdef PROP_TXSTATUS +#ifdef PROP_TXSTATUS_VSDB +#include +extern int disable_proptx; +#endif /* PROP_TXSTATUS_VSDB */ +#endif +#endif +#ifdef IDHCPC +#define CMD_DHCPC_ENABLE "DHCPC_ENABLE" +#define CMD_DHCPC_DUMP "DHCPC_DUMP" +#endif +#define CMD_WL "WL" + +#define IEEE80211_BAND_2GHZ 0 +#define IEEE80211_BAND_5GHZ 1 + +int wl_ext_ioctl(struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set) +{ + int ret; + + ret = wldev_ioctl(dev, cmd, arg, len, set); + if (ret) + ANDROID_ERROR(("%s: cmd=%d ret=%d\n", __FUNCTION__, cmd, ret)); + return ret; +} + +int wl_ext_iovar_getint(struct net_device *dev, s8 *iovar, s32 *val) +{ + int ret; + + ret = wldev_iovar_getint(dev, iovar, val); + if (ret) + ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar, ret)); + + return ret; +} + +int wl_ext_iovar_setint(struct net_device *dev, s8 *iovar, s32 val) +{ + int ret; + + ret = wldev_iovar_setint(dev, iovar, val); + if (ret) + ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar, ret)); + + return ret; +} + +int wl_ext_iovar_getbuf(struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync) +{ + int ret; + + ret = wldev_iovar_getbuf(dev, iovar_name, param, paramlen, buf, 
buflen, buf_sync); + if (ret != 0) + ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar_name, ret)); + + return ret; +} + +int wl_ext_iovar_setbuf(struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync) +{ + int ret; + + ret = wldev_iovar_setbuf(dev, iovar_name, param, paramlen, buf, buflen, buf_sync); + if (ret != 0) + ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar_name, ret)); + + return ret; +} + +#ifdef WL_EXT_IAPSTA +int wl_ext_iovar_setbuf_bsscfg(struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync) +{ + int ret; + + ret = wldev_iovar_setbuf_bsscfg(dev, iovar_name, param, paramlen, + buf, buflen, bsscfg_idx, buf_sync); + if (ret < 0) + ANDROID_ERROR(("%s: iovar_name=%s ret=%d\n", __FUNCTION__, iovar_name, ret)); + + return ret; +} + +int wl_ext_iovar_getbuf_bsscfg(struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync) +{ + int ret; + + ret = wldev_iovar_getbuf_bsscfg(dev, iovar_name, param, paramlen, + buf, buflen, bsscfg_idx, buf_sync); + if (ret < 0) + ANDROID_ERROR(("%s: iovar_name=%s ret=%d\n", __FUNCTION__, iovar_name, ret)); + + return ret; +} +#endif + +/* Return a legacy chanspec given a new chanspec + * Returns INVCHANSPEC on error + */ +static chanspec_t +wl_ext_chspec_to_legacy(chanspec_t chspec) +{ + chanspec_t lchspec; + + if (wf_chspec_malformed(chspec)) { + ANDROID_ERROR(("wl_ext_chspec_to_legacy: input chanspec (0x%04X) malformed\n", + chspec)); + return INVCHANSPEC; + } + + /* get the channel number */ + lchspec = CHSPEC_CHANNEL(chspec); + + /* convert the band */ + if (CHSPEC_IS2G(chspec)) { + lchspec |= WL_LCHANSPEC_BAND_2G; + } else { + lchspec |= WL_LCHANSPEC_BAND_5G; + } + + /* convert the bw and sideband */ + if (CHSPEC_IS20(chspec)) { + lchspec |= WL_LCHANSPEC_BW_20; + lchspec |= 
WL_LCHANSPEC_CTL_SB_NONE; + } else if (CHSPEC_IS40(chspec)) { + lchspec |= WL_LCHANSPEC_BW_40; + if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) { + lchspec |= WL_LCHANSPEC_CTL_SB_LOWER; + } else { + lchspec |= WL_LCHANSPEC_CTL_SB_UPPER; + } + } else { + /* cannot express the bandwidth */ + char chanbuf[CHANSPEC_STR_LEN]; + ANDROID_ERROR(( + "wl_ext_chspec_to_legacy: unable to convert chanspec %s (0x%04X) " + "to pre-11ac format\n", + wf_chspec_ntoa(chspec, chanbuf), chspec)); + return INVCHANSPEC; + } + + return lchspec; +} + +/* given a chanspec value, do the endian and chanspec version conversion to + * a chanspec_t value + * Returns INVCHANSPEC on error + */ +static chanspec_t +wl_ext_chspec_host_to_driver(int ioctl_ver, chanspec_t chanspec) +{ + if (ioctl_ver == 1) { + chanspec = wl_ext_chspec_to_legacy(chanspec); + if (chanspec == INVCHANSPEC) { + return chanspec; + } + } + chanspec = htodchanspec(chanspec); + + return chanspec; +} + +static int +wl_ext_get_ioctl_ver(struct net_device *dev, int *ioctl_ver) +{ + int ret = 0; + s32 val = 0; + + val = 1; + ret = wl_ext_ioctl(dev, WLC_GET_VERSION, &val, sizeof(val), 0); + if (ret) { + ANDROID_ERROR(("WLC_GET_VERSION failed, err=%d\n", ret)); + return ret; + } + val = dtoh32(val); + if (val != WLC_IOCTL_VERSION && val != 1) { + ANDROID_ERROR(("Version mismatch, please upgrade. 
Got %d, expected %d or 1\n", + val, WLC_IOCTL_VERSION)); + return BCME_VERSION; + } + *ioctl_ver = val; + + return ret; +} + +static int +wl_ext_set_chanspec(struct net_device *dev, uint16 channel) +{ + s32 _chan = channel; + chanspec_t chspec = 0; + chanspec_t fw_chspec = 0; + u32 bw = WL_CHANSPEC_BW_20; + s32 err = BCME_OK; + s32 bw_cap = 0; + s8 iovar_buf[WLC_IOCTL_SMLEN]; + struct { + u32 band; + u32 bw_cap; + } param = {0, 0}; + uint band; + int ioctl_ver = 0; + + if (_chan <= CH_MAX_2G_CHANNEL) + band = IEEE80211_BAND_2GHZ; + else + band = IEEE80211_BAND_5GHZ; + wl_ext_get_ioctl_ver(dev, &ioctl_ver); + + if (band == IEEE80211_BAND_5GHZ) { + param.band = WLC_BAND_5G; + err = wldev_iovar_getbuf(dev, "bw_cap", ¶m, sizeof(param), + iovar_buf, WLC_IOCTL_SMLEN, NULL); + if (err) { + if (err != BCME_UNSUPPORTED) { + ANDROID_ERROR(("bw_cap failed, %d\n", err)); + return err; + } else { + err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap); + if (err) { + ANDROID_ERROR(("error get mimo_bw_cap (%d)\n", err)); + } + if (bw_cap != WLC_N_BW_20ALL) + bw = WL_CHANSPEC_BW_40; + } + } else { + if (WL_BW_CAP_80MHZ(iovar_buf[0])) + bw = WL_CHANSPEC_BW_80; + else if (WL_BW_CAP_40MHZ(iovar_buf[0])) + bw = WL_CHANSPEC_BW_40; + else + bw = WL_CHANSPEC_BW_20; + + } + } + else if (band == IEEE80211_BAND_2GHZ) + bw = WL_CHANSPEC_BW_20; + +set_channel: + chspec = wf_channel2chspec(_chan, bw); + if (wf_chspec_valid(chspec)) { + fw_chspec = wl_ext_chspec_host_to_driver(ioctl_ver, chspec); + if (fw_chspec != INVCHANSPEC) { + if ((err = wldev_iovar_setint(dev, "chanspec", fw_chspec)) == BCME_BADCHAN) { + if (bw == WL_CHANSPEC_BW_80) + goto change_bw; + wl_ext_ioctl(dev, WLC_SET_CHANNEL, &_chan, sizeof(_chan), 1); + printf("%s: channel %d\n", __FUNCTION__, _chan); + } else if (err) { + ANDROID_ERROR(("%s: failed to set chanspec error %d\n", __FUNCTION__, err)); + } else + printf("%s: channel %d, 0x%x\n", __FUNCTION__, channel, chspec); + } else { + ANDROID_ERROR(("%s: failed to convert 
host chanspec to fw chanspec\n", __FUNCTION__)); + err = BCME_ERROR; + } + } else { +change_bw: + if (bw == WL_CHANSPEC_BW_80) + bw = WL_CHANSPEC_BW_40; + else if (bw == WL_CHANSPEC_BW_40) + bw = WL_CHANSPEC_BW_20; + else + bw = 0; + if (bw) + goto set_channel; + ANDROID_ERROR(("%s: Invalid chanspec 0x%x\n", __FUNCTION__, chspec)); + err = BCME_ERROR; + } + + return err; +} + +int +wl_ext_channel(struct net_device *dev, char* command, int total_len) +{ + int ret; + int channel=0; + channel_info_t ci; + int bytes_written = 0; + + ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command)); + + sscanf(command, "%*s %d", &channel); + + if (channel > 0) { + ret = wl_ext_set_chanspec(dev, channel); + } else { + if (!(ret = wldev_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t), FALSE))) { + ANDROID_TRACE(("hw_channel %d\n", ci.hw_channel)); + ANDROID_TRACE(("target_channel %d\n", ci.target_channel)); + ANDROID_TRACE(("scan_channel %d\n", ci.scan_channel)); + bytes_written = snprintf(command, sizeof(channel_info_t)+2, "channel %d", ci.hw_channel); + ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command)); + ret = bytes_written; + } + } + + return ret; +} + +int +wl_ext_channels(struct net_device *dev, char* command, int total_len) +{ + int ret, i; + int bytes_written = -1; + u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)]; + wl_uint32_list_t *list; + + ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command)); + + memset(valid_chan_list, 0, sizeof(valid_chan_list)); + list = (wl_uint32_list_t *)(void *) valid_chan_list; + list->count = htod32(WL_NUMCHANNELS); + ret = wldev_ioctl(dev, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), 0); + if (ret<0) { + ANDROID_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, ret)); + } else { + bytes_written = snprintf(command, total_len, "channels"); + for (i = 0; i < dtoh32(list->count); i++) { + bytes_written += snprintf(command+bytes_written, total_len, " %d", dtoh32(list->element[i])); + 
printf("%d ", dtoh32(list->element[i])); + } + printf("\n"); + ret = bytes_written; + } + + return ret; +} + +int +wl_ext_roam_trigger(struct net_device *dev, char* command, int total_len) +{ + int ret = 0; + int roam_trigger[2] = {0, 0}; + int trigger[2]= {0, 0}; + int bytes_written=-1; + + sscanf(command, "%*s %10d", &roam_trigger[0]); + + if (roam_trigger[0]) { + roam_trigger[1] = WLC_BAND_ALL; + ret = wldev_ioctl(dev, WLC_SET_ROAM_TRIGGER, roam_trigger, sizeof(roam_trigger), 1); + if (ret) + ANDROID_ERROR(("WLC_SET_ROAM_TRIGGER ERROR %d ret=%d\n", roam_trigger[0], ret)); + } else { + roam_trigger[1] = WLC_BAND_2G; + ret = wldev_ioctl(dev, WLC_GET_ROAM_TRIGGER, roam_trigger, sizeof(roam_trigger), 0); + if (!ret) + trigger[0] = roam_trigger[0]; + else + ANDROID_ERROR(("2G WLC_GET_ROAM_TRIGGER ERROR %d ret=%d\n", roam_trigger[0], ret)); + + roam_trigger[1] = WLC_BAND_5G; + ret = wldev_ioctl(dev, WLC_GET_ROAM_TRIGGER, roam_trigger, sizeof(roam_trigger), 0); + if (!ret) + trigger[1] = roam_trigger[0]; + else + ANDROID_ERROR(("5G WLC_GET_ROAM_TRIGGER ERROR %d ret=%d\n", roam_trigger[0], ret)); + + ANDROID_TRACE(("roam_trigger %d %d\n", trigger[0], trigger[1])); + bytes_written = snprintf(command, total_len, "%d %d", trigger[0], trigger[1]); + ret = bytes_written; + } + + return ret; +} + +static int +wl_ext_pattern_atoh(char *src, char *dst) +{ + int i; + if (strncmp(src, "0x", 2) != 0 && + strncmp(src, "0X", 2) != 0) { + ANDROID_ERROR(("Mask invalid format. Needs to start with 0x\n")); + return -1; + } + src = src + 2; /* Skip past 0x */ + if (strlen(src) % 2 != 0) { + DHD_ERROR(("Mask invalid format. 
Needs to be of even length\n")); + return -1; + } + for (i = 0; *src != '\0'; i++) { + char num[3]; + bcm_strncpy_s(num, sizeof(num), src, 2); + num[2] = '\0'; + dst[i] = (uint8)strtoul(num, NULL, 16); + src += 2; + } + return i; +} + +int +wl_ext_keep_alive(struct net_device *dev, char *command, int total_len) +{ + wl_mkeep_alive_pkt_t *mkeep_alive_pktp; + int ret = -1, i; + int id, period=-1, len_bytes=0, buf_len=0; + char data[200]="\0"; + char buf[WLC_IOCTL_SMLEN]="\0", iovar_buf[WLC_IOCTL_SMLEN]="\0"; + int bytes_written = -1; + + ANDROID_TRACE(("%s: command = %s\n", __FUNCTION__, command)); + sscanf(command, "%*s %d %d %s", &id, &period, data); + ANDROID_TRACE(("%s: id=%d, period=%d, data=%s\n", __FUNCTION__, id, period, data)); + + if (period >= 0) { + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *)buf; + mkeep_alive_pktp->version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pktp->length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + mkeep_alive_pktp->keep_alive_id = id; + buf_len += WL_MKEEP_ALIVE_FIXED_LEN; + mkeep_alive_pktp->period_msec = period; + if (strlen(data)) { + len_bytes = wl_ext_pattern_atoh(data, (char *) mkeep_alive_pktp->data); + buf_len += len_bytes; + } + mkeep_alive_pktp->len_bytes = htod16(len_bytes); + + ret = wl_ext_iovar_setbuf(dev, "mkeep_alive", buf, buf_len, + iovar_buf, sizeof(iovar_buf), NULL); + } else { + if (id < 0) + id = 0; + ret = wl_ext_iovar_getbuf(dev, "mkeep_alive", &id, sizeof(id), buf, sizeof(buf), NULL); + if (ret) { + goto exit; + } else { + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) buf; + printf("Id :%d\n" + "Period (msec) :%d\n" + "Length :%d\n" + "Packet :0x", + mkeep_alive_pktp->keep_alive_id, + dtoh32(mkeep_alive_pktp->period_msec), + dtoh16(mkeep_alive_pktp->len_bytes)); + for (i=0; ilen_bytes; i++) { + printf("%02x", mkeep_alive_pktp->data[i]); + } + printf("\n"); + } + bytes_written = snprintf(command, total_len, "mkeep_alive_period_msec %d ", dtoh32(mkeep_alive_pktp->period_msec)); + bytes_written += 
snprintf(command+bytes_written, total_len, "0x"); + for (i=0; ilen_bytes; i++) { + bytes_written += snprintf(command+bytes_written, total_len, "%x", mkeep_alive_pktp->data[i]); + } + ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command)); + ret = bytes_written; + } + +exit: + return ret; +} + +int +wl_ext_pm(struct net_device *dev, char *command, int total_len) +{ + int pm=-1, ret = -1; + char *pm_local; + int bytes_written=-1; + + ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command)); + + sscanf(command, "%*s %d", &pm); + + if (pm >= 0) { + ret = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), FALSE); + if (ret) + ANDROID_ERROR(("WLC_SET_PM ERROR %d ret=%d\n", pm, ret)); + } else { + ret = wldev_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), FALSE); + if (!ret) { + ANDROID_TRACE(("%s: PM = %d\n", __func__, pm)); + if (pm == PM_OFF) + pm_local = "PM_OFF"; + else if(pm == PM_MAX) + pm_local = "PM_MAX"; + else if(pm == PM_FAST) + pm_local = "PM_FAST"; + else { + pm = 0; + pm_local = "Invalid"; + } + bytes_written = snprintf(command, total_len, "PM %s", pm_local); + ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command)); + ret = bytes_written; + } + } + + return ret; +} + +static int +wl_ext_monitor(struct net_device *dev, char *command, int total_len) +{ + int val, ret = -1; + int bytes_written=-1; + + sscanf(command, "%*s %d", &val); + + if (val >=0) { + ret = wldev_ioctl(dev, WLC_SET_MONITOR, &val, sizeof(int), 1); + if (ret) + ANDROID_ERROR(("WLC_SET_MONITOR ERROR %d ret=%d\n", val, ret)); + } else { + ret = wldev_ioctl(dev, WLC_GET_MONITOR, &val, sizeof(val), FALSE); + if (!ret) { + ANDROID_TRACE(("%s: monitor = %d\n", __FUNCTION__, val)); + bytes_written = snprintf(command, total_len, "monitor %d", val); + ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command)); + ret = bytes_written; + } + } + + return ret; +} + +#ifdef WL_EXT_IAPSTA +struct wl_apsta_params g_apsta_params; +static int +wl_ext_parse_wep(char *key, struct 
wl_wsec_key *wsec_key) +{ + char hex[] = "XX"; + unsigned char *data = wsec_key->data; + char *keystr = key; + + switch (strlen(keystr)) { + case 5: + case 13: + case 16: + wsec_key->len = strlen(keystr); + memcpy(data, keystr, wsec_key->len + 1); + break; + case 12: + case 28: + case 34: + case 66: + /* strip leading 0x */ + if (!strnicmp(keystr, "0x", 2)) + keystr += 2; + else + return -1; + /* fall through */ + case 10: + case 26: + case 32: + case 64: + wsec_key->len = strlen(keystr) / 2; + while (*keystr) { + strncpy(hex, keystr, 2); + *data++ = (char) strtoul(hex, NULL, 16); + keystr += 2; + } + break; + default: + return -1; + } + + switch (wsec_key->len) { + case 5: + wsec_key->algo = CRYPTO_ALGO_WEP1; + break; + case 13: + wsec_key->algo = CRYPTO_ALGO_WEP128; + break; + case 16: + /* default to AES-CCM */ + wsec_key->algo = CRYPTO_ALGO_AES_CCM; + break; + case 32: + wsec_key->algo = CRYPTO_ALGO_TKIP; + break; + default: + return -1; + } + + /* Set as primary wsec_key by default */ + wsec_key->flags |= WL_PRIMARY_KEY; + + return 0; +} + +static int +wl_ext_set_bgnmode(struct wl_if_info *cur_if) +{ + struct net_device *dev = cur_if->dev; + bgnmode_t bgnmode = cur_if->bgnmode; + int val; + + if (bgnmode == 0) + return 0; + + wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1); + if (bgnmode == IEEE80211B) { + wl_ext_iovar_setint(dev, "nmode", 0); + val = 0; + wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1); + ANDROID_TRACE(("%s: Network mode: B only\n", __FUNCTION__)); + } else if (bgnmode == IEEE80211G) { + wl_ext_iovar_setint(dev, "nmode", 0); + val = 2; + wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1); + ANDROID_TRACE(("%s: Network mode: G only\n", __FUNCTION__)); + } else if (bgnmode == IEEE80211BG) { + wl_ext_iovar_setint(dev, "nmode", 0); + val = 1; + wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1); + ANDROID_TRACE(("%s: Network mode: : B/G mixed\n", __FUNCTION__)); + } else if (bgnmode == IEEE80211BGN) { + wl_ext_iovar_setint(dev, "nmode", 
0); + wl_ext_iovar_setint(dev, "nmode", 1); + wl_ext_iovar_setint(dev, "vhtmode", 0); + val = 1; + wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1); + ANDROID_TRACE(("%s: Network mode: : B/G/N mixed\n", __FUNCTION__)); + } else if (bgnmode == IEEE80211BGNAC) { + wl_ext_iovar_setint(dev, "nmode", 0); + wl_ext_iovar_setint(dev, "nmode", 1); + wl_ext_iovar_setint(dev, "vhtmode", 1); + val = 1; + wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1); + ANDROID_TRACE(("%s: Network mode: : B/G/N/AC mixed\n", __FUNCTION__)); + } + wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1); + + return 0; +} + +static int +wl_ext_set_amode(struct wl_if_info *cur_if, struct wl_apsta_params *apsta_params) +{ + struct net_device *dev = cur_if->dev; + authmode_t amode = cur_if->amode; + int auth=0, wpa_auth=0; + + if (amode == AUTH_OPEN) { + auth = 0; + wpa_auth = 0; + ANDROID_TRACE(("%s: Authentication: Open System\n", __FUNCTION__)); + } else if (amode == AUTH_SHARED) { + auth = 1; + wpa_auth = 0; + ANDROID_TRACE(("%s: Authentication: Shared Key\n", __FUNCTION__)); + } else if (amode == AUTH_WPAPSK) { + auth = 0; + wpa_auth = 4; + ANDROID_TRACE(("%s: Authentication: WPA-PSK\n", __FUNCTION__)); + } else if (amode == AUTH_WPA2PSK) { + auth = 0; + wpa_auth = 128; + ANDROID_TRACE(("%s: Authentication: WPA2-PSK\n", __FUNCTION__)); + } else if (amode == AUTH_WPAWPA2PSK) { + auth = 0; + wpa_auth = 132; + ANDROID_TRACE(("%s: Authentication: WPA/WPA2-PSK\n", __FUNCTION__)); + } + wl_ext_iovar_setint(dev, "auth", auth); + + wl_ext_iovar_setint(dev, "wpa_auth", wpa_auth); + + return 0; +} + +static int +wl_ext_set_emode(struct wl_if_info *cur_if, struct wl_apsta_params *apsta_params) +{ + struct net_device *dev = cur_if->dev; + int wsec=0; + struct wl_wsec_key wsec_key; + wsec_pmk_t psk; + encmode_t emode = cur_if->emode; + char *key = cur_if->key; + + memset(&wsec_key, 0, sizeof(wsec_key)); + memset(&psk, 0, sizeof(psk)); + if (emode == ENC_NONE) { + wsec = 0; + ANDROID_TRACE(("%s: Encryption: No 
securiy\n", __FUNCTION__)); + } else if (emode == ENC_WEP) { + wsec = 1; + wl_ext_parse_wep(key, &wsec_key); + ANDROID_TRACE(("%s: Encryption: WEP\n", __FUNCTION__)); + ANDROID_TRACE(("%s: Key: %s\n", __FUNCTION__, wsec_key.data)); + } else if (emode == ENC_TKIP) { + wsec = 2; + psk.key_len = strlen(key); + psk.flags = WSEC_PASSPHRASE; + memcpy(psk.key, key, strlen(key)); + ANDROID_TRACE(("%s: Encryption: TKIP\n", __FUNCTION__)); + ANDROID_TRACE(("%s: Key: %s\n", __FUNCTION__, psk.key)); + } else if (emode == ENC_AES) { + wsec = 4; + psk.key_len = strlen(key); + psk.flags = WSEC_PASSPHRASE; + memcpy(psk.key, key, strlen(key)); + ANDROID_TRACE(("%s: Encryption: AES\n", __FUNCTION__)); + ANDROID_TRACE(("%s: Key: %s\n", __FUNCTION__, psk.key)); + } else if (emode == ENC_TKIPAES) { + wsec = 6; + psk.key_len = strlen(key); + psk.flags = WSEC_PASSPHRASE; + memcpy(psk.key, key, strlen(key)); + ANDROID_TRACE(("%s: Encryption: TKIP/AES\n", __FUNCTION__)); + ANDROID_TRACE(("%s: Key: %s\n", __FUNCTION__, psk.key)); + } + + wl_ext_iovar_setint(dev, "wsec", wsec); + + if (wsec == 1) { + wl_ext_ioctl(dev, WLC_SET_KEY, &wsec_key, sizeof(wsec_key), 1); + } else if (emode == ENC_TKIP || emode == ENC_AES || emode == ENC_TKIPAES) { + if (dev) { + if (cur_if->ifmode == ISTA_MODE) + wl_ext_iovar_setint(dev, "sup_wpa", 1); + wl_ext_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk), 1); + } else { + ANDROID_ERROR(("%s: apdev is null\n", __FUNCTION__)); + } + } + + return 0; +} + +static int +wl_ext_iapsta_init(struct net_device *dev, char *command, int total_len) +{ + s32 val = 0; + char *pch, *pick_tmp, *param; + wlc_ssid_t ssid = { 0, {0} }; + s8 iovar_buf[WLC_IOCTL_SMLEN]; + struct wl_apsta_params *apsta_params = &g_apsta_params; + wl_interface_create_t iface; + struct dhd_pub *dhd; + wl_p2p_if_t ifreq; + + if (apsta_params->init) { + ANDROID_ERROR(("%s: don't init twice\n", __FUNCTION__)); + return -1; + } + + dhd = dhd_get_pub(dev); + memset(apsta_params, 0, sizeof(struct 
wl_apsta_params)); + + ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + + pick_tmp = command; + param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_init + param = bcmstrtok(&pick_tmp, " ", 0); + while (param != NULL) { + if (!strcmp(param, "mode")) { + pch = bcmstrtok(&pick_tmp, " ", 0); + if (pch) { + if (!strcmp(pch, "sta")) { + apsta_params->apstamode = ISTAONLY_MODE; + } else if (!strcmp(pch, "ap")) { + apsta_params->apstamode = IAPONLY_MODE; + } else if (!strcmp(pch, "apsta")) { + apsta_params->apstamode = IAPSTA_MODE; + } else if (!strcmp(pch, "dualap")) { + apsta_params->apstamode = IDUALAP_MODE; + } else if (!strcmp(pch, "gosta")) { + if (!FW_SUPPORTED(dhd, p2p)) { + return -1; + } + apsta_params->apstamode = IGOSTA_MODE; + } else if (!strcmp(pch, "gcsta")) { + if (!FW_SUPPORTED(dhd, p2p)) { + return -1; + } + apsta_params->apstamode = IGCSTA_MODE; + } else { + ANDROID_ERROR(("%s: mode [sta|ap|apsta|dualap]\n", __FUNCTION__)); + return -1; + } + } + } else if (!strcmp(param, "vifname")) { + pch = bcmstrtok(&pick_tmp, " ", 0); + if (pch) + strcpy(apsta_params->vif.ifname, pch); + else { + ANDROID_ERROR(("%s: vifname [wlan1]\n", __FUNCTION__)); + return -1; + } + } + param = bcmstrtok(&pick_tmp, " ", 0); + } + + if (apsta_params->apstamode == 0) { + ANDROID_ERROR(("%s: mode [sta|ap|apsta|dualap]\n", __FUNCTION__)); + return -1; + } + + apsta_params->pif.dev = dev; + apsta_params->pif.bssidx = 0; + strcpy(apsta_params->pif.ifname, dev->name); + strcpy(apsta_params->pif.ssid, "tttp"); + apsta_params->pif.maxassoc = -1; + apsta_params->pif.channel = 1; + + if (!strlen(apsta_params->vif.ifname)) + strcpy(apsta_params->vif.ifname, "wlan1"); + strcpy(apsta_params->vif.ssid, "tttv"); + apsta_params->vif.maxassoc = -1; + apsta_params->vif.channel = 1; + + if (apsta_params->apstamode == ISTAONLY_MODE) { + apsta_params->pif.ifmode = ISTA_MODE; + apsta_params->pif.ifstate = IF_STATE_INIT; + wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1); + 
wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls + // don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off + wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1); + } else if (apsta_params->apstamode == IAPONLY_MODE) { + apsta_params->pif.ifmode = IAP_MODE; + apsta_params->pif.ifstate = IF_STATE_INIT; + wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1); +#ifdef ARP_OFFLOAD_SUPPORT + /* IF SoftAP is enabled, disable arpoe */ + dhd_arp_offload_set(dhd, 0); + dhd_arp_offload_enable(dhd, FALSE); +#endif /* ARP_OFFLOAD_SUPPORT */ + wl_ext_iovar_setint(dev, "mpc", 0); + wl_ext_iovar_setint(dev, "apsta", 0); + val = 1; + wl_ext_ioctl(dev, WLC_SET_AP, &val, sizeof(val), 1); + } else if (apsta_params->apstamode == IAPSTA_MODE) { + apsta_params->pif.ifmode = ISTA_MODE; + apsta_params->pif.ifstate = IF_STATE_INIT; + apsta_params->vif.ifmode = IAP_MODE; + apsta_params->vif.ifstate = IF_STATE_INIT; + wl_ext_iovar_setint(dev, "mpc", 0); + wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1); + wl_ext_iovar_setint(dev, "apsta", 1); + wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1); + if (FW_SUPPORTED(dhd, rsdb)) { + bzero(&iface, sizeof(wl_interface_create_t)); + iface.ver = WL_INTERFACE_CREATE_VER; + iface.flags = WL_INTERFACE_CREATE_AP; + wl_ext_iovar_getbuf_bsscfg(dev, "interface_create", &iface, sizeof(iface), iovar_buf, + WLC_IOCTL_SMLEN, 1, NULL); + } else { + wl_ext_iovar_setbuf_bsscfg(dev, "ssid", &ssid, sizeof(ssid), iovar_buf, + WLC_IOCTL_SMLEN, 1, NULL); + } + } + else if (apsta_params->apstamode == IDUALAP_MODE) { + apsta_params->pif.ifmode = IAP_MODE; + apsta_params->pif.ifstate = IF_STATE_INIT; + apsta_params->vif.ifmode = IAP_MODE; + apsta_params->vif.ifstate = IF_STATE_INIT; + wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1); + wl_ext_iovar_setint(dev, "apsta", 0); + wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1); + val = 1; + wl_ext_ioctl(dev, WLC_SET_AP, &val, sizeof(val), 1); + /* IF SoftAP is enabled, disable arpoe or wlan1 will ping fail */ +#ifdef 
ARP_OFFLOAD_SUPPORT + /* IF SoftAP is enabled, disable arpoe */ + dhd_arp_offload_set(dhd, 0); + dhd_arp_offload_enable(dhd, FALSE); +#endif /* ARP_OFFLOAD_SUPPORT */ + bzero(&iface, sizeof(wl_interface_create_t)); + iface.ver = WL_INTERFACE_CREATE_VER; + iface.flags = WL_INTERFACE_CREATE_AP; + wl_ext_iovar_getbuf_bsscfg(dev, "interface_create", &iface, sizeof(iface), iovar_buf, + WLC_IOCTL_SMLEN, 1, NULL); + } + else if (apsta_params->apstamode == IGOSTA_MODE) { + apsta_params->pif.ifmode = ISTA_MODE; + apsta_params->pif.ifstate = IF_STATE_INIT; + apsta_params->vif.ifmode = IAP_MODE; + apsta_params->vif.ifstate = IF_STATE_INIT; + wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1); + wl_ext_iovar_setint(dev, "apsta", 1); + wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1); + bzero(&ifreq, sizeof(wl_p2p_if_t)); + ifreq.type = htod32(WL_P2P_IF_GO); + wl_ext_iovar_setbuf(dev, "p2p_ifadd", &ifreq, sizeof(ifreq), + iovar_buf, WLC_IOCTL_SMLEN, NULL); + } + else if (apsta_params->apstamode == IGCSTA_MODE) { + apsta_params->pif.ifmode = ISTA_MODE; + apsta_params->pif.ifstate = IF_STATE_INIT; + apsta_params->vif.ifmode = ISTA_MODE; + apsta_params->vif.ifstate = IF_STATE_INIT; + wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1); + wl_ext_iovar_setint(dev, "apsta", 1); + wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1); + bzero(&ifreq, sizeof(wl_p2p_if_t)); + ifreq.type = htod32(WL_P2P_IF_CLIENT); + wl_ext_iovar_setbuf(dev, "p2p_ifadd", &ifreq, sizeof(ifreq), + iovar_buf, WLC_IOCTL_SMLEN, NULL); + } + + wl_ext_get_ioctl_ver(dev, &apsta_params->ioctl_ver); + printf("%s: apstamode=%d\n", __FUNCTION__, apsta_params->apstamode); + + apsta_params->init = TRUE; + + return 0; +} + +static int +wl_ext_iapsta_config(struct net_device *dev, char *command, int total_len) +{ + int i; + char *pch, *pick_tmp, *param; + struct wl_apsta_params *apsta_params = &g_apsta_params; + char ifname[IFNAMSIZ+1]; + struct wl_if_info *cur_if = &apsta_params->pif; + + if (!apsta_params->init) { + ANDROID_ERROR(("%s: please init first\n", 
__FUNCTION__)); + return -1; + } + + ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + + pick_tmp = command; + param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_config + param = bcmstrtok(&pick_tmp, " ", 0); + + if (param != NULL) { + if (strcmp(param, "ifname")) { + ANDROID_ERROR(("%s: first arg must be ifname\n", __FUNCTION__)); + return -1; + } + } + + while (param != NULL) { + if (!strcmp(param, "ifname")) { + pch = bcmstrtok(&pick_tmp, " ", 0); + if (pch) + strcpy(ifname, pch); + else { + ANDROID_ERROR(("%s: ifname [wlanX]\n", __FUNCTION__)); + return -1; + } + if (!strcmp(apsta_params->pif.dev->name, ifname)) { + cur_if = &apsta_params->pif; + } else if (!strcmp(apsta_params->vif.ifname, ifname)) { + cur_if = &apsta_params->vif; + } else { + ANDROID_ERROR(("%s: wrong ifname=%s in apstamode=%d\n", __FUNCTION__, + ifname, apsta_params->apstamode)); + return -1; + } + } else if (!strcmp(param, "ssid")) { + pch = bcmstrtok(&pick_tmp, " ", 0); + if (pch) + strcpy(cur_if->ssid, pch); + } else if (!strcmp(param, "bssid")) { + pch = bcmstrtok(&pick_tmp, ": ", 0); + for (i=0; i<6 && pch; i++) { + ((u8 *)&cur_if->bssid)[i] = (int)simple_strtol(pch, NULL, 16); + pch = bcmstrtok(&pick_tmp, ": ", 0); + } + } else if (!strcmp(param, "bgnmode")) { + pch = bcmstrtok(&pick_tmp, " ", 0); + if (pch) { + if (!strcmp(pch, "b")) + cur_if->bgnmode = IEEE80211B; + else if (!strcmp(pch, "g")) + cur_if->bgnmode = IEEE80211G; + else if (!strcmp(pch, "bg")) + cur_if->bgnmode = IEEE80211BG; + else if (!strcmp(pch, "bgn")) + cur_if->bgnmode = IEEE80211BGN; + else if (!strcmp(pch, "bgnac")) + cur_if->bgnmode = IEEE80211BGNAC; + else { + ANDROID_ERROR(("%s: bgnmode [b|g|bg|bgn|bgnac]\n", __FUNCTION__)); + return -1; + } + } + } else if (!strcmp(param, "hidden")) { + pch = bcmstrtok(&pick_tmp, " ", 0); + if (pch) { + if (!strcmp(pch, "n")) + cur_if->hidden = 0; + else if (!strcmp(pch, "y")) + cur_if->hidden = 1; + else { + ANDROID_ERROR(("%s: hidden 
[y|n]\n", __FUNCTION__)); + return -1; + } + } + } else if (!strcmp(param, "maxassoc")) { + pch = bcmstrtok(&pick_tmp, " ", 0); + if (pch) + cur_if->maxassoc = (int)simple_strtol(pch, NULL, 10); + } else if (!strcmp(param, "chan")) { + pch = bcmstrtok(&pick_tmp, " ", 0); + if (pch) + cur_if->channel = (int)simple_strtol(pch, NULL, 10); + } else if (!strcmp(param, "amode")) { + pch = bcmstrtok(&pick_tmp, " ", 0); + if (pch) { + if (!strcmp(pch, "open")) + cur_if->amode = AUTH_OPEN; + else if (!strcmp(pch, "shared")) + cur_if->amode = AUTH_SHARED; + else if (!strcmp(pch, "wpapsk")) + cur_if->amode = AUTH_WPAPSK; + else if (!strcmp(pch, "wpa2psk")) + cur_if->amode = AUTH_WPA2PSK; + else if (!strcmp(pch, "wpawpa2psk")) + cur_if->amode = AUTH_WPAWPA2PSK; + else { + ANDROID_ERROR(("%s: amode [open|shared|wpapsk|wpa2psk|wpawpa2psk]\n", + __FUNCTION__)); + return -1; + } + } + } else if (!strcmp(param, "emode")) { + pch = bcmstrtok(&pick_tmp, " ", 0); + if (pch) { + if (!strcmp(pch, "none")) + cur_if->emode = ENC_NONE; + else if (!strcmp(pch, "wep")) + cur_if->emode = ENC_WEP; + else if (!strcmp(pch, "tkip")) + cur_if->emode = ENC_TKIP; + else if (!strcmp(pch, "aes")) + cur_if->emode = ENC_AES; + else if (!strcmp(pch, "tkipaes")) + cur_if->emode = ENC_TKIPAES; + else { + ANDROID_ERROR(("%s: emode [none|wep|tkip|aes|tkipaes]\n", + __FUNCTION__)); + return -1; + } + } + } else if (!strcmp(param, "key")) { + pch = bcmstrtok(&pick_tmp, " ", 0); + if (pch) { + strcpy(cur_if->key, pch); + } + } + param = bcmstrtok(&pick_tmp, " ", 0); + } + + return 0; +} + +static int +wl_ext_iapsta_disable(struct net_device *dev, char *command, int total_len) +{ + char *pch, *pick_tmp, *param; + s8 iovar_buf[WLC_IOCTL_SMLEN]; + wlc_ssid_t ssid = { 0, {0} }; + scb_val_t scbval; + struct { + s32 tmp; + s32 cfg; + s32 val; + } bss_setbuf; + struct wl_apsta_params *apsta_params = &g_apsta_params; + apstamode_t apstamode = apsta_params->apstamode; + char ifname[IFNAMSIZ+1]; + struct wl_if_info 
*cur_if; + struct dhd_pub *dhd; + + if (!apsta_params->init) { + ANDROID_ERROR(("%s: please init first\n", __FUNCTION__)); + return -1; + } + + ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + dhd = dhd_get_pub(dev); + + pick_tmp = command; + param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_disable + param = bcmstrtok(&pick_tmp, " ", 0); + while (param != NULL) { + if (!strcmp(param, "ifname")) { + pch = bcmstrtok(&pick_tmp, " ", 0); + if (pch) + strcpy(ifname, pch); + else { + ANDROID_ERROR(("%s: ifname [wlanX]\n", __FUNCTION__)); + return -1; + } + } + param = bcmstrtok(&pick_tmp, " ", 0); + } + if (!strcmp(apsta_params->pif.dev->name, ifname)) { + cur_if = &apsta_params->pif; + } else if (!strcmp(apsta_params->vif.ifname, ifname)) { + cur_if = &apsta_params->vif; + } else { + ANDROID_ERROR(("%s: wrong ifname=%s\n", __FUNCTION__, ifname)); + return -1; + } + if (!cur_if->dev) { + ANDROID_ERROR(("%s: %s is not ready\n", __FUNCTION__, ifname)); + return -1; + } + + if (cur_if->ifmode == ISTA_MODE) { + wl_ext_ioctl(cur_if->dev, WLC_DISASSOC, NULL, 0, 1); + } else if (cur_if->ifmode == IAP_MODE) { + // deauthenticate all STA first + memcpy(scbval.ea.octet, ðer_bcast, ETHER_ADDR_LEN); + wl_ext_ioctl(cur_if->dev, WLC_SCB_DEAUTHENTICATE, &scbval.ea, ETHER_ADDR_LEN, 1); + } + + if (apstamode == IAPONLY_MODE) { + wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1); + wl_ext_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1); // reset ssid + wl_ext_iovar_setint(dev, "mpc", 1); + } else if ((apstamode==IAPSTA_MODE || apstamode==IGOSTA_MODE) && + cur_if->ifmode == IAP_MODE) { + // vif is AP mode + bss_setbuf.tmp = 0xffffffff; + bss_setbuf.cfg = 0; // must be 0, or wlan1 can not be down + bss_setbuf.val = htod32(0); + wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf), + iovar_buf, WLC_IOCTL_SMLEN, NULL); + wl_ext_iovar_setint(dev, "mpc", 1); +#ifdef ARP_OFFLOAD_SUPPORT + /* IF SoftAP is disabled, enable arpoe back for STA mode. 
*/ + dhd_arp_offload_set(dhd, dhd_arp_mode); + dhd_arp_offload_enable(dhd, TRUE); +#endif /* ARP_OFFLOAD_SUPPORT */ + } else if (apstamode == IDUALAP_MODE) { + bss_setbuf.tmp = 0xffffffff; + bss_setbuf.cfg = 0; // must be 0, or wlan1 can not be down + bss_setbuf.val = htod32(0); + wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf), + iovar_buf, WLC_IOCTL_SMLEN, NULL); + } + +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + if (cur_if==&apsta_params->vif && dhd->conf->disable_proptx!=0) { + bool enabled; + dhd_wlfc_get_enable(dhd, &enabled); + if (enabled) { + dhd_wlfc_deinit(dhd); + } + } +#endif +#endif /* PROP_TXSTATUS_VSDB */ + + cur_if->ifstate = IF_STATE_DISALBE; + printf("%s: apstamode=%d, ifname=%s\n", __FUNCTION__, apstamode, ifname); + + return 0; +} + +static int +wl_ext_iapsta_enable(struct net_device *dev, char *command, int total_len) +{ + int ret = 0; + s32 val = 0; + char *pch, *pick_tmp, *param; + s8 iovar_buf[WLC_IOCTL_SMLEN]; + wlc_ssid_t ssid = { 0, {0} }; + struct { + s32 cfg; + s32 val; + } bss_setbuf; + struct wl_apsta_params *apsta_params = &g_apsta_params; + apstamode_t apstamode = apsta_params->apstamode; + char ifname[IFNAMSIZ+1]; + struct wl_if_info *cur_if; + char cmd[128] = "iapsta_stop ifname "; + struct dhd_pub *dhd; + + if (!apsta_params->init) { + ANDROID_ERROR(("%s: please init first\n", __FUNCTION__)); + return -1; + } + + ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + dhd = dhd_get_pub(dev); + + pick_tmp = command; + param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_enable + param = bcmstrtok(&pick_tmp, " ", 0); + while (param != NULL) { + if (!strcmp(param, "ifname")) { + pch = bcmstrtok(&pick_tmp, " ", 0); + if (pch) + strcpy(ifname, pch); + else { + ANDROID_ERROR(("%s: ifname [wlanX]\n", __FUNCTION__)); + return -1; + } + } + param = bcmstrtok(&pick_tmp, " ", 0); + } + if (!strcmp(apsta_params->pif.dev->name, ifname)) { + cur_if = &apsta_params->pif; + } else if 
(!strcmp(apsta_params->vif.ifname, ifname)) { + cur_if = &apsta_params->vif; + } else { + ANDROID_ERROR(("%s: wrong ifname=%s\n", __FUNCTION__, ifname)); + return -1; + } + if (!cur_if->dev) { + ANDROID_ERROR(("%s: %s is not ready\n", __FUNCTION__, ifname)); + return -1; + } + ssid.SSID_len = strlen(cur_if->ssid); + memcpy(ssid.SSID, cur_if->ssid, ssid.SSID_len); + ANDROID_TRACE(("%s: apstamode=%d, bssidx=%d\n", __FUNCTION__, apstamode, cur_if->bssidx)); + + snprintf(cmd, 128, "iapsta_stop ifname %s", cur_if->ifname); + ret = wl_ext_iapsta_disable(dev, cmd, strlen(cmd)); + if (ret) + goto exit; + + if (cur_if == &apsta_params->vif) { + wl_ext_iovar_setbuf(cur_if->dev, "cur_etheraddr", (u8 *)cur_if->dev->dev_addr, + ETHER_ADDR_LEN, iovar_buf, WLC_IOCTL_SMLEN, NULL); + } + + // set ssid for AP + if (cur_if->ifmode == IAP_MODE) { + wl_ext_iovar_setint(dev, "mpc", 0); + if (apstamode == IAPONLY_MODE) { + wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1); + } else if (apstamode==IAPSTA_MODE || apstamode==IGOSTA_MODE) { + wl_ext_iovar_setbuf_bsscfg(cur_if->dev, "ssid", &ssid, sizeof(ssid), + iovar_buf, WLC_IOCTL_SMLEN, cur_if->bssidx, NULL); + } + } + + if (cur_if->ifmode == IAP_MODE) { + wl_ext_set_bgnmode(cur_if); + wl_ext_set_chanspec(cur_if->dev, cur_if->channel); + } + wl_ext_set_amode(cur_if, apsta_params); + wl_ext_set_emode(cur_if, apsta_params); + + if (apstamode == ISTAONLY_MODE || apstamode == IGCSTA_MODE) { + if (!ETHER_ISBCAST(&cur_if->bssid) && !ETHER_ISNULLADDR(&cur_if->bssid)) { + printf("%s: BSSID: %pM\n", __FUNCTION__, &cur_if->bssid); + wl_ext_ioctl(cur_if->dev, WLC_SET_BSSID, &cur_if->bssid, ETHER_ADDR_LEN, 1); + } + val = 1; + wl_ext_ioctl(dev, WLC_SET_INFRA, &val, sizeof(val), 1); + } + if (cur_if->ifmode == IAP_MODE) { + if (cur_if->maxassoc >= 0) + wl_ext_iovar_setint(dev, "maxassoc", cur_if->maxassoc); + printf("%s: Broadcast SSID: %s\n", __FUNCTION__, cur_if->hidden ? 
"OFF":"ON"); + // terence: fix me, hidden does not work in dualAP mode + wl_ext_ioctl(cur_if->dev, WLC_SET_CLOSED, &cur_if->hidden, sizeof(cur_if->hidden), 1); + } + + if (apstamode == ISTAONLY_MODE || apstamode == IGCSTA_MODE) { + wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1); + } else if (apstamode == IAPONLY_MODE) { + wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1); + wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1); + } else if (apstamode == IAPSTA_MODE || apstamode == IGOSTA_MODE) { + if (cur_if->ifmode == ISTA_MODE) { + wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1); + } else { + if (FW_SUPPORTED(dhd, rsdb)) { + wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1); + } else { + bss_setbuf.cfg = htod32(cur_if->bssidx); + bss_setbuf.val = htod32(1); + wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf), + iovar_buf, WLC_IOCTL_SMLEN, NULL); + } +#ifdef ARP_OFFLOAD_SUPPORT + /* IF SoftAP is enabled, disable arpoe */ + dhd_arp_offload_set(dhd, 0); + dhd_arp_offload_enable(dhd, FALSE); +#endif /* ARP_OFFLOAD_SUPPORT */ + } + } + else if (apstamode == IDUALAP_MODE) { + wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1); + } + +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + if (cur_if==&apsta_params->vif && !disable_proptx) { + bool enabled; + dhd_wlfc_get_enable(dhd, &enabled); + if (!enabled) { + dhd_wlfc_init(dhd); + wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1); + } + } +#endif +#endif /* PROP_TXSTATUS_VSDB */ + + printf("%s: ifname=%s, SSID: %s\n", __FUNCTION__, ifname, cur_if->ssid); + + cur_if->ifstate = IF_STATE_ENABLE; + +exit: + return ret; +} + +void +wl_android_ext_iapsta_disconnect_sta(struct net_device *dev, u32 channel) +{ + struct wl_apsta_params *apsta_params = &g_apsta_params; + struct wl_if_info *cur_if = &apsta_params->vif; + scb_val_t scbval; + int ret; + channel_info_t ci; + struct dhd_pub *dhd; + + if (apsta_params->apstamode==IAPSTA_MODE && 
cur_if->ifstate==IF_STATE_ENABLE) { + dhd = dhd_get_pub(dev); + if (!FW_SUPPORTED(dhd, vsdb)) { + if (!(ret = wldev_ioctl(cur_if->dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t), FALSE))) { + if (channel != ci.target_channel) { + printf("%s: deauthenticate all STA on vif\n", __FUNCTION__); + memcpy(scbval.ea.octet, ðer_bcast, ETHER_ADDR_LEN); + wl_ext_ioctl(cur_if->dev, WLC_SCB_DEAUTHENTICATE, &scbval.ea, ETHER_ADDR_LEN, 1); + } + } + } + } +} + +int wl_android_ext_attach_netdev(struct net_device *net, uint8 bssidx) +{ + g_apsta_params.vif.dev = net; + g_apsta_params.vif.bssidx = bssidx; + if (strlen(g_apsta_params.vif.ifname)) { + memset(net->name, 0, sizeof(IFNAMSIZ)); + strcpy(net->name, g_apsta_params.vif.ifname); + net->name[IFNAMSIZ - 1] = '\0'; + } + if (g_apsta_params.pif.dev) { + memcpy(net->dev_addr, g_apsta_params.pif.dev->dev_addr, ETHER_ADDR_LEN); + net->dev_addr[0] |= 0x02; + } + + return 0; +} + +int wl_android_ext_dettach_netdev(void) +{ + struct wl_apsta_params *apsta_params = &g_apsta_params; + + ANDROID_TRACE(("%s: Enter\n", __FUNCTION__)); + memset(apsta_params, 0, sizeof(struct wl_apsta_params)); + + return 0; +} +#endif + +#ifdef IDHCPC +int wl_ext_ip_dump(int ip, char *buf) +{ + unsigned char bytes[4]; + int bytes_written=-1; + + bytes[0] = ip & 0xFF; + bytes[1] = (ip >> 8) & 0xFF; + bytes[2] = (ip >> 16) & 0xFF; + bytes[3] = (ip >> 24) & 0xFF; + bytes_written = sprintf(buf, "%d.%d.%d.%d", bytes[0], bytes[1], bytes[2], bytes[3]); + + return bytes_written; +} + +/* +terence 20170215: +dhd_priv dhcpc_dump ifname [wlan0|wlan1] +dhd_priv dhcpc_enable [0|1] +*/ +int +wl_ext_dhcpc_enable(struct net_device *dev, char *command, int total_len) +{ + int enable = -1, ret = -1; + int bytes_written = -1; + + ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command)); + + sscanf(command, "%*s %d", &enable); + + if (enable >= 0) + ret = wl_ext_iovar_setint(dev, "dhcpc_enable", enable); + else { + ret = wl_ext_iovar_getint(dev, "dhcpc_enable", &enable); + if 
(!ret) { + bytes_written = snprintf(command, total_len, "%d", enable); + ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command)); + ret = bytes_written; + } + } + + return ret; +} + +int +wl_ext_dhcpc_dump(struct net_device *dev, char *command, int total_len) +{ + + int ret = 0; + int bytes_written = 0; + uint32 ip_addr; + char buf[20]=""; + + ret = wl_ext_iovar_getint(dev, "dhcpc_ip_addr", &ip_addr); + if (!ret) { + wl_ext_ip_dump(ip_addr, buf); + bytes_written += snprintf(command+bytes_written, total_len, "ipaddr %s ", buf); + } + + ret = wl_ext_iovar_getint(dev, "dhcpc_ip_mask", &ip_addr); + if (!ret) { + wl_ext_ip_dump(ip_addr, buf); + bytes_written += snprintf(command+bytes_written, total_len, "mask %s ", buf); + } + + ret = wl_ext_iovar_getint(dev, "dhcpc_ip_gateway", &ip_addr); + if (!ret) { + wl_ext_ip_dump(ip_addr, buf); + bytes_written += snprintf(command+bytes_written, total_len, "gw %s ", buf); + } + + ret = wl_ext_iovar_getint(dev, "dhcpc_ip_dnsserv", &ip_addr); + if (!ret) { + wl_ext_ip_dump(ip_addr, buf); + bytes_written += snprintf(command+bytes_written, total_len, "dnsserv %s ", buf); + } + + if (!bytes_written) + bytes_written = -1; + + ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command)); + + return bytes_written; +} +#endif + +/* +dhd_priv dhd [string] ==> Not ready +1. Get dhd val: + Ex: dhd_priv dhd bussleep +2. 
Set dhd val: + Ex: dhd_priv dhd bussleep 1 + +dhd_priv wl [WLC_GET_PM] ==> Ready to get int val +dhd_priv wl [WLC_SET_PM] [int] ==> Ready to set int val +dhd_priv wl [string] ==> Ready to get int val +dhd_priv wl [string] [int] ==> Ready to set int val +Ex: get/set WLC_PM + dhd_priv wl 85 + dhd_priv wl 86 1 +Ex: get/set mpc + dhd_priv wl mpc + dhd_priv wl mpc 1 +*/ +int +wl_ext_iovar(struct net_device *dev, char *command, int total_len) +{ + int ret = 0; + char wl[3]="\0", arg[20]="\0", cmd_str[20]="\0", val_str[20]="\0"; + int cmd=-1, val=0; + int bytes_written=-1; + + ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command)); + + sscanf(command, "%s %d %s", wl, &cmd, arg); + if (cmd < 0) + sscanf(command, "%s %s %s", wl, cmd_str, val_str); + + if (!strcmp(wl, "wl")) { + if (cmd>=0 && cmd!=WLC_GET_VAR && cmd!=WLC_SET_VAR) { + ret = sscanf(arg, "%d", &val); + if (ret > 0) { // set + ret = wl_ext_ioctl(dev, cmd, &val, sizeof(val), TRUE); + } else { // get + ret = wl_ext_ioctl(dev, cmd, &val, sizeof(val), FALSE); + if (!ret) { + bytes_written = snprintf(command, total_len, "%d", val); + ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command)); + ret = bytes_written; + } + } + } else if (strlen(cmd_str)) { + ret = sscanf(val_str, "%d", &val); + if (ret > 0) { // set + ret = wl_ext_iovar_setint(dev, cmd_str, val); + } else { // get + ret = wl_ext_iovar_getint(dev, cmd_str, &val); + if (!ret) { + bytes_written = snprintf(command, total_len, "%d", val); + ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command)); + ret = bytes_written; + } + } + } + } + + return ret; +} + +int wl_android_ext_priv_cmd(struct net_device *net, char *command, int total_len, + int *bytes_written) +{ + int ret = 0; + + if (strnicmp(command, CMD_CHANNELS, strlen(CMD_CHANNELS)) == 0) { + *bytes_written = wl_ext_channels(net, command, total_len); + } + else if (strnicmp(command, CMD_CHANNEL, strlen(CMD_CHANNEL)) == 0) { + *bytes_written = wl_ext_channel(net, command, 
total_len); + } + else if (strnicmp(command, CMD_ROAM_TRIGGER, strlen(CMD_ROAM_TRIGGER)) == 0) { + *bytes_written = wl_ext_roam_trigger(net, command, total_len); + } + else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) { + *bytes_written = wl_ext_keep_alive(net, command, total_len); + } + else if (strnicmp(command, CMD_PM, strlen(CMD_PM)) == 0) { + *bytes_written = wl_ext_pm(net, command, total_len); + } + else if (strnicmp(command, CMD_MONITOR, strlen(CMD_MONITOR)) == 0) { + *bytes_written = wl_ext_monitor(net, command, total_len); + } + else if (strnicmp(command, CMD_SET_SUSPEND_BCN_LI_DTIM, strlen(CMD_SET_SUSPEND_BCN_LI_DTIM)) == 0) { + int bcn_li_dtim; + bcn_li_dtim = (int)simple_strtol((command + strlen(CMD_SET_SUSPEND_BCN_LI_DTIM) + 1), NULL, 10); + *bytes_written = net_os_set_suspend_bcn_li_dtim(net, bcn_li_dtim); + } +#ifdef WL_EXT_IAPSTA + else if (strnicmp(command, CMD_IAPSTA_INIT, strlen(CMD_IAPSTA_INIT)) == 0) { + *bytes_written = wl_ext_iapsta_init(net, command, total_len); + } + else if (strnicmp(command, CMD_IAPSTA_CONFIG, strlen(CMD_IAPSTA_CONFIG)) == 0) { + *bytes_written = wl_ext_iapsta_config(net, command, total_len); + } + else if (strnicmp(command, CMD_IAPSTA_ENABLE, strlen(CMD_IAPSTA_ENABLE)) == 0) { + *bytes_written = wl_ext_iapsta_enable(net, command, total_len); + } + else if (strnicmp(command, CMD_IAPSTA_DISABLE, strlen(CMD_IAPSTA_DISABLE)) == 0) { + *bytes_written = wl_ext_iapsta_disable(net, command, total_len); + } +#endif +#ifdef IDHCPC + else if (strnicmp(command, CMD_DHCPC_ENABLE, strlen(CMD_DHCPC_ENABLE)) == 0) { + *bytes_written = wl_ext_dhcpc_enable(net, command, total_len); + } + else if (strnicmp(command, CMD_DHCPC_DUMP, strlen(CMD_DHCPC_DUMP)) == 0) { + *bytes_written = wl_ext_dhcpc_dump(net, command, total_len); + } +#endif + else if (strnicmp(command, CMD_WL, strlen(CMD_WL)) == 0) { + *bytes_written = wl_ext_iovar(net, command, total_len); + } + else + ret = -1; + + return ret; +} + +#if defined(RSSIAVG) 
+void +wl_free_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl) +{ + wl_rssi_cache_t *node, *cur, **rssi_head; + int i=0; + + rssi_head = &rssi_cache_ctrl->m_cache_head; + node = *rssi_head; + + for (;node;) { + ANDROID_INFO(("%s: Free %d with BSSID %pM\n", + __FUNCTION__, i, &node->BSSID)); + cur = node; + node = cur->next; + kfree(cur); + i++; + } + *rssi_head = NULL; +} + +void +wl_delete_dirty_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl) +{ + wl_rssi_cache_t *node, *prev, **rssi_head; + int i = -1, tmp = 0; + struct timeval now; + + do_gettimeofday(&now); + + rssi_head = &rssi_cache_ctrl->m_cache_head; + node = *rssi_head; + prev = node; + for (;node;) { + i++; + if (now.tv_sec > node->tv.tv_sec) { + if (node == *rssi_head) { + tmp = 1; + *rssi_head = node->next; + } else { + tmp = 0; + prev->next = node->next; + } + ANDROID_INFO(("%s: Del %d with BSSID %pM\n", + __FUNCTION__, i, &node->BSSID)); + kfree(node); + if (tmp == 1) { + node = *rssi_head; + prev = node; + } else { + node = prev->next; + } + continue; + } + prev = node; + node = node->next; + } +} + +void +wl_delete_disconnected_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, u8 *bssid) +{ + wl_rssi_cache_t *node, *prev, **rssi_head; + int i = -1, tmp = 0; + + rssi_head = &rssi_cache_ctrl->m_cache_head; + node = *rssi_head; + prev = node; + for (;node;) { + i++; + if (!memcmp(&node->BSSID, bssid, ETHER_ADDR_LEN)) { + if (node == *rssi_head) { + tmp = 1; + *rssi_head = node->next; + } else { + tmp = 0; + prev->next = node->next; + } + ANDROID_INFO(("%s: Del %d with BSSID %pM\n", + __FUNCTION__, i, &node->BSSID)); + kfree(node); + if (tmp == 1) { + node = *rssi_head; + prev = node; + } else { + node = prev->next; + } + continue; + } + prev = node; + node = node->next; + } +} + +void +wl_reset_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl) +{ + wl_rssi_cache_t *node, **rssi_head; + + rssi_head = &rssi_cache_ctrl->m_cache_head; + + /* reset dirty */ + node = *rssi_head; + for (;node;) { + 
node->dirty += 1; + node = node->next; + } +} + +int +wl_update_connected_rssi_cache(struct net_device *net, wl_rssi_cache_ctrl_t *rssi_cache_ctrl, int *rssi_avg) +{ + wl_rssi_cache_t *node, *prev, *leaf, **rssi_head; + int j, k=0; + int rssi, error=0; + struct ether_addr bssid; + struct timeval now, timeout; + + if (!g_wifi_on) + return 0; + + error = wldev_ioctl(net, WLC_GET_BSSID, &bssid, sizeof(bssid), false); + if (error == BCME_NOTASSOCIATED) { + ANDROID_INFO(("%s: Not Associated! res:%d\n", __FUNCTION__, error)); + return 0; + } + if (error) { + ANDROID_ERROR(("Could not get bssid (%d)\n", error)); + } + error = wldev_get_rssi(net, &rssi); + if (error) { + ANDROID_ERROR(("Could not get rssi (%d)\n", error)); + return error; + } + + do_gettimeofday(&now); + timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT; + if (timeout.tv_sec < now.tv_sec) { + /* + * Integer overflow - assume long enough timeout to be assumed + * to be infinite, i.e., the timeout would never happen. + */ + ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu", + __FUNCTION__, RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec)); + } + + /* update RSSI */ + rssi_head = &rssi_cache_ctrl->m_cache_head; + node = *rssi_head; + prev = NULL; + for (;node;) { + if (!memcmp(&node->BSSID, &bssid, ETHER_ADDR_LEN)) { + ANDROID_INFO(("%s: Update %d with BSSID %pM, RSSI=%d\n", + __FUNCTION__, k, &bssid, rssi)); + for (j=0; jRSSI[j] = node->RSSI[j+1]; + node->RSSI[j] = rssi; + node->dirty = 0; + node->tv = timeout; + goto exit; + } + prev = node; + node = node->next; + k++; + } + + leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL); + if (!leaf) { + ANDROID_ERROR(("%s: Memory alloc failure %d\n", + __FUNCTION__, (int)sizeof(wl_rssi_cache_t))); + return 0; + } + ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%3d in the leaf\n", + __FUNCTION__, k, &bssid, rssi)); + + leaf->next = NULL; + leaf->dirty = 0; + leaf->tv = timeout; + memcpy(&leaf->BSSID, &bssid, ETHER_ADDR_LEN); + 
for (j=0; jRSSI[j] = rssi; + + if (!prev) + *rssi_head = leaf; + else + prev->next = leaf; + +exit: + *rssi_avg = (int)wl_get_avg_rssi(rssi_cache_ctrl, &bssid); + + return error; +} + +void +wl_update_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, wl_scan_results_t *ss_list) +{ + wl_rssi_cache_t *node, *prev, *leaf, **rssi_head; + wl_bss_info_t *bi = NULL; + int i, j, k; + struct timeval now, timeout; + + if (!ss_list->count) + return; + + do_gettimeofday(&now); + timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT; + if (timeout.tv_sec < now.tv_sec) { + /* + * Integer overflow - assume long enough timeout to be assumed + * to be infinite, i.e., the timeout would never happen. + */ + ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu", + __FUNCTION__, RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec)); + } + + rssi_head = &rssi_cache_ctrl->m_cache_head; + + /* update RSSI */ + for (i = 0; i < ss_list->count; i++) { + node = *rssi_head; + prev = NULL; + k = 0; + bi = bi ? 
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info; + for (;node;) { + if (!memcmp(&node->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) { + ANDROID_INFO(("%s: Update %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n", + __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID)); + for (j=0; jRSSI[j] = node->RSSI[j+1]; + node->RSSI[j] = dtoh16(bi->RSSI); + node->dirty = 0; + node->tv = timeout; + break; + } + prev = node; + node = node->next; + k++; + } + + if (node) + continue; + + leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL); + if (!leaf) { + ANDROID_ERROR(("%s: Memory alloc failure %d\n", + __FUNCTION__, (int)sizeof(wl_rssi_cache_t))); + return; + } + ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\" in the leaf\n", + __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID)); + + leaf->next = NULL; + leaf->dirty = 0; + leaf->tv = timeout; + memcpy(&leaf->BSSID, &bi->BSSID, ETHER_ADDR_LEN); + for (j=0; jRSSI[j] = dtoh16(bi->RSSI); + + if (!prev) + *rssi_head = leaf; + else + prev->next = leaf; + } +} + +int16 +wl_get_avg_rssi(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, void *addr) +{ + wl_rssi_cache_t *node, **rssi_head; + int j, rssi_sum, rssi=RSSI_MINVAL; + + rssi_head = &rssi_cache_ctrl->m_cache_head; + + node = *rssi_head; + for (;node;) { + if (!memcmp(&node->BSSID, addr, ETHER_ADDR_LEN)) { + rssi_sum = 0; + rssi = 0; + for (j=0; jRSSI[RSSIAVG_LEN-j-1]; + rssi = rssi_sum / j; + break; + } + node = node->next; + } + rssi = MIN(rssi, RSSI_MAXVAL); + if (rssi == RSSI_MINVAL) { + ANDROID_ERROR(("%s: BSSID %pM does not in RSSI cache\n", + __FUNCTION__, addr)); + } + return (int16)rssi; +} +#endif + +#if defined(RSSIOFFSET) +int +wl_update_rssi_offset(struct net_device *net, int rssi) +{ +#if defined(RSSIOFFSET_NEW) + int j; +#endif + + if (!g_wifi_on) + return rssi; + +#if defined(RSSIOFFSET_NEW) + for (j=0; jm_cache_head; + node = *bss_head; + + for (;node;) { + ANDROID_TRACE(("%s: Free %d with BSSID %pM\n", + __FUNCTION__, i, 
&node->results.bss_info->BSSID)); + cur = node; + node = cur->next; + kfree(cur); + i++; + } + *bss_head = NULL; +} + +void +wl_delete_dirty_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl) +{ + wl_bss_cache_t *node, *prev, **bss_head; + int i = -1, tmp = 0; + struct timeval now; + + do_gettimeofday(&now); + + bss_head = &bss_cache_ctrl->m_cache_head; + node = *bss_head; + prev = node; + for (;node;) { + i++; + if (now.tv_sec > node->tv.tv_sec) { + if (node == *bss_head) { + tmp = 1; + *bss_head = node->next; + } else { + tmp = 0; + prev->next = node->next; + } + ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n", + __FUNCTION__, i, &node->results.bss_info->BSSID, + dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID)); + kfree(node); + if (tmp == 1) { + node = *bss_head; + prev = node; + } else { + node = prev->next; + } + continue; + } + prev = node; + node = node->next; + } +} + +void +wl_delete_disconnected_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, u8 *bssid) +{ + wl_bss_cache_t *node, *prev, **bss_head; + int i = -1, tmp = 0; + + bss_head = &bss_cache_ctrl->m_cache_head; + node = *bss_head; + prev = node; + for (;node;) { + i++; + if (!memcmp(&node->results.bss_info->BSSID, bssid, ETHER_ADDR_LEN)) { + if (node == *bss_head) { + tmp = 1; + *bss_head = node->next; + } else { + tmp = 0; + prev->next = node->next; + } + ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n", + __FUNCTION__, i, &node->results.bss_info->BSSID, + dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID)); + kfree(node); + if (tmp == 1) { + node = *bss_head; + prev = node; + } else { + node = prev->next; + } + continue; + } + prev = node; + node = node->next; + } +} + +void +wl_reset_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl) +{ + wl_bss_cache_t *node, **bss_head; + + bss_head = &bss_cache_ctrl->m_cache_head; + + /* reset dirty */ + node = *bss_head; + for (;node;) { + node->dirty += 1; + node = node->next; + } +} + +void 
dump_bss_cache( +#if defined(RSSIAVG) + wl_rssi_cache_ctrl_t *rssi_cache_ctrl, +#endif + wl_bss_cache_t *node) +{ + int k = 0; + int16 rssi; + + for (;node;) { +#if defined(RSSIAVG) + rssi = wl_get_avg_rssi(rssi_cache_ctrl, &node->results.bss_info->BSSID); +#else + rssi = dtoh16(node->results.bss_info->RSSI); +#endif + ANDROID_TRACE(("%s: dump %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n", + __FUNCTION__, k, &node->results.bss_info->BSSID, rssi, node->results.bss_info->SSID)); + k++; + node = node->next; + } +} + +void +wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, +#if defined(RSSIAVG) + wl_rssi_cache_ctrl_t *rssi_cache_ctrl, +#endif + wl_scan_results_t *ss_list) +{ + wl_bss_cache_t *node, *prev, *leaf, **bss_head; + wl_bss_info_t *bi = NULL; + int i, k=0; +#if defined(SORT_BSS_BY_RSSI) + int16 rssi, rssi_node; +#endif + struct timeval now, timeout; + + if (!ss_list->count) + return; + + do_gettimeofday(&now); + timeout.tv_sec = now.tv_sec + BSSCACHE_TIMEOUT; + if (timeout.tv_sec < now.tv_sec) { + /* + * Integer overflow - assume long enough timeout to be assumed + * to be infinite, i.e., the timeout would never happen. + */ + ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu", + __FUNCTION__, BSSCACHE_TIMEOUT, now.tv_sec, timeout.tv_sec)); + } + + bss_head = &bss_cache_ctrl->m_cache_head; + + for (i=0; i < ss_list->count; i++) { + node = *bss_head; + prev = NULL; + bi = bi ? 
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info; + + for (;node;) { + if (!memcmp(&node->results.bss_info->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) { + if (node == *bss_head) + *bss_head = node->next; + else { + prev->next = node->next; + } + break; + } + prev = node; + node = node->next; + } + + leaf = kmalloc(dtoh32(bi->length) + sizeof(wl_bss_cache_t), GFP_KERNEL); + if (!leaf) { + ANDROID_ERROR(("%s: Memory alloc failure %d\n", __FUNCTION__, + dtoh32(bi->length) + (int)sizeof(wl_bss_cache_t))); + return; + } + if (node) { + kfree(node); + node = NULL; + ANDROID_TRACE(("%s: Update %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n", + __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID)); + } else + ANDROID_TRACE(("%s: Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n", + __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID)); + + memcpy(leaf->results.bss_info, bi, dtoh32(bi->length)); + leaf->next = NULL; + leaf->dirty = 0; + leaf->tv = timeout; + leaf->results.count = 1; + leaf->results.version = ss_list->version; + k++; + + if (*bss_head == NULL) + *bss_head = leaf; + else { +#if defined(SORT_BSS_BY_RSSI) + node = *bss_head; +#if defined(RSSIAVG) + rssi = wl_get_avg_rssi(rssi_cache_ctrl, &leaf->results.bss_info->BSSID); +#else + rssi = dtoh16(leaf->results.bss_info->RSSI); +#endif + for (;node;) { +#if defined(RSSIAVG) + rssi_node = wl_get_avg_rssi(rssi_cache_ctrl, &node->results.bss_info->BSSID); +#else + rssi_node = dtoh16(node->results.bss_info->RSSI); +#endif + if (rssi > rssi_node) { + leaf->next = node; + if (node == *bss_head) + *bss_head = leaf; + else + prev->next = leaf; + break; + } + prev = node; + node = node->next; + } + if (node == NULL) + prev->next = leaf; +#else + leaf->next = *bss_head; + *bss_head = leaf; +#endif + } + } + dump_bss_cache( +#if defined(RSSIAVG) + rssi_cache_ctrl, +#endif + *bss_head); +} + +void +wl_release_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl) +{ + ANDROID_TRACE(("%s:\n", 
__FUNCTION__)); + wl_free_bss_cache(bss_cache_ctrl); +} +#endif + + diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/amlogic/wifi/bcmdhd/wl_cfg80211.c similarity index 68% rename from drivers/net/wireless/bcmdhd/wl_cfg80211.c rename to drivers/amlogic/wifi/bcmdhd/wl_cfg80211.c index 7f10d51274d05..12ae90396ddd7 100644 --- a/drivers/net/wireless/bcmdhd/wl_cfg80211.c +++ b/drivers/amlogic/wifi/bcmdhd/wl_cfg80211.c @@ -1,9 +1,30 @@ /* * Linux cfg80211 driver * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: wl_cfg80211.c 506036 2014-10-02 11:33:14Z $ + * + * <> + * + * $Id: wl_cfg80211.c 610196 2016-01-06 11:20:45Z $ */ /* */ #include @@ -19,16 +40,6 @@ #include #include -#include -#include -#include -#include -#include -#include -#ifdef PNO_SUPPORT -#include -#endif /* PNO_SUPPORT */ - #include #include #include @@ -46,7 +57,21 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#ifdef PNO_SUPPORT +#include +#endif /* PNO_SUPPORT */ + +#if defined(WL_VENDOR_EXT_SUPPORT) #include +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ + #ifdef WL_NAN #include #endif /* WL_NAN */ @@ -56,6 +81,10 @@ #include #endif +#ifdef BCMPCIE +#include +#endif + #ifdef WL11U #if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF) #error You should enable 'WL_ENABLE_P2P_IF' or 'WL_CFG80211_P2P_DEV_IF' \ @@ -63,44 +92,19 @@ #endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */ #endif /* WL11U */ -#ifdef BCMWAPI_WPI -/* these items should evetually go into wireless.h of the linux system headfile dir */ -#ifndef IW_ENCODE_ALG_SM4 -#define IW_ENCODE_ALG_SM4 0x20 -#endif - -#ifndef IW_AUTH_WAPI_ENABLED -#define IW_AUTH_WAPI_ENABLED 0x20 -#endif - -#ifndef IW_AUTH_WAPI_VERSION_1 -#define IW_AUTH_WAPI_VERSION_1 0x00000008 -#endif - -#ifndef IW_AUTH_CIPHER_SMS4 -#define IW_AUTH_CIPHER_SMS4 0x00000020 -#endif - -#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK -#define IW_AUTH_KEY_MGMT_WAPI_PSK 4 -#endif - -#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT -#define IW_AUTH_KEY_MGMT_WAPI_CERT 8 -#endif -#endif /* BCMWAPI_WPI */ -#ifdef BCMWAPI_WPI -#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED)) -#else /* BCMWAPI_WPI */ #define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED)) -#endif /* BCMWAPI_WPI */ static struct device *cfg80211_parent_dev = NULL; /* g_bcm_cfg should be static. 
Do not change */ static struct bcm_cfg80211 *g_bcm_cfg = NULL; +#ifdef CUSTOMER_HW4_DEBUG +u32 wl_dbg_level = WL_DBG_ERR | WL_DBG_P2P_ACTION; +#else u32 wl_dbg_level = WL_DBG_ERR; +#endif /* CUSTOMER_HW4_DEBUG */ +#define MAX_WAIT_TIME 1500 #ifdef WLAIBSS_MCHAN #define IBSS_IF_NAME "ibss%d" #endif /* WLAIBSS_MCHAN */ @@ -131,20 +135,38 @@ u32 wl_dbg_level = WL_DBG_ERR; #else #define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) wl_get_drv_status_all(cfg, SENDING_ACT_FRM) #endif /* WL_CFG80211_SYNC_GON */ -#define WL_IS_P2P_DEV_EVENT(e) ((e->emsg.ifidx == 0) && \ - (e->emsg.bsscfgidx == P2PAPI_BSSCFG_DEVICE)) +#define DNGL_FUNC(func, parameters) func parameters #define COEX_DHCP #define WLAN_EID_SSID 0 #define CH_MIN_5G_CHANNEL 34 #define CH_MIN_2G_CHANNEL 1 +#define ACTIVE_SCAN 1 +#define PASSIVE_SCAN 0 + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \ +_Pragma("GCC diagnostic push") \ +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \ +(entry) = list_first_entry((ptr), type, member); \ +_Pragma("GCC diagnostic pop") \ + +#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \ +_Pragma("GCC diagnostic push") \ +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \ +entry = container_of((ptr), type, member); \ +_Pragma("GCC diagnostic pop") \ -#ifdef WLAIBSS -enum abiss_event_type { - AIBSS_EVENT_TXFAIL -}; -#endif +#else +#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \ +(entry) = list_first_entry((ptr), type, member); \ + +#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \ +entry = container_of((ptr), type, member); \ + +#endif /* STRICT_GCC_WARNINGS */ enum rmc_event_type { RMC_EVENT_NONE, @@ -156,7 +178,15 @@ enum rmc_event_type { * and NL80211_RRF_NO_IBSS for 5GHz channels (for 36..48 and 149..165). * With respect to these flags, wpa_supplicant doesn't start p2p operations on 5GHz channels. 
* All the chnages in world regulatory domain are to be done here. + * + * this definition reuires disabling missing-field-initializer warning + * as the ieee80211_regdomain definition differs in plain linux and in Android */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"") +#endif static const struct ieee80211_regdomain brcm_regdom = { .n_reg_rules = 4, .alpha2 = "99", @@ -173,20 +203,20 @@ static const struct ieee80211_regdomain brcm_regdom = { /* IEEE 802.11a, channel 100..165 */ REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), } }; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \ (defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF)) -/* - * Possible interface combinations supported by driver - * - * ADHOC Mode - #ADHOC <= 1 on channels = 1 - * SoftAP Mode - #AP <= 1 on channels = 1 - * STA + P2P Mode - #STA <= 2, #{P2P-GO, P2P-client} <= 1, #P2P-device <= 1 - * on channels = 2 - */ static const struct ieee80211_iface_limit common_if_limits[] = { { - .max = 1, + /* + * Driver can support up to 2 AP's + */ + .max = 2, .types = BIT(NL80211_IFTYPE_AP), }, { @@ -225,10 +255,17 @@ static const struct ieee80211_iface_limit common_if_limits[] = { #else #define NUM_DIFF_CHANNELS 2 #endif -static const struct ieee80211_iface_combination +static struct ieee80211_iface_combination common_iface_combinations[] = { { .num_different_channels = NUM_DIFF_CHANNELS, + /* + * max_interfaces = 4 + * The max no of interfaces will be used in dual p2p case. + * {STA, P2P Device, P2P Group 1, P2P Group 2}. Though we + * will not be using the STA functionality in this case, it + * will remain registered as it is the primary interface. 
+ */ .max_interfaces = 4, .limits = common_if_limits, .n_limits = ARRAY_SIZE(common_if_limits), @@ -269,19 +306,12 @@ common_iface_combinations[] = { #define WPS_CONFIG_VIRT_DISPLAY 0x2008 #define WPS_CONFIG_PHY_DISPLAY 0x4008 -#ifdef BCMCCX -#ifndef WLAN_AKM_SUITE_CCKM -#define WLAN_AKM_SUITE_CCKM 0x00409600 -#endif -#define DOT11_LEAP_AUTH 0x80 /* LEAP auth frame paylod constants */ -#endif /* BCMCCX */ +#define PM_BLOCK 1 +#define PM_ENABLE 0 -#ifdef MFP -#define WL_AKM_SUITE_MFP_1X 0x000FAC05 -#define WL_AKM_SUITE_MFP_PSK 0x000FAC06 -#define WL_MFP_CAPABLE 0x1 -#define WL_MFP_REQUIRED 0x2 -#endif /* MFP */ + +#define WL_AKM_SUITE_SHA256_1X 0x000FAC05 +#define WL_AKM_SUITE_SHA256_PSK 0x000FAC06 #ifndef IBSS_COALESCE_ALLOWED #define IBSS_COALESCE_ALLOWED 0 @@ -292,6 +322,16 @@ common_iface_combinations[] = { #endif #define CUSTOM_RETRY_MASK 0xff000000 /* Mask for retry counter of custom dwell time */ +#define LONG_LISTEN_TIME 2000 + +#define MAX_SCAN_ABORT_WAIT_CNT 20 +#define WAIT_SCAN_ABORT_OSL_SLEEP_TIME 10 + +#define IDSUP_4WAY_HANDSHAKE_TIMEOUT 10000 +enum idsup_event_type { + IDSUP_EVENT_SUCCESS = 0, + IDSUP_EVENT_4WAY_HANDSHAKE_TIMEOUT +}; /* * cfg80211_ops api/callback list */ @@ -301,10 +341,11 @@ static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da, static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_scan_request *request, struct cfg80211_ssid *this_ssid); -static s32 #if defined(WL_CFG80211_P2P_DEV_IF) +static s32 wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request); #else +static s32 wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_scan_request *request); #endif /* WL_CFG80211_P2P_DEV_IF */ @@ -317,9 +358,15 @@ static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ibss_params *params); static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev); +#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(3, 16, 0)) +static s32 wl_cfg80211_get_station(struct wiphy *wiphy, + struct net_device *dev, const u8 *mac, + struct station_info *sinfo); +#else static s32 wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_info *sinfo); +#endif static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, bool enabled, s32 timeout); @@ -327,20 +374,20 @@ static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme); static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_code); -static s32 #if defined(WL_CFG80211_P2P_DEV_IF) +static s32 wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, s32 mbm); #else +static s32 wl_cfg80211_set_tx_power(struct wiphy *wiphy, enum nl80211_tx_power_setting type, s32 dbm); #endif /* WL_CFG80211_P2P_DEV_IF */ -static s32 #if defined(WL_CFG80211_P2P_DEV_IF) -wl_cfg80211_get_tx_power(struct wiphy *wiphy, +static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, s32 *dbm); #else -wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm); +static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm); #endif /* WL_CFG80211_P2P_DEV_IF */ static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev, @@ -361,53 +408,98 @@ static s32 wl_cfg80211_resume(struct wiphy *wiphy); 2, 0)) static s32 wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, u64 cookie); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) +static s32 wl_cfg80211_del_station( + struct wiphy *wiphy, struct net_device *ndev, + struct station_del_parameters *params); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +static s32 wl_cfg80211_del_station(struct wiphy *wiphy, + struct net_device *ndev, const u8* mac_addr); +#else static s32 wl_cfg80211_del_station(struct wiphy *wiphy, 
struct net_device *ndev, u8* mac_addr); +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +static s32 wl_cfg80211_change_station(struct wiphy *wiphy, + struct net_device *dev, const u8 *mac, struct station_parameters *params); +#else static s32 wl_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_parameters *params); +#endif #endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */ -static s32 -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS) -wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) +static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow); #else -wl_cfg80211_suspend(struct wiphy *wiphy); -#endif +static s32 wl_cfg80211_suspend(struct wiphy *wiphy); +#endif /* KERNEL_VERSION(2, 6, 39) || WL_COMPAT_WIRELES */ static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_pmksa *pmksa); static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_pmksa *pmksa); static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev); -#ifdef P2PONEINT -void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg); -#else -void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg); -#endif +static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg); +static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg); static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool aborted, bool fw_abort); -#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS) -#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) +#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \ + KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) 
static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, const u8 *data, size_t len); +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) +static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, const u8 *data, size_t len); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) +static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, bool initiator, const u8 *data, size_t len); #else static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *data, size_t len); #endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, enum nl80211_tdls_operation oper); +#else static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, u8 *peer, enum nl80211_tdls_operation oper); -#endif /* LINUX_VERSION > KERNEL_VERSION(3,2,0) || WL_COMPAT_WIRELESS */ +#endif +#endif #ifdef WL_SCHED_SCAN static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev); #endif -#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF) +#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) bcm_struct_cfgdev* wl_cfg80211_create_iface(struct wiphy *wiphy, enum nl80211_iftype iface_type, u8 *mac_addr, const char *name); s32 wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev); -#endif /* defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF) */ +#endif /* defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) */ + +s32 
wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg, + struct net_device *ndev, s32 bsscfg_idx, + enum nl80211_iftype iface_type, s32 del, u8 *addr); +s32 wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg, + struct net_device *ndev, s32 bsscfg_idx, + enum nl80211_iftype iface_type, s32 del, u8 *addr); +chanspec_t wl_chspec_driver_to_host(chanspec_t chanspec); +chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec); +#ifdef WL11ULB +static s32 wl_cfg80211_get_ulb_bw(struct wireless_dev *wdev); +static chanspec_t wl_cfg80211_ulb_get_min_bw_chspec(struct wireless_dev *wdev, s32 bssidx); +static s32 wl_cfg80211_ulbbw_to_ulbchspec(u32 ulb_bw); +#else +static inline chanspec_t wl_cfg80211_ulb_get_min_bw_chspec( + struct wireless_dev *wdev, s32 bssidx) +{ + return WL_CHANSPEC_BW_20; +} +#endif /* WL11ULB */ /* * event & event Q handlers for cfg80211 interfaces @@ -453,14 +545,21 @@ wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev, static s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data); #endif /* PNO_SUPPORT */ +#ifdef GSCAN_SUPPORT +static s32 wl_notify_gscan_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); +#endif /* GSCAN_SUPPORT */ static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info, enum wl_status state, bool set); -#ifdef WL_SDO -static s32 wl_svc_resp_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, - const wl_event_msg_t *e, void *data); -static s32 wl_notify_device_discovery(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, +#ifdef DHD_LOSSLESS_ROAMING +static s32 wl_notify_roam_prep_status(struct bcm_cfg80211 *cfg, + bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data); +static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg); +#endif /* DHD_LOSSLESS_ROAMING */ +#ifdef CUSTOM_EVENT_PM_WAKE +static s32 wl_check_pmstatus(struct bcm_cfg80211 *cfg, 
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data); -#endif +#endif /* CUSTOM_EVENT_PM_WAKE */ #ifdef WLTDLS static s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, @@ -470,7 +569,6 @@ static s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cf * register/deregister parent device */ static void wl_cfg80211_clear_parent_dev(void); - /* * ioctl utilites */ @@ -486,7 +584,7 @@ static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l); * cfg profile utilities */ static s32 wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, - const wl_event_msg_t *e, void *data, s32 item); + const wl_event_msg_t *e, const void *data, s32 item); static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item); static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev); @@ -503,26 +601,21 @@ static s32 wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme); static s32 wl_set_set_sharedkey(struct net_device *dev, struct cfg80211_connect_params *sme); -#ifdef BCMWAPI_WPI -static s32 wl_set_set_wapi_ie(struct net_device *dev, - struct cfg80211_connect_params *sme); -#endif static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev); -static void wl_ch_to_chanspec(int ch, +static s32 wl_ch_to_chanspec(struct net_device *dev, int ch, struct wl_join_params *join_params, size_t *join_params_size); +void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg); /* * information element utilities */ static void wl_rst_ie(struct bcm_cfg80211 *cfg); static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v); -static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size, bool roam); +static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, const u8 *ie_stream, u32 *ie_size, + bool roam); static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size); static s32 wl_cp_ie(struct 
bcm_cfg80211 *cfg, u8 *dst, u16 dst_size); static u32 wl_get_ielen(struct bcm_cfg80211 *cfg); -#ifdef MFP -static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8* capa); -#endif #ifdef WL11U bcm_tlv_t * @@ -535,24 +628,20 @@ wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bss static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev, dhd_pub_t *data); static void wl_free_wdev(struct bcm_cfg80211 *cfg); #ifdef CONFIG_CFG80211_INTERNAL_REGDB +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) static int +#else +static void +#endif /* kernel version < 3.10.11 */ wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request); #endif /* CONFIG_CFG80211_INTERNAL_REGDB */ static s32 wl_inform_bss(struct bcm_cfg80211 *cfg); static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam); static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam); -#ifdef P2PONEINT -chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy); -#else -chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy); -#endif +static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy); s32 wl_cfg80211_channel_to_freq(u32 channel); -#if defined(DHCP_SCAN_SUPPRESS) -static void wl_cfg80211_work_handler(struct work_struct *work); -static void wl_cfg80211_scan_supp_timerfunc(ulong data); -#endif /* DHCP_SCAN_SUPPRESS */ static void wl_cfg80211_work_handler(struct work_struct *work); static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev, @@ -631,32 +720,27 @@ int dhd_monitor_uninit(void); int dhd_start_xmit(struct sk_buff *skb, struct net_device *net); +#ifdef DHD_IFDEBUG +void wl_dump_ifinfo(struct bcm_cfg80211 *cfg); +#endif + +#ifdef P2P_LISTEN_OFFLOADING +s32 wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg); +#endif /* P2P_LISTEN_OFFLOADING */ + static int wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev, const struct 
ether_addr *bssid); -#ifdef WL_SDO -s32 wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg); -s32 wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg); -#define MAX_SDO_PROTO 5 -wl_sdo_proto_t wl_sdo_protos [] = { - { "all", SVC_RPOTYPE_ALL }, - { "upnp", SVC_RPOTYPE_UPNP }, - { "bonjour", SVC_RPOTYPE_BONJOUR }, - { "wsd", SVC_RPOTYPE_WSD }, - { "vendor", SVC_RPOTYPE_VENDOR }, -}; -#endif static int bw2cap[] = { 0, 0, WLC_BW_CAP_20MHZ, WLC_BW_CAP_40MHZ, WLC_BW_CAP_80MHZ, WLC_BW_CAP_160MHZ, WLC_BW_CAP_160MHZ }; -#define RETURN_EIO_IF_NOT_UP(wlpriv) \ -do { \ - struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv); \ - if (unlikely(!wl_get_drv_status(wlpriv, READY, checkSysUpNDev))) { \ - WL_INFORM(("device is not ready\n")); \ - return -EIO; \ - } \ -} while (0) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) +#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \ + cfg80211_disconnected(dev, reason, ie, len, gfp); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) +#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \ + cfg80211_disconnected(dev, reason, ie, len, loc_gen, gfp); +#endif #define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \ (akm) == RSN_AKM_UNSPECIFIED || \ @@ -668,8 +752,24 @@ extern int dhd_wait_pend8021x(struct net_device *dev); extern int disable_proptx; #endif /* PROP_TXSTATUS_VSDB */ + extern int passive_channel_skip; +static s32 +wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); +static s32 +wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0)) && (LINUX_VERSION_CODE <= (3, 7, \ + 0))) +struct chan_info { + int freq; + int chan_type; +}; +#endif + + #if (WL_DBG_LEVEL > 0) #define WL_DBG_ESTR_MAX 50 static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = { @@ -683,7 +783,8 @@ static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = { "PFN_NET_LOST", "RESET_COMPLETE", 
"JOIN_START", "ROAM_START", "ASSOC_START", "IBSS_ASSOC", - "RADIO", "PSM_WATCHDOG", "WLC_E_CCX_ASSOC_START", "WLC_E_CCX_ASSOC_ABORT", + "RADIO", "PSM_WATCHDOG", + "WLC_E_XXX_ASSOC_START", "WLC_E_XXX_ASSOC_ABORT", "PROBREQ_MSG", "SCAN_CONFIRM_IND", "PSK_SUP", "COUNTRY_CODE_CHANGED", "EXCEEDED_MEDIUM_TIME", "ICV_ERROR", @@ -699,6 +800,13 @@ static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = { }; #endif /* WL_DBG_LEVEL */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)) +#define ieee80211_band nl80211_band +#define IEEE80211_BAND_2GHZ NL80211_BAND_2GHZ +#define IEEE80211_BAND_5GHZ NL80211_BAND_5GHZ +#define IEEE80211_NUM_BANDS NUM_NL80211_BANDS +#endif + #define CHAN2G(_channel, _freq, _flags) { \ .band = IEEE80211_BAND_2GHZ, \ .center_freq = (_freq), \ @@ -775,8 +883,8 @@ static struct ieee80211_channel __wl_5ghz_a_channels[] = { CHAN5G(124, 0), CHAN5G(128, 0), CHAN5G(132, 0), CHAN5G(136, 0), CHAN5G(140, 0), CHAN5G(144, 0), - CHAN5G(149, 0), CHAN5G(153, 0), - CHAN5G(157, 0), CHAN5G(161, 0), + CHAN5G(149, 0), CHAN5G(153, 0), + CHAN5G(157, 0), CHAN5G(161, 0), CHAN5G(165, 0) }; @@ -802,12 +910,6 @@ static const u32 __wl_cipher_suites[] = { WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, WLAN_CIPHER_SUITE_AES_CMAC, -#ifdef BCMWAPI_WPI - WLAN_CIPHER_SUITE_SMS4, -#endif -#if defined(WLFBT) && defined(WLAN_CIPHER_SUITE_PMK) - WLAN_CIPHER_SUITE_PMK, -#endif }; #ifdef WL_SUPPORT_ACS @@ -848,35 +950,84 @@ static const struct { }; #endif +#ifdef CUSTOMER_HW4_DEBUG +uint prev_dhd_console_ms = 0; +u32 prev_wl_dbg_level = 0; +bool wl_scan_timeout_dbg_enabled = 0; +static void wl_scan_timeout_dbg_set(void); +static void wl_scan_timeout_dbg_clear(void); + +static void wl_scan_timeout_dbg_set(void) +{ + WL_ERR(("Enter \n")); + prev_dhd_console_ms = dhd_console_ms; + prev_wl_dbg_level = wl_dbg_level; + + dhd_console_ms = 1; + wl_dbg_level |= (WL_DBG_ERR | WL_DBG_P2P_ACTION | WL_DBG_SCAN); + + wl_scan_timeout_dbg_enabled = 1; +} +static void wl_scan_timeout_dbg_clear(void) +{ + 
WL_ERR(("Enter \n")); + dhd_console_ms = prev_dhd_console_ms; + wl_dbg_level = prev_wl_dbg_level; + + wl_scan_timeout_dbg_enabled = 0; +} +#endif /* CUSTOMER_HW4_DEBUG */ + +/* watchdog timer for disconnecting when fw is not associated for FW_ASSOC_WATCHDOG_TIME ms */ +uint32 fw_assoc_watchdog_ms = 0; +bool fw_assoc_watchdog_started = 0; +#define FW_ASSOC_WATCHDOG_TIME 10 * 1000 /* msec */ + +#ifdef DHD_IFDEBUG + +void wl_dump_ifinfo(struct bcm_cfg80211 *cfg) +{ + WL_ERR(("cfg=%p\n", cfg)); + if (cfg) { + WL_ERR(("cfg->wdev=%p\n", bcmcfg_to_prmry_wdev(cfg))); + if (bcmcfg_to_prmry_wdev(cfg)) { + WL_ERR(("cfg->wdev->wiphy=%p\n", bcmcfg_to_wiphy(cfg))); + WL_ERR(("cfg->wdev->netdev=%p\n", bcmcfg_to_prmry_ndev(cfg))); + } + } +} +#endif -static void wl_add_remove_pm_enable_work(struct bcm_cfg80211 *cfg, bool add_remove, - enum wl_handler_del_type type) +static void wl_add_remove_pm_enable_work(struct bcm_cfg80211 *cfg, + enum wl_pm_workq_act_type type) { + u16 wq_duration = 0; + if (cfg == NULL) return; - if (cfg->pm_enable_work_on) { - if (add_remove) { - schedule_delayed_work(&cfg->pm_enable_work, - msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT)); - } else { - cancel_delayed_work_sync(&cfg->pm_enable_work); - switch (type) { - case WL_HANDLER_MAINTAIN: - schedule_delayed_work(&cfg->pm_enable_work, - msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT)); - break; - case WL_HANDLER_PEND: - schedule_delayed_work(&cfg->pm_enable_work, - msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT*2)); - break; - case WL_HANDLER_DEL: - default: - cfg->pm_enable_work_on = false; - break; - } - } + mutex_lock(&cfg->pm_sync); + /* + * Make cancel and schedule work part mutually exclusive + * so that while cancelling, we are sure that there is no + * work getting scheduled. 
+ */ + if (delayed_work_pending(&cfg->pm_enable_work)) { + cancel_delayed_work_sync(&cfg->pm_enable_work); + DHD_OS_WAKE_UNLOCK(cfg->pub); + } + + if (type == WL_PM_WORKQ_SHORT) { + wq_duration = WL_PM_ENABLE_TIMEOUT; + } else if (type == WL_PM_WORKQ_LONG) { + wq_duration = (WL_PM_ENABLE_TIMEOUT*2); + } + if (wq_duration) { + DHD_OS_WAKE_LOCK(cfg->pub); + schedule_delayed_work(&cfg->pm_enable_work, + msecs_to_jiffies((const unsigned int)wq_duration)); } + mutex_unlock(&cfg->pm_sync); } /* Return a new chanspec given a legacy chanspec @@ -989,9 +1140,8 @@ wl_chspec_host_to_driver(chanspec_t chanspec) * Returns INVCHANSPEC on error */ chanspec_t -wl_ch_host_to_driver(u16 channel) +wl_ch_host_to_driver(s32 bssidx, u16 channel) { - chanspec_t chanspec; chanspec = channel & WL_CHANSPEC_CHAN_MASK; @@ -1001,7 +1151,8 @@ wl_ch_host_to_driver(u16 channel) else chanspec |= WL_CHANSPEC_BAND_5G; - chanspec |= WL_CHANSPEC_BW_20; + chanspec |= wl_cfg80211_ulb_get_min_bw_chspec(NULL, bssidx); + chanspec |= WL_CHANSPEC_CTL_SB_NONE; return wl_chspec_host_to_driver(chanspec); @@ -1011,7 +1162,7 @@ wl_ch_host_to_driver(u16 channel) * a chanspec_t value * Returns INVCHANSPEC on error */ -static chanspec_t +chanspec_t wl_chspec_driver_to_host(chanspec_t chanspec) { chanspec = dtohchanspec(chanspec); @@ -1042,45 +1193,6 @@ wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n) return (count == ETHER_ADDR_LEN); } -/* convert hex string buffer to binary */ -int -wl_cfg80211_hex_str_to_bin(unsigned char *data, int dlen, char *str) -{ - int count, slen; - int hvalue; - char tmp[3] = {0}; - char *ptr = str, *endp = NULL; - - if (!data || !str || !dlen) { - WL_DBG((" passed buffer is empty \n")); - return 0; - } - - slen = strlen(str); - if (dlen * 2 < slen) { - WL_DBG((" destination buffer too short \n")); - return 0; - } - - if (slen % 2) { - WL_DBG((" source buffer is of odd length \n")); - return 0; - } - - for (count = 0; count < slen; count += 2) { - memcpy(tmp, ptr, 2); - hvalue = 
simple_strtol(tmp, &endp, 16); - if (*endp != '\0') { - WL_DBG((" non hexadecimal character encountered \n")); - return 0; - } - *data++ = (unsigned char)hvalue; - ptr += 2; - } - - return (slen / 2); -} - /* There isn't a lot of sense in it, but you can transmit anything you like */ static const struct ieee80211_txrx_stypes wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { @@ -1264,12 +1376,6 @@ s32 wl_set_tx_power(struct net_device *dev, if (dbm > 0xffff) dbm = 0xffff; txpwrqdbm = dbm * 4; -#ifdef SUPPORT_WL_TXPOWER - if (type == NL80211_TX_POWER_AUTOMATIC) - txpwrqdbm = 127; - else - txpwrqdbm |= WL_TXPWR_OVERRIDE; -#endif /* SUPPORT_WL_TXPOWER */ err = wldev_iovar_setbuf_bsscfg(dev, "qtxpower", (void *)&txpwrqdbm, sizeof(txpwrqdbm), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); @@ -1303,12 +1409,7 @@ s32 wl_get_tx_power(struct net_device *dev, s32 *dbm) return err; } -chanspec_t -#ifdef P2PONEINT -wl_cfg80211_get_shared_freq(struct wiphy *wiphy) -#else -wl_cfg80211_get_shared_freq(struct wiphy *wiphy) -#endif +static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy) { chanspec_t chspec; int err = 0; @@ -1316,6 +1417,7 @@ wl_cfg80211_get_shared_freq(struct wiphy *wiphy) struct net_device *dev = bcmcfg_to_prmry_ndev(cfg); struct ether_addr bssid; struct wl_bss_info *bss = NULL; + s32 bssidx = 0; /* Explicitly set to primary bssidx */ if ((err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), false))) { /* STA interface is not associated. So start the new interface on a temp @@ -1323,7 +1425,7 @@ wl_cfg80211_get_shared_freq(struct wiphy *wiphy) * via set_channel (cfg80211 API). */ WL_DBG(("Not associated. Return a temp channel. 
\n")); - return wl_ch_host_to_driver(WL_P2P_TEMP_CHAN); + return wl_ch_host_to_driver(bssidx, WL_P2P_TEMP_CHAN); } @@ -1331,7 +1433,7 @@ wl_cfg80211_get_shared_freq(struct wiphy *wiphy) if ((err = wldev_ioctl(dev, WLC_GET_BSS_INFO, cfg->extra_buf, WL_EXTRA_BUF_MAX, false))) { WL_ERR(("Failed to get associated bss info, use temp channel \n")); - chspec = wl_ch_host_to_driver(WL_P2P_TEMP_CHAN); + chspec = wl_ch_host_to_driver(bssidx, WL_P2P_TEMP_CHAN); } else { bss = (struct wl_bss_info *) (cfg->extra_buf + 4); @@ -1364,42 +1466,46 @@ wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, #else char *name, #endif /* WL_CFG80211_P2P_DEV_IF */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + unsigned char name_assign_type, +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */ enum nl80211_iftype type, u32 *flags, struct vif_params *params) { - s32 err; + s32 err = -ENODEV; s32 timeout = -1; s32 wlif_type = -1; s32 mode = 0; s32 val = 0; + s32 cfg_type; s32 dhd_mode = 0; chanspec_t chspec; struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); struct net_device *primary_ndev; struct net_device *new_ndev; struct ether_addr primary_mac; +#ifdef WL_VIRTUAL_APSTA + bcm_struct_cfgdev *new_cfgdev; +#endif /* WL_VIRTUAL_APSTA */ #ifdef PROP_TXSTATUS_VSDB #if defined(BCMSDIO) s32 up = 1; - dhd_pub_t *dhd; bool enabled; -#endif +#endif + dhd_pub_t *dhd; #endif /* PROP_TXSTATUS_VSDB */ #if defined(SUPPORT_AP_POWERSAVE) dhd_pub_t *dhd; -#endif +#endif /* SUPPORT_AP_POWERSAVE */ + bool hang_required = false; if (!cfg) return ERR_PTR(-EINVAL); -#ifdef PROP_TXSTATUS_VSDB -#if defined(BCMSDIO) dhd = (dhd_pub_t *)(cfg->pub); -#endif -#endif /* PROP_TXSTATUS_VSDB */ #if defined(SUPPORT_AP_POWERSAVE) dhd = (dhd_pub_t *)(cfg->pub); -#endif +#endif /* SUPPORT_AP_POWERSAVE */ /* Use primary I/F for sending cmds down to firmware */ primary_ndev = bcmcfg_to_prmry_ndev(cfg); @@ -1425,24 +1531,35 @@ wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, return wl_cfg80211_add_monitor_if((char 
*)name); #if defined(WL_CFG80211_P2P_DEV_IF) case NL80211_IFTYPE_P2P_DEVICE: + cfg->down_disc_if = FALSE; return wl_cfgp2p_add_p2p_disc_if(cfg); #endif /* WL_CFG80211_P2P_DEV_IF */ case NL80211_IFTYPE_STATION: -#ifdef DUAL_STA +#ifdef WL_VIRTUAL_APSTA #ifdef WLAIBSS_MCHAN if (cfg->ibss_cfgdev) { WL_ERR(("AIBSS is already operational. " " AIBSS & DUALSTA can't be used together \n")); - return NULL; + return ERR_PTR(-ENOMEM); } #endif /* WLAIBSS_MCHAN */ if (!name) { WL_ERR(("Interface name not provided \n")); - return NULL; + return ERR_PTR(-ENODEV); + } + + if (wl_cfgp2p_vif_created(cfg)) { + WL_ERR(("Could not create new iface." + "Already one p2p interface is running")); + return ERR_PTR(-ENODEV); } - return wl_cfg80211_create_iface(cfg->wdev->wiphy, + new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy, NL80211_IFTYPE_STATION, NULL, name); -#endif /* DUAL_STA */ + if (!new_cfgdev) + return ERR_PTR(-ENOMEM); + else + return new_cfgdev; +#endif /* WL_VIRTUAL_APSTA */ case NL80211_IFTYPE_P2P_CLIENT: wlif_type = WL_P2P_IF_CLIENT; mode = WL_MODE_BSS; @@ -1454,13 +1571,13 @@ wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, break; default: WL_ERR(("Unsupported interface type\n")); - return NULL; + return ERR_PTR(-ENODEV); break; } if (!name) { WL_ERR(("name is NULL\n")); - return NULL; + return ERR_PTR(-ENODEV); } if (cfg->p2p_supported && (wlif_type != -1)) { ASSERT(cfg->p2p); /* ensure expectation of p2p initialization */ @@ -1469,22 +1586,26 @@ wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, #if defined(BCMSDIO) if (!dhd) return ERR_PTR(-ENODEV); -#endif +#endif #endif /* PROP_TXSTATUS_VSDB */ if (!cfg->p2p) return ERR_PTR(-ENODEV); + if (cfg->cfgdev_bssidx != -1) { + WL_ERR(("Failed to start p2p, Maximum no of interface reached")); + return ERR_PTR(-ENODEV); + } + if (cfg->p2p && !cfg->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) { p2p_on(cfg) = true; wl_cfgp2p_set_firm_p2p(cfg); wl_cfgp2p_init_discovery(cfg); get_primary_mac(cfg, &primary_mac); - 
wl_cfgp2p_generate_bss_mac(&primary_mac, - &cfg->p2p->dev_addr, &cfg->p2p->int_addr); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); } - memset(cfg->p2p->vir_ifname, 0, IFNAMSIZ); strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1); + cfg->p2p->vir_ifname[IFNAMSIZ - 1] = '\0'; wl_cfg80211_scan_abort(cfg); #ifdef PROP_TXSTATUS_VSDB @@ -1500,9 +1621,17 @@ wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, } cfg->wlfc_on = true; } -#endif +#endif #endif /* PROP_TXSTATUS_VSDB */ + /* Dual p2p doesn't support multiple P2PGO interfaces, + * p2p_go_count is the counter for GO creation + * requests. + */ + if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) { + WL_ERR(("Fw doesnot support multiple Go")); + return ERR_PTR(-ENOMEM); + } /* In concurrency case, STA may be already associated in a particular channel. * so retrieve the current channel of primary interface and then start the virtual * interface on that. @@ -1512,11 +1641,22 @@ wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, /* For P2P mode, use P2P-specific driver features to create the * bss: "cfg p2p_ifadd" */ + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); + return ERR_PTR(-ENOMEM); + } wl_set_p2p_status(cfg, IF_ADDING); memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info)); if (wlif_type == WL_P2P_IF_GO) wldev_iovar_setint(primary_ndev, "mpc", 0); - err = wl_cfgp2p_ifadd(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec); + cfg_type = wl_cfgp2p_get_conn_idx(cfg); + if (cfg_type == BCME_ERROR) { + wl_clr_p2p_status(cfg, IF_ADDING); + WL_ERR(("Failed to get connection idx for p2p interface")); + goto fail; + } + err = wl_cfgp2p_ifadd(cfg, wl_to_p2p_bss_macaddr(cfg, cfg_type), + htod32(wlif_type), chspec); if (unlikely(err)) { wl_clr_p2p_status(cfg, IF_ADDING); WL_ERR((" virtual iface add failed (%d) \n", err)); @@ -1524,49 +1664,81 @@ wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, } timeout = 
wait_event_interruptible_timeout(cfg->netif_change_event, - (wl_get_p2p_status(cfg, IF_ADDING) == false), + ((wl_get_p2p_status(cfg, IF_ADDING) == false) && + (cfg->if_event_info.valid)), msecs_to_jiffies(MAX_WAIT_TIME)); if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) { struct wireless_dev *vwdev; int pm_mode = PM_ENABLE; wl_if_event_info *event = &cfg->if_event_info; - /* IF_ADD event has come back, we can proceed to to register * the new interface now, use the interface name provided by caller (thus * ignore the one from wlc) */ - strncpy(cfg->if_event_info.name, name, IFNAMSIZ - 1); new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, cfg->p2p->vir_ifname, - event->mac, event->bssidx); + event->mac, event->bssidx, event->name); if (new_ndev == NULL) goto fail; - wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = new_ndev; - wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = event->bssidx; + wl_to_p2p_bss_ndev(cfg, cfg_type) = new_ndev; + wl_to_p2p_bss_bssidx(cfg, cfg_type) = event->bssidx; vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL); if (unlikely(!vwdev)) { WL_ERR(("Could not allocate wireless device\n")); + err = -ENOMEM; goto fail; } vwdev->wiphy = cfg->wdev->wiphy; WL_INFORM(("virtual interface(%s) is created\n", cfg->p2p->vir_ifname)); + if (type == NL80211_IFTYPE_P2P_GO) { + cfg->p2p->p2p_go_count++; + } vwdev->iftype = type; +#ifdef DHD_IFDEBUG + WL_ERR(("new_ndev: %p\n", new_ndev)); +#endif vwdev->netdev = new_ndev; new_ndev->ieee80211_ptr = vwdev; SET_NETDEV_DEV(new_ndev, wiphy_dev(vwdev->wiphy)); wl_set_drv_status(cfg, READY, new_ndev); - cfg->p2p->vif_created = true; wl_set_mode_by_netdev(cfg, new_ndev, mode); if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) { wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev); + err = -ENODEV; + goto fail; + } + err = wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode, event->bssidx); + if (unlikely(err != 0)) { + WL_ERR(("Allocation of netinfo failed (%d) 
\n", err)); goto fail; } - wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode); val = 1; /* Disable firmware roaming for P2P interface */ wldev_iovar_setint(new_ndev, "roam_off", val); + wldev_iovar_setint(new_ndev, "bcn_timeout", dhd->conf->bcn_timeout); +#ifdef WL11ULB + if (cfg->p2p_wdev && is_p2p_group_iface(new_ndev->ieee80211_ptr)) { + u32 ulb_bw = wl_cfg80211_get_ulb_bw(cfg->p2p_wdev); + if (ulb_bw) { + /* Apply ULB BW settings on the newly spawned interface */ + WL_DBG(("[ULB] Applying ULB BW for the newly" + "created P2P interface \n")); + if (wl_cfg80211_set_ulb_bw(new_ndev, + ulb_bw, new_ndev->name) < 0) { + /* + * If ulb_bw set failed, fail the iface creation. + * wl_dealloc_netinfo_by_wdev will be called by the + * unregister notifier. + */ + wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev); + err = -EINVAL; + goto fail; + } + } + } +#endif /* WL11ULB */ if (mode != WL_MODE_AP) wldev_iovar_setint(new_ndev, "buf_key_b4_m4", 1); @@ -1579,7 +1751,7 @@ wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, if (mode == WL_MODE_AP) { dhd_set_ap_powersave(dhd, 0, TRUE); } -#endif +#endif /* SUPPORT_AP_POWERSAVE */ if (type == NL80211_IFTYPE_P2P_CLIENT) dhd_mode = DHD_FLAG_P2P_GC_MODE; else if (type == NL80211_IFTYPE_P2P_GO) @@ -1603,46 +1775,60 @@ wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, wl_clr_p2p_status(cfg, GO_NEG_PHASE); wl_set_p2p_status(cfg, IF_DELETING); - err = wl_cfgp2p_ifdel(cfg, &cfg->p2p->int_addr); + err = wl_cfgp2p_ifdel(cfg, wl_to_p2p_bss_macaddr(cfg, cfg_type)); if (err == BCME_OK) { timeout = wait_event_interruptible_timeout(cfg->netif_change_event, - (wl_get_p2p_status(cfg, IF_DELETING) == false), + ((wl_get_p2p_status(cfg, IF_DELETING) == false) && + (cfg->if_event_info.valid)), msecs_to_jiffies(MAX_WAIT_TIME)); if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) && cfg->if_event_info.valid) { + /* + * Should indicate upper layer this failure case of p2p + * interface creation + */ WL_ERR(("IFDEL operation done\n")); } else { 
WL_ERR(("IFDEL didn't complete properly\n")); - err = BCME_ERROR; + hang_required = true; } + } else { + hang_required = true; } - if (err != BCME_OK) { + + if (hang_required) { struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n", err, ndev->name)); + dhd->hang_reason = HANG_REASON_P2P_IFACE_DEL_FAILURE; net_os_send_hang_message(ndev); } memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ); - cfg->p2p->vif_created = false; + wl_to_p2p_bss_bssidx(cfg, cfg_type) = -1; #ifdef PROP_TXSTATUS_VSDB #if defined(BCMSDIO) dhd_wlfc_get_enable(dhd, &enabled); - if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE && - dhd->op_mode != DHD_FLAG_IBSS_MODE) { - dhd_wlfc_deinit(dhd); - cfg->wlfc_on = false; - } -#endif + if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE && + dhd->op_mode != DHD_FLAG_IBSS_MODE && dhd->conf->disable_proptx!=0) { + dhd_wlfc_deinit(dhd); + cfg->wlfc_on = false; + } +#endif #endif /* PROP_TXSTATUS_VSDB */ + /* + * Returns -ENODEV to upperlayer to indicate that DHD + * failed to create p2p interface + */ + err = -ENODEV; } } - fail: if (wlif_type == WL_P2P_IF_GO) wldev_iovar_setint(primary_ndev, "mpc", 1); - return ERR_PTR(-ENODEV); + return ERR_PTR(err); } static s32 @@ -1654,6 +1840,7 @@ wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev) s32 timeout = -1; s32 ret = 0; s32 index = -1; + s32 type = -1; #ifdef CUSTOM_SET_CPUCORE dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); #endif /* CUSTOM_SET_CPUCORE */ @@ -1664,10 +1851,15 @@ wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev) if (!(dhd->chan_isvht80)) dhd_set_cpucore(dhd, FALSE); #endif /* CUSTOM_SET_CPUCORE */ -#if defined(WL_CFG80211_P2P_DEV_IF) +#ifdef WL_CFG80211_P2P_DEV_IF if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) { - return wl_cfgp2p_del_p2p_disc_if(cfgdev, cfg); - } + if (dhd_download_fw_on_driverload) { + 
return wl_cfgp2p_del_p2p_disc_if(cfgdev, cfg); + } else { + cfg->down_disc_if = TRUE; + return 0; + } + } #endif /* WL_CFG80211_P2P_DEV_IF */ dev = cfgdev_to_wlc_ndev(cfgdev, cfg); @@ -1676,29 +1868,34 @@ wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev) return bcm_cfg80211_del_ibss_if(wiphy, cfgdev); #endif /* WLAIBSS_MCHAN */ -#ifdef DUAL_STA +#ifdef WL_VIRTUAL_APSTA if (cfgdev == cfg->bss_cfgdev) return wl_cfg80211_del_iface(wiphy, cfgdev); -#endif /* DUAL_STA */ - - if (wl_cfgp2p_find_idx(cfg, dev, &index) != BCME_OK) { - WL_ERR(("Find p2p index from ndev(%p) failed\n", dev)); +#endif /* WL_VIRTUAL_APSTA */ + if ((index = wl_get_bssidx_by_wdev(cfg, cfgdev_to_wdev(cfgdev))) < 0) { + WL_ERR(("Find p2p index from wdev failed\n")); + return BCME_ERROR; + } + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); return BCME_ERROR; } if (cfg->p2p_supported) { - memcpy(p2p_mac.octet, cfg->p2p->int_addr.octet, ETHER_ADDR_LEN); + if (wl_cfgp2p_find_type(cfg, index, &type) != BCME_OK) + return BCME_ERROR; + memcpy(p2p_mac.octet, wl_to_p2p_bss_macaddr(cfg, type).octet, ETHER_ADDR_LEN); /* Clear GO_NEG_PHASE bit to take care of GO-NEG-FAIL cases */ WL_DBG(("P2P: GO_NEG_PHASE status cleared ")); wl_clr_p2p_status(cfg, GO_NEG_PHASE); - if (cfg->p2p->vif_created) { + if (wl_cfgp2p_vif_created(cfg)) { if (wl_get_drv_status(cfg, SCANNING, dev)) { wl_notify_escan_complete(cfg, dev, true, true); } wldev_iovar_setint(dev, "mpc", 1); /* Delete pm_enable_work */ - wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL); + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL); /* for GC */ if (wl_get_drv_status(cfg, DISCONNECTING, dev) && @@ -1715,6 +1912,7 @@ wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev) /* for GO */ if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) { wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false); + cfg->p2p->p2p_go_count--; /* disable interface before bsscfg free */ 
ret = wl_cfgp2p_ifdisable(cfg, &p2p_mac); /* if fw doesn't support "ifdis", @@ -1728,26 +1926,30 @@ wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev) msleep(300); } } - wl_cfgp2p_clear_management_ie(cfg, index); + wl_cfg80211_clear_per_bss_ies(cfg, index); if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP) wldev_iovar_setint(dev, "buf_key_b4_m4", 0); + memcpy(p2p_mac.octet, wl_to_p2p_bss_macaddr(cfg, type).octet, + ETHER_ADDR_LEN); + CFGP2P_INFO(("primary idx %d : cfg p2p_ifdis "MACDBG"\n", + dev->ifindex, MAC2STRDBG(p2p_mac.octet))); /* delete interface after link down */ ret = wl_cfgp2p_ifdel(cfg, &p2p_mac); - if (ret != BCME_OK) { struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n", ret, ndev->name)); - #if defined(BCMDONGLEHOST) && defined(OEM_ANDROID) + dhd->hang_reason = HANG_REASON_P2P_IFACE_DEL_FAILURE; net_os_send_hang_message(ndev); - #endif } else { /* Wait for IF_DEL operation to be finished */ timeout = wait_event_interruptible_timeout(cfg->netif_change_event, - (wl_get_p2p_status(cfg, IF_DELETING) == false), + ((wl_get_p2p_status(cfg, IF_DELETING) == false) && + (cfg->if_event_info.valid)), msecs_to_jiffies(MAX_WAIT_TIME)); if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) && cfg->if_event_info.valid) { @@ -1779,8 +1981,11 @@ wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev, s32 wlif_type; s32 mode = 0; s32 err = BCME_OK; + s32 index; + s32 conn_idx = -1; chanspec_t chspec; struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg); dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); WL_DBG(("Enter type %d\n", type)); @@ -1802,6 +2007,8 @@ wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev, infra = 1; break; case NL80211_IFTYPE_AP: + dhd->op_mode |= DHD_FLAG_HOSTAP_MODE; + /* intentional fall through */ case 
NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: mode = WL_MODE_AP; @@ -1812,26 +2019,63 @@ wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev, } if (!dhd) return -EINVAL; + + /* If any scan is going on, abort it */ + if (wl_get_drv_status_all(cfg, SCANNING)) { + int wait_cnt = MAX_SCAN_ABORT_WAIT_CNT; + WL_ERR(("Scan in progress. Aborting the scan!\n")); + wl_cfg80211_scan_abort(cfg); + while (wl_get_drv_status_all(cfg, SCANNING) && wait_cnt) { + WL_DBG(("Waiting for SCANNING terminated, wait_cnt: %d\n", wait_cnt)); + wait_cnt--; + OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME); + } + if (wl_get_drv_status_all(cfg, SCANNING)) { + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); + } + } + + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface\n")); + return -EINVAL; + } if (ap) { wl_set_mode_by_netdev(cfg, ndev, mode); - if (cfg->p2p_supported && cfg->p2p->vif_created) { - WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", cfg->p2p->vif_created, - p2p_on(cfg))); + if (is_p2p_group_iface(ndev->ieee80211_ptr) && + cfg->p2p && wl_cfgp2p_vif_created(cfg)) { + WL_DBG(("p2p_vif_created p2p_on (%d)\n", p2p_on(cfg))); wldev_iovar_setint(ndev, "mpc", 0); wl_notify_escan_complete(cfg, ndev, true, true); + /* Dual p2p doesn't support multiple P2PGO interfaces, + * p2p_go_count is the counter for GO creation + * requests. + */ + if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) { + wl_set_mode_by_netdev(cfg, ndev, WL_MODE_BSS); + WL_ERR(("Fw doesnot support multiple GO\n")); + return BCME_ERROR; + } /* In concurrency case, STA may be already associated in a particular * channel. so retrieve the current channel of primary interface and * then start the virtual interface on that. 
*/ chspec = wl_cfg80211_get_shared_freq(wiphy); + index = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + if (index < 0) { + WL_ERR(("Find p2p index from ndev(%p) failed\n", ndev)); + return BCME_ERROR; + } + if (wl_cfgp2p_find_type(cfg, index, &conn_idx) != BCME_OK) + return BCME_ERROR; wlif_type = WL_P2P_IF_GO; - printf("%s : ap (%d), infra (%d), iftype: (%d)\n", - ndev->name, ap, infra, type); + printf("%s : ap (%d), infra (%d), iftype (%d) conn_idx (%d)\n", + ndev->name, ap, infra, type, conn_idx); wl_set_p2p_status(cfg, IF_CHANGING); wl_clr_p2p_status(cfg, IF_CHANGED); - wl_cfgp2p_ifchange(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec); + wl_cfgp2p_ifchange(cfg, wl_to_p2p_bss_macaddr(cfg, conn_idx), + htod32(wlif_type), chspec, conn_idx); wait_event_interruptible_timeout(cfg->netif_change_event, (wl_get_p2p_status(cfg, IF_CHANGED) == true), msecs_to_jiffies(MAX_WAIT_TIME)); @@ -1844,67 +2088,25 @@ wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev, wl_set_drv_status(cfg, CONNECTED, ndev); #ifdef SUPPORT_AP_POWERSAVE dhd_set_ap_powersave(dhd, 0, TRUE); -#endif - } else if (ndev == bcmcfg_to_prmry_ndev(cfg) && +#endif /* SUPPORT_AP_POWERSAVE */ + } else if (((ndev == primary_ndev) || + (ndev == ((struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev)))) && !wl_get_drv_status(cfg, AP_CREATED, ndev)) { wl_set_drv_status(cfg, AP_CREATING, ndev); - if (!cfg->ap_info && - !(cfg->ap_info = kzalloc(sizeof(struct ap_info), GFP_KERNEL))) { - WL_ERR(("struct ap_saved_ie allocation failed\n")); - return -ENOMEM; - } } else { WL_ERR(("Cannot change the interface for GO or SOFTAP\n")); return -EINVAL; } } else { - WL_DBG(("Change_virtual_iface for transition from GO/AP to client/STA")); + /* P2P GO interface deletion is handled on the basis of role type (AP). + * So avoid changing role for p2p type. 
+ */ + if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) + wl_set_mode_by_netdev(cfg, ndev, mode); + WL_DBG(("Change_virtual_iface for transition from GO/AP to client/STA\n")); #ifdef SUPPORT_AP_POWERSAVE dhd_set_ap_powersave(dhd, 0, FALSE); -#endif -#ifdef P2PONEINT - wl_set_mode_by_netdev(cfg, ndev, mode); - if (cfg->p2p_supported && cfg->p2p->vif_created) { - WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", cfg->p2p->vif_created, - p2p_on(cfg))); - wldev_iovar_setint(ndev, "mpc", 0); - wl_notify_escan_complete(cfg, ndev, true, true); - - /* In concurrency case, STA may be already associated in a particular - * channel. so retrieve the current channel of primary interface and - * then start the virtual interface on that. - */ - chspec = wl_cfg80211_get_shared_freq(wiphy); - - wlif_type = WL_P2P_IF_CLIENT; - WL_ERR(("%s : ap (%d), infra (%d), iftype: (%d) chspec 0x%x \n", - ndev->name, ap, infra, type, chspec)); - wl_set_p2p_status(cfg, IF_CHANGING); - wl_clr_p2p_status(cfg, IF_CHANGED); - wl_cfgp2p_ifchange(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec); - wait_event_interruptible_timeout(cfg->netif_change_event, - (wl_get_p2p_status(cfg, IF_CHANGED) == true), - msecs_to_jiffies(MAX_WAIT_TIME)); - wl_set_mode_by_netdev(cfg, ndev, mode); - dhd->op_mode |= DHD_FLAG_P2P_GC_MODE; - dhd->op_mode &= ~DHD_FLAG_P2P_GO_MODE; - wl_clr_p2p_status(cfg, IF_CHANGING); - wl_clr_p2p_status(cfg, IF_CHANGED); - -#define INIT_IE(IE_TYPE, BSS_TYPE) \ - do { \ - memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \ - sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \ - wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \ - } while (0); - - INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION); - INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION); - INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION); - INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION); - INIT_IE(beacon, P2PAPI_BSSCFG_CONNECTION); - } -#endif /* P2PONEINT */ +#endif /* 
SUPPORT_AP_POWERSAVE */ } if (ibss) { @@ -2008,17 +2210,17 @@ static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info * #if defined(BCMSDIO) dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); bool enabled; -#endif +#endif #endif /* PROP_TXSTATUS_VSDB */ bssidx = if_event_info->bssidx; - if (bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION)) { + if (bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) && + bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2)) { WL_ERR(("got IF_DEL for if %d, not owned by cfg driver\n", bssidx)); return BCME_ERROR; } - if (p2p_is_on(cfg) && cfg->p2p->vif_created) { - + if (p2p_is_on(cfg) && wl_cfgp2p_vif_created(cfg)) { if (cfg->scan_request && (cfg->escan_info.ndev == ndev)) { /* Abort any pending scan requests */ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; @@ -2027,28 +2229,32 @@ static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info * } memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ); - if (wl_cfgp2p_find_type(cfg, bssidx, &type) != BCME_OK) { - WL_ERR(("Find p2p type from bssidx(%d) failed\n", bssidx)); + if (wl_cfgp2p_find_type(cfg, bssidx, &type) == BCME_OK) { + /* Update P2P data */ + wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, type)); + wl_to_p2p_bss_ndev(cfg, type) = NULL; + wl_to_p2p_bss_bssidx(cfg, type) = -1; + } else if (wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr) < 0) { + WL_ERR(("bssidx not known for the given ndev as per net_info data \n")); return BCME_ERROR; } - wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, type)); - wl_to_p2p_bss_ndev(cfg, type) = NULL; - wl_to_p2p_bss_bssidx(cfg, type) = WL_INVALID; - cfg->p2p->vif_created = false; #ifdef PROP_TXSTATUS_VSDB #if defined(BCMSDIO) dhd_wlfc_get_enable(dhd, &enabled); if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE && - dhd->op_mode != DHD_FLAG_IBSS_MODE) { + dhd->op_mode != DHD_FLAG_IBSS_MODE && dhd->conf->disable_proptx!=0) { 
dhd_wlfc_deinit(dhd); cfg->wlfc_on = false; } -#endif +#endif #endif /* PROP_TXSTATUS_VSDB */ } + dhd_net_if_lock(ndev); wl_cfg80211_remove_if(cfg, if_event_info->ifidx, ndev); + dhd_net_if_unlock(ndev); + return BCME_OK; } @@ -2060,7 +2266,21 @@ static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg, u8 *end, *pos; s32 listen_channel; +/* unfortunately const cast required here - function is + * a callback so its signature must not be changed + * and cascade of changing wl_cfgp2p_find_p2pie + * causes need for const cast in other places + */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif pos = (u8 *)ie; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len); if (p2p_ie == NULL) @@ -2118,6 +2338,7 @@ static void wl_scan_prep(struct wl_scan_params *params, struct cfg80211_scan_req char *ptr; wlc_ssid_t ssid; struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wireless_dev *wdev; memcpy(¶ms->bssid, ðer_bcast, ETHER_ADDR_LEN); params->bss_type = DOT11_BSSTYPE_ANY; @@ -2166,27 +2387,22 @@ static void wl_scan_prep(struct wl_scan_params *params, struct cfg80211_scan_req if (!dhd_conf_match_channel(cfg->pub, channel)) continue; +#if defined(WL_CFG80211_P2P_DEV_IF) + wdev = request->wdev; +#else + wdev = request->dev->ieee80211_ptr; +#endif /* WL_CFG80211_P2P_DEV_IF */ + chanspec = wl_cfg80211_ulb_get_min_bw_chspec(wdev, -1); + if (chanspec == INVCHANSPEC) { + WL_ERR(("Invalid chanspec! 
Skipping channel\n")); + continue; + } + if (request->channels[i]->band == IEEE80211_BAND_2GHZ) { -#ifdef WL_HOST_BAND_MGMT - if (cfg->curr_band == WLC_BAND_5G) { - WL_DBG(("In 5G only mode, omit 2G channel:%d\n", channel)); - continue; - } -#endif /* WL_HOST_BAND_MGMT */ chanspec |= WL_CHANSPEC_BAND_2G; } else { -#ifdef WL_HOST_BAND_MGMT - if (cfg->curr_band == WLC_BAND_2G) { - WL_DBG(("In 2G only mode, omit 5G channel:%d\n", channel)); - continue; - } -#endif /* WL_HOST_BAND_MGMT */ chanspec |= WL_CHANSPEC_BAND_5G; } - - chanspec |= WL_CHANSPEC_BW_20; - chanspec |= WL_CHANSPEC_CTL_SB_NONE; - params->channel_list[j] = channel; params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK; params->channel_list[j] |= chanspec; @@ -2253,7 +2469,7 @@ wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size) #if defined(USE_INITIAL_SHORT_DWELL_TIME) #define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40 bool g_first_broadcast_scan = TRUE; -#endif +#endif static s32 wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev, @@ -2267,15 +2483,16 @@ wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev, u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)]; u32 num_chans = 0; s32 channel; - s32 n_valid_chan; + u32 n_valid_chan; s32 search_state = WL_P2P_DISC_ST_SCAN; u32 i, j, n_nodfs = 0; u16 *default_chan_list = NULL; wl_uint32_list_t *list; + s32 bssidx = -1; struct net_device *dev = NULL; #if defined(USE_INITIAL_SHORT_DWELL_TIME) bool is_first_init_2g_scan = false; -#endif +#endif p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN; scb_val_t scbval; static int cnt = 0; @@ -2300,7 +2517,7 @@ wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev, is_first_init_2g_scan = true; g_first_broadcast_scan = false; } -#endif +#endif /* if scan request is not empty parse scan request paramters */ if (request != NULL) { @@ -2326,7 +2543,7 @@ wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev, /* Override active_time to reduce scan time if it's first 
bradcast scan. */ if (is_first_init_2g_scan) params->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS; -#endif +#endif params->version = htod32(ESCAN_REQ_VERSION); params->action = htod16(action); @@ -2338,8 +2555,16 @@ wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev, err = -ENOMEM; goto exit; } + if (cfg->active_scan == PASSIVE_SCAN) { + params->params.scan_type = DOT11_SCANTYPE_PASSIVE; + WL_DBG(("Passive scan_type %d \n", params->params.scan_type)); + } + + bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + err = wldev_iovar_setbuf(ndev, "escan", params, params_size, cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + WL_SCAN(("%s: LEGACY_SCAN sync ID: %d, bssidx: %d\n", __FUNCTION__, params->sync_id, bssidx)); if (unlikely(err)) { if (err == BCME_EPERM) /* Scan Not permitted at this point of time */ @@ -2364,23 +2589,15 @@ wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev, goto exit; } if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) { +#ifdef P2P_SKIP_DFS + int is_printed = false; +#endif /* P2P_SKIP_DFS */ list = (wl_uint32_list_t *) chan_buf; n_valid_chan = dtoh32(list->count); for (i = 0; i < num_chans; i++) { -#ifdef WL_HOST_BAND_MGMT - int channel_band = 0; -#endif /* WL_HOST_BAND_MGMT */ _freq = request->channels[i]->center_freq; channel = ieee80211_frequency_to_channel(_freq); -#ifdef WL_HOST_BAND_MGMT - channel_band = (channel > CH_MAX_2G_CHANNEL) ? 
- WLC_BAND_5G : WLC_BAND_2G; - if ((cfg->curr_band != WLC_BAND_AUTO) && - (cfg->curr_band != channel_band) && - !IS_P2P_SOCIAL_CHANNEL(channel)) - continue; -#endif /* WL_HOST_BAND_MGMT */ /* ignore DFS channels */ if (request->channels[i]->flags & @@ -2392,6 +2609,15 @@ wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev, | IEEE80211_CHAN_PASSIVE_SCAN)) #endif continue; +#ifdef P2P_SKIP_DFS + if (channel >= 52 && channel <= 144) { + if (is_printed == false) { + WL_ERR(("SKIP DFS CHANs(52~144)\n")); + is_printed = true; + } + continue; + } +#endif /* P2P_SKIP_DFS */ for (j = 0; j < n_valid_chan; j++) { /* allows only supported channel on @@ -2412,8 +2638,10 @@ wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev, search_state = WL_P2P_DISC_ST_SEARCH; p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL; WL_INFORM(("P2P SEARCH PHASE START \n")); - } else if ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION)) && - (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) { + } else if (((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1)) && + (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) || + ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2)) && + (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP))) { /* If you are already a GO, then do SEARCH only */ WL_INFORM(("Already a GO. 
Do SEARCH Only")); search_state = WL_P2P_DISC_ST_SEARCH; @@ -2436,7 +2664,7 @@ wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev, err = -EINVAL; goto exit; } - err = wl_cfgp2p_escan(cfg, ndev, cfg->active_scan, num_chans, default_chan_list, + err = wl_cfgp2p_escan(cfg, ndev, ACTIVE_SCAN, num_chans, default_chan_list, search_state, action, wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL, p2p_scan_purpose); @@ -2469,7 +2697,6 @@ wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev, return err; } - static s32 wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_scan_request *request) @@ -2480,7 +2707,6 @@ wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *nd s32 passive_scan_time_org; wl_scan_results_t *results; WL_SCAN(("Enter \n")); - mutex_lock(&cfg->usr_sync); results = wl_escan_get_buf(cfg, FALSE); results->version = 0; @@ -2536,7 +2762,6 @@ wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *nd } exit: - mutex_unlock(&cfg->usr_sync); return err; } @@ -2562,21 +2787,16 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct net_device *remain_on_channel_ndev = NULL; #endif - dhd_pub_t *dhd; - - dhd = (dhd_pub_t *)(cfg->pub); /* * Hostapd triggers scan before starting automatic channel selection - * also Dump stats IOVAR scans each channel hence returning from here. + * to collect channel characteristics. However firmware scan engine + * doesn't support any channel characteristics collection along with + * scan. Hence return scan success. */ - if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { -#ifdef WL_SUPPORT_ACS - WL_INFORM(("Scan Command at SoftAP mode\n")); - return 0; -#else - WL_ERR(("Invalid Scan Command at SoftAP mode\n")); - return -EINVAL; -#endif /* WL_SUPPORT_ACS */ + if (request && (scan_req_iftype(request) == NL80211_IFTYPE_AP)) { + WL_INFORM(("Scan Command on SoftAP Interface. 
Ignoring...\n")); +// terence 20161023: let it scan in SoftAP mode +// return 0; } ndev = ndev_to_wlc_ndev(ndev, cfg); @@ -2604,6 +2824,14 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n")); return -EOPNOTSUPP; } + +#ifdef P2P_LISTEN_OFFLOADING + if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { + WL_ERR(("P2P_FIND: Discovery offload is in progress\n")); + return -EAGAIN; + } +#endif /* P2P_LISTEN_OFFLOADING */ + #ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg); if (remain_on_channel_ndev) { @@ -2612,11 +2840,6 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, } #endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ -#ifdef WL_SDO - if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { - wl_cfg80211_pause_sdo(ndev, cfg); - } -#endif /* Arm scan timeout timer */ mod_timer(&cfg->scan_timeout, jiffies + msecs_to_jiffies(WL_SCAN_TIMER_INTERVAL_MS)); @@ -2638,8 +2861,10 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, p2p_on(cfg) = true; wl_cfgp2p_set_firm_p2p(cfg); get_primary_mac(cfg, &primary_mac); - wl_cfgp2p_generate_bss_mac(&primary_mac, - &cfg->p2p->dev_addr, &cfg->p2p->int_addr); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); +#if defined(P2P_IE_MISSING_FIX) + cfg->p2p_prb_noti = false; +#endif } wl_clr_p2p_status(cfg, GO_NEG_PHASE); WL_DBG(("P2P: GO_NEG_PHASE status cleared \n")); @@ -2667,8 +2892,8 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, } } if (!cfg->p2p_supported || !p2p_scan(cfg)) { - - if (wl_cfgp2p_find_idx(cfg, ndev, &bssidx) != BCME_OK) { + if ((bssidx = wl_get_bssidx_by_wdev(cfg, + ndev->ieee80211_ptr)) < 0) { WL_ERR(("Find p2p index from ndev(%p) failed\n", ndev)); err = BCME_ERROR; @@ -2682,7 +2907,7 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, interworking_ie->data, interworking_ie->len); if (unlikely(err)) { - goto 
scan_out; + WL_ERR(("Failed to add interworking IE")); } } else if (cfg->iw_ie_len != 0) { /* we have to clear IW IE and disable gratuitous APR */ @@ -2691,18 +2916,21 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, DOT11_MNG_INTERWORKING_ID, 0, 0); - wldev_iovar_setint_bsscfg(ndev, "grat_arp", 0, + (void)wldev_iovar_setint_bsscfg(ndev, "grat_arp", 0, bssidx); cfg->wl11u = FALSE; + cfg->iw_ie_len = 0; + memset(cfg->iw_ie, 0, IW_IES_MAX_BUF_LEN); /* we don't care about error */ } #endif /* WL11U */ - err = wl_cfgp2p_set_management_ie(cfg, ndev, bssidx, - VNDR_IE_PRBREQ_FLAG, (u8 *)request->ie, + err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(ndev), + bssidx, VNDR_IE_PRBREQ_FLAG, request->ie, request->ie_len); if (unlikely(err)) { - goto scan_out; +// terence 20161023: let it scan in SoftAP mode +// goto scan_out; } } @@ -2711,27 +2939,16 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, ssids = this_ssid; } - if (request && cfg->p2p && !p2p_scan(cfg)) { + if (request && cfg->p2p_supported && !p2p_scan(cfg)) { WL_TRACE_HW4(("START SCAN\n")); + DHD_OS_SCAN_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub), + SCAN_WAKE_LOCK_TIMEOUT); + DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub)); } - cfg->scan_request = request; - wl_set_drv_status(cfg, SCANNING, ndev); - if (cfg->p2p_supported) { if (p2p_on(cfg) && p2p_scan(cfg)) { -#ifdef WL_SDO - if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { - /* We shouldn't be getting p2p_find while discovery - * offload is in progress - */ - WL_SD(("P2P_FIND: Discovery offload is in progress." 
- " Do nothing\n")); - err = -EINVAL; - goto scan_out; - } -#endif /* find my listen channel */ cfg->afx_hdl->my_listen_chan = wl_find_listen_channel(cfg, request->ie, @@ -2752,6 +2969,8 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, scan_success: busy_count = 0; + cfg->scan_request = request; + wl_set_drv_status(cfg, SCANNING, ndev); return 0; @@ -2759,13 +2978,24 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, if (err == BCME_BUSY || err == BCME_NOTREADY) { WL_ERR(("Scan err = (%d), busy?%d", err, -EBUSY)); err = -EBUSY; + } else if ((err == BCME_EPERM) && cfg->scan_suppressed) { + WL_ERR(("Scan not permitted due to scan suppress\n")); + err = -EPERM; + } else { + /* For all other fw errors, use a generic error code as return + * value to cfg80211 stack + */ + err = -EAGAIN; } -#define SCAN_EBUSY_RETRY_LIMIT 10 +#define SCAN_EBUSY_RETRY_LIMIT 20 if (err == -EBUSY) { if (busy_count++ > SCAN_EBUSY_RETRY_LIMIT) { struct ether_addr bssid; s32 ret = 0; +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); +#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */ busy_count = 0; WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n", wl_get_drv_status(cfg, SCANNING, ndev), @@ -2778,6 +3008,13 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev), wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev))); +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + if (dhdp->memdump_enabled) { + dhdp->memdump_type = DUMP_TYPE_SCAN_BUSY; + dhd_bus_mem_dump(dhdp); + } +#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */ + bzero(&bssid, sizeof(bssid)); if ((ret = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false)) == 0) @@ -2788,6 +3025,13 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, wl_cfg80211_scan_abort(cfg); + } else { + /* Hold the context for 400msec, so 
that 10 subsequent scans + * can give a buffer of 4sec which is enough to + * cover any on-going scan in the firmware + */ + WL_DBG(("Enforcing delay for EBUSY case \n")); + msleep(500); } } else { busy_count = 0; @@ -2796,15 +3040,11 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, wl_clr_drv_status(cfg, SCANNING, ndev); if (timer_pending(&cfg->scan_timeout)) del_timer_sync(&cfg->scan_timeout); + DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub)); spin_lock_irqsave(&cfg->cfgdrv_lock, flags); cfg->scan_request = NULL; spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); -#ifdef WL_SDO - if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { - wl_cfg80211_resume_sdo(ndev, cfg); - } -#endif return err; } @@ -2822,22 +3062,22 @@ wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg); #endif /* WL_CFG80211_P2P_DEV_IF */ - WL_DBG(("Enter \n")); + WL_DBG(("Enter\n")); RETURN_EIO_IF_NOT_UP(cfg); -#ifdef P2PONEINT - ndev = bcmcfg_to_prmry_ndev(cfg); - WL_DBG(("scan use [dev name %s ] \n", ndev->name)); -#endif + if (ndev == bcmcfg_to_prmry_ndev(cfg)) { + if (wl_cfg_multip2p_operational(cfg)) { + WL_ERR(("wlan0 scan failed, p2p devices are operational")); + return -ENODEV; + } + } + mutex_lock(&cfg->usr_sync); err = __wl_cfg80211_scan(wiphy, ndev, request, NULL); if (unlikely(err)) { - if ((err == BCME_EPERM) && cfg->scan_suppressed) - WL_DBG(("scan not permitted at this time (%d)\n", err)); - else - WL_ERR(("scan error (%d)\n", err)); - return err; + WL_ERR(("scan error (%d)\n", err)); } + mutex_unlock(&cfg->usr_sync); return err; } @@ -3082,7 +3322,6 @@ bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name) goto fail; event = &cfg->if_event_info; - strncpy(event->name, name, IFNAMSIZ - 1); /* By calling wl_cfg80211_allocate_if (dhd_allocate_if eventually) we give the control * over this net_device interface to dhd_linux, hence the interface is managed by dhd_liux * and will be freed by 
dhd_detach unless it gets unregistered before that. The @@ -3090,7 +3329,7 @@ bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name) * be freed by wl_dealloc_netinfo */ new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, event->name, - event->mac, event->bssidx); + event->mac, event->bssidx, event->name); if (new_ndev == NULL) goto fail; wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); @@ -3109,7 +3348,7 @@ bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name) if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) goto fail; - wl_alloc_netinfo(cfg, new_ndev, wdev, WL_MODE_IBSS, PM_ENABLE); + wl_alloc_netinfo(cfg, new_ndev, wdev, WL_MODE_IBSS, PM_ENABLE, event->bssidx); cfg->ibss_cfgdev = ndev_to_cfgdev(new_ndev); WL_ERR(("IBSS interface %s created\n", new_ndev->name)); return cfg->ibss_cfgdev; @@ -3208,7 +3447,7 @@ wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg, return ret; } -#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF) + s32 wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, @@ -3257,6 +3496,7 @@ wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg, return ret; } +#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) /* Create a Generic Network Interface and initialize it depending up on * the interface type */ @@ -3269,7 +3509,7 @@ wl_cfg80211_create_iface(struct wiphy *wiphy, struct net_device *new_ndev = NULL; struct net_device *primary_ndev = NULL; s32 ret = BCME_OK; - s32 bsscfg_idx = 1; + s32 bsscfg_idx = 0; u32 timeout; wl_if_event_info *event = NULL; struct wireless_dev *wdev = NULL; @@ -3284,8 +3524,31 @@ wl_cfg80211_create_iface(struct wiphy *wiphy, primary_ndev = bcmcfg_to_prmry_ndev(cfg); +#ifdef DHD_IFDEBUG + WL_ERR(("cfg=%p, primary_ndev=%p, ifname=%s\n", cfg, primary_ndev, name)); +#endif + + /* If any scan is going on, abort it */ + if (wl_get_drv_status_all(cfg, SCANNING)) { + int wait_cnt = MAX_SCAN_ABORT_WAIT_CNT; + WL_ERR(("Scan in progress. 
Aborting the scan!\n")); + wl_cfg80211_scan_abort(cfg); + while (wl_get_drv_status_all(cfg, SCANNING) && wait_cnt) { + WL_DBG(("Waiting for SCANNING terminated, wait_cnt: %d\n", wait_cnt)); + wait_cnt--; + OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME); + } + if (!wait_cnt && wl_get_drv_status_all(cfg, SCANNING)) { + WL_ERR(("Failed to abort scan\n")); + return NULL; + } + } + + primary_ndev = bcmcfg_to_prmry_ndev(cfg); if (likely(!mac_addr)) { - /* Use primary MAC with the locally administered bit for the Secondary STA I/F */ + /* Use primary MAC with the locally administered bit for the + * Secondary STA I/F + */ memcpy(addr, primary_ndev->dev_addr, ETH_ALEN); addr[0] |= 0x02; } else { @@ -3314,6 +3577,9 @@ wl_cfg80211_create_iface(struct wiphy *wiphy, CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret)); } +#ifdef DHD_IFDEBUG + WL_ERR(("call wl_cfgp2p_disable_discovery()\n")); +#endif wl_cfgp2p_disable_discovery(cfg); wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0; p2p_on(cfg) = false; @@ -3325,19 +3591,22 @@ wl_cfg80211_create_iface(struct wiphy *wiphy, ret = wl_cfg80211_interface_ops(cfg, primary_ndev, bsscfg_idx, NL80211_IFTYPE_STATION, 0, addr); if (ret == BCME_UNSUPPORTED) { - /* Use bssidx 1 by default */ + /* Use bssidx 1 by default */ + bsscfg_idx = 1; if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev, bsscfg_idx, iface_type, 0, addr)) < 0) { return NULL; } } else if (ret < 0) { - WL_ERR(("Interface create failed!! ret:%d \n", ret)); - goto fail; + WL_ERR(("Interface create failed!! ret:%d \n", ret)); + goto fail; } else { - /* Success */ - bsscfg_idx = ret; + /* Success */ + bsscfg_idx = ret; } + WL_DBG(("Interface created!! bssidx:%d \n", bsscfg_idx)); + /* * Wait till the firmware send a confirmation event back. */ @@ -3354,9 +3623,8 @@ wl_cfg80211_create_iface(struct wiphy *wiphy, * the host interface creation. 
*/ event = &cfg->if_event_info; - strncpy(event->name, name, IFNAMSIZ - 1); new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, - event->name, addr, event->bssidx); + (char*)name, addr, event->bssidx, event->name); if (!new_ndev) { WL_ERR(("I/F allocation failed! \n")); goto fail; @@ -3375,13 +3643,18 @@ wl_cfg80211_create_iface(struct wiphy *wiphy, new_ndev->ieee80211_ptr = wdev; SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy)); +#ifdef DHD_IFDEBUG + WL_ERR(("wdev=%p, new_ndev=%p\n", wdev, new_ndev)); +#endif + /* RTNL lock must have been acquired. */ ASSERT_RTNL(); /* Set the locally administed mac addr, if not applied already */ if (memcmp(addr, event->mac, ETH_ALEN) != 0) { - ret = wldev_iovar_setbuf_bsscfg(primary_ndev, "cur_etheraddr", addr, ETH_ALEN, - cfg->ioctl_buf, WLC_IOCTL_MAXLEN, event->bssidx, &cfg->ioctl_buf_sync); + ret = wldev_iovar_setbuf_bsscfg(primary_ndev, "cur_etheraddr", + addr, ETH_ALEN, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, + event->bssidx, &cfg->ioctl_buf_sync); if (unlikely(ret)) { WL_ERR(("set cur_etheraddr Error (%d)\n", ret)); goto fail; @@ -3397,20 +3670,29 @@ wl_cfg80211_create_iface(struct wiphy *wiphy, /* Initialize with the station mode params */ wl_alloc_netinfo(cfg, new_ndev, wdev, (iface_type == NL80211_IFTYPE_STATION) ? 
- WL_MODE_BSS : WL_MODE_AP, PM_ENABLE); + WL_MODE_BSS : WL_MODE_AP, PM_ENABLE, event->bssidx); cfg->bss_cfgdev = ndev_to_cfgdev(new_ndev); cfg->cfgdev_bssidx = event->bssidx; WL_DBG(("Host Network Interface for Secondary I/F created")); +#ifdef DHD_IFDEBUG + WL_ERR(("cfg->bss_cfgdev=%p\n", cfg->bss_cfgdev)); +#endif + return cfg->bss_cfgdev; fail: cfg->bss_pending_op = FALSE; - if (new_ndev) - wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev); + cfg->cfgdev_bssidx = -1; if (wdev) kfree(wdev); + if (new_ndev) + wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev); + +#ifdef DHD_IFDEBUG + WL_ERR(("failed!!!\n")); +#endif return NULL; } @@ -3427,35 +3709,42 @@ wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev) u32 ifidx; enum nl80211_iftype iface_type = NL80211_IFTYPE_STATION; - WL_DBG(("Enter\n")); + WL_ERR(("Enter\n")); if (!cfg->bss_cfgdev) return 0; /* If any scan is going on, abort it */ if (wl_get_drv_status_all(cfg, SCANNING)) { - WL_DBG(("Scan in progress. Aborting the scan!\n")); + WL_ERR(("Scan in progress. Aborting the scan!\n")); wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); } ndev = (struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev); primary_ndev = bcmcfg_to_prmry_ndev(cfg); +#ifdef DHD_IFDEBUG + WL_ERR(("cfg->bss_cfgdev=%p, ndev=%p, primary_ndev=%p\n", + cfg->bss_cfgdev, ndev, primary_ndev)); +#endif + cfg->bss_pending_op = TRUE; memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info)); - /* Delete the firmware interface */ + /* Delete the firmware interface. 
"interface_remove" command + * should go on the interface to be deleted + */ ret = wl_cfg80211_interface_ops(cfg, ndev, cfg->cfgdev_bssidx, NL80211_IFTYPE_STATION, 1, NULL); if (ret == BCME_UNSUPPORTED) { if ((ret = wl_cfg80211_add_del_bss(cfg, ndev, bsscfg_idx, iface_type, true, NULL)) < 0) { WL_ERR(("DEL bss failed ret:%d \n", ret)); - return ret; + goto exit; } } else if (ret < 0) { - WL_ERR(("Interface DEL failed ret:%d \n", ret)); - return ret; + WL_ERR(("Interface DEL failed ret:%d \n", ret)); + goto exit; } timeout = wait_event_interruptible_timeout(cfg->netif_change_event, @@ -3463,17 +3752,19 @@ wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev) if (timeout <= 0 || cfg->bss_pending_op) { WL_ERR(("timeout in waiting IF_DEL event\n")); } + +exit: ifidx = dhd_net2idx(((struct dhd_pub *)(cfg->pub))->info, ndev); wl_cfg80211_remove_if(cfg, ifidx, ndev); cfg->bss_cfgdev = NULL; cfg->cfgdev_bssidx = -1; cfg->bss_pending_op = FALSE; - WL_DBG(("IF_DEL Done.\n")); + WL_ERR(("IF_DEL Done.\n")); return ret; } -#endif /* defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF) */ +#endif /* defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) */ static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, @@ -3491,9 +3782,6 @@ wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, chanspec_t chanspec = 0; u32 param[2] = {0, 0}; u32 bw_cap = 0; -#if defined(WLAIBSS) && defined(WLAIBSS_PS) - s32 atim = 10; -#endif /* WLAIBSS & WLAIBSS_PS */ WL_TRACE(("In\n")); RETURN_EIO_IF_NOT_UP(cfg); @@ -3510,18 +3798,18 @@ wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, if (chan) cfg->channel = ieee80211_frequency_to_channel(chan->center_freq); if (wl_get_drv_status(cfg, CONNECTED, dev)) { - struct wlc_ssid *ssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID); + struct wlc_ssid *lssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID); u8 *bssid = (u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID); u32 *channel 
= (u32 *)wl_read_prof(cfg, dev, WL_PROF_CHAN); if (!params->bssid || ((memcmp(params->bssid, bssid, ETHER_ADDR_LEN) == 0) && - (memcmp(params->ssid, ssid->SSID, ssid->SSID_len) == 0) && + (memcmp(params->ssid, lssid->SSID, lssid->SSID_len) == 0) && (*channel == cfg->channel))) { WL_ERR(("Connection already existed to " MACDBG "\n", MAC2STRDBG((u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID)))); return -EISCONN; } WL_ERR(("Ignore Previous connecton to %s (" MACDBG ")\n", - ssid->SSID, MAC2STRDBG(bssid))); + lssid->SSID, MAC2STRDBG(bssid))); } /* remove the VSIE */ @@ -3614,22 +3902,6 @@ wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED); wldev_iovar_setint(dev, "wsec", 0); -#ifdef WLAIBSS - /* Enable custom ibss features */ - err = wldev_iovar_setint(dev, "aibss", TRUE); - - if (unlikely(err)) { - WL_ERR(("Enable custom IBSS mode failed (%d)\n", err)); - return err; - } -#ifdef WLAIBSS_PS - err = wldev_ioctl(dev, WLC_SET_ATIM, &atim, sizeof(int), true); - if (unlikely(err)) { - WL_ERR(("Enable custom IBSS ATIM mode failed (%d)\n", err)); - return err; - } -#endif /* WLAIBSS_PS */ -#endif /* WLAIBSS */ err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, true); @@ -3650,9 +3922,6 @@ wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, } wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID); wl_update_prof(cfg, dev, NULL, &cfg->channel, WL_PROF_CHAN); -#ifdef WLAIBSS - cfg->aibss_txfail_seq = 0; /* initialize the sequence */ -#endif /* WLAIBSS */ cfg->rmc_event_seq = 0; /* initialize rmcfail sequence */ return err; } @@ -3686,43 +3955,6 @@ static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) return err; } -#ifdef MFP -static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8* capa) -{ - u16 suite_count; - wpa_suite_mcast_t *mcast; - wpa_suite_ucast_t *ucast; - u16 len; - wpa_suite_auth_key_mgmt_t *mgmt; - - if (!wpa2ie) - return -1; - - 
len = wpa2ie->len; - mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN]; - if ((len -= WPA_SUITE_LEN) <= 0) - return BCME_BADLEN; - ucast = (wpa_suite_ucast_t *)&mcast[1]; - suite_count = ltoh16_ua(&ucast->count); - if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) || - (len -= (WPA_IE_SUITE_COUNT_LEN + - (WPA_SUITE_LEN * suite_count))) <= 0) - return BCME_BADLEN; - - mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count]; - suite_count = ltoh16_ua(&mgmt->count); - - if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) || - (len -= (WPA_IE_SUITE_COUNT_LEN + - (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) { - capa[0] = *(u8 *)&mgmt->list[suite_count]; - capa[1] = *((u8 *)&mgmt->list[suite_count] + 1); - } else - return BCME_BADLEN; - - return 0; -} -#endif /* MFP */ static s32 wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme) @@ -3732,22 +3964,17 @@ wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme) s32 val = 0; s32 err = 0; s32 bssidx; - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) val = WPA_AUTH_PSK | -#ifdef BCMCCX - WPA_AUTH_CCKM | -#endif WPA_AUTH_UNSPECIFIED; else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) val = WPA2_AUTH_PSK| -#ifdef BCMCCX - WPA2_AUTH_CCKM | -#endif WPA2_AUTH_UNSPECIFIED; else val = WPA_AUTH_DISABLED; @@ -3755,13 +3982,6 @@ wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme) if (is_wps_conn(sme)) val = WPA_AUTH_DISABLED; -#ifdef BCMWAPI_WPI - if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) { - WL_DBG((" * wl_set_wpa_version, set wpa_auth" - " to WPA_AUTH_WAPI 0x400")); - val = WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED; - } -#endif 
WL_DBG(("setting wpa_auth to 0x%0x\n", val)); err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx); if (unlikely(err)) { @@ -3773,33 +3993,6 @@ wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme) return err; } -#ifdef BCMWAPI_WPI -static s32 -wl_set_set_wapi_ie(struct net_device *dev, struct cfg80211_connect_params *sme) -{ - struct bcm_cfg80211 *cfg = g_bcm_cfg; - s32 err = 0; - s32 bssidx; - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); - return BCME_ERROR; - } - - WL_DBG((" %s \n", __FUNCTION__)); - - if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) { - err = wldev_iovar_setbuf_bsscfg(dev, "wapiie", sme->ie, sme->ie_len, - cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); - - if (unlikely(err)) { - WL_ERR(("===> set_wapi_ie Error (%d)\n", err)); - return err; - } - } else - WL_DBG((" * skip \n")); - return err; -} -#endif /* BCMWAPI_WPI */ static s32 wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme) @@ -3809,8 +4002,9 @@ wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme) s32 val = 0; s32 err = 0; s32 bssidx; - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } @@ -3827,12 +4021,6 @@ wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme) val = WL_AUTH_OPEN_SHARED; WL_DBG(("automatic\n")); break; -#ifdef BCMCCX - case NL80211_AUTHTYPE_NETWORK_EAP: - WL_DBG(("network eap\n")); - val = DOT11_LEAP_AUTH; - break; -#endif default: val = 2; WL_ERR(("invalid auth type (%d)\n", sme->auth_type)); @@ -3858,18 +4046,11 @@ wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme) s32 gval = 0; s32 err = 0; s32 wsec_val = 0; -#ifdef 
MFP - s32 mfp = 0; - bcm_tlv_t *wpa2_ie; - u8 rsn_cap[2]; -#endif /* MFP */ -#ifdef BCMWAPI_WPI - s32 val = 0; -#endif s32 bssidx; - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } @@ -3886,36 +4067,12 @@ wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme) case WLAN_CIPHER_SUITE_AES_CMAC: pval = AES_ENABLED; break; -#ifdef BCMWAPI_WPI - case WLAN_CIPHER_SUITE_SMS4: - val = SMS4_ENABLED; - pval = SMS4_ENABLED; - break; -#endif default: WL_ERR(("invalid cipher pairwise (%d)\n", sme->crypto.ciphers_pairwise[0])); return -EINVAL; } } -#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X) - /* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way - * handshake. - * Note that the FW feature flag only exists on kernels that support the - * FT-EAP AKM suite. 
- */ - if (cfg->wdev->wiphy->features & NL80211_FEATURE_FW_4WAY_HANDSHAKE) { - if (pval == AES_ENABLED) - err = wldev_iovar_setint_bsscfg(dev, "sup_wpa", 1, bssidx); - else - err = wldev_iovar_setint_bsscfg(dev, "sup_wpa", 0, bssidx); - - if (err) { - WL_ERR(("FBT: Error setting sup_wpa (%d)\n", err)); - return err; - } - } -#endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */ if (sme->crypto.cipher_group) { switch (sme->crypto.cipher_group) { case WLAN_CIPHER_SUITE_WEP40: @@ -3931,12 +4088,6 @@ wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme) case WLAN_CIPHER_SUITE_AES_CMAC: gval = AES_ENABLED; break; -#ifdef BCMWAPI_WPI - case WLAN_CIPHER_SUITE_SMS4: - val = SMS4_ENABLED; - gval = SMS4_ENABLED; - break; -#endif default: WL_ERR(("invalid cipher group (%d)\n", sme->crypto.cipher_group)); @@ -3953,63 +4104,12 @@ wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme) /* WPS-2.0 allows no security */ err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx); } else { -#ifdef BCMWAPI_WPI - if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_SMS4) { - WL_DBG((" NO, is_wps_conn, WAPI set to SMS4_ENABLED")); - err = wldev_iovar_setint_bsscfg(dev, "wsec", val, bssidx); - } else { -#endif WL_DBG((" NO, is_wps_conn, Set pval | gval to WSEC")); wsec_val = pval | gval; -#ifdef MFP - if (pval == AES_ENABLED) { - if (((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len, - DOT11_MNG_RSN_ID)) != NULL) && - (wl_cfg80211_get_rsn_capa(wpa2_ie, rsn_cap) == 0)) { - - if (rsn_cap[0] & RSN_CAP_MFPC) { - /* MFP Capability advertised by supplicant. Check - * whether MFP is supported in the firmware - */ - if ((err = wldev_iovar_getint_bsscfg(dev, - "mfp", &mfp, bssidx)) < 0) { - WL_ERR(("Get MFP failed! 
" - "Check MFP support in FW \n")); - return -1; - } - - if ((sme->crypto.n_akm_suites == 1) && - ((sme->crypto.akm_suites[0] == - WL_AKM_SUITE_MFP_PSK) || - (sme->crypto.akm_suites[0] == - WL_AKM_SUITE_MFP_1X))) { - wsec_val |= MFP_SHA256; - } else if (sme->crypto.n_akm_suites > 1) { - WL_ERR(("Multiple AKM Specified \n")); - return -EINVAL; - } - - wsec_val |= MFP_CAPABLE; - if (rsn_cap[0] & RSN_CAP_MFPR) - wsec_val |= MFP_REQUIRED; - - if (rsn_cap[0] & RSN_CAP_MFPR) - mfp = WL_MFP_REQUIRED; - else - mfp = WL_MFP_CAPABLE; - err = wldev_iovar_setint_bsscfg(dev, "mfp", - mfp, bssidx); - } - } - } -#endif /* MFP */ WL_DBG((" Set WSEC to fW 0x%x \n", wsec_val)); err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec_val, bssidx); -#ifdef BCMWAPI_WPI - } -#endif } if (unlikely(err)) { WL_ERR(("error (%d)\n", err)); @@ -4031,8 +4131,9 @@ wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme) s32 val = 0; s32 err = 0; s32 bssidx; - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } @@ -4043,9 +4144,6 @@ wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme) return err; } if (val & (WPA_AUTH_PSK | -#ifdef BCMCCX - WPA_AUTH_CCKM | -#endif WPA_AUTH_UNSPECIFIED)) { switch (sme->crypto.akm_suites[0]) { case WLAN_AKM_SUITE_8021X: @@ -4054,74 +4152,29 @@ wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme) case WLAN_AKM_SUITE_PSK: val = WPA_AUTH_PSK; break; -#ifdef BCMCCX - case WLAN_AKM_SUITE_CCKM: - val = WPA_AUTH_CCKM; - break; -#endif default: - WL_ERR(("invalid cipher group (%d)\n", - sme->crypto.cipher_group)); + WL_ERR(("invalid akm suite (0x%x)\n", + sme->crypto.akm_suites[0])); return -EINVAL; } } else if (val & (WPA2_AUTH_PSK | -#ifdef BCMCCX - WPA2_AUTH_CCKM | -#endif 
WPA2_AUTH_UNSPECIFIED)) { switch (sme->crypto.akm_suites[0]) { case WLAN_AKM_SUITE_8021X: val = WPA2_AUTH_UNSPECIFIED; break; -#ifdef MFP - case WL_AKM_SUITE_MFP_1X: - val = WPA2_AUTH_UNSPECIFIED; - break; - case WL_AKM_SUITE_MFP_PSK: - val = WPA2_AUTH_PSK; - break; -#endif case WLAN_AKM_SUITE_PSK: val = WPA2_AUTH_PSK; break; -#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_8021X) - case WLAN_AKM_SUITE_FT_8021X: - val = WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT; - break; -#endif -#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_PSK) - case WLAN_AKM_SUITE_FT_PSK: - val = WPA2_AUTH_PSK | WPA2_AUTH_FT; - break; -#endif -#ifdef BCMCCX - case WLAN_AKM_SUITE_CCKM: - val = WPA2_AUTH_CCKM; - break; -#endif - default: - WL_ERR(("invalid cipher group (%d)\n", - sme->crypto.cipher_group)); - return -EINVAL; - } - } -#ifdef BCMWAPI_WPI - else if (val & (WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED)) { - switch (sme->crypto.akm_suites[0]) { - case WLAN_AKM_SUITE_WAPI_CERT: - val = WAPI_AUTH_UNSPECIFIED; - break; - case WLAN_AKM_SUITE_WAPI_PSK: - val = WAPI_AUTH_PSK; - break; default: - WL_ERR(("invalid cipher group (%d)\n", - sme->crypto.cipher_group)); + WL_ERR(("invalid akm suite (0x%x)\n", + sme->crypto.akm_suites[0])); return -EINVAL; } } -#endif - WL_DBG(("setting wpa_auth to %d\n", val)); + + + WL_DBG(("setting wpa_auth to 0x%x\n", val)); err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx); if (unlikely(err)) { @@ -4145,8 +4198,9 @@ wl_set_set_sharedkey(struct net_device *dev, s32 val; s32 err = 0; s32 bssidx; - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } @@ -4156,17 +4210,9 @@ wl_set_set_sharedkey(struct net_device *dev, WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n", sec->wpa_versions, sec->cipher_pairwise)); if (!(sec->wpa_versions & 
(NL80211_WPA_VERSION_1 | -#ifdef BCMWAPI_WPI - NL80211_WPA_VERSION_2 | NL80211_WAPI_VERSION_1)) && -#else NL80211_WPA_VERSION_2)) && -#endif (sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 | -#ifdef BCMWAPI_WPI - WLAN_CIPHER_SUITE_WEP104 | WLAN_CIPHER_SUITE_SMS4))) -#else WLAN_CIPHER_SUITE_WEP104))) -#endif { memset(&key, 0, sizeof(key)); key.len = (u32) sme->key_len; @@ -4184,11 +4230,6 @@ wl_set_set_sharedkey(struct net_device *dev, case WLAN_CIPHER_SUITE_WEP104: key.algo = CRYPTO_ALGO_WEP128; break; -#ifdef BCMWAPI_WPI - case WLAN_CIPHER_SUITE_SMS4: - key.algo = CRYPTO_ALGO_SMS4; - break; -#endif default: WL_ERR(("Invalid algorithm (%d)\n", sme->crypto.ciphers_pairwise[0])); @@ -4258,12 +4299,29 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, u32 wpaie_len = 0; u32 chan_cnt = 0; struct ether_addr bssid; - s32 bssidx; + s32 bssidx = -1; int ret; int wait_cnt; WL_DBG(("In\n")); +#if defined(SUPPORT_RANDOM_MAC_SCAN) + wl_cfg80211_set_random_mac(dev, FALSE); +#endif /* SUPPORT_RANDOM_MAC_SCAN */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) + if (sme->channel_hint) { + chan = sme->channel_hint; + WL_DBG(("channel_hint (%d), channel_hint center_freq (%d)\n", + ieee80211_frequency_to_channel(sme->channel_hint->center_freq), + sme->channel_hint->center_freq)); + } + if (sme->bssid_hint) { + sme->bssid = sme->bssid_hint; + WL_DBG(("bssid_hint "MACDBG" \n", MAC2STRDBG(sme->bssid_hint))); + } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */ + if (unlikely(!sme->ssid)) { WL_ERR(("Invalid ssid\n")); return -EOPNOTSUPP; @@ -4280,9 +4338,19 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, /* * Cancel ongoing scan to sync up with sme state machine of cfg80211. */ -#if (!defined(ESCAN_RESULT_PATCH) || defined(CUSTOMER_HW10)) +#if (defined(BCM4359_CHIP) || !defined(ESCAN_RESULT_PATCH)) if (cfg->scan_request) { - wl_notify_escan_complete(cfg, dev, true, true); + WL_TRACE_HW4(("Aborting the scan! 
\n")); + wl_cfg80211_scan_abort(cfg); + wait_cnt = MAX_SCAN_ABORT_WAIT_CNT; + while (wl_get_drv_status(cfg, SCANNING, dev) && wait_cnt) { + WL_DBG(("Waiting for SCANNING terminated, wait_cnt: %d\n", wait_cnt)); + wait_cnt--; + OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME); + } + if (wl_get_drv_status(cfg, SCANNING, dev)) { + wl_notify_escan_complete(cfg, dev, true, true); + } } #endif #ifdef WL_SCHED_SCAN @@ -4330,12 +4398,16 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, WL_DBG(("Currently not associated!\n")); } else { /* if status is DISCONNECTING, wait for disconnection terminated max 500 ms */ - wait_cnt = 500/10; + wait_cnt = 200/10; while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) { WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n", wait_cnt)); wait_cnt--; OSL_SLEEP(10); } + if (wl_get_drv_status(cfg, DISCONNECTING, dev)) { + WL_ERR(("Force clear DISCONNECTING status!\n")); + wl_clr_drv_status(cfg, DISCONNECTING, dev); + } } /* Clean BSSID */ @@ -4345,11 +4417,12 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, if (p2p_is_on(cfg) && (dev != bcmcfg_to_prmry_ndev(cfg))) { /* we only allow to connect using virtual interface in case of P2P */ - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", + dev->ieee80211_ptr)); return BCME_ERROR; } - wl_cfgp2p_set_management_ie(cfg, dev, bssidx, + wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len); } else if (dev == bcmcfg_to_prmry_ndev(cfg)) { /* find the RSN_IE */ @@ -4381,54 +4454,43 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, } } - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + if ((bssidx = wl_get_bssidx_by_wdev(cfg, 
dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } - err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx, - VNDR_IE_ASSOCREQ_FLAG, (u8 *)sme->ie, sme->ie_len); + err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_ASSOCREQ_FLAG, (const u8 *)sme->ie, sme->ie_len); if (unlikely(err)) { return err; } } if (chan) { + /* If RCC is not enabled, use the channel provided by userspace */ cfg->channel = ieee80211_frequency_to_channel(chan->center_freq); chan_cnt = 1; WL_DBG(("channel (%d), center_req (%d), %d channels\n", cfg->channel, chan->center_freq, chan_cnt)); - } else + } else { + /* + * No channel information from user space. if RCC is enabled, the RCC + * would prepare the channel list, else no channel would be provided + * and firmware would need to do a full channel scan. + */ + WL_DBG(("No channel info from user space\n")); cfg->channel = 0; -#ifdef BCMWAPI_WPI - WL_DBG(("1. enable wapi auth\n")); - if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) { - WL_DBG(("2. set wapi ie \n")); - err = wl_set_set_wapi_ie(dev, sme); - if (unlikely(err)) - return err; - } else - WL_DBG(("2. Not wapi ie \n")); -#endif + } WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len)); - WL_DBG(("3. set wapi version \n")); + WL_DBG(("3. set wpa version \n")); err = wl_set_wpa_version(dev, sme); if (unlikely(err)) { WL_ERR(("Invalid wpa_version\n")); return err; } -#ifdef BCMWAPI_WPI - if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) - WL_DBG(("4. WAPI Dont Set wl_set_auth_type\n")); - else { - WL_DBG(("4. 
wl_set_auth_type\n")); -#endif err = wl_set_auth_type(dev, sme); if (unlikely(err)) { WL_ERR(("Invalid auth type\n")); return err; } -#ifdef BCMWAPI_WPI - } -#endif err = wl_set_set_cipher(dev, sme); if (unlikely(err)) { @@ -4481,18 +4543,31 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, memcpy(&ext_join_params->assoc.bssid, ðer_bcast, ETH_ALEN); ext_join_params->assoc.chanspec_num = chan_cnt; if (chan_cnt) { - u16 channel, band, bw, ctl_sb; - chanspec_t chspec; - channel = cfg->channel; - band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G - : WL_CHANSPEC_BAND_5G; - bw = WL_CHANSPEC_BW_20; - ctl_sb = WL_CHANSPEC_CTL_SB_NONE; - chspec = (channel | band | bw | ctl_sb); - ext_join_params->assoc.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK; - ext_join_params->assoc.chanspec_list[0] |= chspec; - ext_join_params->assoc.chanspec_list[0] = - wl_chspec_host_to_driver(ext_join_params->assoc.chanspec_list[0]); + if (cfg->channel) { + /* + * Use the channel provided by userspace + */ + u16 channel, band, bw, ctl_sb; + chanspec_t chspec; + channel = cfg->channel; + band = (channel <= CH_MAX_2G_CHANNEL) ? 
WL_CHANSPEC_BAND_2G + : WL_CHANSPEC_BAND_5G; + + /* Get min_bw set for the interface */ + bw = wl_cfg80211_ulb_get_min_bw_chspec(dev->ieee80211_ptr, bssidx); + if (bw == INVCHANSPEC) { + WL_ERR(("Invalid chanspec \n")); + kfree(ext_join_params); + return BCME_ERROR; + } + + ctl_sb = WL_CHANSPEC_CTL_SB_NONE; + chspec = (channel | band | bw | ctl_sb); + ext_join_params->assoc.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK; + ext_join_params->assoc.chanspec_list[0] |= chspec; + ext_join_params->assoc.chanspec_list[0] = + wl_chspec_host_to_driver(ext_join_params->assoc.chanspec_list[0]); + } } ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num); if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) { @@ -4501,17 +4576,26 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, } wl_set_drv_status(cfg, CONNECTING, dev); - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); kfree(ext_join_params); return BCME_ERROR; } +#ifdef WL_EXT_IAPSTA + wl_android_ext_iapsta_disconnect_sta(dev, cfg->channel); +#endif err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); - printf("Connectting with " MACDBG " channel (%d) ssid \"%s\", len (%d)\n\n", - MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)), cfg->channel, - ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len); + if (cfg->rcc_enabled) { + printf("Connecting with " MACDBG " ssid \"%s\", len (%d) with rcc channels \n\n", + MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)), + ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len); + } else { + printf("Connecting with " MACDBG " ssid \"%s\", len (%d) channel=%d\n\n", + MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)), + 
ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, cfg->channel); + } kfree(ext_join_params); if (err) { @@ -4539,7 +4623,11 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, else memcpy(&join_params.params.bssid, ðer_bcast, ETH_ALEN); - wl_ch_to_chanspec(cfg->channel, &join_params, &join_params_size); + if (wl_ch_to_chanspec(dev, cfg->channel, &join_params, &join_params_size) < 0) { + WL_ERR(("Invalid chanspec\n")); + return -EINVAL; + } + WL_DBG(("join_param_size %zu\n", join_params_size)); if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) { @@ -4580,26 +4668,35 @@ wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, act = true; } #endif /* ESCAN_RESULT_PATCH */ + if (act) { /* * Cancel ongoing scan to sync up with sme state machine of cfg80211. */ -#if (!defined(ESCAN_RESULT_PATCH) || defined(CUSTOMER_HW10)) +#if !defined(ESCAN_RESULT_PATCH) /* Let scan aborted by F/W */ if (cfg->scan_request) { + WL_TRACE_HW4(("Aborting the scan! \n")); wl_notify_escan_complete(cfg, dev, true, true); } #endif /* ESCAN_RESULT_PATCH */ - wl_set_drv_status(cfg, DISCONNECTING, dev); - scbval.val = reason_code; - memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN); - scbval.val = htod32(scbval.val); - err = wldev_ioctl(dev, WLC_DISASSOC, &scbval, - sizeof(scb_val_t), true); - if (unlikely(err)) { - wl_clr_drv_status(cfg, DISCONNECTING, dev); - WL_ERR(("error (%d)\n", err)); - return err; + if (wl_get_drv_status(cfg, CONNECTING, dev) || + wl_get_drv_status(cfg, CONNECTED, dev)) { + wl_set_drv_status(cfg, DISCONNECTING, dev); + scbval.val = reason_code; + memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN); + scbval.val = htod32(scbval.val); + err = wldev_ioctl(dev, WLC_DISASSOC, &scbval, + sizeof(scb_val_t), true); + if (unlikely(err)) { + wl_clr_drv_status(cfg, DISCONNECTING, dev); + WL_ERR(("error (%d)\n", err)); + return err; + } +#if defined(BCM4358_CHIP) + WL_ERR(("Wait for complete of disconnecting \n")); + OSL_SLEEP(200); +#endif /* 
BCM4358_CHIP */ } } #ifdef CUSTOM_SET_CPUCORE @@ -4692,8 +4789,9 @@ wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev, s32 wsec; s32 err = 0; s32 bssidx; - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from dev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } @@ -4726,15 +4824,16 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev, s32 err = 0; s32 bssidx; s32 mode = wl_get_mode_by_netdev(cfg, dev); - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } memset(&key, 0, sizeof(key)); key.index = (u32) key_idx; if (!ETHER_ISMULTI(mac_addr)) - memcpy((char *)&key.ea, (void *)mac_addr, ETHER_ADDR_LEN); + memcpy((char *)&key.ea, (const void *)mac_addr, ETHER_ADDR_LEN); key.len = (u32) params->key_len; /* check for key index change */ @@ -4795,12 +4894,6 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev, key.algo = CRYPTO_ALGO_AES_CCM; WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n")); break; -#ifdef BCMWAPI_WPI - case WLAN_CIPHER_SUITE_SMS4: - key.algo = CRYPTO_ALGO_SMS4; - WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n")); - break; -#endif default: WL_ERR(("Invalid cipher (0x%x)\n", params->cipher)); return -EINVAL; @@ -4832,8 +4925,14 @@ wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable) if (err) return err; + if (enable) { + err = wldev_iovar_setint(dev, "sup_wpa_tmo", IDSUP_4WAY_HANDSHAKE_TIMEOUT); + if (err) { + WL_INFORM(("Setting 'sup_wpa_tmo' failed, err=%d\n", err)); + } + } + bzero(&ev_buf, sizeof(wl_eventmsg_buf_t)); - wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_PSK_SUP, !enable); wl_cfg80211_add_to_eventbuffer(&ev_buf, 
WLC_E_ASSOC_REQ_IE, !enable); wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable); wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable); @@ -4846,6 +4945,50 @@ wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable) return err; } +#if defined(WL_VIRTUAL_APSTA) +int +wl_cfg80211_interface_create(struct net_device *dev, char *name) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + bcm_struct_cfgdev *new_cfgdev; + + new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy, + NL80211_IFTYPE_STATION, NULL, name); + if (!new_cfgdev) { + return BCME_ERROR; + } + else { + WL_DBG(("Iface %s created successfuly\n", name)); + return BCME_OK; + } +} + +int +wl_cfg80211_interface_delete(struct net_device *dev, char *name) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *iter, *next; + int err = BCME_ERROR; + + if (name == NULL) { + return BCME_ERROR; + } + + for_each_ndev(cfg, iter, next) { + if (iter->ndev) { + if (strcmp(iter->ndev->name, name) == 0) { + err = wl_cfg80211_del_iface(cfg->wdev->wiphy, cfg->bss_cfgdev); + break; + } + } + } + if (!err) { + WL_DBG(("Iface %s deleted successfuly", name)); + } + return err; +} +#endif /* defined (WL_VIRTUAL_APSTA) */ + static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr, @@ -4862,8 +5005,8 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, WL_DBG(("key index (%d)\n", key_idx)); RETURN_EIO_IF_NOT_UP(cfg); - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from dev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } @@ -4874,6 +5017,8 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, goto exit; } memset(&key, 0, sizeof(key)); + /* Clear any buffered wep key */ + memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key)); 
key.len = (u32) params->key_len; key.index = (u32) key_idx; @@ -4917,46 +5062,6 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, val = AES_ENABLED; WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n")); break; -#ifdef BCMWAPI_WPI - case WLAN_CIPHER_SUITE_SMS4: - key.algo = CRYPTO_ALGO_SMS4; - WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n")); - val = SMS4_ENABLED; - break; -#endif /* BCMWAPI_WPI */ -#if defined(WLFBT) && defined(WLAN_CIPHER_SUITE_PMK) - case WLAN_CIPHER_SUITE_PMK: { - int j; - wsec_pmk_t pmk; - char keystring[WSEC_MAX_PSK_LEN + 1]; - char* charptr = keystring; - uint len; - struct wl_security *sec; - - sec = wl_read_prof(cfg, dev, WL_PROF_SEC); - if (sec->wpa_auth == WLAN_AKM_SUITE_8021X) { - err = wldev_iovar_setbuf(dev, "okc_info_pmk", params->key, - WSEC_MAX_PSK_LEN / 2, keystring, sizeof(keystring), NULL); - if (err) { - /* could fail in case that 'okc' is not supported */ - WL_INFORM(("Setting 'okc_info_pmk' failed, err=%d\n", err)); - } - } - /* copy the raw hex key to the appropriate format */ - for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) { - sprintf(charptr, "%02x", params->key[j]); - charptr += 2; - } - len = strlen(keystring); - pmk.key_len = htod16(len); - bcopy(keystring, pmk.key, len); - pmk.flags = htod16(WSEC_PASSPHRASE); - - err = wldev_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk), true); - if (err) - return err; - } break; -#endif /* WLFBT && WLAN_CIPHER_SUITE_PMK */ default: WL_ERR(("Invalid cipher (0x%x)\n", params->cipher)); return -EINVAL; @@ -4968,6 +5073,19 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_NONE); } swap_key_from_BE(&key); + if ((params->cipher == WLAN_CIPHER_SUITE_WEP40) || + (params->cipher == WLAN_CIPHER_SUITE_WEP104)) { + /* + * For AP role, since we are doing a wl down before bringing up AP, + * the plumbed keys will be lost. So for AP once we bring up AP, we + * need to plumb keys again. So buffer the keys for future use. This + * is more like a WAR. 
If firmware later has the capability to do + * interface upgrade without doing a "wl down" and "wl apsta 0", then + * this will not be required. + */ + WL_DBG(("Buffering WEP Keys \n")); + memcpy(&cfg->wep_key, &key, sizeof(struct wl_wsec_key)); + } err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); if (unlikely(err)) { @@ -5000,16 +5118,15 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); s32 err = 0; s32 bssidx; - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + + WL_DBG(("Enter. key_idx: %d\n", key_idx)); + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } - WL_DBG(("Enter\n")); -#ifndef IEEE80211W if ((key_idx >= DOT11_MAX_DEFAULT_KEYS) && (key_idx < DOT11_MAX_DEFAULT_KEYS+2)) return -EINVAL; -#endif RETURN_EIO_IF_NOT_UP(cfg); memset(&key, 0, sizeof(key)); @@ -5018,7 +5135,6 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, key.algo = CRYPTO_ALGO_OFF; key.index = (u32) key_idx; - WL_DBG(("key index (%d)\n", key_idx)); /* Set the new key/index */ swap_key_from_BE(&key); err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf, @@ -5049,8 +5165,9 @@ wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, s32 wsec; s32 err = 0; s32 bssidx; - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } WL_DBG(("key index (%d)\n", key_idx)); @@ -5060,7 +5177,7 @@ wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, swap_key_to_BE(&key); memset(¶ms, 0, sizeof(params)); 
params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len); - memcpy(params.key, key.data, params.key_len); + memcpy((void *)params.key, key.data, params.key_len); err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx); if (unlikely(err)) { @@ -5086,12 +5203,6 @@ wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n")); break; -#ifdef BCMWAPI_WPI - case WLAN_CIPHER_SUITE_SMS4: - key.algo = CRYPTO_ALGO_SMS4; - WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n")); - break; -#endif #if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED) /* to connect to mixed mode AP */ case (AES_ENABLED | TKIP_ENABLED): /* TKIP CCMP */ @@ -5108,6 +5219,14 @@ wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, return err; } +static s32 +wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy, + struct net_device *dev, u8 key_idx) +{ + WL_INFORM(("Not supported\n")); + return -EOPNOTSUPP; +} + #if defined(RSSIAVG) static wl_rssi_cache_ctrl_t g_rssi_cache_ctrl; static wl_rssi_cache_ctrl_t g_connected_rssi_cache_ctrl; @@ -5117,20 +5236,13 @@ static wl_bss_cache_ctrl_t g_bss_cache_ctrl; #endif static s32 -wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy, - struct net_device *dev, u8 key_idx) -{ -#ifdef MFP - return 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, + const u8 *mac, struct station_info *sinfo) #else - WL_INFORM(("Not supported\n")); - return -EOPNOTSUPP; -#endif /* MFP */ -} - -static s32 wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_info *sinfo) +#endif { struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); scb_val_t scb_val; @@ -5138,11 +5250,14 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, s32 rate; s32 err = 0; sta_info_t *sta; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(3, 0, 0)) s8 eabuf[ETHER_ADDR_STR_LEN]; #endif - static int err_cnt = 0; dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + bool fw_assoc_state = FALSE; + u32 dhd_assoc_state = 0; + static int err_cnt = 0; + RETURN_EIO_IF_NOT_UP(cfg); if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) { err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac, @@ -5151,7 +5266,7 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, WL_ERR(("GET STA INFO failed, %d\n", err)); return err; } - sinfo->filled = STATION_INFO_INACTIVE_TIME; + sinfo->filled = STA_INFO_BIT(INFO_INACTIVE_TIME); sta = (sta_info_t *)cfg->ioctl_buf; sta->len = dtoh16(sta->len); sta->cap = dtoh16(sta->cap); @@ -5159,9 +5274,9 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, sta->idle = dtoh32(sta->idle); sta->in = dtoh32(sta->in); sinfo->inactive_time = sta->idle * 1000; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) if (sta->flags & WL_STA_ASSOC) { - sinfo->filled |= STATION_INFO_CONNECTED_TIME; + sinfo->filled |= STA_INFO_BIT(INFO_CONNECTED_TIME); sinfo->connected_time = sta->in; } WL_INFORM(("STA %s : idle time : %d sec, connected time :%d ms\n", @@ -5179,7 +5294,8 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, if (err) { WL_ERR(("Failed to get current BSSID\n")); } else { - if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) { + if (!ETHER_ISNULLADDR(&bssid.octet) && + memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) { /* roaming is detected */ err = wl_cfg80211_delayed_roam(cfg, dev, &bssid); if (err) @@ -5189,14 +5305,42 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, } } } - if (!wl_get_drv_status(cfg, CONNECTED, dev) || - (dhd_is_associated(dhd, NULL, &err) == FALSE)) { + dhd_assoc_state = wl_get_drv_status(cfg, CONNECTED, dev); + fw_assoc_state = dhd_is_associated(dhd, 0, &err); + if (!dhd_assoc_state || 
!fw_assoc_state) { WL_ERR(("NOT assoc\n")); if (err == -ERESTARTSYS) return err; + if (!dhd_assoc_state) { + WL_TRACE_HW4(("drv state is not connected \n")); + } + if (!fw_assoc_state) { + WL_TRACE_HW4(("fw state is not associated \n")); + } + /* Disconnect due to fw is not associated for FW_ASSOC_WATCHDOG_TIME ms. + * 'err == 0' of dhd_is_associated() and '!fw_assoc_state' + * means that BSSID is null. + */ + if (dhd_assoc_state && !fw_assoc_state && !err) { + if (!fw_assoc_watchdog_started) { + fw_assoc_watchdog_ms = OSL_SYSUPTIME(); + fw_assoc_watchdog_started = TRUE; + WL_TRACE_HW4(("fw_assoc_watchdog_started \n")); + } else { + if (OSL_SYSUPTIME() - fw_assoc_watchdog_ms > + FW_ASSOC_WATCHDOG_TIME) { + fw_assoc_watchdog_started = FALSE; + err = -ENODEV; + WL_TRACE_HW4(("fw is not associated for %d ms \n", + (OSL_SYSUPTIME() - fw_assoc_watchdog_ms))); + goto get_station_err; + } + } + } err = -ENODEV; return err; } + fw_assoc_watchdog_started = FALSE; curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID); if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) { WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n", @@ -5212,7 +5356,7 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, int rxpktglom; #endif rate = dtoh32(rate); - sinfo->filled |= STATION_INFO_TX_BITRATE; + sinfo->filled |= STA_INFO_BIT(INFO_TX_BITRATE); sinfo->txrate.legacy = rate * 5; WL_DBG(("Rate %d Mbps\n", (rate / 2))); #if defined(USE_DYNAMIC_MAXPKT_RXGLOM) @@ -5257,16 +5401,16 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, // terence 20150419: limit the max. 
rssi to -2 or the bss will be filtered out in android OS rssi = MIN(rssi, RSSI_MAXVAL); #endif - sinfo->filled |= STATION_INFO_SIGNAL; + sinfo->filled |= STA_INFO_BIT(INFO_SIGNAL); sinfo->signal = rssi; WL_DBG(("RSSI %d dBm\n", rssi)); err = wldev_ioctl(dev, WLC_GET_PKTCNTS, &pktcnt, sizeof(pktcnt), false); if (!err) { - sinfo->filled |= (STATION_INFO_RX_PACKETS | - STATION_INFO_RX_DROP_MISC | - STATION_INFO_TX_PACKETS | - STATION_INFO_TX_FAILED); + sinfo->filled |= (STA_INFO_BIT(INFO_RX_PACKETS) | + STA_INFO_BIT(INFO_RX_DROP_MISC) | + STA_INFO_BIT(INFO_TX_PACKETS) | + STA_INFO_BIT(INFO_TX_FAILED)); sinfo->rx_packets = pktcnt.rx_good_pkt; sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt; sinfo->tx_packets = pktcnt.tx_good_pkt; @@ -5281,7 +5425,7 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, /* Disconnect due to zero BSSID or error to get RSSI */ WL_ERR(("force cfg80211_disconnected: %d\n", err)); wl_clr_drv_status(cfg, CONNECTED, dev); - cfg80211_disconnected(dev, 0, NULL, 0, GFP_KERNEL); + CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL); wl_link_down(cfg); } } @@ -5304,13 +5448,14 @@ wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, RETURN_EIO_IF_NOT_UP(cfg); WL_DBG(("Enter\n")); - if (cfg->p2p_net == dev || _net_info == NULL || cfg->vsdb_mode || - !wl_get_drv_status(cfg, CONNECTED, dev)) { + if (cfg->p2p_net == dev || _net_info == NULL || + !wl_get_drv_status(cfg, CONNECTED, dev) || + (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_BSS && + wl_get_mode_by_netdev(cfg, dev) != WL_MODE_IBSS)) { return err; } - - /* Delete pm_enable_work */ - wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_PEND); + /* Enlarge pm_enable_work */ + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_LONG); pm = enabled ? PM_FAST : PM_OFF; if (_net_info->pm_block) { @@ -5345,6 +5490,18 @@ void wl_cfg80211_update_power_mode(struct net_device *dev) dev->ieee80211_ptr->ps = (pm == PM_OFF) ? 
false : true; } +void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + if (strcmp(command, "SCAN-ACTIVE") == 0) { + cfg->active_scan = 1; + } else if (strcmp(command, "SCAN-PASSIVE") == 0) { + cfg->active_scan = 0; + } else + WL_ERR(("Unknown command \n")); +} + static __used u32 wl_find_msb(u16 bit16) { u32 ret = 0; @@ -5376,52 +5533,71 @@ static s32 wl_cfg80211_resume(struct wiphy *wiphy) { struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); - s32 err = 0; + s32 err = BCME_OK; if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) { WL_INFORM(("device is not ready\n")); - return 0; + return err; } return err; } static s32 -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow) #else wl_cfg80211_suspend(struct wiphy *wiphy) -#endif +#endif /* KERNEL_VERSION(2, 6, 39) || WL_COMPAT_WIRELES */ { + s32 err = BCME_OK; #ifdef DHD_CLEAR_ON_SUSPEND struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); struct net_info *iter, *next; struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); unsigned long flags; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + struct cfg80211_scan_info info; +#endif + if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) { WL_INFORM(("device is not ready : status (%d)\n", (int)cfg->status)); - return 0; + return err; } - for_each_ndev(cfg, iter, next) - wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev); + for_each_ndev(cfg, iter, next) { + /* p2p discovery iface doesn't have a ndev associated with it (for kernel > 3.8) */ + if (iter->ndev) + wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev); + } spin_lock_irqsave(&cfg->cfgdrv_lock, flags); if (cfg->scan_request) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + info.aborted = true; + cfg80211_scan_done(cfg->scan_request, &info); +#else 
cfg80211_scan_done(cfg->scan_request, true); +#endif cfg->scan_request = NULL; } for_each_ndev(cfg, iter, next) { - wl_clr_drv_status(cfg, SCANNING, iter->ndev); - wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev); + if (iter->ndev) { + wl_clr_drv_status(cfg, SCANNING, iter->ndev); + wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev); + } } spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); for_each_ndev(cfg, iter, next) { - if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) { - wl_bss_connect_done(cfg, iter->ndev, NULL, NULL, false); + if (iter->ndev) { + if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) { + wl_bss_connect_done(cfg, iter->ndev, NULL, NULL, false); + } } } #endif /* DHD_CLEAR_ON_SUSPEND */ - return 0; + + + return err; } static s32 @@ -5433,7 +5609,7 @@ wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list, struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg); if (!pmk_list) { - printk("pmk_list is NULL\n"); + printf("pmk_list is NULL\n"); return -EINVAL; } /* pmk list is supported only for STA interface i.e. 
primary interface @@ -5502,7 +5678,8 @@ wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_pmksa *pmksa) { struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); - struct _pmkid_list pmkid = {0}; + + struct _pmkid_list pmkid = {.npmkid = 0}; s32 err = 0; int i; @@ -5562,6 +5739,7 @@ wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size) wl_scan_params_t *params; int params_size; int num_chans; + int bssidx = 0; *out_params_size = 0; @@ -5587,7 +5765,7 @@ wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size) if (channel == -1) params->channel_list[0] = htodchanspec(channel); else - params->channel_list[0] = wl_ch_host_to_driver(channel); + params->channel_list[0] = wl_ch_host_to_driver(bssidx, channel); /* Our scan params have 1 channel and 0 ssids */ params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) | @@ -5609,7 +5787,6 @@ wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, #endif /* WL_CFG80211_P2P_DEV_IF */ { s32 target_channel; - u32 id; s32 err = BCME_OK; struct ether_addr primary_mac; struct net_device *ndev = NULL; @@ -5627,6 +5804,13 @@ wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, goto exit; } +#ifdef P2P_LISTEN_OFFLOADING + if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { + WL_ERR(("P2P_FIND: Discovery offload is in progress\n")); + return -EAGAIN; + } +#endif /* P2P_LISTEN_OFFLOADING */ + #ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST if (wl_get_drv_status_all(cfg, SCANNING)) { wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); @@ -5638,30 +5822,30 @@ wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, #if defined(WL_ENABLE_P2P_IF) cfg->remain_on_chan_type = channel_type; #endif /* WL_ENABLE_P2P_IF */ - id = ++cfg->last_roc_id; - if (id == 0) - id = ++cfg->last_roc_id; - *cookie = id; - + *cookie = wl_cfg80211_get_new_roc_id(cfg); #ifdef 
WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST if (wl_get_drv_status(cfg, SCANNING, ndev)) { struct timer_list *_timer; WL_DBG(("scan is running. go to fake listen state\n")); - wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev); + if (duration > LONG_LISTEN_TIME) { + wl_cfg80211_scan_abort(cfg); + } else { + wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev); - if (timer_pending(&cfg->p2p->listen_timer)) { - WL_DBG(("cancel current listen timer \n")); - del_timer_sync(&cfg->p2p->listen_timer); - } + if (timer_pending(&cfg->p2p->listen_timer)) { + WL_DBG(("cancel current listen timer \n")); + del_timer_sync(&cfg->p2p->listen_timer); + } - _timer = &cfg->p2p->listen_timer; - wl_clr_p2p_status(cfg, LISTEN_EXPIRED); + _timer = &cfg->p2p->listen_timer; + wl_clr_p2p_status(cfg, LISTEN_EXPIRED); - INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration, 0); + INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration, 0); - err = BCME_OK; - goto exit; + err = BCME_OK; + goto exit; + } } #endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ @@ -5683,7 +5867,7 @@ wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, * without turning on P2P */ get_primary_mac(cfg, &primary_mac); - wl_cfgp2p_generate_bss_mac(&primary_mac, &cfg->p2p->dev_addr, &cfg->p2p->int_addr); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); p2p_on(cfg) = true; } @@ -5734,12 +5918,13 @@ wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, u64 cookie) { s32 err = 0; + struct bcm_cfg80211 *cfg = g_bcm_cfg; #ifdef P2PLISTEN_AP_SAMECHN - struct bcm_cfg80211 *cfg = g_bcm_cfg; struct net_device *dev; #endif /* P2PLISTEN_AP_SAMECHN */ + RETURN_EIO_IF_NOT_UP(cfg); #if defined(WL_CFG80211_P2P_DEV_IF) if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) { WL_DBG((" enter ) on P2P dedicated discover interface\n")); @@ -5756,6 +5941,15 @@ wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, WL_DBG(("p2p_resp_apchn_status Turn OFF \n")); } #endif /* 
P2PLISTEN_AP_SAMECHN */ + + if (cfg->last_roc_id == cookie) { + wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE)); + } else { + WL_ERR(("%s : ignore, request cookie(%llu) is not matched. (cur : %llu)\n", + __FUNCTION__, cookie, cfg->last_roc_id)); + } + return err; } @@ -5766,7 +5960,7 @@ wl_cfg80211_afx_handler(struct work_struct *work) struct bcm_cfg80211 *cfg = g_bcm_cfg; s32 ret = BCME_OK; - afx_instance = container_of(work, struct afx_hdl, work); + BCM_SET_CONTAINER_OF(afx_instance, work, struct afx_hdl, work); if (afx_instance != NULL && cfg->afx_hdl->is_active) { if (cfg->afx_hdl->is_listen && cfg->afx_hdl->my_listen_chan) { ret = wl_cfgp2p_discover_listen(cfg, cfg->afx_hdl->my_listen_chan, @@ -5788,6 +5982,7 @@ static s32 wl_cfg80211_af_searching_channel(struct bcm_cfg80211 *cfg, struct net_device *dev) { u32 max_retry = WL_CHANNEL_SYNC_RETRY; + bool is_p2p_gas = false; if (dev == NULL) return -1; @@ -5797,6 +5992,13 @@ wl_cfg80211_af_searching_channel(struct bcm_cfg80211 *cfg, struct net_device *de wl_set_drv_status(cfg, FINDING_COMMON_CHANNEL, dev); cfg->afx_hdl->is_active = TRUE; + if (cfg->afx_hdl->pending_tx_act_frm) { + wl_action_frame_t *action_frame; + action_frame = &(cfg->afx_hdl->pending_tx_act_frm->action_frame); + if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) + is_p2p_gas = true; + } + /* Loop to wait until we find a peer's channel or the * pending action frame tx is cancelled. 
*/ @@ -5815,6 +6017,9 @@ wl_cfg80211_af_searching_channel(struct bcm_cfg80211 *cfg, struct net_device *de !(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev))) break; + if (is_p2p_gas) + break; + if (cfg->afx_hdl->my_listen_chan) { WL_DBG(("Scheduling Listen peer in my listen channel = %d\n", cfg->afx_hdl->my_listen_chan)); @@ -6005,7 +6210,17 @@ wl_cfg80211_check_DFS_channel(struct bcm_cfg80211 *cfg, wl_af_params_t *af_param return result; } #endif /* WL11U */ - +static bool +wl_cfg80211_check_dwell_overflow(int32 requested_dwell, ulong dwell_jiffies) +{ + if ((requested_dwell & CUSTOM_RETRY_MASK) && + (jiffies_to_msecs(jiffies - dwell_jiffies) > + (requested_dwell & ~CUSTOM_RETRY_MASK))) { + WL_ERR(("Action frame TX retry time over dwell time!\n")); + return true; + } + return false; +} static bool wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev, @@ -6020,11 +6235,15 @@ wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev, u8 category, action; s32 tx_retry; struct p2p_config_af_params config_af_params; + struct net_info *netinfo; #ifdef VSDB ulong off_chan_started_jiffies = 0; #endif + ulong dwell_jiffies = 0; + bool dwell_overflow = false; dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + int32 requested_dwell = af_params->dwell_time; /* Add the default dwell time * Dwell time to stay off-channel to wait for a response action frame @@ -6076,6 +6295,15 @@ wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev, cfg->next_af_subtype = action + 1; af_params->dwell_time = WL_MED_DWELL_TIME; + if (requested_dwell & CUSTOM_RETRY_MASK) { + config_af_params.max_tx_retry = + (requested_dwell & CUSTOM_RETRY_MASK) >> 24; + af_params->dwell_time = + (requested_dwell & ~CUSTOM_RETRY_MASK); + WL_DBG(("Custom retry(%d) and dwell time(%d) is set.\n", + config_af_params.max_tx_retry, + af_params->dwell_time)); + } } else if (action == P2PSD_ACTION_ID_GAS_IRESP || action == P2PSD_ACTION_ID_GAS_CRESP) { /* configure 
service discovery response frame */ @@ -6103,9 +6331,10 @@ wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev, wldev_iovar_setint(dev, "mpc", 0); } + netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx); /* validate channel and p2p ies */ if (config_af_params.search_channel && IS_P2P_SOCIAL(af_params->channel) && - wl_to_p2p_bss_saved_ie(cfg, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len) { + netinfo && netinfo->bss.ies.probe_req_ie_len) { config_af_params.search_channel = true; } else { config_af_params.search_channel = false; @@ -6127,6 +6356,14 @@ wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev, wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); } + /* Abort P2P listen */ + if (discover_cfgdev(cfgdev, cfg)) { + if (cfg->p2p_supported && cfg->p2p) { + wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE)); + } + } + #ifdef WL11U /* handling DFS channel exceptions */ if (!wl_cfg80211_check_DFS_channel(cfg, af_params, action_frame->data, action_frame->len)) { @@ -6147,11 +6384,17 @@ wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev, /* save af_params for rx process */ cfg->afx_hdl->pending_tx_act_frm = af_params; + if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) { + WL_DBG(("Set GAS action frame config.\n")); + config_af_params.search_channel = false; + config_af_params.max_tx_retry = 1; + } + /* search peer's channel */ if (config_af_params.search_channel) { /* initialize afx_hdl */ - if (wl_cfgp2p_find_idx(cfg, dev, &cfg->afx_hdl->bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + if ((cfg->afx_hdl->bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); goto exit; } cfg->afx_hdl->dev = dev; @@ -6175,7 +6418,10 @@ wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev, /* Suspend P2P 
discovery's search-listen to prevent it from * starting a scan or changing the channel. */ - wl_cfgp2p_discover_enable_search(cfg, false); + if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) { + WL_ERR(("Can not disable discovery mode\n")); + goto exit; + } /* update channel */ af_params->channel = cfg->afx_hdl->peer_chan; @@ -6187,11 +6433,16 @@ wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev, wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len, af_params->channel); + wl_cfgp2p_need_wait_actfrmae(cfg, action_frame->data, action_frame->len, true); + + dwell_jiffies = jiffies; /* Now send a tx action frame */ ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ? false : true; + dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies); /* if failed, retry it. tx_retry_max value is configure by .... */ - while ((ack == false) && (tx_retry++ < config_af_params.max_tx_retry)) { + while ((ack == false) && (tx_retry++ < config_af_params.max_tx_retry) && + !dwell_overflow) { #ifdef VSDB if (af_params->channel) { if (jiffies_to_msecs(jiffies - off_chan_started_jiffies) > @@ -6204,6 +6455,7 @@ wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev, #endif /* VSDB */ ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ? 
false : true; + dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies); } if (ack == false) { @@ -6271,10 +6523,10 @@ wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, bool channel_type_valid, #endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0) */ unsigned int wait, const u8* buf, size_t len, -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) bool no_cck, #endif -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) bool dont_wait_for_ack, #endif u64 *cookie) @@ -6315,8 +6567,8 @@ wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); } else { - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfgdev_to_wdev(cfgdev))) < 0) { + WL_ERR(("Find p2p index failed\n")); return BCME_ERROR; } } @@ -6342,15 +6594,19 @@ wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, if (ieee80211_is_probe_resp(mgmt->frame_control)) { s32 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN; s32 ie_len = len - ie_offset; -#ifdef P2PONEINT - if (dev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION)) - dev = bcmcfg_to_prmry_ndev(cfg); -#endif - if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p) + if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p) { bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); - wl_cfgp2p_set_management_ie(cfg, dev, bssidx, - VNDR_IE_PRBRSP_FLAG, (u8 *)(buf + ie_offset), ie_len); + } + wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_PRBRSP_FLAG, (const u8 *)(buf + ie_offset), ie_len); cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL); +#if defined(P2P_IE_MISSING_FIX) + if (!cfg->p2p_prb_noti) { + 
cfg->p2p_prb_noti = true; + WL_DBG(("%s: TX 802_1X Probe Response first time.\n", + __FUNCTION__)); + } +#endif goto exit; } else if (ieee80211_is_disassoc(mgmt->frame_control) || ieee80211_is_deauth(mgmt->frame_control)) { @@ -6472,17 +6728,6 @@ wl_cfg80211_change_bss(struct wiphy *wiphy, { s32 err = 0; s32 ap_isolate = 0; -#if defined(SUPPORT_HOSTAPD_BGN_MODE) - dhd_pub_t *dhd; - s32 gmode = -1, nmode = -1; - s32 gmode_prev = -1, nmode_prev = -1; - struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); -#if defined(WL_ENABLE_P2P_IF) - if (cfg->p2p_net == dev) - dev = bcmcfg_to_prmry_ndev(cfg); -#endif - dhd = (dhd_pub_t *)(cfg->pub); -#endif /* SUPPORT_HOSTAPD_BGN_MODE */ if (params->use_cts_prot >= 0) { } @@ -6494,22 +6739,6 @@ wl_cfg80211_change_bss(struct wiphy *wiphy, } if (params->basic_rates) { -#if defined(SUPPORT_HOSTAPD_BGN_MODE) - switch ((int)(params->basic_rates[params->basic_rates_len -1])) { - case 22: /* B only , rate 11 */ - gmode = 0; - nmode = 0; - break; - case 108: /* G only , rate 54 */ - gmode = 2; - nmode = 0; - break; - default: - gmode = -1; - nmode = -1; - break; - } -#endif /* SUPPORT_HOSTAPD_BGN_MODE */ } if (params->ap_isolate >= 0) { @@ -6522,77 +6751,24 @@ wl_cfg80211_change_bss(struct wiphy *wiphy, } if (params->ht_opmode >= 0) { -#if defined(SUPPORT_HOSTAPD_BGN_MODE) - nmode = 1; - gmode = 1; - } else { - nmode = 0; -#endif /* SUPPORT_HOSTAPD_BGN_MODE */ - } - -#if defined(SUPPORT_HOSTAPD_BGN_MODE) - err = wldev_iovar_getint(dev, "nmode", &nmode_prev); - if (unlikely(err)) { - WL_ERR(("error reading nmode (%d)\n", err)); - } - if (nmode == nmode_prev) { - nmode = -1; - } - err = wldev_ioctl(dev, WLC_GET_GMODE, &gmode_prev, sizeof(gmode_prev), 0); - if (unlikely(err)) { - WL_ERR(("error reading gmode (%d)\n", err)); - } - if (gmode == gmode_prev) { - gmode = -1; } - if (((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) == DHD_FLAG_HOSTAP_MODE) && - ((gmode > -1) || (nmode > -1))) { - s32 val = 0; - - err = wldev_ioctl(dev, WLC_DOWN, &val, sizeof(s32), 
true); - if (unlikely(err)) - WL_ERR(("WLC_DOWN command failed:[%d]\n", err)); - - if (nmode > -1) { - err = wldev_iovar_setint(dev, "nmode", nmode); - if (unlikely(err)) - WL_ERR(("nmode command failed:mode[%d]:err[%d]\n", nmode, err)); - } - - if (gmode > -1) { - err = wldev_ioctl(dev, WLC_SET_GMODE, &gmode, sizeof(s32), true); - if (unlikely(err)) - WL_ERR(("WLC_SET_GMODE command failed:mode[%d]:err[%d]\n", - gmode, err)); - } - - val = 0; - err = wldev_ioctl(dev, WLC_UP, &val, sizeof(s32), true); - if (unlikely(err)) - WL_ERR(("WLC_UP command failed:err[%d]\n", err)); - - } -#endif /* SUPPORT_HOSTAPD_BGN_MODE */ return 0; } static s32 -#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && !defined(WL_COMPAT_WIRELESS)) -wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev, - struct ieee80211_channel *chan, - struct cfg80211_chan_def chandef) -#else wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type) -#endif /* ((LINUX_VERSION >= VERSION(3, 6, 0) && !WL_COMPAT_WIRELESS) */ { s32 _chan; chanspec_t chspec = 0; chanspec_t fw_chspec = 0; u32 bw = WL_CHANSPEC_BW_20; +#ifdef WL11ULB + u32 ulb_bw = wl_cfg80211_get_ulb_bw(dev->ieee80211_ptr); +#endif /* WL11ULB */ s32 err = BCME_OK; s32 bw_cap = 0; @@ -6605,41 +6781,19 @@ wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev, dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); #endif /* CUSTOM_SET_CPUCORE */ -#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && !defined(WL_COMPAT_WIRELESS)) - enum nl80211_channel_type channel_type = NL80211_CHAN_HT20; -#endif /* ((LINUX_VERSION >= VERSION(3, 6, 0) && !WL_COMPAT_WIRELESS) */ - -#ifndef P2PONEINT dev = ndev_to_wlc_ndev(dev, cfg); -#endif _chan = ieee80211_frequency_to_channel(chan->center_freq); printf("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n", dev->ifindex, channel_type, _chan); -#ifdef CUSTOM_PLATFORM_NV_TEGRA -#if ((LINUX_VERSION_CODE >= 
KERNEL_VERSION(3, 8, 0)) && !defined(WL_COMPAT_WIRELESS)) - WL_ERR(("chan_width = %d\n", chandef.width)); - switch (chandef.width) { - case NL80211_CHAN_WIDTH_40: - bw = WL_CHANSPEC_BW_40; - break; - case NL80211_CHAN_WIDTH_80: - bw = WL_CHANSPEC_BW_80; - break; - case NL80211_CHAN_WIDTH_80P80: - bw = WL_CHANSPEC_BW_8080; - break; - case NL80211_CHAN_WIDTH_160: - bw = WL_CHANSPEC_BW_160; - break; - default: - bw = WL_CHANSPEC_BW_20; - break; - } - goto set_channel; -#endif /* ((LINUX_VERSION >= VERSION(3, 8, 0) && !WL_COMPAT_WIRELESS) */ -#endif /* CUSTOM_PLATFORM_NV_TEGRA */ +#ifdef WL11ULB + if (ulb_bw) { + WL_DBG(("[ULB] setting AP/GO BW to ulb_bw 0x%x \n", ulb_bw)); + bw = wl_cfg80211_ulbbw_to_ulbchspec(ulb_bw); + goto set_channel; + } +#endif /* WL11ULB */ if (chan->band == IEEE80211_BAND_5GHZ) { param.band = WLC_BAND_5G; err = wldev_iovar_getbuf(dev, "bw_cap", ¶m, sizeof(param), @@ -6706,13 +6860,24 @@ wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev, #ifdef CUSTOM_SET_CPUCORE if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) { WL_DBG(("SoftAP mode do not need to set cpucore\n")); - } else if ((dev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION)) && - (chspec & WL_CHANSPEC_BW_80)) { - /* If GO is vht80 */ - dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE; - dhd_set_cpucore(dhd, TRUE); + } else if (chspec & WL_CHANSPEC_BW_80) { + /* SoftAp only mode do not need to set cpucore */ + if ((dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) && + dev != bcmcfg_to_prmry_ndev(cfg)) { + /* Soft AP on virtual Iface (AP+STA case) */ + dhd->chan_isvht80 |= DHD_FLAG_HOSTAP_MODE; + dhd_set_cpucore(dhd, TRUE); + } else if (is_p2p_group_iface(dev->ieee80211_ptr)) { + /* If P2P IF is vht80 */ + dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE; + dhd_set_cpucore(dhd, TRUE); + } } #endif /* CUSTOM_SET_CPUCORE */ + if (!err && (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) { + /* Update AP/GO operating channel */ + cfg->ap_oper_channel = _chan; + } return err; } @@ -6731,9 +6896,11 
@@ wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg) #endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ static s32 -wl_validate_opensecurity(struct net_device *dev, s32 bssidx) +wl_validate_opensecurity(struct net_device *dev, s32 bssidx, bool privacy) { s32 err = BCME_OK; + u32 wpa_val; + s32 wsec = 0; /* set auth */ err = wldev_iovar_setint_bsscfg(dev, "auth", 0, bssidx); @@ -6741,17 +6908,26 @@ wl_validate_opensecurity(struct net_device *dev, s32 bssidx) WL_ERR(("auth error %d\n", err)); return BCME_ERROR; } -#ifndef CUSTOMER_HW10 /* for WEP Support */ + + if (privacy) { + /* If privacy bit is set in open mode, then WEP would be enabled */ + wsec = WEP_ENABLED; + WL_DBG(("Setting wsec to %d for WEP \n", wsec)); + } + /* set wsec */ - err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx); + err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx); if (err < 0) { WL_ERR(("wsec error %d\n", err)); return BCME_ERROR; } -#endif /* CUSTOMER_HW10 */ /* set upper-layer auth */ - err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", WPA_AUTH_NONE, bssidx); + if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_ADHOC) + wpa_val = WPA_AUTH_NONE; + else + wpa_val = WPA_AUTH_DISABLED; + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_val, bssidx); if (err < 0) { WL_ERR(("wpa_auth error %d\n", err)); return BCME_ERROR; @@ -6775,10 +6951,6 @@ wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx) wpa_suite_auth_key_mgmt_t *mgmt; wpa_pmkid_list_t *pmkid; int cnt = 0; -#ifdef MFP - int mfp = 0; - struct bcm_cfg80211 *cfg = g_bcm_cfg; -#endif /* MFP */ u16 suite_count; u8 rsn_cap[2]; @@ -6805,11 +6977,6 @@ wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx) case WPA_CIPHER_AES_CCM: gval = AES_ENABLED; break; -#ifdef BCMWAPI_WPI - case WAPI_CIPHER_SMS4: - gval = SMS4_ENABLED; - break; -#endif default: WL_ERR(("No Security Info\n")); break; @@ -6834,11 +7001,6 @@ wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, 
s32 bssidx) case WPA_CIPHER_AES_CCM: pval = AES_ENABLED; break; -#ifdef BCMWAPI_WPI - case WAPI_CIPHER_SMS4: - pval = SMS4_ENABLED; - break; -#endif default: WL_ERR(("No Security Info\n")); } @@ -6852,27 +7014,17 @@ wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx) suite_count = cnt = ltoh16_ua(&mgmt->count); while (cnt--) { switch (mgmt->list[cnt].type) { - case RSN_AKM_NONE: - wpa_auth = WPA_AUTH_NONE; - break; - case RSN_AKM_UNSPECIFIED: - wpa_auth = WPA2_AUTH_UNSPECIFIED; - break; - case RSN_AKM_PSK: - wpa_auth = WPA2_AUTH_PSK; - break; -#ifdef MFP - case RSN_AKM_MFP_PSK: - wpa_auth |= WPA2_AUTH_PSK; - wsec |= MFP_SHA256; - break; - case RSN_AKM_MFP_1X: + case RSN_AKM_NONE: + wpa_auth |= WPA_AUTH_NONE; + break; + case RSN_AKM_UNSPECIFIED: wpa_auth |= WPA2_AUTH_UNSPECIFIED; - wsec |= MFP_SHA256; + break; + case RSN_AKM_PSK: + wpa_auth |= WPA2_AUTH_PSK; break; -#endif /* MFP */ - default: - WL_ERR(("No Key Mgmt Info\n")); + default: + WL_ERR(("No Key Mgmt Info\n")); } } @@ -6886,15 +7038,6 @@ wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx) wme_bss_disable = 1; } -#ifdef MFP - if (rsn_cap[0] & RSN_CAP_MFPR) { - WL_DBG(("MFP Required \n")); - mfp = WL_MFP_REQUIRED; - } else if (rsn_cap[0] & RSN_CAP_MFPC) { - WL_DBG(("MFP Capable \n")); - mfp = WL_MFP_CAPABLE; - } -#endif /* MFP */ /* set wme_bss_disable to sync RSN Capabilities */ err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx); @@ -6906,7 +7049,8 @@ wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx) WL_DBG(("There is no RSN Capabilities. 
remained len %d\n", len)); } - if ((len -= RSN_CAP_LEN) >= WPA2_PMKID_COUNT_LEN) { + len -= RSN_CAP_LEN; + if (len >= WPA2_PMKID_COUNT_LEN) { pmkid = (wpa_pmkid_list_t *)((u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN); cnt = ltoh16_ua(&pmkid->count); if (cnt != 0) { @@ -6917,18 +7061,6 @@ wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx) /* so don't bother to send down this info to firmware */ } -#ifdef MFP - if ((len -= WPA2_PMKID_COUNT_LEN) >= RSN_GROUPMANAGE_CIPHER_LEN) { - err = wldev_iovar_setbuf_bsscfg(dev, "bip", - (void *)((u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN + WPA2_PMKID_COUNT_LEN), - RSN_GROUPMANAGE_CIPHER_LEN, - cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync); - if (err < 0) { - WL_ERR(("bip set error %d\n", err)); - return BCME_ERROR; - } - } -#endif /* set auth */ err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx); @@ -6936,6 +7068,7 @@ wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx) WL_ERR(("auth error %d\n", err)); return BCME_ERROR; } + /* set wsec */ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx); if (err < 0) { @@ -6943,17 +7076,6 @@ wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx) return BCME_ERROR; } -#ifdef MFP - if (mfp) { - /* This needs to go after wsec otherwise the wsec command will - * overwrite the values set by MFP - */ - if ((err = wldev_iovar_setint_bsscfg(dev, "mfp", mfp, bssidx)) < 0) { - WL_ERR(("MFP Setting failed. 
ret = %d \n", err)); - return err; - } - } -#endif /* MFP */ /* set upper-layer auth */ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx); @@ -7159,6 +7281,25 @@ static u32 wl_get_suite_auth_key_mgmt_type(uint8 type) return ret; } +static u32 wl_get_suite_auth2_key_mgmt_type(uint8 type) +{ + u32 ret = 0; + switch (type) { + case RSN_AKM_NONE: + ret = WPA_AUTH_NONE; + break; + case RSN_AKM_UNSPECIFIED: + ret = WPA2_AUTH_UNSPECIFIED; + break; + case RSN_AKM_PSK: + ret = WPA2_AUTH_PSK; + break; + default: + WL_ERR(("No Key Mgmt Info\n")); + } + return ret; +} + static s32 wl_validate_wpaie_wpa2ie(struct net_device *dev, wpa_ie_fixed_t *wpaie, bcm_tlv_t *wpa2ie, s32 bssidx) @@ -7286,7 +7427,7 @@ wl_validate_wpaie_wpa2ie(struct net_device *dev, wpa_ie_fixed_t *wpaie, mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count]; suite_count = ltoh16_ua(&mgmt->count); ptmp = (u8 *)&mgmt->list[0]; - wpa_auth2 = wl_get_suite_auth_key_mgmt_type(ptmp[DOT11_OUI_LEN]); + wpa_auth2 = wl_get_suite_auth2_key_mgmt_type(ptmp[DOT11_OUI_LEN]); WL_ERR(("wpa ie wpa_suite_auth_key_mgmt count=%d, key_mgmt = 0x%X\n", count, wpa_auth2)); if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) { @@ -7341,9 +7482,16 @@ wl_cfg80211_bcn_validate_sec( struct net_device *dev, struct parsed_ies *ies, u32 dev_role, - s32 bssidx) + s32 bssidx, + bool privacy) { struct bcm_cfg80211 *cfg = g_bcm_cfg; + wl_cfgbss_t *bss = wl_get_cfgbss_by_wdev(cfg, dev->ieee80211_ptr); + + if (!bss) { + WL_ERR(("cfgbss is NULL \n")); + return BCME_ERROR; + } if (dev_role == NL80211_IFTYPE_P2P_GO && (ies->wpa2_ie)) { /* For P2P GO, the sec type is WPA2-PSK */ @@ -7359,7 +7507,7 @@ wl_cfg80211_bcn_validate_sec( #if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED) if ((ies->wpa_ie != NULL && ies->wpa2_ie != NULL)) { if (wl_validate_wpaie_wpa2ie(dev, ies->wpa_ie, ies->wpa2_ie, bssidx) < 0) { - cfg->ap_info->security_mode = false; + bss->security_mode = false; return BCME_ERROR; } } @@ 
-7368,33 +7516,33 @@ wl_cfg80211_bcn_validate_sec( if ((ies->wpa2_ie || ies->wpa_ie) && ((wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 || wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0))) { - cfg->ap_info->security_mode = false; + bss->security_mode = false; return BCME_ERROR; } - cfg->ap_info->security_mode = true; - if (cfg->ap_info->rsn_ie) { - kfree(cfg->ap_info->rsn_ie); - cfg->ap_info->rsn_ie = NULL; + bss->security_mode = true; + if (bss->rsn_ie) { + kfree(bss->rsn_ie); + bss->rsn_ie = NULL; } - if (cfg->ap_info->wpa_ie) { - kfree(cfg->ap_info->wpa_ie); - cfg->ap_info->wpa_ie = NULL; + if (bss->wpa_ie) { + kfree(bss->wpa_ie); + bss->wpa_ie = NULL; } - if (cfg->ap_info->wps_ie) { - kfree(cfg->ap_info->wps_ie); - cfg->ap_info->wps_ie = NULL; + if (bss->wps_ie) { + kfree(bss->wps_ie); + bss->wps_ie = NULL; } if (ies->wpa_ie != NULL) { /* WPAIE */ - cfg->ap_info->rsn_ie = NULL; - cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie, + bss->rsn_ie = NULL; + bss->wpa_ie = kmemdup(ies->wpa_ie, ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); } else if (ies->wpa2_ie != NULL) { /* RSNIE */ - cfg->ap_info->wpa_ie = NULL; - cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie, + bss->wpa_ie = NULL; + bss->rsn_ie = kmemdup(ies->wpa2_ie, ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); } @@ -7402,12 +7550,12 @@ wl_cfg80211_bcn_validate_sec( } #endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */ if (!ies->wpa2_ie && !ies->wpa_ie) { - wl_validate_opensecurity(dev, bssidx); - cfg->ap_info->security_mode = false; + wl_validate_opensecurity(dev, bssidx, privacy); + bss->security_mode = false; } if (ies->wps_ie) { - cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL); + bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL); } } @@ -7415,7 +7563,7 @@ wl_cfg80211_bcn_validate_sec( } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) static s32 
wl_cfg80211_bcn_set_params( struct cfg80211_ap_settings *info, struct net_device *dev, @@ -7467,7 +7615,7 @@ static s32 wl_cfg80211_bcn_set_params( return err; } -#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */ +#endif static s32 wl_cfg80211_parse_ies(u8 *ptr, u32 len, struct parsed_ies *ies) @@ -7501,6 +7649,7 @@ wl_cfg80211_parse_ies(u8 *ptr, u32 len, struct parsed_ies *ies) } +#define MAX_AP_LINK_WAIT_TIME 10000 static s32 wl_cfg80211_bcn_bringup_ap( struct net_device *dev, @@ -7509,20 +7658,39 @@ wl_cfg80211_bcn_bringup_ap( { struct bcm_cfg80211 *cfg = g_bcm_cfg; struct wl_join_params join_params; + struct wiphy *wiphy; bool is_bssup = false; s32 infra = 1; s32 join_params_size = 0; s32 ap = 1; -#ifdef DISABLE_11H_SOFTAP - s32 spect = 0; -#endif /* DISABLE_11H_SOFTAP */ + s32 pm; + s32 wsec; +#ifdef SOFTAP_UAPSD_OFF + uint32 wme_apsd = 0; +#endif /* SOFTAP_UAPSD_OFF */ s32 err = BCME_OK; + s32 is_rsdb_supported = BCME_ERROR; + u32 timeout; +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); +#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */ + + is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE); + if (is_rsdb_supported < 0) + return (-ENODEV); - WL_DBG(("Enter dev_role: %d\n", dev_role)); + WL_DBG(("Enter dev_role:%d bssidx:%d\n", dev_role, bssidx)); /* Common code for SoftAP and P2P GO */ + wiphy = bcmcfg_to_wiphy(cfg); + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); + return -EINVAL; + } wldev_iovar_setint(dev, "mpc", 0); + wl_clr_drv_status(cfg, AP_CREATED, dev); + if (dev_role == NL80211_IFTYPE_P2P_GO) { is_bssup = wl_cfgp2p_bss_isup(dev, bssidx); if (!is_bssup && (ies->wpa2_ie != NULL)) { @@ -7552,29 +7720,73 @@ wl_cfg80211_bcn_bringup_ap( WL_DBG(("Bss is already up\n")); } else if ((dev_role == NL80211_IFTYPE_AP) && (wl_get_drv_status(cfg, AP_CREATING, dev))) { + /* Device role SoftAP */ - err = 
wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true); - if (err < 0) { - WL_ERR(("WLC_DOWN error %d\n", err)); - goto exit; - } - err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true); - if (err < 0) { - WL_ERR(("SET INFRA error %d\n", err)); - goto exit; - } - if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) { - WL_ERR(("setting AP mode failed %d \n", err)); - goto exit; + WL_DBG(("Creating AP bssidx:%d dev_role:%d\n", bssidx, dev_role)); + + /* Clear the status bit after use */ + wl_clr_drv_status(cfg, AP_CREATING, dev); + + /* AP on primary Interface */ + if (bssidx == 0) { + if (is_rsdb_supported) { + if ((err = wl_cfg80211_add_del_bss(cfg, dev, bssidx, + NL80211_IFTYPE_AP, 0, NULL)) < 0) { + WL_ERR(("wl add_del_bss returned error:%d\n", err)); + goto exit; + } + } else if (is_rsdb_supported == 0) { + /* AP mode switch not supported. Try setting up AP explicitly */ + err = wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true); + if (err < 0) { + WL_ERR(("WLC_DOWN error %d\n", err)); + goto exit; + } + err = wldev_iovar_setint(dev, "apsta", 0); + if (err < 0) { + WL_ERR(("wl apsta 0 error %d\n", err)); + goto exit; + } + + if ((err = wldev_ioctl(dev, + WLC_SET_AP, &ap, sizeof(s32), true)) < 0) { + WL_ERR(("setting AP mode failed %d \n", err)); + goto exit; + } + + } + + pm = 0; + if ((err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true)) != 0) { + WL_ERR(("wl PM 0 returned error:%d\n", err)); + goto exit; + } + + err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true); + if (err < 0) { + WL_ERR(("SET INFRA error %d\n", err)); + goto exit; + } + } else if (cfg->cfgdev_bssidx && (bssidx == cfg->cfgdev_bssidx)) { + + WL_DBG(("Bringup SoftAP on virtual Interface bssidx:%d \n", bssidx)); + + if ((err = wl_cfg80211_add_del_bss(cfg, dev, + bssidx, NL80211_IFTYPE_AP, 0, NULL)) < 0) { + WL_ERR(("wl bss ap returned error:%d\n", err)); + goto exit; + } + } -#ifdef DISABLE_11H_SOFTAP - err = wldev_ioctl(dev, WLC_SET_SPECT_MANAGMENT, 
- &spect, sizeof(s32), true); + +#ifdef SOFTAP_UAPSD_OFF + err = wldev_iovar_setbuf_bsscfg(dev, "wme_apsd", &wme_apsd, sizeof(wme_apsd), + cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync); if (err < 0) { - WL_ERR(("SET SPECT_MANAGMENT error %d\n", err)); - goto exit; + WL_ERR(("failed to disable uapsd, error=%d\n", err)); } -#endif /* DISABLE_11H_SOFTAP */ +#endif /* SOFTAP_UAPSD_OFF */ + dhd_conf_set_wme(cfg->pub, 1); err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true); if (unlikely(err)) { @@ -7582,6 +7794,24 @@ wl_cfg80211_bcn_bringup_ap( goto exit; } + err = wldev_iovar_getint(dev, "wsec", (s32 *)&wsec); + if (unlikely(err)) { + WL_ERR(("Could not get wsec %d\n", err)); + goto exit; + } + if ((wsec == WEP_ENABLED) && cfg->wep_key.len) { + WL_DBG(("Applying buffered WEP KEY \n")); + err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &cfg->wep_key, + sizeof(struct wl_wsec_key), cfg->ioctl_buf, + WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + /* clear the key after use */ + memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key)); + if (unlikely(err)) { + WL_ERR(("WLC_SET_KEY error (%d)\n", err)); + goto exit; + } + } + memset(&join_params, 0, sizeof(join_params)); /* join parameters starts with ssid */ join_params_size = sizeof(join_params.ssid); @@ -7591,19 +7821,44 @@ wl_cfg80211_bcn_bringup_ap( /* create softap */ if ((err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, - join_params_size, true)) == 0) { - WL_DBG(("SoftAP set SSID (%s) success\n", join_params.ssid.SSID)); - wl_clr_drv_status(cfg, AP_CREATING, dev); - wl_set_drv_status(cfg, AP_CREATED, dev); + join_params_size, true)) != 0) { + WL_ERR(("SoftAP/GO set ssid failed! 
\n")); + goto exit; + } else { + WL_DBG((" SoftAP SSID \"%s\" \n", join_params.ssid.SSID)); + } + + if (bssidx != 0) { + /* AP on Virtual Interface */ + if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 1)) < 0) { + WL_ERR(("GO Bring up error %d\n", err)); + goto exit; + } } - } + } + /* Wait for Linkup event to mark successful AP/GO bring up */ + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + wl_get_drv_status(cfg, AP_CREATED, dev), msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME)); + if (timeout <= 0 || !wl_get_drv_status(cfg, AP_CREATED, dev)) { + WL_ERR(("Link up didn't come for AP interface. AP/GO creation failed! \n")); +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + if (dhdp->memdump_enabled) { + dhdp->memdump_type = DUMP_TYPE_AP_LINKUP_FAILURE; + dhd_bus_mem_dump(dhdp); + } +#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */ + err = -ENODEV; + goto exit; + } exit: + if (cfg->wep_key.len) + memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key)); return err; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) s32 wl_cfg80211_parse_ap_ies( struct net_device *dev, @@ -7663,8 +7918,8 @@ wl_cfg80211_set_ies( s32 err = BCME_OK; /* Set Beacon IEs to FW */ - if ((err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx, - VNDR_IE_BEACON_FLAG, (u8 *)info->tail, + if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_BEACON_FLAG, (const u8 *)info->tail, info->tail_len)) < 0) { WL_ERR(("Set Beacon IE Failed \n")); } else { @@ -7686,7 +7941,7 @@ wl_cfg80211_set_ies( } /* Set Probe Response IEs to FW */ - if ((err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx, + if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, VNDR_IE_PRBRSP_FLAG, vndr, vndr_ie_len)) < 0) { WL_ERR(("Set Probe Resp IE Failed \n")); } else { @@ -7695,7 +7950,7 @@ wl_cfg80211_set_ies( return err; } -#endif /* LINUX_VERSION >= 
VERSION(3,4,0) || WL_COMPAT_WIRELESS */ +#endif static s32 wl_cfg80211_hostapd_sec( struct net_device *dev, @@ -7704,60 +7959,65 @@ static s32 wl_cfg80211_hostapd_sec( { bool update_bss = 0; struct bcm_cfg80211 *cfg = g_bcm_cfg; + wl_cfgbss_t *bss = wl_get_cfgbss_by_wdev(cfg, dev->ieee80211_ptr); + if (!bss) { + WL_ERR(("cfgbss is NULL \n")); + return -EINVAL; + } if (ies->wps_ie) { - if (cfg->ap_info->wps_ie && - memcmp(cfg->ap_info->wps_ie, ies->wps_ie, ies->wps_ie_len)) { + if (bss->wps_ie && + memcmp(bss->wps_ie, ies->wps_ie, ies->wps_ie_len)) { WL_DBG((" WPS IE is changed\n")); - kfree(cfg->ap_info->wps_ie); - cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL); - } else if (cfg->ap_info->wps_ie == NULL) { + kfree(bss->wps_ie); + bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL); + } else if (bss->wps_ie == NULL) { WL_DBG((" WPS IE is added\n")); - cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL); + bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL); } #if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED) if (ies->wpa_ie != NULL && ies->wpa2_ie != NULL) { WL_ERR(("update bss - wpa_ie and wpa2_ie is not null\n")); - if (!cfg->ap_info->security_mode) { + if (!bss->security_mode) { /* change from open mode to security mode */ update_bss = true; - cfg->ap_info->wpa_ie = + bss->wpa_ie = kmemdup(ies->wpa_ie, ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); - cfg->ap_info->rsn_ie = + bss->rsn_ie = kmemdup(ies->wpa2_ie, ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); } else { /* change from (WPA or WPA2 or WPA/WPA2) to WPA/WPA2 mixed mode */ - if (cfg->ap_info->wpa_ie) { - if (memcmp(cfg->ap_info->wpa_ie, + if (bss->wpa_ie) { + if (memcmp(bss->wpa_ie, ies->wpa_ie, ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN)) { - kfree(cfg->ap_info->wpa_ie); + kfree(bss->wpa_ie); update_bss = true; - cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie, + bss->wpa_ie = kmemdup(ies->wpa_ie, ies->wpa_ie->length 
+ WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); } } else { update_bss = true; - cfg->ap_info->wpa_ie = + bss->wpa_ie = kmemdup(ies->wpa_ie, ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); } - if (cfg->ap_info->rsn_ie) { - if (memcmp(cfg->ap_info->rsn_ie, + if (bss->rsn_ie) { + if (memcmp(bss->rsn_ie, ies->wpa2_ie, ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN)) { update_bss = true; - kfree(cfg->ap_info->rsn_ie); - cfg->ap_info->rsn_ie = + kfree(bss->rsn_ie); + bss->rsn_ie = kmemdup(ies->wpa2_ie, ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, @@ -7766,7 +8026,7 @@ static s32 wl_cfg80211_hostapd_sec( } else { update_bss = true; - cfg->ap_info->rsn_ie = + bss->rsn_ie = kmemdup(ies->wpa2_ie, ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); @@ -7774,7 +8034,7 @@ static s32 wl_cfg80211_hostapd_sec( } WL_ERR(("update_bss=%d\n", update_bss)); if (update_bss) { - cfg->ap_info->security_mode = true; + bss->security_mode = true; wl_cfgp2p_bss(cfg, dev, bssidx, 0); if (wl_validate_wpaie_wpa2ie(dev, ies->wpa_ie, ies->wpa2_ie, bssidx) < 0) { @@ -7787,40 +8047,40 @@ static s32 wl_cfg80211_hostapd_sec( else #endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */ if ((ies->wpa_ie != NULL || ies->wpa2_ie != NULL)) { - if (!cfg->ap_info->security_mode) { + if (!bss->security_mode) { /* change from open mode to security mode */ update_bss = true; if (ies->wpa_ie != NULL) { - cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie, + bss->wpa_ie = kmemdup(ies->wpa_ie, ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); } else { - cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie, + bss->rsn_ie = kmemdup(ies->wpa2_ie, ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); } - } else if (cfg->ap_info->wpa_ie) { + } else if (bss->wpa_ie) { /* change from WPA2 mode to WPA mode */ if (ies->wpa_ie != NULL) { update_bss = true; - kfree(cfg->ap_info->rsn_ie); - cfg->ap_info->rsn_ie = NULL; - cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie, + kfree(bss->rsn_ie); + bss->rsn_ie = NULL; + bss->wpa_ie = 
kmemdup(ies->wpa_ie, ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); - } else if (memcmp(cfg->ap_info->rsn_ie, + } else if (memcmp(bss->rsn_ie, ies->wpa2_ie, ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN)) { update_bss = true; - kfree(cfg->ap_info->rsn_ie); - cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie, + kfree(bss->rsn_ie); + bss->rsn_ie = kmemdup(ies->wpa2_ie, ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); - cfg->ap_info->wpa_ie = NULL; + bss->wpa_ie = NULL; } } if (update_bss) { - cfg->ap_info->security_mode = true; + bss->security_mode = true; wl_cfgp2p_bss(cfg, dev, bssidx, 0); if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 || wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0) { @@ -7838,10 +8098,21 @@ static s32 wl_cfg80211_hostapd_sec( #if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \ 2, 0)) static s32 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) +wl_cfg80211_del_station( + struct wiphy *wiphy, struct net_device *ndev, + struct station_del_parameters *params) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +wl_cfg80211_del_station( + struct wiphy *wiphy, + struct net_device *ndev, + const u8* mac_addr) +#else wl_cfg80211_del_station( struct wiphy *wiphy, struct net_device *ndev, u8* mac_addr) +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */ { struct net_device *dev; struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); @@ -7853,6 +8124,10 @@ wl_cfg80211_del_station( struct maclist *assoc_maclist = (struct maclist *)mac_buf; int num_associated = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) + const u8 *mac_addr = params->mac; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */ + WL_DBG(("Entry\n")); if (mac_addr == NULL) { WL_DBG(("mac_addr is NULL ignore it\n")); @@ -7885,9 +8160,9 @@ wl_cfg80211_del_station( sizeof(scb_val_t), true); if (err < 0) WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err)); - printf("Disconnect STA : %s 
scb_val.val %d\n", + WL_ERR(("Disconnect STA : %s scb_val.val %d\n", bcm_ether_ntoa((const struct ether_addr *)mac_addr, eabuf), - scb_val.val); + scb_val.val)); if (num_associated > 0 && ETHER_ISBCAST(mac_addr)) wl_delay(400); @@ -7896,35 +8171,98 @@ wl_cfg80211_del_station( } static s32 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +wl_cfg80211_change_station( + struct wiphy *wiphy, + struct net_device *dev, + const u8 *mac, + struct station_parameters *params) +#else wl_cfg80211_change_station( struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_parameters *params) +#endif { int err; struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg); + WL_DBG(("SCB_AUTHORIZE mac_addr:"MACDBG" sta_flags_mask:0x%x " + "sta_flags_set:0x%x iface:%s \n", MAC2STRDBG(mac), + params->sta_flags_mask, params->sta_flags_set, dev->name)); + /* Processing only authorize/de-authorize flag for now */ - if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) + if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) { + WL_ERR(("WLC_SCB_AUTHORIZE sta_flags_mask not set \n")); return -ENOTSUPP; + } if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) + err = wldev_ioctl(primary_ndev, WLC_SCB_DEAUTHORIZE, (u8 *)mac, ETH_ALEN, true); +#else err = wldev_ioctl(primary_ndev, WLC_SCB_DEAUTHORIZE, mac, ETH_ALEN, true); +#endif if (err) WL_ERR(("WLC_SCB_DEAUTHORIZE error (%d)\n", err)); return err; } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) + err = wldev_ioctl(primary_ndev, WLC_SCB_AUTHORIZE, (u8 *)mac, ETH_ALEN, true); +#else err = wldev_ioctl(primary_ndev, WLC_SCB_AUTHORIZE, mac, ETH_ALEN, true); +#endif if (err) WL_ERR(("WLC_SCB_AUTHORIZE error (%d)\n", err)); +#ifdef DHD_LOSSLESS_ROAMING + wl_del_roam_timeout(cfg); +#endif return err; } #endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */ 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS) +static s32 +wl_cfg80211_set_scb_timings( + struct bcm_cfg80211 *cfg, + struct net_device *dev) +{ + int err; + u32 ps_pretend; + wl_scb_probe_t scb_probe; + + bzero(&scb_probe, sizeof(wl_scb_probe_t)); + scb_probe.scb_timeout = WL_SCB_TIMEOUT; + scb_probe.scb_activity_time = WL_SCB_ACTIVITY_TIME; + scb_probe.scb_max_probe = WL_SCB_MAX_PROBE; + err = wldev_iovar_setbuf(dev, "scb_probe", (void *)&scb_probe, + sizeof(wl_scb_probe_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN, + &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("set 'scb_probe' failed, error = %d\n", err)); + return err; + } + + ps_pretend = MAX(WL_SCB_MAX_PROBE / 2, WL_MIN_PSPRETEND_THRESHOLD); + err = wldev_iovar_setint(dev, "pspretend_threshold", ps_pretend); + if (unlikely(err)) { + if (err == BCME_UNSUPPORTED) { + /* Ignore error if fw doesn't support the iovar */ + WL_DBG(("wl pspretend_threshold %d set error %d\n", + ps_pretend, err)); + } else { + WL_ERR(("wl pspretend_threshold %d set error %d\n", + ps_pretend, err)); + return err; + } + } + + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) static s32 wl_cfg80211_start_ap( struct wiphy *wiphy, @@ -7936,44 +8274,86 @@ wl_cfg80211_start_ap( struct parsed_ies ies; s32 bssidx = 0; u32 dev_role = 0; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); WL_DBG(("Enter \n")); - if (dev == bcmcfg_to_prmry_ndev(cfg)) { - WL_DBG(("Start AP req on primary iface: Softap\n")); + +#if defined(SUPPORT_RANDOM_MAC_SCAN) + wl_cfg80211_set_random_mac(dev, FALSE); +#endif /* SUPPORT_RANDOM_MAC_SCAN */ + + if ((dev == bcmcfg_to_prmry_ndev(cfg)) || + (dev == ((struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev)))) { + WL_DBG(("Start AP req on iface: %s \n", dev->name)); dev_role = NL80211_IFTYPE_AP; } #if defined(WL_ENABLE_P2P_IF) else if (dev == cfg->p2p_net) { /* Group Add request on p2p0 */ WL_DBG(("Start AP req on P2P iface: GO\n")); -#ifndef P2PONEINT dev = 
bcmcfg_to_prmry_ndev(cfg); -#endif dev_role = NL80211_IFTYPE_P2P_GO; } #endif /* WL_ENABLE_P2P_IF */ - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } - if (p2p_is_on(cfg) && - (bssidx == wl_to_p2p_bss_bssidx(cfg, - P2PAPI_BSSCFG_CONNECTION))) { + + if (p2p_is_on(cfg) && (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO)) { dev_role = NL80211_IFTYPE_P2P_GO; - WL_DBG(("Start AP req on P2P connection iface\n")); + } else if (dev_role == NL80211_IFTYPE_AP) { + dhd->op_mode |= DHD_FLAG_HOSTAP_MODE; + /* + * Enabling Softap is causing issues with STA NDO operations + * as NDO is not interface specific. So disable NDO while + * Softap is enabled + */ + err = dhd_ndo_enable(dhd, FALSE); + WL_DBG(("%s: Disabling NDO on Hostapd mode %d\n", __FUNCTION__, err)); + if (err) { + /* Non fatal error. */ + WL_ERR(("%s: Disabling NDO Failed %d\n", __FUNCTION__, err)); + } else { + cfg->revert_ndo_disable = true; + } + +#ifdef PKT_FILTER_SUPPORT + /* Disable packet filter */ + if (dhd->early_suspended) { + WL_ERR(("Disable pkt_filter\n")); + dhd_enable_packet_filter(0, dhd); + } +#endif /* PKT_FILTER_SUPPORT */ +#ifdef ARP_OFFLOAD_SUPPORT + /* IF SoftAP is enabled, disable arpoe */ + dhd_arp_offload_set(dhd, 0); + dhd_arp_offload_enable(dhd, FALSE); +#endif /* ARP_OFFLOAD_SUPPORT */ + if ((dhd->op_mode & DHD_FLAG_STA_MODE) && wl_cfg80211_is_roam_offload()) { + WL_ERR(("Cleare roam_offload_bssid_list at STA-SoftAP MODE.\n")); + wl_android_set_roam_offload_bssid_list(dev, "0"); + } + } else { + /* only AP or GO role need to be handled here. 
*/ + err = -EINVAL; + goto fail; } - if (!check_dev_role_integrity(cfg, dev_role)) + if (!check_dev_role_integrity(cfg, dev_role)) { + err = -EINVAL; goto fail; + } -#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && !defined(WL_COMPAT_WIRELESS)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) if ((err = wl_cfg80211_set_channel(wiphy, dev, dev->ieee80211_ptr->preset_chandef.chan, - dev->ieee80211_ptr->preset_chandef) < 0)) { + NL80211_CHAN_HT20) < 0)) { WL_ERR(("Set channel failed \n")); goto fail; } -#endif /* ((LINUX_VERSION >= VERSION(3, 6, 0) && !WL_COMPAT_WIRELESS) */ +#endif if ((err = wl_cfg80211_bcn_set_params(info, dev, dev_role, bssidx)) < 0) { @@ -7987,8 +8367,8 @@ wl_cfg80211_start_ap( goto fail; } - if ((wl_cfg80211_bcn_validate_sec(dev, &ies, - dev_role, bssidx)) < 0) + if ((err = wl_cfg80211_bcn_validate_sec(dev, &ies, + dev_role, bssidx, info->privacy)) < 0) { WL_ERR(("Beacon set security failed \n")); goto fail; @@ -8000,6 +8380,12 @@ wl_cfg80211_start_ap( goto fail; } + /* Set GC/STA SCB expiry timings. 
*/ + if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) { + WL_ERR(("scb setting failed \n")); +// goto fail; + } + WL_DBG(("** AP/GO Created **\n")); #ifdef WL_CFG80211_ACL @@ -8027,6 +8413,17 @@ wl_cfg80211_start_ap( if (err) { WL_ERR(("ADD/SET beacon failed\n")); wldev_iovar_setint(dev, "mpc", 1); + if (dev_role == NL80211_IFTYPE_AP) { + dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE; + +#ifdef PKT_FILTER_SUPPORT + /* Enable packet filter */ + if (dhd->early_suspended) { + WL_ERR(("Enable pkt_filter\n")); + dhd_enable_packet_filter(1, dhd); + } +#endif /* PKT_FILTER_SUPPORT */ + } } return err; @@ -8043,66 +8440,108 @@ wl_cfg80211_stop_ap( int ap = 0; s32 bssidx = 0; struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 is_rsdb_supported = BCME_ERROR; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); WL_DBG(("Enter \n")); - if (dev == bcmcfg_to_prmry_ndev(cfg)) { + + is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE); + if (is_rsdb_supported < 0) + return (-ENODEV); + + wl_clr_drv_status(cfg, AP_CREATING, dev); + wl_clr_drv_status(cfg, AP_CREATED, dev); + cfg->ap_oper_channel = 0; + + if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) { dev_role = NL80211_IFTYPE_AP; - } -#if defined(WL_ENABLE_P2P_IF) - else if (dev == cfg->p2p_net) { - /* Group Add request on p2p0 */ -#ifndef P2PONEINT - dev = bcmcfg_to_prmry_ndev(cfg); -#endif + WL_DBG(("stopping AP operation\n")); + } else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) { dev_role = NL80211_IFTYPE_P2P_GO; + WL_DBG(("stopping P2P GO operation\n")); + } else { + WL_ERR(("no AP/P2P GO interface is operational.\n")); + return -EINVAL; } -#endif /* WL_ENABLE_P2P_IF */ - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } - if (p2p_is_on(cfg) && - (bssidx == 
wl_to_p2p_bss_bssidx(cfg, - P2PAPI_BSSCFG_CONNECTION))) { - dev_role = NL80211_IFTYPE_P2P_GO; - } - if (!check_dev_role_integrity(cfg, dev_role)) + if (!check_dev_role_integrity(cfg, dev_role)) { + WL_ERR(("role integrity check failed \n")); + err = -EINVAL; goto exit; + } + + if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 0)) < 0) { + WL_ERR(("bss down error %d\n", err)); + } if (dev_role == NL80211_IFTYPE_AP) { - /* SoftAp on primary Interface. - * Shut down AP and turn on MPC - */ - if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) { - WL_ERR(("setting AP mode failed %d \n", err)); - err = -ENOTSUPP; - goto exit; - } - err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true); - if (err < 0) { - WL_ERR(("SET INFRA error %d\n", err)); - err = -ENOTSUPP; - goto exit; + if (cfg->revert_ndo_disable == true) { + err = dhd_ndo_enable(dhd, TRUE); + WL_DBG(("%s: Enabling back NDO on Softap turn off %d\n", + __FUNCTION__, err)); + if (err) { + WL_ERR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, err)); + } + cfg->revert_ndo_disable = false; } - err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true); - if (unlikely(err)) { - WL_ERR(("WLC_UP error (%d)\n", err)); - err = -EINVAL; - goto exit; +#ifdef PKT_FILTER_SUPPORT + /* Enable packet filter */ + if (dhd->early_suspended) { + WL_ERR(("Enable pkt_filter\n")); + dhd_enable_packet_filter(1, dhd); + } +#endif /* PKT_FILTER_SUPPORT */ +#ifdef ARP_OFFLOAD_SUPPORT + /* IF SoftAP is disabled, enable arpoe back for STA mode. */ + dhd_arp_offload_set(dhd, dhd_arp_mode); + dhd_arp_offload_enable(dhd, TRUE); +#endif /* ARP_OFFLOAD_SUPPORT */ + /* + * Bring down the AP interface by changing role to STA. 
+ * Don't do a down or "WLC_SET_AP 0" since the shared + * interface may be still running + */ + if (is_rsdb_supported) { + if ((err = wl_cfg80211_add_del_bss(cfg, dev, + bssidx, NL80211_IFTYPE_STATION, 0, NULL)) < 0) { + if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), + true)) < 0) { + WL_ERR(("setting AP mode failed %d \n", err)); + err = -ENOTSUPP; + goto exit; + } + } + } else if (is_rsdb_supported == 0) { + // terence 20160426: fix softap issue + if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) { + WL_ERR(("setting AP mode failed %d \n", err)); + err = -ENOTSUPP; + goto exit; + } + err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true); + if (err < 0) { + WL_ERR(("SET INFRA error %d\n", err)); + err = -ENOTSUPP; + goto exit; + } + err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true); + if (unlikely(err)) { + WL_ERR(("WLC_UP error (%d)\n", err)); + err = -EINVAL; + goto exit; + } } - wl_clr_drv_status(cfg, AP_CREATED, dev); /* Turn on the MPC */ wldev_iovar_setint(dev, "mpc", 1); - if (cfg->ap_info) { - kfree(cfg->ap_info->wpa_ie); - kfree(cfg->ap_info->rsn_ie); - kfree(cfg->ap_info->wps_ie); - kfree(cfg->ap_info); - cfg->ap_info = NULL; - } + + wl_cfg80211_clear_per_bss_ies(cfg, bssidx); } else { WL_DBG(("Stopping P2P GO \n")); DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE((dhd_pub_t *)(cfg->pub), @@ -8111,6 +8550,11 @@ wl_cfg80211_stop_ap( } exit: + + if (dev_role == NL80211_IFTYPE_AP) { + /* clear the AP mode */ + dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE; + } return err; } @@ -8135,24 +8579,24 @@ wl_cfg80211_change_beacon( #if defined(WL_ENABLE_P2P_IF) else if (dev == cfg->p2p_net) { /* Group Add request on p2p0 */ -#ifndef P2PONEINT dev = bcmcfg_to_prmry_ndev(cfg); -#endif dev_role = NL80211_IFTYPE_P2P_GO; } #endif /* WL_ENABLE_P2P_IF */ - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + 
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } - if (p2p_is_on(cfg) && - (bssidx == wl_to_p2p_bss_bssidx(cfg, - P2PAPI_BSSCFG_CONNECTION))) { + + if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) { dev_role = NL80211_IFTYPE_P2P_GO; } - if (!check_dev_role_integrity(cfg, dev_role)) + if (!check_dev_role_integrity(cfg, dev_role)) { + err = -EINVAL; goto fail; + } if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) { WL_ERR(("P2P already down status!\n")); @@ -8205,6 +8649,10 @@ wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev, struct parsed_ies ies; bcm_tlv_t *ssid_ie; bool pbc = 0; + bool privacy; + bool is_bss_up = 0; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n", info->interval, info->dtim_period, info->head_len, info->tail_len)); @@ -8214,24 +8662,26 @@ wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev, #if defined(WL_ENABLE_P2P_IF) else if (dev == cfg->p2p_net) { /* Group Add request on p2p0 */ -#ifndef P2PONEINT dev = bcmcfg_to_prmry_ndev(cfg); -#endif dev_role = NL80211_IFTYPE_P2P_GO; } #endif /* WL_ENABLE_P2P_IF */ - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); return BCME_ERROR; } - if (p2p_is_on(cfg) && - (bssidx == wl_to_p2p_bss_bssidx(cfg, - P2PAPI_BSSCFG_CONNECTION))) { + + if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) { dev_role = NL80211_IFTYPE_P2P_GO; + } else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) { + dhd->op_mode |= DHD_FLAG_HOSTAP_MODE; } - if (!check_dev_role_integrity(cfg, dev_role)) + if (!check_dev_role_integrity(cfg, dev_role)) { + err = -ENODEV; goto fail; + } if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) { 
WL_ERR(("P2P already down status!\n")); @@ -8264,9 +8714,9 @@ wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev, goto fail; } - if (wl_cfgp2p_set_management_ie(cfg, dev, bssidx, + if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, VNDR_IE_BEACON_FLAG, (u8 *)info->tail, - info->tail_len) < 0) { + info->tail_len)) < 0) { WL_ERR(("Beacon set IEs failed \n")); goto fail; } else { @@ -8274,9 +8724,9 @@ wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev, } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) - if (wl_cfgp2p_set_management_ie(cfg, dev, bssidx, + if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, VNDR_IE_PRBRSP_FLAG, (u8 *)info->proberesp_ies, - info->proberesp_ies_len) < 0) { + info->proberesp_ies_len)) < 0) { WL_ERR(("ProbeRsp set IEs failed \n")); goto fail; } else { @@ -8284,10 +8734,18 @@ wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev, } #endif - if (!wl_cfgp2p_bss_isup(dev, bssidx) && - (wl_cfg80211_bcn_validate_sec(dev, &ies, dev_role, bssidx) < 0)) + is_bss_up = wl_cfgp2p_bss_isup(dev, bssidx); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + privacy = info->privacy; +#else + privacy = 0; +#endif + if (!is_bss_up && + (wl_cfg80211_bcn_validate_sec(dev, &ies, dev_role, bssidx, privacy) < 0)) { WL_ERR(("Beacon set security failed \n")); + err = -EINVAL; goto fail; } @@ -8307,11 +8765,20 @@ wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev, } } - if (wl_cfg80211_bcn_bringup_ap(dev, &ies, dev_role, bssidx) < 0) { + /* If bss is already up, skip bring up */ + if (!is_bss_up && + (err = wl_cfg80211_bcn_bringup_ap(dev, &ies, dev_role, bssidx)) < 0) + { WL_ERR(("Beacon bring up AP/GO failed \n")); goto fail; } + /* Set GC/STA SCB expiry timings. 
*/ + if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) { + WL_ERR(("scb setting failed \n")); +// goto fail; + } + if (wl_get_drv_status(cfg, AP_CREATED, dev)) { /* Soft AP already running. Update changed params */ if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) { @@ -8335,16 +8802,37 @@ wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev, if (err) { WL_ERR(("ADD/SET beacon failed\n")); wldev_iovar_setint(dev, "mpc", 1); + if (dev_role == NL80211_IFTYPE_AP) { + /* clear the AP mode */ + dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE; + } } return err; } -#endif /* LINUX_VERSION < VERSION(3,4,0) || WL_COMPAT_WIRELESS */ +#endif #ifdef WL_SCHED_SCAN #define PNO_TIME 30 #define PNO_REPEAT 4 #define PNO_FREQ_EXPO_MAX 2 +static bool +is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count) +{ + int i; + + if (!ssid || !ssid_list) + return FALSE; + + for (i = 0; i < count; i++) { + if (ssid->ssid_len == ssid_list[i].ssid_len) { + if (strncmp(ssid->ssid, ssid_list[i].ssid, ssid->ssid_len) == 0) + return TRUE; + } + } + return FALSE; +} + static int wl_cfg80211_sched_scan_start(struct wiphy *wiphy, struct net_device *dev, @@ -8353,13 +8841,19 @@ wl_cfg80211_sched_scan_start(struct wiphy *wiphy, ushort pno_time = PNO_TIME; int pno_repeat = PNO_REPEAT; int pno_freq_expo_max = PNO_FREQ_EXPO_MAX; - wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT]; + wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT]; struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); struct cfg80211_ssid *ssid = NULL; - int ssid_count = 0; + struct cfg80211_ssid *hidden_ssid_list = NULL; + int ssid_cnt = 0; int i; int ret = 0; + if (!request) { + WL_ERR(("Sched scan request was NULL\n")); + return -EINVAL; + } + WL_DBG(("Enter \n")); WL_PNO((">>> SCHED SCAN START\n")); WL_PNO(("Enter n_match_sets:%d n_ssids:%d \n", @@ -8368,36 +8862,36 @@ wl_cfg80211_sched_scan_start(struct wiphy *wiphy, request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max)); - if (!request || 
!request->n_ssids || !request->n_match_sets) { + if (!request->n_ssids || !request->n_match_sets) { WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids)); return -EINVAL; } memset(&ssids_local, 0, sizeof(ssids_local)); - if (request->n_match_sets > 0) { - for (i = 0; i < request->n_match_sets; i++) { - ssid = &request->match_sets[i].ssid; - memcpy(ssids_local[i].SSID, ssid->ssid, ssid->ssid_len); - ssids_local[i].SSID_len = ssid->ssid_len; - WL_PNO((">>> PNO filter set for ssid (%s) \n", ssid->ssid)); - ssid_count++; - } - } - if (request->n_ssids > 0) { - for (i = 0; i < request->n_ssids; i++) { - /* Active scan req for ssids */ - WL_PNO((">>> Active scan req for ssid (%s) \n", request->ssids[i].ssid)); - - /* match_set ssids is a supert set of n_ssid list, so we need - * not add these set seperately - */ + hidden_ssid_list = request->ssids; + } + + for (i = 0; i < request->n_match_sets && ssid_cnt < MAX_PFN_LIST_COUNT; i++) { + ssid = &request->match_sets[i].ssid; + /* No need to include null ssid */ + if (ssid->ssid_len) { + memcpy(ssids_local[ssid_cnt].SSID, ssid->ssid, ssid->ssid_len); + ssids_local[ssid_cnt].SSID_len = ssid->ssid_len; + if (is_ssid_in_list(ssid, hidden_ssid_list, request->n_ssids)) { + ssids_local[ssid_cnt].hidden = TRUE; + WL_PNO((">>> PNO hidden SSID (%s) \n", ssid->ssid)); + } else { + ssids_local[ssid_cnt].hidden = FALSE; + WL_PNO((">>> PNO non-hidden SSID (%s) \n", ssid->ssid)); + } + ssid_cnt++; } } - if (ssid_count) { - if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, request->n_match_sets, + if (ssid_cnt) { + if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, ssid_cnt, pno_time, pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) { WL_ERR(("PNO setup failed!! 
ret=%d \n", ret)); return -EINVAL; @@ -8515,7 +9009,7 @@ static int wl_dump_obss(struct net_device *ndev, cca_msrmnt_query req, if (retry <= 0) { WL_ERR(("failure, dump_obss IOVAR failed\n")); - err = -BCME_ERROR; + err = -EINVAL; goto exit; } @@ -8677,17 +9171,17 @@ static struct cfg80211_ops wl_cfg80211_ops = { .mgmt_tx = wl_cfg80211_mgmt_tx, .mgmt_frame_register = wl_cfg80211_mgmt_frame_register, .change_bss = wl_cfg80211_change_bss, -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) || defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) .set_channel = wl_cfg80211_set_channel, -#endif /* ((LINUX_VERSION < VERSION(3, 6, 0)) || WL_COMPAT_WIRELESS */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) && !defined(WL_COMPAT_WIRELESS) +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) .set_beacon = wl_cfg80211_add_set_beacon, .add_beacon = wl_cfg80211_add_set_beacon, #else .change_beacon = wl_cfg80211_change_beacon, .start_ap = wl_cfg80211_start_ap, .stop_ap = wl_cfg80211_stop_ap, -#endif /* LINUX_VERSION < KERNEL_VERSION(3,4,0) && !WL_COMPAT_WIRELESS */ +#endif #ifdef WL_SCHED_SCAN .sched_scan_start = wl_cfg80211_sched_scan_start, .sched_scan_stop = wl_cfg80211_sched_scan_stop, @@ -8698,10 +9192,10 @@ static struct cfg80211_ops wl_cfg80211_ops = { .change_station = wl_cfg80211_change_station, .mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait, #endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VERSION >= (3,2,0) */ -#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) .tdls_mgmt = wl_cfg80211_tdls_mgmt, .tdls_oper = wl_cfg80211_tdls_oper, -#endif /* LINUX_VERSION > VERSION(3, 2, 0) || WL_COMPAT_WIRELESS */ +#endif #ifdef WL_SUPPORT_ACS .dump_survey = wl_cfg80211_dump_survey, #endif /* WL_SUPPORT_ACS */ @@ -8729,17 +9223,27 @@ s32 wl_mode_to_nl80211_iftype(s32 mode) } #ifdef CONFIG_CFG80211_INTERNAL_REGDB -static int -wl_cfg80211_reg_notifier( - 
struct wiphy *wiphy, - struct regulatory_request *request) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +#define WL_CFG80211_REG_NOTIFIER() static int wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) +#else +#define WL_CFG80211_REG_NOTIFIER() static void wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) +#endif /* kernel version < 3.9.0 */ +#endif + +#ifdef CONFIG_CFG80211_INTERNAL_REGDB +WL_CFG80211_REG_NOTIFIER() { struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy); int ret = 0; + int revinfo = -1; if (!request || !cfg) { WL_ERR(("Invalid arg\n")); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) return -EINVAL; +#else + return; +#endif /* kernel version < 3.9.0 */ } WL_DBG(("ccode: %c%c Initiator: %d\n", @@ -8760,26 +9264,50 @@ wl_cfg80211_reg_notifier( ((request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) ? " 11d AP" : "User"))); if ((ret = wldev_set_country(bcmcfg_to_prmry_ndev(cfg), request->alpha2, - false, (request->initiator == NL80211_REGDOM_SET_BY_USER ? true : false))) < 0) { + false, (request->initiator == NL80211_REGDOM_SET_BY_USER ? 
true : false), + revinfo)) < 0) { WL_ERR(("set country Failed :%d\n", ret)); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) return ret; +#else + return; +#endif /* kernel version < 3.9.0 */ } #endif /* CONFIG_CFG80211_INTERNAL_REGDB */ #ifdef CONFIG_PM -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) static const struct wiphy_wowlan_support brcm_wowlan_support = { .flags = WIPHY_WOWLAN_ANY, + .n_patterns = WL_WOWLAN_MAX_PATTERNS, + .pattern_min_len = WL_WOWLAN_MIN_PATTERN_LEN, + .pattern_max_len = WL_WOWLAN_MAX_PATTERN_LEN, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) + .max_pkt_offset = WL_WOWLAN_MAX_PATTERN_LEN, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */ }; -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */ +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) */ + +/* terence 20161107: remove to fix: + * PC is at kfree+0x174/0x180 + * LR is at nl80211_set_wowlan+0x55c/0x614 [cfg80211] + */ +#if 0 +static struct cfg80211_wowlan brcm_wowlan_config = { + .disconnect = true, + .gtk_rekey_failure = true, + .eap_identity_req = true, + .four_way_handshake = true, +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */ #endif /* CONFIG_PM */ static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, dhd_pub_t *context) { s32 err = 0; -#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) || defined(WL_COMPAT_WIRELESS)) +//#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) dhd_pub_t *dhd = (dhd_pub_t *)context; BCM_REFERENCE(dhd); @@ -8788,7 +9316,7 @@ static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev err = -ENODEV; return err; } -#endif +//#endif wdev->wiphy = wiphy_new(&wl_cfg80211_ops, sizeof(struct bcm_cfg80211)); @@ -8826,6 +9354,8 @@ static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \ 
(defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF)) WL_DBG(("Setting interface combinations for common mode\n")); + if (dhd->conf->num_different_channels >= 0) + common_iface_combinations[0].num_different_channels = dhd->conf->num_different_channels; wdev->wiphy->iface_combinations = common_iface_combinations; wdev->wiphy->n_iface_combinations = ARRAY_SIZE(common_iface_combinations); @@ -8845,23 +9375,28 @@ static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev #endif /* !WL_POWERSAVE_DISABLED */ wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK | WIPHY_FLAG_4ADDR_AP | -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && !defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS | #endif WIPHY_FLAG_4ADDR_STATION; -#if (defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && ((LINUX_VERSION_CODE >= \ - KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)) && !0 - /* Please use supplicant ver >= 76 if FW_ROAM is enabled - * If driver advertises FW_ROAM, older supplicant wouldn't - * send the BSSID & Freq in the connect req command. This - * will delay the ASSOC as the FW need to do a full scan - * before attempting to connect. Supplicant >=76 has patch - * to allow bssid & freq to be sent down to driver even if - * FW ROAM is advertised. +#if ((defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && (LINUX_VERSION_CODE >= \ + KERNEL_VERSION(3, 2, 0))) + /* + * If FW ROAM flag is advertised, upper layer wouldn't provide + * the bssid & freq in the connect command. This will result a + * delay in initial connection time due to firmware doing a full + * channel scan to figure out the channel & bssid. However kernel + * ver >= 3.15, provides bssid_hint & freq_hint and hence kernel + * ver >= 3.15 won't have any issue. So if this flags need to be + * advertised for kernel < 3.15, suggest to use RCC along with it + * to avoid the initial connection delay. 
*/ wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; -#endif -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || defined(WL_COMPAT_WIRELESS) +#endif +#ifdef UNSET_FW_ROAM_WIPHY_FLAG + wdev->wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_FW_ROAM; +#endif /* UNSET_FW_ROAM_WIPHY_FLAG */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_OFFCHAN_TX; #endif @@ -8877,7 +9412,7 @@ static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev wdev->wiphy->max_acl_mac_addrs = MAX_NUM_MAC_FILT; #endif -#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) || defined(WL_COMPAT_WIRELESS)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) /* Supplicant distinguish between the SoftAP mode and other * modes (e.g. P2P, WPS, HS2.0) when it builds the probe * response frame from Supplicant MR1 and Kernel 3.4.0 or @@ -8889,14 +9424,10 @@ static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev wdev->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; wdev->wiphy->probe_resp_offload = 0; } -#endif +#endif #endif /* WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) */ -#ifdef CONFIG_CFG80211_INTERNAL_REGDB - wdev->wiphy->reg_notifier = wl_cfg80211_reg_notifier; -#endif /* CONFIG_CFG80211_INTERNAL_REGDB */ - -#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; #endif @@ -8909,9 +9440,23 @@ static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) wdev->wiphy->wowlan = &brcm_wowlan_support; + /* If this is not provided cfg stack will get disconnect + * during suspend. 
+ */ + /* terence 20161107: remove to fix: + * PC is at kfree+0x174/0x180 + * LR is at nl80211_set_wowlan+0x55c/0x614 [cfg80211] + */ +// wdev->wiphy->wowlan_config = &brcm_wowlan_config; #else wdev->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 10) */ + wdev->wiphy->wowlan.n_patterns = WL_WOWLAN_MAX_PATTERNS; + wdev->wiphy->wowlan.pattern_min_len = WL_WOWLAN_MIN_PATTERN_LEN; + wdev->wiphy->wowlan.pattern_max_len = WL_WOWLAN_MAX_PATTERN_LEN; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) + wdev->wiphy->wowlan.max_pkt_offset = WL_WOWLAN_MAX_PATTERN_LEN; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */ +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */ #endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */ WL_DBG(("Registering custom regulatory)\n")); @@ -8921,13 +9466,13 @@ static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; #endif wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom); - - WL_DBG(("Registering Vendor80211)\n")); - err = cfgvendor_attach(wdev->wiphy); +#if defined(WL_VENDOR_EXT_SUPPORT) + WL_ERR(("Registering Vendor80211\n")); + err = wl_cfgvendor_attach(wdev->wiphy); if (unlikely(err < 0)) { WL_ERR(("Couldn not attach vendor commands (%d)\n", err)); } - +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ /* Now we can register wiphy with cfg80211 module */ err = wiphy_register(wdev->wiphy); if (unlikely(err < 0)) { @@ -8946,20 +9491,34 @@ static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev static void wl_free_wdev(struct bcm_cfg80211 *cfg) { struct wireless_dev *wdev = cfg->wdev; - struct wiphy *wiphy; + struct wiphy *wiphy = NULL; if (!wdev) { WL_ERR(("wdev is invalid\n")); return; } - wiphy = wdev->wiphy; - - cfgvendor_detach(wdev->wiphy); + if (wdev->wiphy) { + wiphy = wdev->wiphy; - wiphy_unregister(wdev->wiphy); - wdev->wiphy->dev.parent = NULL; +#if 
defined(WL_VENDOR_EXT_SUPPORT) + wl_cfgvendor_detach(wdev->wiphy); +#endif /* if defined(WL_VENDOR_EXT_SUPPORT) */ +#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + /* Reset wowlan & wowlan_config before Unregister to avoid Kernel Panic */ + WL_DBG(("wl_free_wdev Clearing wowlan Config \n")); + wdev->wiphy->wowlan = NULL; + wdev->wiphy->wowlan_config = NULL; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */ +#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */ + wiphy_unregister(wdev->wiphy); + wdev->wiphy->dev.parent = NULL; + wdev->wiphy = NULL; + } wl_delete_all_netinfo(cfg); - wiphy_free(wiphy); + if (wiphy) + wiphy_free(wiphy); + /* PLEASE do NOT call any function after wiphy_free, the driver's private structure "cfg", * which is the private part of wiphy, has been freed in wiphy_free !!!!!!!!!!! */ @@ -9134,7 +9693,7 @@ static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi offsetof(struct wl_cfg80211_bss_info, frame_buf)); notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt, u.beacon.variable) + wl_get_ielen(cfg); -#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) freq = ieee80211_channel_to_frequency(notif_bss_info->channel); (void)band->band; #else @@ -9181,6 +9740,7 @@ static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi return -EINVAL; } + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) cfg80211_put_bss(wiphy, cbss); #else @@ -9262,9 +9822,9 @@ wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 event = ntoh32(e->event_type); u32 reason = ntoh32(e->reason); u32 len = ntoh32(e->datalen); + u32 status = ntoh32(e->status); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) \ - && !defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && 
!defined(WL_CFG80211_STA_EVENT) bool isfree = false; u8 *mgmt_frame; u8 bsscfgidx = e->bsscfgidx; @@ -9280,7 +9840,7 @@ wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev, channel_info_t ci; #else struct station_info sinfo; -#endif /* (LINUX_VERSION < VERSION(3,2,0)) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */ +#endif WL_DBG(("event %d status %d reason %d\n", event, ntoh32(e->status), reason)); /* if link down, bsscfg is disabled. */ @@ -9292,13 +9852,24 @@ wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev, return 0; } + if ((event == WLC_E_LINK) && (status == WLC_E_STATUS_SUCCESS) && + (reason == WLC_E_REASON_INITIAL_ASSOC) && + (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP)) { + if (!wl_get_drv_status(cfg, AP_CREATED, ndev)) { + /* AP/GO brought up successfull in firmware */ + WL_ERR(("** AP/GO Link up event **\n")); + wl_set_drv_status(cfg, AP_CREATED, ndev); + wake_up_interruptible(&cfg->netif_change_event); + return 0; + } + } + if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) { WL_ERR(("event %s(%d) status %d reason %d\n", bcmevent_get_name(event), event, ntoh32(e->status), reason)); } -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) \ - && !defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) WL_DBG(("Enter \n")); if (!len && (event == WLC_E_DEAUTH)) { len = 2; /* reason code field */ @@ -9361,7 +9932,7 @@ wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev, kfree(body); return -EINVAL; } -#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) freq = ieee80211_channel_to_frequency(channel); (void)band->band; #else @@ -9375,38 +9946,27 @@ wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev, isfree = true; if (event == 
WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) - cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0); -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) - cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC); -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \ - defined(WL_COMPAT_WIRELESS) +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) && (LINUX_VERSION_CODE < \ + KERNEL_VERSION(3, 18, 0))) cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC); + +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len); #else cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC); -#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */ +#endif } else if (event == WLC_E_DISASSOC_IND) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) - cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0); -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) - cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC); -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \ - defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC); #else cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC); -#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */ +#endif } else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) - cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0); -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) - cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC); -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \ - defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC); #else cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC); -#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */ 
+#endif } exit: @@ -9418,7 +9978,12 @@ wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev, sinfo.filled = 0; if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) && reason == DOT11_SC_SUCCESS) { - sinfo.filled = STATION_INFO_ASSOC_REQ_IES; + /* Linux ver >= 4.0 assoc_req_ies_len is used instead of + * STATION_INFO_ASSOC_REQ_IES flag + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) + sinfo.filled = STA_INFO_BIT(INFO_ASSOC_REQ_IES); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) */ if (!data) { WL_ERR(("No IEs present in ASSOC/REASSOC_IND")); return -EINVAL; @@ -9434,10 +9999,75 @@ wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev, printf("%s: deauthenticated device "MACDBG"\n", __FUNCTION__, MAC2STRDBG(e->addr.octet)); cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC); } -#endif /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */ +#endif return err; } +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +#define MAX_ASSOC_REJECT_ERR_STATUS 5 +int wl_get_connect_failed_status(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e) +{ + u32 status = ntoh32(e->status); + + cfg->assoc_reject_status = 0; + + if (status == WLC_E_STATUS_FAIL) { + WL_ERR(("auth assoc status event=%d e->status %d e->reason %d \n", + ntoh32(cfg->event_auth_assoc.event_type), + (int)ntoh32(cfg->event_auth_assoc.status), + (int)ntoh32(cfg->event_auth_assoc.reason))); + + switch ((int)ntoh32(cfg->event_auth_assoc.status)) { + case WLC_E_STATUS_NO_ACK: + cfg->assoc_reject_status = 1; + break; + case WLC_E_STATUS_FAIL: + cfg->assoc_reject_status = 2; + break; + case WLC_E_STATUS_UNSOLICITED: + cfg->assoc_reject_status = 3; + break; + case WLC_E_STATUS_TIMEOUT: + cfg->assoc_reject_status = 4; + break; + case WLC_E_STATUS_ABORT: + cfg->assoc_reject_status = 5; + break; + default: + break; + } + if (cfg->assoc_reject_status) { + if (ntoh32(cfg->event_auth_assoc.event_type) == WLC_E_ASSOC) { + 
cfg->assoc_reject_status += MAX_ASSOC_REJECT_ERR_STATUS; + } + } + } + + WL_ERR(("assoc_reject_status %d \n", cfg->assoc_reject_status)); + + return 0; +} + +s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len) +{ + struct bcm_cfg80211 *cfg = NULL; + int bytes_written = 0; + + cfg = g_bcm_cfg; + + if (cfg == NULL) { + return -1; + } + + memset(cmd, 0, total_len); + bytes_written = snprintf(cmd, 30, "assoc_reject.status %d", cfg->assoc_reject_status); + + WL_ERR(("cmd: %s \n", cmd)); + + return bytes_written; +} +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + static s32 wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev, const wl_event_msg_t *e) @@ -9446,6 +10076,14 @@ wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 event = ntoh32(e->event_type); struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC); WL_DBG(("event type : %d, reason : %d\n", event, reason)); + +#if defined(DHD_ENABLE_BIGDATA_LOGGING) + memcpy(&cfg->event_auth_assoc, e, sizeof(wl_event_msg_t)); + WL_ERR(("event=%d status %d reason %d \n", + ntoh32(cfg->event_auth_assoc.event_type), + ntoh32(cfg->event_auth_assoc.status), + ntoh32(cfg->event_auth_assoc.reason))); +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ if (sec) { switch (event) { case WLC_E_ASSOC: @@ -9503,31 +10141,32 @@ wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev, return err; } WL_INFORM(("IBSS BSSID is changed from " MACDBG " to " MACDBG "\n", - MAC2STRDBG(cur_bssid), MAC2STRDBG((u8 *)&e->addr))); + MAC2STRDBG(cur_bssid), MAC2STRDBG((const u8 *)&e->addr))); wl_get_assoc_ies(cfg, ndev); - wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID); + wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID); wl_update_bss_info(cfg, ndev, false); #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) - cfg80211_ibss_joined(ndev, (s8 *)&e->addr, channel, GFP_KERNEL); + cfg80211_ibss_joined(ndev, (const 
s8 *)&e->addr, channel, GFP_KERNEL); #else - cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL); + cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL); #endif } else { /* New connection */ - WL_INFORM(("IBSS connected to " MACDBG "\n", MAC2STRDBG((u8 *)&e->addr))); + WL_INFORM(("IBSS connected to " MACDBG "\n", + MAC2STRDBG((const u8 *)&e->addr))); wl_link_up(cfg); wl_get_assoc_ies(cfg, ndev); - wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID); + wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID); wl_update_bss_info(cfg, ndev, false); #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) - cfg80211_ibss_joined(ndev, (s8 *)&e->addr, channel, GFP_KERNEL); + cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL); #else - cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL); + cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL); #endif wl_set_drv_status(cfg, CONNECTED, ndev); active = true; - wl_update_prof(cfg, ndev, NULL, (void *)&active, WL_PROF_ACT); + wl_update_prof(cfg, ndev, NULL, (const void *)&active, WL_PROF_ACT); } } else if ((event == WLC_E_LINK && !(flags & WLC_EVENT_MSG_LINK)) || event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND) { @@ -9544,59 +10183,362 @@ wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev, return err; } -static s32 -wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, - const wl_event_msg_t *e, void *data) +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +#define WiFiALL_OUI "\x50\x6F\x9A" /* Wi-FiAll OUI */ +#define WiFiALL_OUI_LEN 3 +#define WiFiALL_OUI_TYPE 16 + +int wl_get_bss_info(struct bcm_cfg80211 *cfg, struct net_device *dev, uint8 *mac) { - bool act; - struct net_device *ndev = NULL; s32 err = 0; - u32 event = ntoh32(e->event_type); + struct wl_bss_info *bi; + uint8 eabuf[ETHER_ADDR_LEN]; + u32 rate, channel, freq, supported_rate, nss = 0, mcs_map, mode_80211 = 0; + char rate_str[4]; + u8 *ie = NULL; + 
u32 ie_len; + struct wiphy *wiphy; + struct cfg80211_bss *bss; + bcm_tlv_t *interworking_ie = NULL; + bcm_tlv_t *tlv_ie = NULL; + bcm_tlv_t *vht_ie = NULL; + vndr_ie_t *vndrie; + int16 ie_11u_rel_num = -1, ie_mu_mimo_cap = -1; + u32 i, remained_len, count = 0; + char roam_count_str[4], akm_str[4]; + s32 val = 0; - ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + /* get BSS information */ - if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) { - err = wl_notify_connect_status_ap(cfg, ndev, e, data); - } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS) { - err = wl_notify_connect_status_ibss(cfg, ndev, e, data); - } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) { - WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n", - ntoh32(e->event_type), ntoh32(e->status), ndev)); - if (event == WLC_E_ASSOC || event == WLC_E_AUTH) { - wl_get_auth_assoc_status(cfg, ndev, e); - return 0; - } - if (wl_is_linkup(cfg, e, ndev)) { - wl_link_up(cfg); - act = true; - if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) { - printf("wl_bss_connect_done succeeded with " MACDBG "\n", - MAC2STRDBG((u8*)(&e->addr))); - wl_bss_connect_done(cfg, ndev, e, data, true); - dhd_conf_set_fw_string_cmd(cfg->pub, "phy_oclscdenable", cfg->pub->conf->phy_oclscdenable, 0, FALSE); - WL_DBG(("joined in BSS network \"%s\"\n", - ((struct wlc_ssid *) - wl_read_prof(cfg, ndev, WL_PROF_SSID))->SSID)); - } - wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT); - wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID); - dhd_conf_set_wme(cfg->pub); + strncpy(cfg->bss_info, "x x x x x x x x x x x x x", GET_BSS_INFO_LEN); - } else if (wl_is_linkdown(cfg, e)) { -#ifdef P2PLISTEN_AP_SAMECHN - if (ndev == bcmcfg_to_prmry_ndev(cfg)) { + *(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX); + + err = wldev_ioctl(dev, WLC_GET_BSS_INFO, cfg->extra_buf, WL_EXTRA_BUF_MAX, false); + if (unlikely(err)) { + WL_ERR(("Could not get bss info %d\n", err)); + cfg->roam_count = 0; + return -1; + } + + if 
(!mac) { + WL_ERR(("mac is null \n")); + cfg->roam_count = 0; + return -1; + } + + memcpy(eabuf, mac, ETHER_ADDR_LEN); + + bi = (struct wl_bss_info *)(cfg->extra_buf + 4); + channel = wf_chspec_ctlchan(bi->chanspec); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) + freq = ieee80211_channel_to_frequency(channel); +#else + if (channel > 14) { + freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ); + } else { + freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ); + } +#endif + + err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false); + if (err) { + WL_ERR(("Could not get rate (%d)\n", err)); + snprintf(rate_str, sizeof(rate_str), "x"); // Unknown + + } else { + rate = dtoh32(rate); + snprintf(rate_str, sizeof(rate_str), "%d", (rate/2)); + } + + //supported maximum rate + supported_rate = (bi->rateset.rates[bi->rateset.count - 1] & 0x7f) / 2; + + if (supported_rate < 12) { + mode_80211 = 0; //11b maximum rate is 11Mbps. 11b mode + } else { + //It's not HT Capable case. 
+ if (channel > 14) { + mode_80211 = 3; // 11a mode + } else { + mode_80211 = 1; // 11g mode + } + } + + if (bi->n_cap) { + /* check Rx MCS Map for HT */ + nss = 0; + mode_80211 = 2; + for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) { + int8 bitmap = 0xFF; + if (i == MAX_STREAMS_SUPPORTED-1) { + bitmap = 0x7F; + } + if (bi->basic_mcs[i] & bitmap) { + nss++; + } + } + } + + if (bi->vht_cap) { + nss = 0; + mode_80211 = 4; + for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) { + mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap)); + if (mcs_map != VHT_CAP_MCS_MAP_NONE) { + nss++; + } + } + } + + if (nss) { + nss = nss - 1; + } + + wiphy = bcmcfg_to_wiphy(cfg); + bss = cfg80211_get_bss(wiphy, NULL, eabuf, + bi->SSID, strlen(bi->SSID), WLAN_CAPABILITY_ESS, + WLAN_CAPABILITY_ESS); + + if (!bss) { + WL_ERR(("Could not find the AP\n")); + } else { +#if defined(WL_CFG80211_P2P_DEV_IF) + ie = (u8 *)bss->ies->data; + ie_len = bss->ies->len; +#else + ie = bss->information_elements; + ie_len = bss->len_information_elements; +#endif /* WL_CFG80211_P2P_DEV_IF */ + } + + if (ie) { + ie_mu_mimo_cap = 0; + ie_11u_rel_num = 0; + + if (bi->vht_cap) { + if ((vht_ie = bcm_parse_tlvs(ie, (u32)ie_len, + DOT11_MNG_VHT_CAP_ID)) != NULL) { + ie_mu_mimo_cap = (vht_ie->data[2] & 0x08) >> 3; + } + } + + if ((interworking_ie = bcm_parse_tlvs(ie, (u32)ie_len, + DOT11_MNG_INTERWORKING_ID)) != NULL) { + if ((tlv_ie = bcm_parse_tlvs(ie, (u32)ie_len, DOT11_MNG_VS_ID)) != NULL) { + remained_len = ie_len; + + while (tlv_ie) { + if (count > MAX_VNDR_IE_NUMBER) + break; + + if (tlv_ie->id == DOT11_MNG_VS_ID) { + vndrie = (vndr_ie_t *) tlv_ie; + + if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) { + WL_ERR(("%s: invalid vndr ie." 
+ "length is too small %d\n", + __FUNCTION__, vndrie->len)); + break; + } + + if (!bcmp(vndrie->oui, + (u8*)WiFiALL_OUI, WiFiALL_OUI_LEN) && + (vndrie->data[0] == WiFiALL_OUI_TYPE)) + { + WL_ERR(("Found Wi-FiAll OUI oui.\n")); + ie_11u_rel_num = vndrie->data[1]; + ie_11u_rel_num = (ie_11u_rel_num & 0xf0)>>4; + ie_11u_rel_num += 1; + + break; + } + } + count++; + tlv_ie = bcm_next_tlv(tlv_ie, &remained_len); + } + } + } + } + + for (i = 0; i < bi->SSID_len; i++) { + if (bi->SSID[i] == ' ') { + bi->SSID[i] = '_'; + } + } + + //0 : None, 1 : OKC, 2 : FT, 3 : CCKM + err = wldev_iovar_getint(dev, "wpa_auth", &val); + if (unlikely(err)) { + WL_ERR(("could not get wpa_auth (%d)\n", err)); + snprintf(akm_str, sizeof(akm_str), "x"); // Unknown + } else { + WL_ERR(("wpa_auth val %d \n", val)); +#if defined(BCMEXTCCX) + if (val & (WPA_AUTH_CCKM | WPA2_AUTH_CCKM)) { + snprintf(akm_str, sizeof(akm_str), "3"); + } else +#endif + if (val & WPA2_AUTH_FT) { + snprintf(akm_str, sizeof(akm_str), "2"); + } else if (val & (WPA_AUTH_UNSPECIFIED | WPA2_AUTH_UNSPECIFIED)) { + snprintf(akm_str, sizeof(akm_str), "1"); + } else { + snprintf(akm_str, sizeof(akm_str), "0"); + } + } + + if (cfg->roam_offload) { + snprintf(roam_count_str, sizeof(roam_count_str), "x"); // Unknown + } else { + snprintf(roam_count_str, sizeof(roam_count_str), "%d", cfg->roam_count); + } + cfg->roam_count = 0; + + WL_ERR(("BSSID:" MACDBG " SSID %s \n", MAC2STRDBG(eabuf), bi->SSID)); + WL_ERR(("freq:%d, BW:%s, RSSI:%d dBm, Rate:%d Mbps, 11mode:%d, stream:%d," + "MU-MIMO:%d, Passpoint:%d, SNR:%d, Noise:%d, \n" + "akm:%s roam:%s \n", + freq, wf_chspec_to_bw_str(bi->chanspec), + dtoh32(bi->RSSI), (rate / 2), mode_80211, nss, + ie_mu_mimo_cap, ie_11u_rel_num, bi->SNR, bi->phy_noise, + akm_str, roam_count_str)); + + if (ie) { + snprintf(cfg->bss_info, GET_BSS_INFO_LEN, + "%02x:%02x:%02x %d %s %d %s %d %d %d %d %d %d %s %s", + eabuf[0], eabuf[1], eabuf[2], + freq, wf_chspec_to_bw_str(bi->chanspec), + dtoh32(bi->RSSI), 
rate_str, mode_80211, nss, + ie_mu_mimo_cap, ie_11u_rel_num, + bi->SNR, bi->phy_noise, akm_str, roam_count_str); + } else { + //ie_mu_mimo_cap and ie_11u_rel_num is unknow. + snprintf(cfg->bss_info, GET_BSS_INFO_LEN, + "%02x:%02x:%02x %d %s %d %s %d %d x x %d %d %s %s", + eabuf[0], eabuf[1], eabuf[2], + freq, wf_chspec_to_bw_str(bi->chanspec), + dtoh32(bi->RSSI), rate_str, mode_80211, nss, + bi->SNR, bi->phy_noise, akm_str, roam_count_str); + } + + + return 0; +} + +s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len) +{ + struct bcm_cfg80211 *cfg = NULL; + + cfg = g_bcm_cfg; + + if (cfg == NULL) { + return -1; + } + + memset(cmd, 0, total_len); + memcpy(cmd, cfg->bss_info, GET_BSS_INFO_LEN); + + WL_ERR(("cmd: %s \n", cmd)); + + return GET_BSS_INFO_LEN; +} + +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + +static s32 +wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + bool act; + struct net_device *ndev = NULL; + s32 err = 0; + u32 event = ntoh32(e->event_type); + struct wiphy *wiphy = NULL; + struct cfg80211_bss *bss = NULL; + struct wlc_ssid *ssid = NULL; + u8 *bssid = 0; + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) { + err = wl_notify_connect_status_ap(cfg, ndev, e, data); + } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS) { + err = wl_notify_connect_status_ibss(cfg, ndev, e, data); + } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) { + WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n", + ntoh32(e->event_type), ntoh32(e->status), ndev)); + if (event == WLC_E_ASSOC || event == WLC_E_AUTH) { + wl_get_auth_assoc_status(cfg, ndev, e); + return 0; + } + DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub); + if (wl_is_linkup(cfg, e, ndev)) { + wl_link_up(cfg); + act = true; + if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) { +#ifdef DHD_LOSSLESS_ROAMING + bool is_connected = 
wl_get_drv_status(cfg, CONNECTED, ndev); +#endif + + printf("wl_bss_connect_done succeeded with " MACDBG "\n", + MAC2STRDBG((const u8*)(&e->addr))); + wl_bss_connect_done(cfg, ndev, e, data, true); + dhd_conf_set_intiovar(cfg->pub, WLC_SET_VAR, "phy_oclscdenable", cfg->pub->conf->phy_oclscdenable, 0, FALSE); + WL_DBG(("joined in BSS network \"%s\"\n", + ((struct wlc_ssid *) + wl_read_prof(cfg, ndev, WL_PROF_SSID))->SSID)); +#ifdef DHD_LOSSLESS_ROAMING + if (event == WLC_E_LINK && is_connected && + !cfg->roam_offload) { + wl_bss_roaming_done(cfg, ndev, e, data); + } +#endif /* DHD_LOSSLESS_ROAMING */ + + } + wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT); + wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID); + dhd_conf_set_wme(cfg->pub, 0); + + } else if (wl_is_linkdown(cfg, e)) { +#ifdef DHD_LOSSLESS_ROAMING + wl_del_roam_timeout(cfg); +#endif +#ifdef P2PLISTEN_AP_SAMECHN + if (ndev == bcmcfg_to_prmry_ndev(cfg)) { wl_cfg80211_set_p2p_resp_ap_chn(ndev, 0); cfg->p2p_resp_apchn_status = false; WL_DBG(("p2p_resp_apchn_status Turn OFF \n")); } #endif /* P2PLISTEN_AP_SAMECHN */ + wl_cfg80211_cancel_scan(cfg); + +#if defined(DHD_ENABLE_BIGDATA_LOGGING) + if (wl_get_drv_status(cfg, CONNECTED, ndev)) { + wl_get_bss_info(cfg, ndev, (u8*)(&e->addr)); + } +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + /* Explicitly calling unlink to remove BSS in CFG */ + wiphy = bcmcfg_to_wiphy(cfg); + ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID); + bssid = (u8 *)wl_read_prof(cfg, ndev, WL_PROF_BSSID); + if (ssid && bssid) { + bss = cfg80211_get_bss(wiphy, NULL, bssid, + ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS, + WLAN_CAPABILITY_ESS); + if (bss) { + cfg80211_unlink_bss(wiphy, bss); + } + } - if (cfg->scan_request) - wl_notify_escan_complete(cfg, ndev, true, true); if (wl_get_drv_status(cfg, CONNECTED, ndev)) { scb_val_t scbval; u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID); s32 reason = 0; + struct ether_addr bssid_dongle; + struct 
ether_addr bssid_null = {{0, 0, 0, 0, 0, 0}}; + if (event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND) reason = ntoh32(e->reason); /* WLAN_REASON_UNSPECIFIED is used for hang up event in Android */ @@ -9605,17 +10547,46 @@ wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, printf("link down if %s may call cfg80211_disconnected. " "event : %d, reason=%d from " MACDBG "\n", ndev->name, event, ntoh32(e->reason), - MAC2STRDBG((u8*)(&e->addr))); - if (!cfg->roam_offload && - memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) { - WL_ERR(("BSSID of event is not the connected BSSID" - "(ignore it) cur: " MACDBG " event: " MACDBG"\n", - MAC2STRDBG(curbssid), MAC2STRDBG((u8*)(&e->addr)))); - return 0; + MAC2STRDBG((const u8*)(&e->addr))); + + /* roam offload does not sync BSSID always, get it from dongle */ + if (cfg->roam_offload) { + if (wldev_ioctl(ndev, WLC_GET_BSSID, &bssid_dongle, + sizeof(bssid_dongle), false) == BCME_OK) { + /* if not roam case, it would return null bssid */ + if (memcmp(&bssid_dongle, &bssid_null, + ETHER_ADDR_LEN) != 0) { + curbssid = (u8 *)&bssid_dongle; + } + } + } + if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) { + bool fw_assoc_state = TRUE; + dhd_pub_t *dhd = (dhd_pub_t *)cfg->pub; + fw_assoc_state = dhd_is_associated(dhd, e->ifidx, &err); + if (!fw_assoc_state) { + WL_ERR(("Even sends up even different BSSID" + " cur: " MACDBG " event: " MACDBG"\n", + MAC2STRDBG(curbssid), + MAC2STRDBG((const u8*)(&e->addr)))); + } else { + WL_ERR(("BSSID of event is not the connected BSSID" + "(ignore it) cur: " MACDBG + " event: " MACDBG"\n", + MAC2STRDBG(curbssid), + MAC2STRDBG((const u8*)(&e->addr)))); + return 0; + } } if (!memcmp(ndev->name, WL_P2P_INTERFACE_PREFIX, strlen(WL_P2P_INTERFACE_PREFIX))) { // terence 20130703: Fix for wrong group_capab (timing issue) cfg->p2p_disconnected = 1; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + if (wl_get_drv_status(cfg, DISCONNECTING, ndev)) { + 
CFG80211_DISCONNECTED(ndev, reason, NULL, 0, + false, GFP_KERNEL); + } +#endif } memcpy(&cfg->disconnected_bssid, curbssid, ETHER_ADDR_LEN); wl_clr_drv_status(cfg, CONNECTED, ndev); @@ -9633,9 +10604,11 @@ wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, WL_ERR(("WLC_DISASSOC error %d\n", err)); err = 0; } - cfg80211_disconnected(ndev, reason, NULL, 0, GFP_KERNEL); + CFG80211_DISCONNECTED(ndev, reason, NULL, 0, + false, GFP_KERNEL); wl_link_down(cfg); wl_init_prof(cfg, ndev); + memset(&cfg->last_roamed_addr, 0, ETHER_ADDR_LEN); } } else if (wl_get_drv_status(cfg, CONNECTING, ndev)) { @@ -9657,14 +10630,19 @@ wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, } else if (wl_is_nonetwork(cfg, e)) { printf("connect failed event=%d e->status %d e->reason %d \n", event, (int)ntoh32(e->status), (int)ntoh32(e->reason)); +#if defined(DHD_ENABLE_BIGDATA_LOGGING) + if (event == WLC_E_SET_SSID) { + wl_get_connect_failed_status(cfg, e); + } +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ /* Clean up any pending scan request */ - if (cfg->scan_request) - wl_notify_escan_complete(cfg, ndev, true, true); + wl_cfg80211_cancel_scan(cfg); if (wl_get_drv_status(cfg, CONNECTING, ndev)) wl_bss_connect_done(cfg, ndev, e, data, false); } else { WL_DBG(("%s nothing\n", __FUNCTION__)); } + DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub); } else { WL_ERR(("Invalid ndev status %d\n", wl_get_mode_by_netdev(cfg, ndev))); @@ -9680,33 +10658,7 @@ void wl_cfg80211_set_rmc_pid(int pid) WL_DBG(("set pid for rmc event : pid=%d\n", pid)); } -#ifdef WLAIBSS -void wl_cfg80211_set_txfail_pid(int pid) -{ - struct bcm_cfg80211 *cfg = g_bcm_cfg; - if (pid > 0) - cfg->aibss_txfail_pid = pid; - WL_DBG(("set pid for aibss fail event : pid=%d\n", pid)); -} - -static s32 -wl_notify_aibss_txfail(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, - const wl_event_msg_t *e, void *data) -{ - u32 evt = ntoh32(e->event_type); - int ret = -1; - - if (cfg->aibss_txfail_pid 
!= 0) { - ret = wl_netlink_send_msg(cfg->aibss_txfail_pid, AIBSS_EVENT_TXFAIL, - cfg->aibss_txfail_seq++, (void *)&e->addr, ETHER_ADDR_LEN); - } - - WL_DBG(("txfail : evt=%d, pid=%d, ret=%d, mac=" MACF "\n", - evt, cfg->aibss_txfail_pid, ret, ETHERP_TO_MACF(&e->addr))); - return ret; -} -#endif /* WLAIBSS */ - +#ifdef WL_RELMCAST static s32 wl_notify_rmc_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data) @@ -9730,7 +10682,7 @@ wl_notify_rmc_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, WL_DBG(("rmcevent : evt=%d, pid=%d, ret=%d\n", evt, cfg->rmc_event_pid, ret)); return ret; } - +#endif /* WL_RELMCAST */ static s32 wl_notify_roaming_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data) @@ -9740,6 +10692,9 @@ wl_notify_roaming_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, s32 err = 0; u32 event = be32_to_cpu(e->event_type); u32 status = be32_to_cpu(e->status); +#ifdef DHD_LOSSLESS_ROAMING + struct wl_security *sec; +#endif WL_DBG(("Enter \n")); ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); @@ -9753,79 +10708,306 @@ wl_notify_roaming_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, return err; if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status == WLC_E_STATUS_SUCCESS) { - if (wl_get_drv_status(cfg, CONNECTED, ndev)) + if (wl_get_drv_status(cfg, CONNECTED, ndev)) { +#ifdef DHD_LOSSLESS_ROAMING + if (cfg->roam_offload) { + wl_bss_roaming_done(cfg, ndev, e, data); + wl_del_roam_timeout(cfg); + } + else { + sec = wl_read_prof(cfg, ndev, WL_PROF_SEC); + /* In order to reduce roaming delay, wl_bss_roaming_done is + * early called with WLC_E_LINK event. It is called from + * here only if WLC_E_LINK event is blocked for specific + * security type. + */ + if (IS_AKM_SUITE_FT(sec)) { + wl_bss_roaming_done(cfg, ndev, e, data); + } + /* Roam timer is deleted mostly from wl_cfg80211_change_station + * after roaming is finished successfully. 
We need to delete + * the timer from here only for some security types that aren't + * using wl_cfg80211_change_station to authorize SCB + */ + if (IS_AKM_SUITE_FT(sec) || IS_AKM_SUITE_CCKM(sec)) { + wl_del_roam_timeout(cfg); + } + } +#else wl_bss_roaming_done(cfg, ndev, e, data); - else +#endif /* DHD_LOSSLESS_ROAMING */ + } else { wl_bss_connect_done(cfg, ndev, e, data, true); + } act = true; wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT); - wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID); - dhd_conf_set_wme(cfg->pub); + wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID); + } +#ifdef DHD_LOSSLESS_ROAMING + else if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status != WLC_E_STATUS_SUCCESS) { + wl_del_roam_timeout(cfg); } +#endif return err; } -static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev) +#ifdef QOS_MAP_SET +/* up range from low to high with up value */ +static bool +up_table_set(uint8 *up_table, uint8 up, uint8 low, uint8 high) { - wl_assoc_info_t assoc_info; - struct wl_connect_info *conn_info = wl_to_conn(cfg); - s32 err = 0; + int i; - WL_DBG(("Enter \n")); - err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, cfg->extra_buf, - WL_ASSOC_INFO_MAX, NULL); - if (unlikely(err)) { - WL_ERR(("could not get assoc info (%d)\n", err)); - return err; + if (up > 7 || low > high || low >= UP_TABLE_MAX || high >= UP_TABLE_MAX) { + return FALSE; } - memcpy(&assoc_info, cfg->extra_buf, sizeof(wl_assoc_info_t)); - assoc_info.req_len = htod32(assoc_info.req_len); - assoc_info.resp_len = htod32(assoc_info.resp_len); - assoc_info.flags = htod32(assoc_info.flags); - if (conn_info->req_ie_len) { - conn_info->req_ie_len = 0; - bzero(conn_info->req_ie, sizeof(conn_info->req_ie)); + + for (i = low; i <= high; i++) { + up_table[i] = up; } - if (conn_info->resp_ie_len) { - conn_info->resp_ie_len = 0; - bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie)); + + return TRUE; +} + +/* set user priority table 
*/ +static void +wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie) +{ + uint8 len; + + if (up_table == NULL || qos_map_ie == NULL) { + return; } - if (assoc_info.req_len) { - err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, cfg->extra_buf, - WL_ASSOC_INFO_MAX, NULL); - if (unlikely(err)) { - WL_ERR(("could not get assoc req (%d)\n", err)); - return err; - } - conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req); - if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) { - conn_info->req_ie_len -= ETHER_ADDR_LEN; + + /* clear table to check table was set or not */ + memset(up_table, 0xff, UP_TABLE_MAX); + + /* length of QoS Map IE must be 16+n*2, n is number of exceptions */ + if (qos_map_ie != NULL && qos_map_ie->id == DOT11_MNG_QOS_MAP_ID && + (len = qos_map_ie->len) >= QOS_MAP_FIXED_LENGTH && + (len % 2) == 0) { + uint8 *except_ptr = (uint8 *)qos_map_ie->data; + uint8 except_len = len - QOS_MAP_FIXED_LENGTH; + uint8 *range_ptr = except_ptr + except_len; + int i; + + /* fill in ranges */ + for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) { + uint8 low = range_ptr[i]; + uint8 high = range_ptr[i + 1]; + if (low == 255 && high == 255) { + continue; + } + + if (!up_table_set(up_table, i / 2, low, high)) { + /* clear the table on failure */ + memset(up_table, 0xff, UP_TABLE_MAX); + return; + } } - if (conn_info->req_ie_len <= MAX_REQ_LINE) - memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len); - else { - WL_ERR(("IE size %d above max %d size \n", - conn_info->req_ie_len, MAX_REQ_LINE)); - return err; + + /* update exceptions */ + for (i = 0; i < except_len; i += 2) { + uint8 dscp = except_ptr[i]; + uint8 up = except_ptr[i+1]; + + /* exceptions with invalid dscp/up are ignored */ + up_table_set(up_table, up, dscp, dscp); } - } else { - conn_info->req_ie_len = 0; } - if (assoc_info.resp_len) { - err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, cfg->extra_buf, - WL_ASSOC_INFO_MAX, NULL); - if (unlikely(err)) { - 
WL_ERR(("could not get assoc resp (%d)\n", err)); + + if (wl_dbg_level & WL_DBG_DBG) { + prhex("UP table", up_table, UP_TABLE_MAX); + } +} + +/* get user priority table */ +uint8 * +wl_get_up_table(void) +{ + return (uint8 *)(g_bcm_cfg->up_table); +} +#endif /* QOS_MAP_SET */ + +#ifdef DHD_LOSSLESS_ROAMING +static s32 +wl_notify_roam_prep_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 err = 0; + struct wl_security *sec; + struct net_device *ndev; + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + sec = wl_read_prof(cfg, ndev, WL_PROF_SEC); + /* Disable Lossless Roaming for specific AKM suite + * Any other AKM suite can be added below if transition time + * is delayed because of Lossless Roaming + * and it causes any certication failure + */ + if (IS_AKM_SUITE_FT(sec)) { + return err; + } + + dhdp->dequeue_prec_map = 1 << PRIO_8021D_NC; + /* Restore flow control */ + dhd_txflowcontrol(dhdp, ALL_INTERFACES, OFF); + + mod_timer(&cfg->roam_timeout, jiffies + msecs_to_jiffies(WL_ROAM_TIMEOUT_MS)); + + return err; +} +#endif /* DHD_LOSSLESS_ROAMING */ + +static s32 +wl_notify_idsup_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 err = 0; +#if defined(WL_VENDOR_EXT_SUPPORT) + u32 idsup_status; + u32 reason = ntoh32(e->reason); + struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ + + if (cfg->roam_offload) { +#if defined(WL_VENDOR_EXT_SUPPORT) + switch (reason) { + case WLC_E_SUP_WPA_PSK_TMO: + idsup_status = IDSUP_EVENT_4WAY_HANDSHAKE_TIMEOUT; + break; + case WLC_E_SUP_OTHER: + idsup_status = IDSUP_EVENT_SUCCESS; + break; + default: + WL_ERR(("Other type at IDSUP. 
" + "event=%d e->status %d e->reason %d \n", + (int)ntoh32(e->event_type), (int)ntoh32(e->status), + (int)ntoh32(e->reason))); + return err; + } + + err = wl_cfgvendor_send_async_event(wiphy, ndev, + BRCM_VENDOR_EVENT_IDSUP_STATUS, &idsup_status, sizeof(u32)); +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ + } + return err; +} + +#ifdef CUSTOM_EVENT_PM_WAKE +static s32 +wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 err = 0; + struct net_device *ndev = NULL; + u8 *pbuf = NULL; + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + pbuf = kzalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL); + if (pbuf == NULL) { + WL_ERR(("failed to allocate local pbuf\n")); + return -ENOMEM; + } + + err = wldev_iovar_getbuf_bsscfg(ndev, "dump", + "pm", strlen("pm"), pbuf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync); + + if (err) { + WL_ERR(("dump ioctl err = %d", err)); + } else { + WL_ERR(("PM status : %s\n", pbuf)); + } + + if (pbuf) { + kfree(pbuf); + } + return err; +} +#endif /* CUSTOM_EVENT_PM_WAKE */ + +static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + wl_assoc_info_t assoc_info; + struct wl_connect_info *conn_info = wl_to_conn(cfg); + s32 err = 0; +#ifdef QOS_MAP_SET + bcm_tlv_t * qos_map_ie = NULL; +#endif /* QOS_MAP_SET */ + + WL_DBG(("Enter \n")); + err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, cfg->extra_buf, + WL_ASSOC_INFO_MAX, NULL); + if (unlikely(err)) { + WL_ERR(("could not get assoc info (%d)\n", err)); + return err; + } + memcpy(&assoc_info, cfg->extra_buf, sizeof(wl_assoc_info_t)); + assoc_info.req_len = htod32(assoc_info.req_len); + assoc_info.resp_len = htod32(assoc_info.resp_len); + assoc_info.flags = htod32(assoc_info.flags); + if (conn_info->req_ie_len) { + conn_info->req_ie_len = 0; + bzero(conn_info->req_ie, sizeof(conn_info->req_ie)); + } + if (conn_info->resp_ie_len) { + conn_info->resp_ie_len = 0; + bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie)); + } + if 
(assoc_info.req_len) { + err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, cfg->extra_buf, + WL_ASSOC_INFO_MAX, NULL); + if (unlikely(err)) { + WL_ERR(("could not get assoc req (%d)\n", err)); + return err; + } + conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req); + if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) { + conn_info->req_ie_len -= ETHER_ADDR_LEN; + } + if (conn_info->req_ie_len <= MAX_REQ_LINE) + memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len); + else { + WL_ERR(("IE size %d above max %d size \n", + conn_info->req_ie_len, MAX_REQ_LINE)); + return err; + } + } else { + conn_info->req_ie_len = 0; + } + if (assoc_info.resp_len) { + err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, cfg->extra_buf, + WL_ASSOC_INFO_MAX, NULL); + if (unlikely(err)) { + WL_ERR(("could not get assoc resp (%d)\n", err)); return err; } conn_info->resp_ie_len = assoc_info.resp_len -sizeof(struct dot11_assoc_resp); - if (conn_info->resp_ie_len <= MAX_REQ_LINE) + if (conn_info->resp_ie_len <= MAX_REQ_LINE) { memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len); - else { + } else { WL_ERR(("IE size %d above max %d size \n", conn_info->resp_ie_len, MAX_REQ_LINE)); return err; } + +#ifdef QOS_MAP_SET + /* find qos map set ie */ + if ((qos_map_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len, + DOT11_MNG_QOS_MAP_ID)) != NULL) { + WL_DBG((" QoS map set IE found in assoc response\n")); + if (!cfg->up_table) { + cfg->up_table = kmalloc(UP_TABLE_MAX, GFP_KERNEL); + } + wl_set_up_table(cfg->up_table, qos_map_ie); + } else { + kfree(cfg->up_table); + cfg->up_table = NULL; + } +#endif /* QOS_MAP_SET */ } else { conn_info->resp_ie_len = 0; } @@ -9835,41 +11017,55 @@ static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev) return err; } -static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params, +static s32 wl_ch_to_chanspec(struct net_device *dev, int ch, struct 
wl_join_params *join_params, size_t *join_params_size) { - chanspec_t chanspec = 0; + struct bcm_cfg80211 *cfg; + s32 bssidx = -1; + chanspec_t chanspec = 0, chspec; + if (ch != 0) { - join_params->params.chanspec_num = 1; - join_params->params.chanspec_list[0] = ch; + cfg = (struct bcm_cfg80211 *)wiphy_priv(dev->ieee80211_ptr->wiphy); + if (cfg && cfg->rcc_enabled) { + } else { + join_params->params.chanspec_num = 1; + join_params->params.chanspec_list[0] = ch; - if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL) - chanspec |= WL_CHANSPEC_BAND_2G; - else - chanspec |= WL_CHANSPEC_BAND_5G; + if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL) + chanspec |= WL_CHANSPEC_BAND_2G; + else + chanspec |= WL_CHANSPEC_BAND_5G; + + /* Get the min_bw set for the interface */ + chspec = wl_cfg80211_ulb_get_min_bw_chspec(dev->ieee80211_ptr, bssidx); + if (chspec == INVCHANSPEC) { + WL_ERR(("Invalid chanspec \n")); + return -EINVAL; + } + chanspec |= chspec; + chanspec |= WL_CHANSPEC_CTL_SB_NONE; - chanspec |= WL_CHANSPEC_BW_20; - chanspec |= WL_CHANSPEC_CTL_SB_NONE; + *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE + + join_params->params.chanspec_num * sizeof(chanspec_t); - *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE + - join_params->params.chanspec_num * sizeof(chanspec_t); + join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK; + join_params->params.chanspec_list[0] |= chanspec; + join_params->params.chanspec_list[0] = + wl_chspec_host_to_driver(join_params->params.chanspec_list[0]); - join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK; - join_params->params.chanspec_list[0] |= chanspec; - join_params->params.chanspec_list[0] = - wl_chspec_host_to_driver(join_params->params.chanspec_list[0]); + join_params->params.chanspec_num = + htod32(join_params->params.chanspec_num); + } - join_params->params.chanspec_num = - htod32(join_params->params.chanspec_num); WL_DBG(("join_params->params.chanspec_list[0]= %X, %d channels\n", 
join_params->params.chanspec_list[0], join_params->params.chanspec_num)); } + return 0; } static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam) { - struct cfg80211_bss *bss; struct wl_bss_info *bi; struct wlc_ssid *ssid; struct bcm_tlv *tim; @@ -9881,8 +11077,6 @@ static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 err = 0; struct wiphy *wiphy; u32 channel; - struct ieee80211_channel *cur_channel; - u32 freq, band; wiphy = bcmcfg_to_wiphy(cfg); @@ -9901,49 +11095,19 @@ static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bi = (struct wl_bss_info *)(cfg->extra_buf + 4); channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec)); wl_update_prof(cfg, ndev, NULL, &channel, WL_PROF_CHAN); -#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS) - freq = ieee80211_channel_to_frequency(channel); -#else - band = (channel <= CH_MAX_2G_CHANNEL) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - freq = ieee80211_channel_to_frequency(channel, band); -#endif - cur_channel = ieee80211_get_channel(wiphy, freq); - - bss = cfg80211_get_bss(wiphy, cur_channel, curbssid, - ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS, - WLAN_CAPABILITY_ESS); - - if (!bss) { - WL_DBG(("Could not find the AP\n")); - if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) { - WL_ERR(("Bssid doesn't match\n")); - err = -EIO; - goto update_bss_info_out; - } - err = wl_inform_single_bss(cfg, bi, roam); - if (unlikely(err)) - goto update_bss_info_out; - ie = ((u8 *)bi) + bi->ie_offset; - ie_len = bi->ie_length; - beacon_interval = cpu_to_le16(bi->beacon_period); - } else { - WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid)); -#if defined(WL_CFG80211_P2P_DEV_IF) - ie = (u8 *)bss->ies->data; - ie_len = bss->ies->len; -#else - ie = bss->information_elements; - ie_len = bss->len_information_elements; -#endif /* WL_CFG80211_P2P_DEV_IF */ - beacon_interval = 
bss->beacon_interval; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) - cfg80211_put_bss(wiphy, bss); -#else - cfg80211_put_bss(bss); -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */ + if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) { + WL_ERR(("Bssid doesn't match\n")); + err = -EIO; + goto update_bss_info_out; } + err = wl_inform_single_bss(cfg, bi, roam); + if (unlikely(err)) + goto update_bss_info_out; + ie = ((u8 *)bi) + bi->ie_offset; + ie_len = bi->ie_length; + beacon_interval = cpu_to_le16(bi->beacon_period); tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM); if (tim) { dtim_period = tim->data[1]; @@ -9979,27 +11143,42 @@ wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev, struct wl_connect_info *conn_info = wl_to_conn(cfg); s32 err = 0; u8 *curbssid; -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); struct ieee80211_supported_band *band; struct ieee80211_channel *notify_channel = NULL; u32 *channel; u32 freq; -#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */ +#endif -#ifdef WLFBT - uint32 data_len = 0; - if (data) - data_len = ntoh32(e->datalen); -#endif /* WLFBT */ + +// terence 20161014: fix for roaming issue +#if 0 + if (memcmp(&cfg->last_roamed_addr, &e->addr, ETHER_ADDR_LEN) == 0) { + WL_INFORM(("BSSID already updated\n")); + return err; + } + + /* Skip calling cfg80211_roamed If current bssid and + * roamed bssid are same. Also clear timer roam_timeout. 
+ */ + curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID); + if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) == 0) { + WL_ERR(("BSS already present, Skipping roamed event to upper layer\n")); +#ifdef DHD_LOSSLESS_ROAMING + wl_del_roam_timeout(cfg); +#endif /* DHD_LOSSLESS_ROAMING */ + return err; + } +#endif wl_get_assoc_ies(cfg, ndev); - wl_update_prof(cfg, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID); + wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet), WL_PROF_BSSID); curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID); wl_update_bss_info(cfg, ndev, true); wl_update_pmklist(ndev, cfg->pmk_list, err); -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) /* channel info for cfg80211_roamed introduced in 2.6.39-rc1 */ channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN); if (*channel <= CH_MAX_2G_CHANNEL) @@ -10008,22 +11187,13 @@ wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev, band = wiphy->bands[IEEE80211_BAND_5GHZ]; freq = ieee80211_channel_to_frequency(*channel, band->band); notify_channel = ieee80211_get_channel(wiphy, freq); -#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */ -#ifdef WLFBT - /* back up the given FBT key for the further supplicant request, - * currently not checking the FBT is enabled for current BSS in DHD, - * because the supplicant decides to take it or not. 
- */ - if (data && (data_len == FBT_KEYLEN)) { - memcpy(cfg->fbt_key, data, FBT_KEYLEN); - } -#endif /* WLFBT */ +#endif printf("wl_bss_roaming_done succeeded to " MACDBG "\n", - MAC2STRDBG((u8*)(&e->addr))); - dhd_conf_set_wme(cfg->pub); + MAC2STRDBG((const u8*)(&e->addr))); + dhd_conf_set_wme(cfg->pub, 0); cfg80211_roamed(ndev, -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) notify_channel, #endif curbssid, @@ -10031,8 +11201,13 @@ wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev, conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL); WL_DBG(("Report roaming result\n")); + memcpy(&cfg->last_roamed_addr, (void *)&e->addr, ETHER_ADDR_LEN); wl_set_drv_status(cfg, CONNECTED, ndev); +#if defined(DHD_ENABLE_BIGDATA_LOGGING) + cfg->roam_count++; +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + return err; } @@ -10044,7 +11219,7 @@ wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev, struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC); #if defined(CUSTOM_SET_CPUCORE) dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); -#endif +#endif s32 err = 0; u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID); if (!sec) { @@ -10076,7 +11251,8 @@ wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev, wl_clr_drv_status(cfg, CONNECTING, ndev); if (completed) { wl_get_assoc_ies(cfg, ndev); - wl_update_prof(cfg, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID); + wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet), + WL_PROF_BSSID); curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID); wl_update_bss_info(cfg, ndev, false); wl_update_pmklist(ndev, cfg->pmk_list, err); @@ -10093,7 +11269,7 @@ wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev, if (wl_get_chan_isvht80(ndev, dhd)) { if (ndev == bcmcfg_to_prmry_ndev(cfg)) dhd->chan_isvht80 |= DHD_FLAG_STA_MODE; /* STA mode */ - else if (ndev == wl_to_p2p_bss_ndev(cfg, 
P2PAPI_BSSCFG_CONNECTION)) + else if (is_p2p_group_iface(ndev->ieee80211_ptr)) dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE; /* p2p mode */ dhd_set_cpucore(dhd, TRUE); } @@ -10113,7 +11289,7 @@ wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev, GFP_KERNEL); if (completed) { WL_INFORM(("Report connect result - connection succeeded\n")); - dhd_conf_set_wme(cfg->pub); + dhd_conf_set_wme(cfg->pub, 0); } else WL_ERR(("Report connect result - connection failed\n")); } @@ -10143,7 +11319,7 @@ wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, else key_type = NL80211_KEYTYPE_PAIRWISE; - cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1, + cfg80211_michael_mic_failure(ndev, (const u8 *)&e->addr, key_type, -1, NULL, GFP_KERNEL); mutex_unlock(&cfg->usr_sync); @@ -10182,7 +11358,7 @@ wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, #ifndef WL_SCHED_SCAN mutex_lock(&cfg->usr_sync); /* TODO: Use cfg80211_sched_scan_results(wiphy); */ - cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL); + CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL); mutex_unlock(&cfg->usr_sync); #else /* If cfg80211 scheduled scan is supported, report the pno results via sched @@ -10194,6 +11370,84 @@ wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, } #endif /* PNO_SUPPORT */ +#ifdef GSCAN_SUPPORT +static s32 +wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 err = 0; + u32 event = be32_to_cpu(e->event_type); + void *ptr; + int send_evt_bytes = 0; + int batch_event_result_dummy = 0; + struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); + u32 len = ntoh32(e->datalen); + + switch (event) { + case WLC_E_PFN_SWC: + ptr = dhd_dev_swc_scan_event(ndev, data, &send_evt_bytes); + if (send_evt_bytes) { + wl_cfgvendor_send_async_event(wiphy, ndev, + GOOGLE_GSCAN_SIGNIFICANT_EVENT, 
ptr, send_evt_bytes); + kfree(ptr); + } + break; + case WLC_E_PFN_BEST_BATCHING: + err = dhd_dev_retrieve_batch_scan(ndev); + if (err < 0) { + WL_ERR(("Batch retrieval already in progress %d\n", err)); + } else { + wl_cfgvendor_send_async_event(wiphy, ndev, + GOOGLE_GSCAN_BATCH_SCAN_EVENT, + &batch_event_result_dummy, sizeof(int)); + } + break; + case WLC_E_PFN_SCAN_COMPLETE: + batch_event_result_dummy = WIFI_SCAN_COMPLETE; + wl_cfgvendor_send_async_event(wiphy, ndev, + GOOGLE_SCAN_COMPLETE_EVENT, + &batch_event_result_dummy, sizeof(int)); + break; + case WLC_E_PFN_BSSID_NET_FOUND: + ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes, + HOTLIST_FOUND); + if (ptr) { + wl_cfgvendor_send_hotlist_event(wiphy, ndev, + ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT); + dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_FOUND); + } + break; + case WLC_E_PFN_BSSID_NET_LOST: + /* WLC_E_PFN_BSSID_NET_LOST is conflict shared with WLC_E_PFN_SCAN_ALLGONE + * We currently do not use WLC_E_PFN_SCAN_ALLGONE, so if we get it, ignore + */ + if (len) { + ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes, + HOTLIST_LOST); + if (ptr) { + wl_cfgvendor_send_hotlist_event(wiphy, ndev, + ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT); + dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_LOST); + } + } + break; + case WLC_E_PFN_GSCAN_FULL_RESULT: + ptr = dhd_dev_process_full_gscan_result(ndev, data, &send_evt_bytes); + if (ptr) { + wl_cfgvendor_send_async_event(wiphy, ndev, + GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes); + kfree(ptr); + } + break; + default: + WL_ERR(("%s: Unexpected event! 
- %d\n", __FUNCTION__, event)); + + } + return err; +} +#endif /* GSCAN_SUPPORT */ + static s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data) @@ -10204,6 +11458,9 @@ wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, u32 len = WL_SCAN_BUF_MAX; s32 err = 0; unsigned long flags; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + struct cfg80211_scan_info info; +#endif WL_DBG(("Enter \n")); if (!wl_get_drv_status(cfg, SCANNING, ndev)) { @@ -10246,7 +11503,12 @@ wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, del_timer_sync(&cfg->scan_timeout); spin_lock_irqsave(&cfg->cfgdrv_lock, flags); if (cfg->scan_request) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + info.aborted = false; + cfg80211_scan_done(cfg->scan_request, &info); +#else cfg80211_scan_done(cfg->scan_request, false); +#endif cfg->scan_request = NULL; } spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); @@ -10336,6 +11598,33 @@ wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev #endif /* WL_CFG80211_SYNC_GON */ } +#if defined(WLTDLS) +bool wl_cfg80211_is_tdls_tunneled_frame(void *frame, u32 frame_len) +{ + unsigned char *data; + + if (frame == NULL) { + WL_ERR(("Invalid frame \n")); + return false; + } + + if (frame_len < 5) { + WL_ERR(("Invalid frame length [%d] \n", frame_len)); + return false; + } + + data = frame; + + if (!memcmp(data, TDLS_TUNNELED_PRB_REQ, 5) || + !memcmp(data, TDLS_TUNNELED_PRB_RESP, 5)) { + WL_DBG(("TDLS Vendor Specific Received type\n")); + return true; + } + + return false; +} +#endif /* WLTDLS */ + int wl_cfg80211_get_ioctl_version(void) { @@ -10357,6 +11646,9 @@ wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, wifi_p2p_pub_act_frame_t *act_frm = NULL; wifi_p2p_action_frame_t *p2p_act_frm = NULL; wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL; +#if defined(WLTDLS) && defined(TDLS_MSG_ONLY_WFD) 
+ dhd_pub_t *dhdp; +#endif /* WLTDLS && TDLS_MSG_ONLY_WFD */ wl_event_rx_frame_data_t *rxframe = (wl_event_rx_frame_data_t*)data; u32 event = ntoh32(e->event_type); @@ -10364,14 +11656,10 @@ wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, u8 bsscfgidx = e->bsscfgidx; u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t); u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK)); - bool retval; memset(&bssid, 0, ETHER_ADDR_LEN); ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); -#ifdef P2PONEINT - WL_DBG((" device name is ndev %s \n", ndev->name)); -#endif if (channel <= CH_MAX_2G_CHANNEL) band = wiphy->bands[IEEE80211_BAND_2GHZ]; @@ -10381,7 +11669,7 @@ wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, WL_ERR(("No valid band\n")); return -EINVAL; } -#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) freq = ieee80211_channel_to_frequency(channel); (void)band->band; #else @@ -10415,13 +11703,6 @@ wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, (void) p2p_act_frm; } else if (wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN], mgmt_frame_len - DOT11_MGMT_HDR_LEN)) { -#ifdef WL_SDO - if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { - WL_ERR(("SD offload is in progress. 
Don't report the" - "frame via rx_mgmt path\n")); - goto exit; - } -#endif sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *) (&mgmt_frame[DOT11_MGMT_HDR_LEN]); @@ -10437,21 +11718,52 @@ wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, } (void) sd_act_frm; #ifdef WLTDLS - } else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) { - WL_DBG((" TDLS Action Frame Received type = %d \n", - mgmt_frame[DOT11_MGMT_HDR_LEN + 1])); - + } else if ((mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) || + (wl_cfg80211_is_tdls_tunneled_frame( + &mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN))) { + if (mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) { + WL_ERR((" TDLS Action Frame Received type = %d \n", + mgmt_frame[DOT11_MGMT_HDR_LEN + 1])); + } +#ifdef TDLS_MSG_ONLY_WFD + dhdp = (dhd_pub_t *)(cfg->pub); + if (!dhdp->tdls_mode) { + WL_DBG((" TDLS Frame filtered \n")); + return 0; + } +#else if (mgmt_frame[DOT11_MGMT_HDR_LEN + 1] == TDLS_ACTION_SETUP_RESP) { cfg->tdls_mgmt_frame = mgmt_frame; cfg->tdls_mgmt_frame_len = mgmt_frame_len; cfg->tdls_mgmt_freq = freq; return 0; } - - } else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_VENDOR_SPECIFIC) { - WL_DBG((" TDLS Vendor Specific Received type \n")); -#endif +#endif /* TDLS_MSG_ONLY_WFD */ +#endif /* WLTDLS */ +#ifdef QOS_MAP_SET + } else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == DOT11_ACTION_CAT_QOS) { + /* update QoS map set table */ + bcm_tlv_t * qos_map_ie = NULL; + if ((qos_map_ie = bcm_parse_tlvs(&mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN, + DOT11_MNG_QOS_MAP_ID)) != NULL) { + WL_DBG((" QoS map set IE found in QoS action frame\n")); + if (!cfg->up_table) { + cfg->up_table = kmalloc(UP_TABLE_MAX, GFP_KERNEL); + } + wl_set_up_table(cfg->up_table, qos_map_ie); + } else { + kfree(cfg->up_table); + cfg->up_table = NULL; + } +#endif /* QOS_MAP_SET */ } else { + /* + * if we got normal action frame and ndev is p2p0, + * we have to change ndev from 
p2p0 to wlan0 + */ + if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) { u8 action = 0; @@ -10546,6 +11858,9 @@ wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, * GO-NEG Phase */ if (cfg->p2p && +#if defined(P2P_IE_MISSING_FIX) + cfg->p2p_prb_noti && +#endif wl_get_p2p_status(cfg, GO_NEG_PHASE)) { WL_DBG(("Filtering P2P probe_req while " "being in GO-Neg state\n")); @@ -10554,27 +11869,24 @@ wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, } } -#ifdef P2PONEINT - if (ndev == cfg->p2p_net && ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) { - ndev = bcmcfg_to_prmry_ndev(cfg); - cfgdev = ndev_to_cfgdev(ndev); - } - WL_DBG((" device name is ndev %s \n", ndev->name)); -#endif + if (discover_cfgdev(cfgdev, cfg)) + WL_DBG(("Rx Managment frame For P2P Discovery Interface \n")); + else + WL_DBG(("Rx Managment frame For Iface (%s) \n", ndev->name)); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) - retval = cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0); -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) - retval = cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC); + cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0); +#elif(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) + cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC); #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \ defined(WL_COMPAT_WIRELESS) - retval = cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC); + cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC); #else - retval = cfg80211_rx_mgmt(cfgdev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC); -#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */ + cfg80211_rx_mgmt(cfgdev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC); +#endif /* LINUX_VERSION >= VERSION(3, 14, 0) */ - WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d) retval (%d)\n", - 
mgmt_frame_len, ntoh32(e->datalen), channel, freq, retval)); + WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n", + mgmt_frame_len, ntoh32(e->datalen), channel, freq)); exit: if (isfree) kfree(mgmt_frame); @@ -10754,18 +12066,21 @@ static void wl_init_event_handler(struct bcm_cfg80211 *cfg) #ifdef PNO_SUPPORT cfg->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status; #endif /* PNO_SUPPORT */ -#ifdef WL_SDO - cfg->evt_handler[WLC_E_SERVICE_FOUND] = wl_svc_resp_handler; - cfg->evt_handler[WLC_E_P2PO_ADD_DEVICE] = wl_notify_device_discovery; - cfg->evt_handler[WLC_E_P2PO_DEL_DEVICE] = wl_notify_device_discovery; -#endif +#ifdef GSCAN_SUPPORT + cfg->evt_handler[WLC_E_PFN_BEST_BATCHING] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_SCAN_COMPLETE] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_GSCAN_FULL_RESULT] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_SWC] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_BSSID_NET_FOUND] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_BSSID_NET_LOST] = wl_notify_gscan_event; +#endif /* GSCAN_SUPPORT */ #ifdef WLTDLS cfg->evt_handler[WLC_E_TDLS_PEER_EVENT] = wl_tdls_event_handler; #endif /* WLTDLS */ cfg->evt_handler[WLC_E_BSSID] = wl_notify_roaming_status; -#ifdef WLAIBSS - cfg->evt_handler[WLC_E_AIBSS_TXFAIL] = wl_notify_aibss_txfail; -#endif /* WLAIBSS */ +#ifdef WL_RELMCAST + cfg->evt_handler[WLC_E_RMC_EVENT] = wl_notify_rmc_status; +#endif #ifdef BT_WIFI_HANDOVER cfg->evt_handler[WLC_E_BT_WIFI_HANDOVER_REQ] = wl_notify_bt_wifi_handover_req; #endif @@ -10773,7 +12088,15 @@ static void wl_init_event_handler(struct bcm_cfg80211 *cfg) cfg->evt_handler[WLC_E_NAN] = wl_cfgnan_notify_nan_status; cfg->evt_handler[WLC_E_PROXD] = wl_cfgnan_notify_proxd_status; #endif /* WL_NAN */ - cfg->evt_handler[WLC_E_RMC_EVENT] = wl_notify_rmc_status; + cfg->evt_handler[WLC_E_CSA_COMPLETE_IND] = wl_csa_complete_ind; +#ifdef DHD_LOSSLESS_ROAMING + cfg->evt_handler[WLC_E_ROAM_PREP] = 
wl_notify_roam_prep_status; +#endif + cfg->evt_handler[WLC_E_AP_STARTED] = wl_ap_start_ind; +#ifdef CUSTOM_EVENT_PM_WAKE + cfg->evt_handler[WLC_E_EXCESS_PM_WAKE_EVENT] = wl_check_pmstatus; +#endif /* CUSTOM_EVENT_PM_WAKE */ + cfg->evt_handler[WLC_E_PSK_SUP] = wl_notify_idsup_status; } #if defined(STATIC_WL_PRIV_STRUCT) @@ -10796,6 +12119,7 @@ wl_deinit_escan_result_buf(struct bcm_cfg80211 *cfg) static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg) { WL_DBG(("Enter \n")); + cfg->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL); if (unlikely(!cfg->scan_results)) { WL_ERR(("Scan results alloc failed\n")); @@ -10832,12 +12156,6 @@ static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg) WL_ERR(("pmk list alloc failed\n")); goto init_priv_mem_out; } - cfg->sta_info = (void *)kzalloc(sizeof(*cfg->sta_info), GFP_KERNEL); - if (unlikely(!cfg->sta_info)) { - WL_ERR(("sta info alloc failed\n")); - goto init_priv_mem_out; - } - #if defined(STATIC_WL_PRIV_STRUCT) cfg->conn_info = (void *)kzalloc(sizeof(*cfg->conn_info), GFP_KERNEL); if (unlikely(!cfg->conn_info)) { @@ -10861,8 +12179,14 @@ static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg) INIT_WORK(&cfg->afx_hdl->work, wl_cfg80211_afx_handler); } - return 0; - +#ifdef WLTDLS + if (cfg->tdls_mgmt_frame) { + kfree(cfg->tdls_mgmt_frame); + cfg->tdls_mgmt_frame = NULL; + } +#endif /* WLTDLS */ + return 0; + init_priv_mem_out: wl_deinit_priv_mem(cfg); @@ -10885,8 +12209,6 @@ static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg) cfg->extra_buf = NULL; kfree(cfg->pmk_list); cfg->pmk_list = NULL; - kfree(cfg->sta_info); - cfg->sta_info = NULL; #if defined(STATIC_WL_PRIV_STRUCT) kfree(cfg->conn_info); cfg->conn_info = NULL; @@ -10900,19 +12222,6 @@ static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg) cfg->afx_hdl = NULL; } - if (cfg->ap_info) { - kfree(cfg->ap_info->wpa_ie); - kfree(cfg->ap_info->rsn_ie); - kfree(cfg->ap_info->wps_ie); - kfree(cfg->ap_info); - cfg->ap_info = NULL; - } -#ifdef WLTDLS - if 
(cfg->tdls_mgmt_frame) { - kfree(cfg->tdls_mgmt_frame); - cfg->tdls_mgmt_frame = NULL; - } -#endif /* WLTDLS */ } static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg) @@ -10935,45 +12244,128 @@ static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg) PROC_STOP(&cfg->event_tsk); } +void wl_terminate_event_handler(void) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + if (cfg) { + wl_destroy_event_handler(cfg); + wl_flush_eq(cfg); + } +} + static void wl_scan_timeout(unsigned long data) { wl_event_msg_t msg; struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data; -// struct net_device *dev = bcmcfg_to_prmry_ndev(cfg); + struct wireless_dev *wdev = NULL; + struct net_device *ndev = NULL; + struct wl_scan_results *bss_list; + struct wl_bss_info *bi = NULL; + s32 i; + u32 channel; +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); + uint32 prev_memdump_mode = dhdp->memdump_enabled; +#endif /* DHD_DEBUG && BCMPCIE */ if (!(cfg->scan_request)) { WL_ERR(("timer expired but no scan request\n")); return; } + + bss_list = wl_escan_get_buf(cfg, FALSE); + if (!bss_list) { + WL_ERR(("bss_list is null. 
Didn't receive any partial scan results\n")); + } else { + WL_ERR(("scanned AP count (%d)\n", bss_list->count)); + + bi = next_bss(bss_list, bi); + for_each_bss(bss_list, bi, i) { + channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec)); + WL_ERR(("SSID :%s Channel :%d\n", bi->SSID, channel)); + } + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + if (cfg->scan_request->dev) + wdev = cfg->scan_request->dev->ieee80211_ptr; +#else + wdev = cfg->scan_request->wdev; +#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */ + if (!wdev) { + WL_ERR(("No wireless_dev present\n")); + return; + } + ndev = wdev_to_wlc_ndev(wdev, cfg); + bzero(&msg, sizeof(wl_event_msg_t)); WL_ERR(("timer expired\n")); +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + if (dhdp->memdump_enabled) { + dhdp->memdump_enabled = DUMP_MEMFILE; + dhdp->memdump_type = DUMP_TYPE_SCAN_TIMEOUT; + dhd_bus_mem_dump(dhdp); + dhdp->memdump_enabled = prev_memdump_mode; + } +#endif /* DHD_DEBUG && BCMPCIE */ msg.event_type = hton32(WLC_E_ESCAN_RESULT); msg.status = hton32(WLC_E_STATUS_TIMEOUT); msg.reason = 0xFFFFFFFF; - wl_cfg80211_event(bcmcfg_to_prmry_ndev(cfg), &msg, NULL); + wl_cfg80211_event(ndev, &msg, NULL); +#ifdef CUSTOMER_HW4_DEBUG + if (!wl_scan_timeout_dbg_enabled) + wl_scan_timeout_dbg_set(); +#endif /* CUSTOMER_HW4_DEBUG */ // terence 20130729: workaround to fix out of memory in firmware -// if (dhd_conf_get_chip(dhd_get_pub(dev)) == BCM43362_CHIP_ID) { +// if (dhd_conf_get_chip(dhd_get_pub(ndev)) == BCM43362_CHIP_ID) { // WL_ERR(("Send hang event\n")); -// net_os_send_hang_message(dev); +// net_os_send_hang_message(ndev); // } } +#ifdef DHD_LOSSLESS_ROAMING +static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); + + /* restore prec_map to ALLPRIO */ + dhdp->dequeue_prec_map = ALLPRIO; + if (timer_pending(&cfg->roam_timeout)) { + del_timer_sync(&cfg->roam_timeout); + } + +} + +static void 
wl_roam_timeout(unsigned long data) +{ + struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data; + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); + + WL_ERR(("roam timer expired\n")); + + /* restore prec_map to ALLPRIO */ + dhdp->dequeue_prec_map = ALLPRIO; +} + +#endif /* DHD_LOSSLESS_ROAMING */ + static s32 wl_cfg80211_netdev_notifier_call(struct notifier_block * nb, - unsigned long state, - void *ptr) + unsigned long state, void *ptr) { #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)) struct net_device *dev = ptr; #else - // terence 20150701: fix for p2p connection issue struct net_device *dev = netdev_notifier_info_to_dev(ptr); -#endif - struct wireless_dev *wdev = dev->ieee80211_ptr; +#endif /* LINUX_VERSION < VERSION(3, 11, 0) */ + struct wireless_dev *wdev = ndev_to_wdev(dev); struct bcm_cfg80211 *cfg = g_bcm_cfg; - WL_DBG(("Enter \n")); +#ifdef DHD_IFDEBUG + WL_ERR(("Enter \n")); +#endif if (!wdev || !cfg || dev == bcmcfg_to_prmry_ndev(cfg)) return NOTIFY_DONE; @@ -10986,6 +12378,9 @@ wl_cfg80211_netdev_notifier_call(struct notifier_block * nb, int max_wait_count = 100; int refcnt = 0; unsigned long limit = jiffies + max_wait_timeout * HZ; +#ifdef DHD_IFDEBUG + WL_ERR(("NETDEV_DOWN(+) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev)); +#endif while (work_pending(&wdev->cleanup_work)) { if (refcnt%5 == 0) { WL_ERR(("[NETDEV_DOWN] wait for " @@ -11011,41 +12406,77 @@ wl_cfg80211_netdev_notifier_call(struct notifier_block * nb, set_current_state(TASK_RUNNING); refcnt++; } -#endif /* LINUX_VERSION < VERSION(3, 14, 0) */ +#ifdef DHD_IFDEBUG + WL_ERR(("NETDEV_DOWN(-) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev)); +#endif +#endif /* LINUX_VERSION < VERSION(3, 14, 0) */ break; } - case NETDEV_UNREGISTER: +#ifdef DHD_IFDEBUG + WL_ERR(("NETDEV_UNREGISTER(+) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev)); +#endif /* after calling list_del_rcu(&wdev->list) */ - wl_dealloc_netinfo(cfg, dev); + wl_cfg80211_clear_per_bss_ies(cfg, + wl_get_bssidx_by_wdev(cfg, wdev)); + 
wl_dealloc_netinfo_by_wdev(cfg, wdev); +#ifdef DHD_IFDEBUG + WL_ERR(("NETDEV_UNREGISTER(-) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev)); +#endif break; case NETDEV_GOING_DOWN: - /* At NETDEV_DOWN state, wdev_cleanup_work work will be called. - * In front of door, the function checks - * whether current scan is working or not. - * If the scanning is still working, wdev_cleanup_work call WARN_ON and - * make the scan done forcibly. - */ + /* + * At NETDEV_DOWN state, wdev_cleanup_work work will be called. + * In front of door, the function checks whether current scan + * is working or not. If the scanning is still working, + * wdev_cleanup_work call WARN_ON and make the scan done forcibly. + */ +#ifdef DHD_IFDEBUG + WL_ERR(("NETDEV_GOING_DOWN wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev)); +#endif if (wl_get_drv_status(cfg, SCANNING, dev)) wl_notify_escan_complete(cfg, dev, true, true); break; } return NOTIFY_DONE; } + static struct notifier_block wl_cfg80211_netdev_notifier = { .notifier_call = wl_cfg80211_netdev_notifier_call, }; -/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be + +/* + * to make sure we won't register the same notifier twice, otherwise a loop is likely to be * created in kernel notifier link list (with 'next' pointing to itself) */ static bool wl_cfg80211_netdev_notifier_registered = FALSE; -void -#ifdef P2PONEINT -wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg) +static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg) +{ + struct wireless_dev *wdev = NULL; + struct net_device *ndev = NULL; + + if (!cfg->scan_request) + return; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + if (cfg->scan_request->dev) + wdev = cfg->scan_request->dev->ieee80211_ptr; #else -wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg) -#endif + wdev = cfg->scan_request->wdev; +#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */ + + if (!wdev) { + WL_ERR(("No wireless_dev present\n")); + return; + } + + ndev = 
wdev_to_wlc_ndev(wdev, cfg); + wl_notify_escan_complete(cfg, ndev, true, true); + WL_ERR(("Scan aborted! \n")); +} + +static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg) { wl_scan_params_t *params = NULL; s32 params_size = 0; @@ -11066,6 +12497,12 @@ wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg) kfree(params); } } +#ifdef WLTDLS + if (cfg->tdls_mgmt_frame) { + kfree(cfg->tdls_mgmt_frame); + cfg->tdls_mgmt_frame = NULL; + } +#endif /* WLTDLS */ } static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg, @@ -11075,18 +12512,25 @@ static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg, s32 err = BCME_OK; unsigned long flags; struct net_device *dev; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + struct cfg80211_scan_info info; + info.aborted = aborted; +#endif WL_DBG(("Enter \n")); + + mutex_lock(&cfg->scan_complete); + if (!ndev) { WL_ERR(("ndev is null\n")); err = BCME_ERROR; - return err; + goto out; } if (cfg->escan_info.ndev != ndev) { WL_ERR(("ndev is different %p %p\n", cfg->escan_info.ndev, ndev)); err = BCME_ERROR; - return err; + goto out; } if (cfg->scan_request) { @@ -11094,6 +12538,13 @@ static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg, #if defined(WL_ENABLE_P2P_IF) if (cfg->scan_request->dev != cfg->p2p_net) dev = cfg->scan_request->dev; +#elif defined(WL_CFG80211_P2P_DEV_IF) + if (cfg->scan_request->wdev->iftype != NL80211_IFTYPE_P2P_DEVICE) { +#ifdef DHD_IFDEBUG + WL_ERR(("%s: dev: %p\n", __FUNCTION__, cfg->scan_request->wdev->netdev)); +#endif + dev = cfg->scan_request->wdev->netdev; + } #endif /* WL_ENABLE_P2P_IF */ } else { @@ -11123,19 +12574,22 @@ static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg, } #endif /* WL_SCHED_SCAN */ if (likely(cfg->scan_request)) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + cfg80211_scan_done(cfg->scan_request, &info); +#else cfg80211_scan_done(cfg->scan_request, aborted); +#endif cfg->scan_request = NULL; + DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub)); 
+ DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub)); } if (p2p_is_on(cfg)) wl_clr_p2p_status(cfg, SCANNING); wl_clr_drv_status(cfg, SCANNING, dev); spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); -#ifdef WL_SDO - if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS) && !in_atomic()) { - wl_cfg80211_resume_sdo(ndev, cfg); - } -#endif +out: + mutex_unlock(&cfg->scan_complete); return err; } @@ -11315,11 +12769,6 @@ static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, list = wl_escan_get_buf(cfg, FALSE); if (scan_req_match(cfg)) { -#ifdef WL_HOST_BAND_MGMT - s32 channel = 0; - s32 channel_band = 0; - chanspec_t chspec; -#endif /* WL_HOST_BAND_MGMT */ /* p2p scan && allow only probe response */ if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) && (bi->flags & WL_BSS_FLAGS_FROM_BEACON)) @@ -11330,19 +12779,6 @@ static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, " response/beacon\n")); goto exit; } -#ifdef WL_HOST_BAND_MGMT - chspec = wl_chspec_driver_to_host(bi->chanspec); - channel = wf_chspec_ctlchan(chspec); - channel_band = CHSPEC2WLC_BAND(chspec); - - if ((cfg->curr_band == WLC_BAND_5G) && - (channel_band == WLC_BAND_2G)) { - /* Avoid sending the GO results in band conflict */ - if (wl_cfgp2p_retreive_p2pattrib(p2p_ie, - P2P_SEID_GROUP_ID) != NULL) - goto exit; - } -#endif /* WL_HOST_BAND_MGMT */ } #ifdef ESCAN_BUF_OVERFLOW_MGMT if (bi_length > ESCAN_BUF_SIZE - list->buflen) @@ -11452,31 +12888,24 @@ static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, goto exit; #endif /* ESCAN_BUF_OVERFLOW_MGMT */ } - if (strlen(bi->SSID) == 0) { // terence: fix for hidden SSID - WL_SCAN(("Skip hidden SSID %pM\n", &bi->BSSID)); - goto exit; - } memcpy(&(((char *)list)[list->buflen]), bi, bi_length); list->version = dtoh32(bi->version); list->buflen += bi_length; list->count++; + /* + * !Broadcast && number of ssid = 1 && number of channels =1 + * means specific scan to association + */ + if 
(wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) { + WL_ERR(("P2P assoc scan fast aborted.\n")); + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, false, true); + goto exit; + } } - } else if (status == WLC_E_STATUS_SUCCESS) { -#if defined(CUSTOM_PLATFORM_NV_TEGRA) -#if defined(P2P_DISCOVERY_WAR) - if (cfg->p2p_net && cfg->scan_request && - cfg->scan_request->dev == cfg->p2p_net && - !cfg->p2p->vif_created) { - if (wldev_iovar_setint(wl_to_prmry_ndev(cfg), "mpc", 1) < 0) { - WL_ERR(("mpc enabling back failed\n")); - } - } -#endif /* defined(P2P_DISCOVERY_WAR) */ -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id, escan_result->sync_id); @@ -11498,22 +12927,18 @@ static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, wl_notify_escan_complete(cfg, ndev, false, false); } wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT); - } - else if (status == WLC_E_STATUS_ABORT) { -#if defined(CUSTOM_PLATFORM_NV_TEGRA) -#if defined(P2P_DISCOVERY_WAR) - if (cfg->p2p_net && cfg->scan_request && - cfg->scan_request->dev == cfg->p2p_net && - !cfg->p2p->vif_created) { - if (wldev_iovar_setint(wl_to_prmry_ndev(cfg), "mpc", 1) < 0) { - WL_ERR(("mpc enabling back failed\n")); - } - } -#endif /* defined(P2P_DISCOVERY_WAR) */ -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ +#ifdef CUSTOMER_HW4_DEBUG + if (wl_scan_timeout_dbg_enabled) + wl_scan_timeout_dbg_clear(); +#endif /* CUSTOMER_HW4_DEBUG */ + } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) || + (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) || + (status == WLC_E_STATUS_NEWASSOC)) { + /* Handle all cases of scan abort */ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; wl_escan_print_sync_id(status, escan_result->sync_id, cfg->escan_info.cur_sync_id); + WL_DBG(("ESCAN ABORT reason: %d\n", status)); if (wl_get_drv_status_all(cfg, 
FINDING_COMMON_CHANNEL)) { WL_INFORM(("ACTION FRAME SCAN DONE\n")); wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev); @@ -11524,17 +12949,16 @@ static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, WL_INFORM(("ESCAN ABORTED\n")); cfg->bss_list = wl_escan_get_buf(cfg, TRUE); if (!scan_req_match(cfg)) { - WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n", + WL_TRACE_HW4(("scan_req_match=0: scanned AP count=%d\n", cfg->bss_list->count)); } wl_inform_bss(cfg); wl_notify_escan_complete(cfg, ndev, true, false); + } else { + /* If there is no pending host initiated scan, do nothing */ + WL_DBG(("ESCAN ABORT: No pending scans. Ignoring event.\n")); } wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT); - } else if (status == WLC_E_STATUS_NEWSCAN) { - WL_ERR(("WLC_E_STATUS_NEWSCAN : scan_request[%p]\n", cfg->scan_request)); - WL_ERR(("sync_id[%d], bss_count[%d]\n", escan_result->sync_id, - escan_result->bss_count)); } else if (status == WLC_E_STATUS_TIMEOUT) { WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request)); WL_ERR(("reason[0x%x]\n", e->reason)); @@ -11572,38 +12996,58 @@ static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable) { u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED); + bool p2p_connected = wl_cfgp2p_vif_created(cfg); struct net_info *iter, *next; - int err; if (!cfg->roamoff_on_concurrent) return; - if (enable && connected_cnt > 1) { + if (enable && (p2p_connected||(connected_cnt > 1))) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif for_each_ndev(cfg, iter, next) { - /* Save the current roam setting */ - if ((err = wldev_iovar_getint(iter->ndev, "roam_off", - (s32 *)&iter->roam_off)) != BCME_OK) { - WL_ERR(("%s:Failed to get current roam 
setting err %d\n", - iter->ndev->name, err)); - continue; - } - if ((err = wldev_iovar_setint(iter->ndev, "roam_off", 1)) != BCME_OK) { - WL_ERR((" %s:failed to set roam_off : %d\n", - iter->ndev->name, err)); + if (iter->ndev && iter->wdev && + iter->wdev->iftype == NL80211_IFTYPE_STATION) { + if (wldev_iovar_setint(iter->ndev, "roam_off", TRUE) + == BCME_OK) { + iter->roam_off = TRUE; + } + else { + WL_ERR(("error to enable roam_off\n")); + } } } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif } else if (!enable) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif for_each_ndev(cfg, iter, next) { - if (iter->roam_off != WL_INVALID) { - if ((err = wldev_iovar_setint(iter->ndev, "roam_off", - iter->roam_off)) == BCME_OK) - iter->roam_off = WL_INVALID; - else { - WL_ERR((" %s:failed to set roam_off : %d\n", - iter->ndev->name, err)); + if (iter->ndev && iter->wdev && + iter->wdev->iftype == NL80211_IFTYPE_STATION) { + if (iter->roam_off != WL_INVALID) { + if (wldev_iovar_setint(iter->ndev, "roam_off", FALSE) + == BCME_OK) { + iter->roam_off = FALSE; + } + else { + WL_ERR(("error to disable roam_off\n")); + } } } } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif } return; } @@ -11620,30 +13064,46 @@ static void wl_cfg80211_determine_vsdb_mode(struct bcm_cfg80211 *cfg) if (connected_cnt <= 1) { return; } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif for_each_ndev(cfg, iter, next) { - chanspec = 0; - ctl_chan = 0; - if 
(wl_get_drv_status(cfg, CONNECTED, iter->ndev)) { - if (wldev_iovar_getint(iter->ndev, "chanspec", - (s32 *)&chanspec) == BCME_OK) { - chanspec = wl_chspec_driver_to_host(chanspec); - ctl_chan = wf_chspec_ctlchan(chanspec); - wl_update_prof(cfg, iter->ndev, NULL, - &ctl_chan, WL_PROF_CHAN); - } - if (!cfg->vsdb_mode) { - if (!pre_ctl_chan && ctl_chan) - pre_ctl_chan = ctl_chan; - else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) { - cfg->vsdb_mode = true; + /* p2p discovery iface ndev could be null */ + if (iter->ndev) { + chanspec = 0; + ctl_chan = 0; + if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) { + if (wldev_iovar_getint(iter->ndev, "chanspec", + (s32 *)&chanspec) == BCME_OK) { + chanspec = wl_chspec_driver_to_host(chanspec); + ctl_chan = wf_chspec_ctlchan(chanspec); + wl_update_prof(cfg, iter->ndev, NULL, + &ctl_chan, WL_PROF_CHAN); + } + if (!cfg->vsdb_mode) { + if (!pre_ctl_chan && ctl_chan) + pre_ctl_chan = ctl_chan; + else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) { + cfg->vsdb_mode = true; + } } } } } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif printf("%s concurrency is enabled\n", cfg->vsdb_mode ? 
"Multi Channel" : "Same Channel"); return; } +#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF) +extern int g_frameburst; +#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */ + static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info, enum wl_status state, bool set) { @@ -11651,9 +13111,12 @@ static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_ s32 err = BCME_OK; u32 mode; u32 chan = 0; - struct net_info *iter, *next; struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg); - dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + if (dhd->busstate == DHD_BUS_DOWN) { + WL_ERR(("%s : busstate is DHD_BUS_DOWN!\n", __FUNCTION__)); + return 0; + } WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n", state, set, _net_info->pm_restore, _net_info->ndev->name)); @@ -11662,141 +13125,88 @@ static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_ mode = wl_get_mode_by_netdev(cfg, _net_info->ndev); if (set) { wl_cfg80211_concurrent_roam(cfg, 1); - + wl_cfg80211_determine_vsdb_mode(cfg); if (mode == WL_MODE_AP) { - if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false)) WL_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n")); } - wl_cfg80211_determine_vsdb_mode(cfg); - if (cfg->vsdb_mode || _net_info->pm_block) { - /* Delete pm_enable_work */ - wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_MAINTAIN); - /* save PM_FAST in _net_info to restore this - * if _net_info->pm_block is false - */ - if (!_net_info->pm_block && (mode == WL_MODE_BSS)) { - _net_info->pm = PM_FAST; - if (dhd_conf_get_pm(dhd) >= 0) - _net_info->pm = dhd_conf_get_pm(dhd); - _net_info->pm_restore = true; - } - pm = PM_OFF; - for_each_ndev(cfg, iter, next) { - if (iter->pm_restore) - continue; - /* Save the current power mode */ - err = wldev_ioctl(iter->ndev, WLC_GET_PM, &iter->pm, - sizeof(iter->pm), false); - WL_DBG(("%s:power save 
%s\n", iter->ndev->name, - iter->pm ? "enabled" : "disabled")); - if (!err && iter->pm) { - iter->pm_restore = true; - } - - } - for_each_ndev(cfg, iter, next) { - if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev)) - continue; - if (pm != PM_OFF && dhd_conf_get_pm(dhd) >= 0) - pm = dhd_conf_get_pm(dhd); - if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, &pm, - sizeof(pm), true)) != 0) { - if (err == -ENODEV) - WL_DBG(("%s:netdev not ready\n", iter->ndev->name)); - else - WL_ERR(("%s:error (%d)\n", iter->ndev->name, err)); - wl_cfg80211_update_power_mode(iter->ndev); - } - } - } else { - /* add PM Enable timer to go to power save mode - * if supplicant control pm mode, it will be cleared or - * updated by wl_cfg80211_set_power_mgmt() if not - for static IP & HW4 P2P, - * PM will be configured when timer expired - */ - - /* - * before calling pm_enable_timer, we need to set PM -1 for all ndev - */ - pm = PM_OFF; - if (!_net_info->pm_block) { - for_each_ndev(cfg, iter, next) { - if (iter->pm_restore) - continue; - /* Save the current power mode */ - err = wldev_ioctl(iter->ndev, WLC_GET_PM, &iter->pm, - sizeof(iter->pm), false); - WL_DBG(("%s:power save %s\n", iter->ndev->name, - iter->pm ? 
"enabled" : "disabled")); - if (!err && iter->pm) { - iter->pm_restore = true; - } - } - } - for_each_ndev(cfg, iter, next) { - if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev)) - continue; - if (pm != PM_OFF && dhd_conf_get_pm(dhd) >= 0) - pm = dhd_conf_get_pm(dhd); - if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, &pm, - sizeof(pm), true)) != 0) { - if (err == -ENODEV) - WL_DBG(("%s:netdev not ready\n", iter->ndev->name)); - else - WL_ERR(("%s:error (%d)\n", iter->ndev->name, err)); - } - } - if (cfg->pm_enable_work_on) { - wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL); - } + pm = PM_OFF; + if ((err = wldev_ioctl(_net_info->ndev, WLC_SET_PM, &pm, + sizeof(pm), true)) != 0) { + if (err == -ENODEV) + WL_DBG(("%s:netdev not ready\n", + _net_info->ndev->name)); + else + WL_ERR(("%s:error (%d)\n", + _net_info->ndev->name, err)); - cfg->pm_enable_work_on = true; - wl_add_remove_pm_enable_work(cfg, TRUE, WL_HANDLER_NOTUSE); + wl_cfg80211_update_power_mode(_net_info->ndev); } + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_SHORT); #if defined(WLTDLS) -#if defined(DISABLE_TDLS_IN_P2P) - if (cfg->vsdb_mode || p2p_is_on(cfg)) -#else - if (cfg->vsdb_mode) -#endif /* defined(DISABLE_TDLS_IN_P2P) */ - { - + if (wl_cfg80211_is_concurrent_mode()) { err = wldev_iovar_setint(primary_dev, "tdls_enable", 0); } #endif /* defined(WLTDLS) */ - } - else { /* clear */ + +#ifdef DISABLE_FRAMEBURST_VSDB +#ifdef USE_WFA_CERT_CONF + if (g_frameburst) +#endif /* USE_WFA_CERT_CONF */ + { + if (wl_cfg80211_is_concurrent_mode()) { + int frameburst = 0; + if (wldev_ioctl(primary_dev, WLC_SET_FAKEFRAG, &frameburst, + sizeof(frameburst), true) != 0) { + WL_DBG(("frameburst set error\n")); + } + WL_DBG(("Frameburst Disabled\n")); + } + } +#endif /* DISABLE_FRAMEBURST_VSDB */ + } else { /* clear */ chan = 0; /* clear chan information when the net device is disconnected */ wl_update_prof(cfg, _net_info->ndev, NULL, &chan, WL_PROF_CHAN); wl_cfg80211_determine_vsdb_mode(cfg); - 
for_each_ndev(cfg, iter, next) { - if (iter->pm_restore && iter->pm) { - WL_DBG(("%s:restoring power save %s\n", - iter->ndev->name, (iter->pm ? "enabled" : "disabled"))); - if (iter->pm != PM_OFF && dhd_conf_get_pm(dhd) >= 0) - iter->pm = dhd_conf_get_pm(dhd); - err = wldev_ioctl(iter->ndev, - WLC_SET_PM, &iter->pm, sizeof(iter->pm), true); - if (unlikely(err)) { - if (err == -ENODEV) - WL_DBG(("%s:netdev not ready\n", iter->ndev->name)); - else - WL_ERR(("%s:error(%d)\n", iter->ndev->name, err)); - break; - } - iter->pm_restore = 0; - wl_cfg80211_update_power_mode(iter->ndev); + if (primary_dev == _net_info->ndev) { + pm = PM_FAST; + if (dhd_conf_get_pm(dhd) >= 0) + pm = dhd_conf_get_pm(dhd); + if ((err = wldev_ioctl(_net_info->ndev, WLC_SET_PM, &pm, + sizeof(pm), true)) != 0) { + if (err == -ENODEV) + WL_DBG(("%s:netdev not ready\n", + _net_info->ndev->name)); + else + WL_ERR(("%s:error (%d)\n", + _net_info->ndev->name, err)); + + wl_cfg80211_update_power_mode(_net_info->ndev); } } + wl_cfg80211_concurrent_roam(cfg, 0); #if defined(WLTDLS) - if (!cfg->vsdb_mode) { + if (!wl_cfg80211_is_concurrent_mode()) { err = wldev_iovar_setint(primary_dev, "tdls_enable", 1); } #endif /* defined(WLTDLS) */ + +#ifdef DISABLE_FRAMEBURST_VSDB +#ifdef USE_WFA_CERT_CONF + if (g_frameburst) +#endif /* USE_WFA_CERT_CONF */ + { + int frameburst = 1; + if (wldev_ioctl(primary_dev, WLC_SET_FAKEFRAG, &frameburst, + sizeof(frameburst), true) != 0) { + WL_DBG(("frameburst set error\n")); + } + WL_DBG(("Frameburst Enabled\n")); + } +#endif /* DISABLE_FRAMEBURST_VSDB */ } return err; } @@ -11816,6 +13226,20 @@ static s32 wl_init_scan(struct bcm_cfg80211 *cfg) return err; } +#ifdef DHD_LOSSLESS_ROAMING +static s32 wl_init_roam_timeout(struct bcm_cfg80211 *cfg) +{ + int err = 0; + + /* Init roam timer */ + init_timer(&cfg->roam_timeout); + cfg->roam_timeout.data = (unsigned long) cfg; + cfg->roam_timeout.function = wl_roam_timeout; + + return err; +} +#endif /* DHD_LOSSLESS_ROAMING */ + 
static s32 wl_init_priv(struct bcm_cfg80211 *cfg) { struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); @@ -11830,9 +13254,10 @@ static s32 wl_init_priv(struct bcm_cfg80211 *cfg) cfg->vsdb_mode = false; #if defined(BCMSDIO) cfg->wlfc_on = false; -#endif +#endif cfg->roamoff_on_concurrent = true; cfg->disable_roam_event = false; + cfg->cfgdev_bssidx = -1; /* register interested state */ set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state); spin_lock_init(&cfg->cfgdrv_lock); @@ -11849,9 +13274,16 @@ static s32 wl_init_priv(struct bcm_cfg80211 *cfg) wl_init_event_handler(cfg); mutex_init(&cfg->usr_sync); mutex_init(&cfg->event_sync); + mutex_init(&cfg->scan_complete); err = wl_init_scan(cfg); if (err) return err; +#ifdef DHD_LOSSLESS_ROAMING + err = wl_init_roam_timeout(cfg); + if (err) { + return err; + } +#endif /* DHD_LOSSLESS_ROAMING */ wl_init_conf(cfg->conf); wl_init_prof(cfg, ndev); wl_link_down(cfg); @@ -11867,6 +13299,9 @@ static void wl_deinit_priv(struct bcm_cfg80211 *cfg) wl_flush_eq(cfg); wl_link_down(cfg); del_timer_sync(&cfg->scan_timeout); +#ifdef DHD_LOSSLESS_ROAMING + del_timer_sync(&cfg->roam_timeout); +#endif wl_deinit_priv_mem(cfg); if (wl_cfg80211_netdev_notifier_registered) { wl_cfg80211_netdev_notifier_registered = FALSE; @@ -11874,12 +13309,7 @@ static void wl_deinit_priv(struct bcm_cfg80211 *cfg) } } -#if defined(CUSTOMER_HW20) && defined(WLANAUDIO) -struct net_device *wl0dot1_dev; -#endif /* CUSTOMER_HW20 && WLANAUDIO */ - -#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT) || \ - defined(P2PONEINT) +#if defined(WL_ENABLE_P2P_IF) static s32 wl_cfg80211_attach_p2p(void) { struct bcm_cfg80211 *cfg = g_bcm_cfg; @@ -11891,15 +13321,9 @@ static s32 wl_cfg80211_attach_p2p(void) return -ENODEV; } -#if defined(CUSTOMER_HW20) && defined(WLANAUDIO) - wl0dot1_dev = cfg->p2p_net; -#endif /* CUSTOMER_HW20 && WLANAUDIO */ - return 0; } -#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT || P2PONEINT */ -#if defined(WL_ENABLE_P2P_IF) || 
defined(WL_NEWCFG_PRIVCMD_SUPPORT) static s32 wl_cfg80211_detach_p2p(void) { struct bcm_cfg80211 *cfg = g_bcm_cfg; @@ -11912,25 +13336,21 @@ static s32 wl_cfg80211_detach_p2p(void) } else wdev = cfg->p2p_wdev; -#ifndef WL_NEWCFG_PRIVCMD_SUPPORT if (!wdev) { WL_ERR(("Invalid Ptr\n")); return -EINVAL; } -#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */ wl_cfgp2p_unregister_ndev(cfg); cfg->p2p_wdev = NULL; cfg->p2p_net = NULL; -#ifndef WL_NEWCFG_PRIVCMD_SUPPORT - WL_DBG(("Freeing 0x%08x \n", (unsigned int)wdev)); + WL_DBG(("Freeing 0x%p \n", wdev)); kfree(wdev); -#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */ return 0; } -#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */ +#endif s32 wl_cfg80211_attach_post(struct net_device *ndev) { @@ -11959,18 +13379,7 @@ s32 wl_cfg80211_attach_post(struct net_device *ndev) if ((err = wl_cfgp2p_init_priv(cfg)) != 0) goto fail; -#ifdef P2PONEINT - if (!cfg->p2p_net) { - cfg->p2p_supported = true; - - err = wl_cfg80211_attach_p2p(); - if (err) - goto fail; - - cfg->p2p_supported = true; - } -#endif -#if defined(WL_ENABLE_P2P_IF) || defined(P2PONEINT) +#if defined(WL_ENABLE_P2P_IF) if (cfg->p2p_net) { /* Update MAC addr for p2p0 interface here. 
*/ memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN); @@ -11984,9 +13393,7 @@ s32 wl_cfg80211_attach_post(struct net_device *ndev) return -ENODEV; } #endif /* WL_ENABLE_P2P_IF */ -#ifndef P2PONEINT cfg->p2p_supported = true; -#endif } else if (ret == 0) { if ((err = wl_cfgp2p_init_priv(cfg)) != 0) goto fail; @@ -12002,7 +13409,7 @@ s32 wl_cfg80211_attach_post(struct net_device *ndev) return err; } -s32 wl_cfg80211_attach(struct net_device *ndev, dhd_pub_t *context) +s32 wl_cfg80211_attach(struct net_device *ndev, void *context) { struct wireless_dev *wdev; struct bcm_cfg80211 *cfg; @@ -12032,11 +13439,12 @@ s32 wl_cfg80211_attach(struct net_device *ndev, dhd_pub_t *context) cfg->wdev = wdev; cfg->pub = context; INIT_LIST_HEAD(&cfg->net_list); + spin_lock_init(&cfg->net_list_sync); ndev->ieee80211_ptr = wdev; SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); wdev->netdev = ndev; cfg->state_notifier = wl_notifier_change_state; - err = wl_alloc_netinfo(cfg, ndev, wdev, WL_MODE_BSS, PM_ENABLE); + err = wl_alloc_netinfo(cfg, ndev, wdev, WL_MODE_BSS, PM_ENABLE, 0); if (err) { WL_ERR(("Failed to alloc net_info (%d)\n", err)); goto cfg80211_attach_out; @@ -12072,17 +13480,24 @@ s32 wl_cfg80211_attach(struct net_device *ndev, dhd_pub_t *context) cfg->btcoex_info = wl_cfg80211_btcoex_init(cfg->wdev->netdev); if (!cfg->btcoex_info) goto cfg80211_attach_out; -#endif - +#endif +#if defined(SUPPORT_RANDOM_MAC_SCAN) + cfg->random_mac_enabled = FALSE; +#endif /* SUPPORT_RANDOM_MAC_SCAN */ g_bcm_cfg = cfg; -#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT) -#ifndef P2PONEINT +#ifdef CONFIG_CFG80211_INTERNAL_REGDB + wdev->wiphy->reg_notifier = wl_cfg80211_reg_notifier; +#endif /* CONFIG_CFG80211_INTERNAL_REGDB */ + +#if defined(WL_ENABLE_P2P_IF) err = wl_cfg80211_attach_p2p(); if (err) goto cfg80211_attach_out; -#endif -#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */ +#endif + + INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler); + 
mutex_init(&cfg->pm_sync); return err; @@ -12101,12 +13516,12 @@ void wl_cfg80211_detach(void *para) WL_TRACE(("In\n")); - wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL); + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL); #if defined(COEX_DHCP) wl_cfg80211_btcoex_deinit(); cfg->btcoex_info = NULL; -#endif +#endif wl_setup_rfkill(cfg, FALSE); #ifdef DEBUGFS_CFG80211 @@ -12120,15 +13535,22 @@ void wl_cfg80211_detach(void *para) if (timer_pending(&cfg->scan_timeout)) del_timer_sync(&cfg->scan_timeout); +#ifdef DHD_LOSSLESS_ROAMING + if (timer_pending(&cfg->roam_timeout)) { + del_timer_sync(&cfg->roam_timeout); + } +#endif /* DHD_LOSSLESS_ROAMING */ #if defined(WL_CFG80211_P2P_DEV_IF) - wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg); + if (cfg->p2p_wdev) + wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg); #endif /* WL_CFG80211_P2P_DEV_IF */ -#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT) +#if defined(WL_ENABLE_P2P_IF) wl_cfg80211_detach_p2p(); -#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */ +#endif wl_cfg80211_ibss_vsie_free(cfg); + wl_cfg80211_clear_mgmt_vndr_ies(cfg); wl_deinit_priv(cfg); g_bcm_cfg = NULL; wl_cfg80211_clear_parent_dev(); @@ -12148,139 +13570,63 @@ void wl_cfg80211_detach(void *para) static void wl_wakeup_event(struct bcm_cfg80211 *cfg) { - if (cfg->event_tsk.thr_pid >= 0) { - DHD_OS_WAKE_LOCK(cfg->pub); + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + if (dhd->up && (cfg->event_tsk.thr_pid >= 0)) { up(&cfg->event_tsk.sema); } } -#if defined(P2PONEINT) || defined(WL_ENABLE_P2P_IF) -static int wl_is_p2p_event(struct wl_event_q *e) +static s32 wl_event_handler(void *data) { - struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct bcm_cfg80211 *cfg = NULL; + struct wl_event_q *e; + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + struct wireless_dev *wdev = NULL; - switch (e->etype) { - case WLC_E_IF: - WL_TRACE(("P2P event(%d) on interface(ifidx:%d)\n", - e->etype, e->emsg.ifidx)); + cfg = (struct bcm_cfg80211 *)tsk->parent; - 
(void)schedule_timeout(20); + printf("tsk Enter, tsk = 0x%p\n", tsk); - if (wl_get_p2p_status(cfg, IF_ADDING) || - wl_get_p2p_status(cfg, IF_DELETING) || - wl_get_p2p_status(cfg, IF_CHANGING) || - wl_get_p2p_status(cfg, IF_CHANGED)) { - WL_TRACE(("P2P Event on Primary I/F (ifidx:%d)." - " Sent it to p2p0 \n", e->emsg.ifidx)); - return TRUE; - } else { - WL_TRACE(("Event is Not p2p event return False \n")); - return FALSE; - } + while (down_interruptible (&tsk->sema) == 0) { + SMP_RD_BARRIER_DEPENDS(); + if (tsk->terminated) { + break; + } + while ((e = wl_deq_event(cfg))) { + WL_DBG(("event type (%d), ifidx: %d bssidx: %d \n", + e->etype, e->emsg.ifidx, e->emsg.bsscfgidx)); - case WLC_E_P2P_PROBREQ_MSG: - case WLC_E_P2P_DISC_LISTEN_COMPLETE: - case WLC_E_ACTION_FRAME_RX: - case WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE: - case WLC_E_ACTION_FRAME_COMPLETE: - - if (e->emsg.ifidx != 0) { - WL_TRACE(("P2P event(%d) on virtual interface(ifidx:%d)\n", - e->etype, e->emsg.ifidx)); - return FALSE; - } else { - WL_TRACE(("P2P event(%d) on interface(ifidx:%d)\n", - e->etype, e->emsg.ifidx)); - return TRUE; + if (e->emsg.ifidx > WL_MAX_IFS) { + WL_ERR((" Event ifidx not in range. val:%d \n", e->emsg.ifidx)); + goto fail; } - break; - - default: - WL_TRACE(("NON-P2P event(%d) on interface(ifidx:%d)\n", - e->etype, e->emsg.ifidx)); - return FALSE; - } -} -#endif - -static s32 wl_event_handler(void *data) -{ - struct bcm_cfg80211 *cfg = NULL; - struct wl_event_q *e; - tsk_ctl_t *tsk = (tsk_ctl_t *)data; - bcm_struct_cfgdev *cfgdev = NULL; - cfg = (struct bcm_cfg80211 *)tsk->parent; - - printf("tsk Enter, tsk = 0x%p\n", tsk); - - while (down_interruptible (&tsk->sema) == 0) { - SMP_RD_BARRIER_DEPENDS(); - if (tsk->terminated) - break; - while ((e = wl_deq_event(cfg))) { - WL_DBG(("event type (%d), if idx: %d\n", e->etype, e->emsg.ifidx)); - /* All P2P device address related events comes on primary interface since - * there is no corresponding bsscfg for P2P interface. 
Map it to p2p0 - * interface. - */ -#if defined(WL_CFG80211_P2P_DEV_IF) -#ifdef P2PONEINT - if ((wl_is_p2p_event(e) == TRUE) && (cfg->p2p_wdev)) -#else - if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_wdev)) -#endif - { - cfgdev = bcmcfg_to_p2p_wdev(cfg); - } else { - struct net_device *ndev = NULL; - - ndev = dhd_idx2net((struct dhd_pub *)(cfg->pub), e->emsg.ifidx); - if (ndev) - cfgdev = ndev_to_wdev(ndev); -#ifdef P2PONEINT - else if (e->etype == WLC_E_IF) { - wl_put_event(e); - DHD_OS_WAKE_UNLOCK(cfg->pub); - continue; - } - - if (cfgdev == NULL) { - if (e->etype == WLC_E_IF) - cfgdev = bcmcfg_to_prmry_wdev(cfg); - else { - cfgdev = ndev_to_wdev(wl_to_p2p_bss_ndev(cfg, - P2PAPI_BSSCFG_CONNECTION)); + if (!(wdev = wl_get_wdev_by_bssidx(cfg, e->emsg.bsscfgidx))) { + /* For WLC_E_IF would be handled by wl_host_event */ + if (e->etype != WLC_E_IF) + WL_ERR(("No wdev corresponding to bssidx: 0x%x found!" + " Ignoring event.\n", e->emsg.bsscfgidx)); + } else if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) { + dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub); + if (dhd->busstate == DHD_BUS_DOWN) { + WL_ERR((": BUS is DOWN.\n")); + } else { +#ifdef DHD_IFDEBUG + if (cfg->iface_cnt == 0) { + wl_dump_ifinfo(cfg); } - } #endif - } -#elif defined(WL_ENABLE_P2P_IF) - // terence 20150116: fix for p2p connection in kernel 3.4 -// if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_net)) { - if ((wl_is_p2p_event(e) == TRUE) && (cfg->p2p_net)) { - cfgdev = cfg->p2p_net; - } else { - cfgdev = dhd_idx2net((struct dhd_pub *)(cfg->pub), - e->emsg.ifidx); - } -#endif /* WL_CFG80211_P2P_DEV_IF */ - - if (!cfgdev) { -#if defined(WL_CFG80211_P2P_DEV_IF) - cfgdev = bcmcfg_to_prmry_wdev(cfg); -#else - cfgdev = bcmcfg_to_prmry_ndev(cfg); -#endif /* WL_CFG80211_P2P_DEV_IF */ - } - if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) { - cfg->evt_handler[e->etype] (cfg, cfgdev, &e->emsg, e->edata); + cfg->evt_handler[e->etype](cfg, wdev_to_cfgdev(wdev), + &e->emsg, e->edata); + } } else { 
WL_DBG(("Unknown Event (%d): ignoring\n", e->etype)); } +fail: wl_put_event(e); + DHD_EVENT_WAKE_UNLOCK(cfg->pub); } - DHD_OS_WAKE_UNLOCK(cfg->pub); } printf("%s: was terminated\n", __FUNCTION__); complete_and_exit(&tsk->completed, 0); @@ -12292,6 +13638,7 @@ wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data) { u32 event_type = ntoh32(e->event_type); struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *netinfo; #if (WL_DBG_LEVEL > 0) s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ? @@ -12299,27 +13646,35 @@ wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data) WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr)); #endif /* (WL_DBG_LEVEL > 0) */ + if (cfg->event_tsk.thr_pid == -1) { + WL_ERR(("Event handler is not created\n")); + return; + } + + if ((cfg == NULL) || (cfg->p2p_supported && cfg->p2p == NULL)) { + WL_ERR(("Stale event ignored\n")); + return; + } + if (wl_get_p2p_status(cfg, IF_CHANGING) || wl_get_p2p_status(cfg, IF_ADDING)) { WL_ERR(("during IF change, ignore event %d\n", event_type)); return; } - if (ndev != bcmcfg_to_prmry_ndev(cfg) && cfg->p2p_supported) { - if ((cfg->bss_cfgdev) && - (ndev == cfgdev_to_wlc_ndev(cfg->bss_cfgdev, cfg))) { - /* Event is corresponding to the secondary STA interface */ - WL_DBG(("DualSta event (%d), proceed to enqueue it \n", event_type)); - } else if (ndev != wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) && -#if defined(WL_ENABLE_P2P_IF) - (ndev != (cfg->p2p_net ? 
cfg->p2p_net : - wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE))) && -#else - (ndev != wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE)) && -#endif /* WL_ENABLE_P2P_IF */ - TRUE) { - WL_ERR(("ignore event %d, not interested\n", event_type)); - return; - } +#ifdef DHD_IFDEBUG + if (event_type != WLC_E_ESCAN_RESULT) { + WL_ERR(("Event_type %d , status : %d, reason : %d, bssidx:%d \n", + event_type, ntoh32(e->status), ntoh32(e->reason), e->bsscfgidx)); + } +#endif + netinfo = wl_get_netinfo_by_bssidx(cfg, e->bsscfgidx); + if (!netinfo) { + /* Since the netinfo entry is not there, the netdev entry is not + * created via cfg80211 interface. so the event is not of interest + * to the cfg80211 layer. + */ + WL_ERR(("ignore event %d, not interested\n", event_type)); + return; } if (event_type == WLC_E_PFN_NET_FOUND) { @@ -12329,8 +13684,12 @@ wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data) WL_DBG((" PNOEVENT: PNO_NET_LOST\n")); } - if (likely(!wl_enq_event(cfg, ndev, event_type, e, data))) + DHD_EVENT_WAKE_LOCK(cfg->pub); + if (likely(!wl_enq_event(cfg, ndev, event_type, e, data))) { wl_wakeup_event(cfg); + } else { + DHD_EVENT_WAKE_UNLOCK(cfg->pub); + } } static void wl_init_eq(struct bcm_cfg80211 *cfg) @@ -12345,8 +13704,8 @@ static void wl_flush_eq(struct bcm_cfg80211 *cfg) unsigned long flags; flags = wl_lock_eq(cfg); - while (!list_empty(&cfg->eq_list)) { - e = list_first_entry(&cfg->eq_list, struct wl_event_q, eq_list); + while (!list_empty_careful(&cfg->eq_list)) { + BCM_SET_LIST_FIRST_ENTRY(e, &cfg->eq_list, struct wl_event_q, eq_list); list_del(&e->eq_list); kfree(e); } @@ -12364,7 +13723,7 @@ static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg) flags = wl_lock_eq(cfg); if (likely(!list_empty(&cfg->eq_list))) { - e = list_first_entry(&cfg->eq_list, struct wl_event_q, eq_list); + BCM_SET_LIST_FIRST_ENTRY(e, &cfg->eq_list, struct wl_event_q, eq_list); list_del(&e->eq_list); } wl_unlock_eq(cfg, flags); @@ -12635,7 +13994,7 @@ 
static int wl_construct_reginfo(struct bcm_cfg80211 *cfg, s32 bw_cap) if (!dhd_conf_match_channel(cfg->pub, channel)) continue; if (index < array_size) { -#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) band_chan_arr[index].center_freq = ieee80211_channel_to_frequency(channel); #else @@ -12721,6 +14080,17 @@ s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify) s32 err = 0; s32 index = 0; s32 nmode = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + u32 j = 0; + s32 vhtmode = 0; + s32 txstreams = 0; + s32 rxstreams = 0; + s32 ldpc_cap = 0; + s32 stbc_rx = 0; + s32 stbc_tx = 0; + s32 txbf_bfe_cap = 0; + s32 txbf_bfr_cap = 0; +#endif bool rollback_lock = false; s32 bw_cap = 0; s32 cur_band = -1; @@ -12750,8 +14120,58 @@ s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify) err = wldev_iovar_getint(dev, "nmode", &nmode); if (unlikely(err)) { WL_ERR(("error reading nmode (%d)\n", err)); - } else { - /* For nmodeonly check bw cap */ + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + err = wldev_iovar_getint(dev, "vhtmode", &vhtmode); + if (unlikely(err)) { + WL_ERR(("error reading vhtmode (%d)\n", err)); + } + + if (vhtmode) { + err = wldev_iovar_getint(dev, "txstreams", &txstreams); + if (unlikely(err)) { + WL_ERR(("error reading txstreams (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "rxstreams", &rxstreams); + if (unlikely(err)) { + WL_ERR(("error reading rxstreams (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "ldpc_cap", &ldpc_cap); + if (unlikely(err)) { + WL_ERR(("error reading ldpc_cap (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "stbc_rx", &stbc_rx); + if (unlikely(err)) { + WL_ERR(("error reading stbc_rx (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "stbc_tx", &stbc_tx); + if (unlikely(err)) { + WL_ERR(("error reading stbc_tx (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "txbf_bfe_cap", 
&txbf_bfe_cap); + if (unlikely(err)) { + WL_ERR(("error reading txbf_bfe_cap (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "txbf_bfr_cap", &txbf_bfr_cap); + if (unlikely(err)) { + WL_ERR(("error reading txbf_bfr_cap (%d)\n", err)); + } + } +#endif + + /* For nmode and vhtmode check bw cap */ + if (nmode || +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + vhtmode || +#endif + 0) { err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap); if (unlikely(err)) { WL_ERR(("error get mimo_bw_cap (%d)\n", err)); @@ -12774,8 +14194,94 @@ s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify) bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; index = IEEE80211_BAND_5GHZ; - if (bw_cap == WLC_N_BW_40ALL || bw_cap == WLC_N_BW_20IN2G_40IN5G) + if (nmode && (bw_cap == WLC_N_BW_40ALL || bw_cap == WLC_N_BW_20IN2G_40IN5G)) bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + /* VHT capabilities. */ + if (vhtmode) { + /* Supported */ + bands[index]->vht_cap.vht_supported = TRUE; + + for (j = 1; j <= VHT_CAP_MCS_MAP_NSS_MAX; j++) { + /* TX stream rates. */ + if (j <= txstreams) { + VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9, + bands[index]->vht_cap.vht_mcs.tx_mcs_map); + } else { + VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE, + bands[index]->vht_cap.vht_mcs.tx_mcs_map); + } + + /* RX stream rates. 
*/ + if (j <= rxstreams) { + VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9, + bands[index]->vht_cap.vht_mcs.rx_mcs_map); + } else { + VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE, + bands[index]->vht_cap.vht_mcs.rx_mcs_map); + } + } + + + /* Capabilities */ + /* 80 MHz is mandatory */ + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SHORT_GI_80; + + if (WL_BW_CAP_160MHZ(bw_cap)) { + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SHORT_GI_160; + } + + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; + + if (ldpc_cap) + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_RXLDPC; + + if (stbc_tx) + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_TXSTBC; + + if (stbc_rx) + bands[index]->vht_cap.cap |= + (stbc_rx << VHT_CAP_INFO_RX_STBC_SHIFT); + + if (txbf_bfe_cap) + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + + if (txbf_bfr_cap) { + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; + } + + if (txbf_bfe_cap || txbf_bfr_cap) { + bands[index]->vht_cap.cap |= + (2 << VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT); + bands[index]->vht_cap.cap |= + ((txstreams - 1) << + VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT); + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB; + } + + /* AMPDU length limit, support max 1MB (2 ^ (13 + 7)) */ + bands[index]->vht_cap.cap |= + (7 << VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT); + WL_INFORM(("%s band[%d] vht_enab=%d vht_cap=%08x " + "vht_rx_mcs_map=%04x vht_tx_mcs_map=%04x\n", + __FUNCTION__, index, + bands[index]->vht_cap.vht_supported, + bands[index]->vht_cap.cap, + bands[index]->vht_cap.vht_mcs.rx_mcs_map, + bands[index]->vht_cap.vht_mcs.tx_mcs_map)); + } +#endif } else if (bandlist[i] == WLC_BAND_2G && __wl_band_2ghz.n_channels > 0) { bands[IEEE80211_BAND_2GHZ] = @@ -12819,14 +14325,18 @@ s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify) static s32 
__wl_cfg80211_up(struct bcm_cfg80211 *cfg) { s32 err = 0; -#ifdef WL_HOST_BAND_MGMT - s32 ret = 0; -#endif /* WL_HOST_BAND_MGMT */ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); struct wireless_dev *wdev = ndev->ieee80211_ptr; WL_DBG(("In\n")); + err = wl_create_event_handler(cfg); + if (err) { + WL_ERR(("wl_create_event_handler failed\n")); + return err; + } + wl_init_event_handler(cfg); + err = dhd_config_dongle(cfg); if (unlikely(err)) return err; @@ -12839,6 +14349,12 @@ static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg) return err; } } + + err = wl_init_scan(cfg); + if (err) { + WL_ERR(("wl_init_scan failed\n")); + return err; + } err = wl_update_wiphybands(cfg, true); if (unlikely(err)) { WL_ERR(("wl_update_wiphybands failed\n")); @@ -12847,32 +14363,14 @@ static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg) return err; } } +#ifdef DHD_LOSSLESS_ROAMING + if (timer_pending(&cfg->roam_timeout)) { + del_timer_sync(&cfg->roam_timeout); + } +#endif /* DHD_LOSSLESS_ROAMING */ err = dhd_monitor_init(cfg->pub); -#ifdef WL_HOST_BAND_MGMT - /* By default the curr_band is initialized to BAND_AUTO */ - if ((ret = wl_cfg80211_set_band(ndev, WLC_BAND_AUTO)) < 0) { - if (ret == BCME_UNSUPPORTED) { - /* Don't fail the initialization, lets just - * fall back to the original method - */ - WL_ERR(("WL_HOST_BAND_MGMT defined, " - "but roam_band iovar not supported \n")); - } else { - WL_ERR(("roam_band failed. 
ret=%d", ret)); - err = -1; - } - } -#endif /* WL_HOST_BAND_MGMT */ -#if defined(DHCP_SCAN_SUPPRESS) - /* wlan scan_supp timer and work thread info */ - init_timer(&cfg->scan_supp_timer); - cfg->scan_supp_timer.data = (ulong)cfg; - cfg->scan_supp_timer.function = wl_cfg80211_scan_supp_timerfunc; - INIT_WORK(&cfg->wlan_work, wl_cfg80211_work_handler); -#endif /* DHCP_SCAN_SUPPRESS */ - INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler); wl_set_drv_status(cfg, READY, ndev); return err; } @@ -12883,75 +14381,95 @@ static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg) unsigned long flags; struct net_info *iter, *next; struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); -#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF)|| \ - defined(WL_NEWCFG_PRIVCMD_SUPPORT)) +#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF) struct net_device *p2p_net = cfg->p2p_net; -#endif /* WL_CFG80211 && (WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT) */ - u32 bssidx = 0; +#endif #ifdef PROP_TXSTATUS_VSDB #if defined(BCMSDIO) dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); #endif #endif /* PROP_TXSTATUS_VSDB */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + struct cfg80211_scan_info info; +#endif + WL_DBG(("In\n")); /* Delete pm_enable_work */ - wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL); + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL); #ifdef WL_NAN - wl_cfgnan_stop_handler(ndev, g_bcm_cfg, NULL, NULL); + wl_cfgnan_stop_handler(ndev, g_bcm_cfg, NULL, 0, NULL); #endif /* WL_NAN */ if (cfg->p2p_supported) { wl_clr_p2p_status(cfg, GO_NEG_PHASE); #ifdef PROP_TXSTATUS_VSDB #if defined(BCMSDIO) - if (cfg->p2p->vif_created) { + if (wl_cfgp2p_vif_created(cfg)) { bool enabled = false; dhd_wlfc_get_enable(dhd, &enabled); if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE && - dhd->op_mode != DHD_FLAG_IBSS_MODE) { + dhd->op_mode != DHD_FLAG_IBSS_MODE && dhd->conf->disable_proptx!=0) { dhd_wlfc_deinit(dhd); cfg->wlfc_on = false; } } -#endif 
+#endif #endif /* PROP_TXSTATUS_VSDB */ } -#if defined(DHCP_SCAN_SUPPRESS) - /* Force clear of scan_suppress */ - if (cfg->scan_suppressed) - wl_cfg80211_scan_suppress(ndev, 0); - if (timer_pending(&cfg->scan_supp_timer)) - del_timer_sync(&cfg->scan_supp_timer); - cancel_work_sync(&cfg->wlan_work); -#endif /* DHCP_SCAN_SUPPRESS */ /* If primary BSS is operational (for e.g SoftAP), bring it down */ - if (!(wl_cfgp2p_find_idx(cfg, ndev, &bssidx)) && - wl_cfgp2p_bss_isup(ndev, bssidx)) { - if (wl_cfgp2p_bss(cfg, ndev, bssidx, 0) < 0) + if (wl_cfgp2p_bss_isup(ndev, 0)) { + if (wl_cfgp2p_bss(cfg, ndev, 0, 0) < 0) WL_ERR(("BSS down failed \n")); } /* Check if cfg80211 interface is already down */ if (!wl_get_drv_status(cfg, READY, ndev)) return err; /* it is even not ready */ - for_each_ndev(cfg, iter, next) - wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev); -#ifdef WL_SDO - wl_cfg80211_sdo_deinit(cfg); + /* clear all the security setting on primary Interface */ + wl_cfg80211_clear_security(cfg); + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + if (iter->ndev) /* p2p discovery iface is null */ + wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") #endif +#ifdef P2P_LISTEN_OFFLOADING + wl_cfg80211_p2plo_deinit(cfg); +#endif /* P2P_LISTEN_OFFLOADING */ + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); if (cfg->scan_request) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + info.aborted = true; + cfg80211_scan_done(cfg->scan_request, &info); +#else cfg80211_scan_done(cfg->scan_request, true); +#endif cfg->scan_request = NULL; } spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); - +#if defined(STRICT_GCC_WARNINGS) && 
defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif for_each_ndev(cfg, iter, next) { + /* p2p discovery iface ndev ptr could be null */ + if (iter->ndev == NULL) + continue; wl_clr_drv_status(cfg, READY, iter->ndev); wl_clr_drv_status(cfg, SCANNING, iter->ndev); wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev); @@ -12961,37 +14479,74 @@ static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg) wl_clr_drv_status(cfg, AP_CREATED, iter->ndev); wl_clr_drv_status(cfg, AP_CREATING, iter->ndev); } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION; -#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF)|| \ - defined(WL_NEWCFG_PRIVCMD_SUPPORT)) +#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF) if (p2p_net) dev_close(p2p_net); -#endif /* WL_CFG80211 && (WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT) */ +#endif + + /* Avoid deadlock from wl_cfg80211_down */ + mutex_unlock(&cfg->usr_sync); + wl_destroy_event_handler(cfg); + mutex_lock(&cfg->usr_sync); wl_flush_eq(cfg); - wl_link_down(cfg); - if (cfg->p2p_supported) + + if (cfg->link_up) { //army fix wifi stop call trace issue + CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL); + wl_link_down(cfg); + } + + if (cfg->p2p_supported) { + if (timer_pending(&cfg->p2p->listen_timer)) + del_timer_sync(&cfg->p2p->listen_timer); wl_cfgp2p_down(cfg); - if (cfg->ap_info) { - kfree(cfg->ap_info->wpa_ie); - kfree(cfg->ap_info->rsn_ie); - kfree(cfg->ap_info->wps_ie); - kfree(cfg->ap_info); - cfg->ap_info = NULL; } + + if (timer_pending(&cfg->scan_timeout)) { + del_timer_sync(&cfg->scan_timeout); + } + + DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub)); + dhd_monitor_uninit(); #ifdef WLAIBSS_MCHAN 
bcm_cfg80211_del_ibss_if(cfg->wdev->wiphy, cfg->ibss_cfgdev); #endif /* WLAIBSS_MCHAN */ -#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF) +#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) /* Clean up if not removed already */ if (cfg->bss_cfgdev) wl_cfg80211_del_iface(cfg->wdev->wiphy, cfg->bss_cfgdev); -#endif /* defined (DUAL_STA) || defined (DUAL_STA_STATIC_IF) */ +#endif /* defined (WL_VIRTUAL_APSTA) || defined (DUAL_STA_STATIC_IF) */ + +#ifdef WL11U + /* Clear interworking element. */ + if (cfg->wl11u) { + cfg->wl11u = FALSE; + cfg->iw_ie_len = 0; + memset(cfg->iw_ie, 0, IW_IES_MAX_BUF_LEN); + } +#endif /* WL11U */ + +#ifdef CUSTOMER_HW4_DEBUG + if (wl_scan_timeout_dbg_enabled) + wl_scan_timeout_dbg_clear(); +#endif /* CUSTOMER_HW4_DEBUG */ + + cfg->disable_roam_event = false; DNGL_FUNC(dhd_cfg80211_down, (cfg)); +#ifdef DHD_IFDEBUG + /* Printout all netinfo entries */ + wl_probe_wdev_all(cfg); +#endif /* DHD_IFDEBUG */ + return err; } @@ -13001,6 +14556,11 @@ s32 wl_cfg80211_up(void *para) s32 err = 0; int val = 1; dhd_pub_t *dhd; +#ifdef DISABLE_PM_BCNRX + s32 interr = 0; + uint param = 0; + s8 iovbuf[WLC_IOCTL_SMLEN]; +#endif /* DISABLE_PM_BCNRX */ (void)para; WL_DBG(("In\n")); @@ -13024,16 +14584,25 @@ s32 wl_cfg80211_up(void *para) dhd = (dhd_pub_t *)(cfg->pub); if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) { err = wl_cfg80211_attach_post(bcmcfg_to_prmry_ndev(cfg)); - if (unlikely(err)) + if (unlikely(err)) { + mutex_unlock(&cfg->usr_sync); return err; + } } -#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X) - if (dhd->fw_4way_handshake) - cfg->wdev->wiphy->features |= NL80211_FEATURE_FW_4WAY_HANDSHAKE; -#endif err = __wl_cfg80211_up(cfg); if (unlikely(err)) WL_ERR(("__wl_cfg80211_up failed\n")); + + + + /* IOVAR configurations with 'up' condition */ +#ifdef DISABLE_PM_BCNRX + bcm_mkiovar("pm_bcnrx", (char *)¶m, 4, iovbuf, sizeof(iovbuf)); + interr = wldev_ioctl(bcmcfg_to_prmry_ndev(cfg), WLC_SET_VAR, iovbuf, 
sizeof(iovbuf), true); + if (unlikely(interr)) + WL_ERR(("Set pm_bcnrx returned (%d)\n", interr)); +#endif /* DISABLE_PM_BCNRX */ + mutex_unlock(&cfg->usr_sync); #ifdef WLAIBSS_MCHAN @@ -13041,8 +14610,8 @@ s32 wl_cfg80211_up(void *para) #endif /* WLAIBSS_MCHAN */ #ifdef DUAL_STA_STATIC_IF -#ifdef DUAL_STA -#error "Both DUAL_STA and DUAL_STA_STATIC_IF can't be enabled together" +#ifdef WL_VIRTUAL_APSTA +#error "Both DUAL STA and DUAL_STA_STATIC_IF can't be enabled together" #endif /* Static Interface support is currently supported only for STA only builds (without P2P) */ wl_cfg80211_create_iface(cfg->wdev->wiphy, NL80211_IFTYPE_STATION, NULL, "wlan%d"); @@ -13055,11 +14624,34 @@ s32 wl_cfg80211_up(void *para) int wl_cfg80211_hang(struct net_device *dev, u16 reason) { struct bcm_cfg80211 *cfg; + dhd_pub_t *dhd; +#if defined(SOFTAP_SEND_HANGEVT) + /* specifc mac address used for hang event */ + uint8 hang_mac[ETHER_ADDR_LEN] = {0x11, 0x11, 0x11, 0x11, 0x11, 0x11}; +#endif /* SOFTAP_SEND_HANGEVT */ + if (!g_bcm_cfg) { + return BCME_ERROR; + } + cfg = g_bcm_cfg; + dhd = (dhd_pub_t *)(cfg->pub); + +#ifdef DHD_USE_EXTENDED_HANG_REASON + if (dhd->hang_reason != 0) { + reason = dhd->hang_reason; + } +#endif /* DHD_USE_EXTENDED_HANG_REASON */ - WL_ERR(("In : chip crash eventing\n")); - wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL); - cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL); + WL_ERR(("In : chip crash eventing, reason=0x%x\n", (uint32)(dhd->hang_reason))); + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL); +#if defined(SOFTAP_SEND_HANGEVT) + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + cfg80211_del_sta(dev, hang_mac, GFP_ATOMIC); + } else +#endif /* SOFTAP_SEND_HANGEVT */ + { + CFG80211_DISCONNECTED(dev, reason, NULL, 0, false, GFP_KERNEL); + } #if defined(RSSIAVG) wl_free_rssi_cache(&g_rssi_cache_ctrl); #endif @@ -13127,10 +14719,10 @@ static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 static s32 
wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, - const wl_event_msg_t *e, void *data, s32 item) + const wl_event_msg_t *e, const void *data, s32 item) { s32 err = 0; - struct wlc_ssid *ssid; + const struct wlc_ssid *ssid; unsigned long flags; struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev); @@ -13139,7 +14731,7 @@ wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, spin_lock_irqsave(&cfg->cfgdrv_lock, flags); switch (item) { case WL_PROF_SSID: - ssid = (wlc_ssid_t *) data; + ssid = (const wlc_ssid_t *) data; memset(profile->ssid.SSID, 0, sizeof(profile->ssid.SSID)); memcpy(profile->ssid.SSID, ssid->SSID, ssid->SSID_len); @@ -13155,16 +14747,16 @@ wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, memcpy(&profile->sec, data, sizeof(profile->sec)); break; case WL_PROF_ACT: - profile->active = *(bool *)data; + profile->active = *(const bool *)data; break; case WL_PROF_BEACONINT: - profile->beacon_interval = *(u16 *)data; + profile->beacon_interval = *(const u16 *)data; break; case WL_PROF_DTIMPERIOD: - profile->dtim_period = *(u8 *)data; + profile->dtim_period = *(const u8 *)data; break; case WL_PROF_CHAN: - profile->channel = *(u32*)data; + profile->channel = *(const u32*)data; break; default: err = -EOPNOTSUPP; @@ -13223,10 +14815,21 @@ static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v) return err; } -static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size, bool roam) +static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, const u8 *ie_stream, u32 *ie_size, + bool roam) { u8 *ssidie; + /* cfg80211_find_ie defined in kernel returning const u8 */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size); +#if 
defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif if (!ssidie) return; if (ssidie[1] != bi->SSID_len) { @@ -13331,19 +14934,17 @@ static void wl_delay(u32 ms) s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr) { struct bcm_cfg80211 *cfg = g_bcm_cfg; - struct ether_addr p2pif_addr; struct ether_addr primary_mac; if (!cfg->p2p) return -1; if (!p2p_is_on(cfg)) { get_primary_mac(cfg, &primary_mac); - wl_cfgp2p_generate_bss_mac(&primary_mac, p2pdev_addr, &p2pif_addr); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); } else { - memcpy(p2pdev_addr->octet, - cfg->p2p->dev_addr.octet, ETHER_ADDR_LEN); + memcpy(p2pdev_addr->octet, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE).octet, + ETHER_ADDR_LEN); } - return 0; } s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len) @@ -13370,6 +14971,15 @@ s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len) return wl_cfgp2p_set_p2p_ps(cfg, net, buf, len); } + +s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len) +{ + struct bcm_cfg80211 *cfg; + cfg = g_bcm_cfg; + + return wl_cfgp2p_set_p2p_ecsa(cfg, net, buf, len); +} + #ifdef P2PLISTEN_AP_SAMECHN s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable) { @@ -13390,7 +15000,7 @@ s32 wl_cfg80211_channel_to_freq(u32 channel) { int freq = 0; -#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) freq = ieee80211_channel_to_frequency(channel); #else { @@ -13405,2095 +15015,2864 @@ s32 wl_cfg80211_channel_to_freq(u32 channel) return freq; } -#ifdef WL_SDO -#define MAX_QR_LEN NLMSG_GOODSIZE - -typedef struct wl_cfg80211_dev_info { - u16 band; - u16 freq; - s16 rssi; - u16 ie_len; - u8 bssid[ETH_ALEN]; -} wl_cfg80211_dev_info_t; +#ifdef WLTDLS static s32 -wl_notify_device_discovery(struct bcm_cfg80211 
*cfg, bcm_struct_cfgdev *cfgdev, - const wl_event_msg_t *e, void *data) -{ - int err = 0; - u32 event = ntoh32(e->event_type); - wl_cfg80211_dev_info_t info; - struct wl_bss_info *bi = NULL; - struct net_device *ndev = NULL; - u8 *buf = NULL; - u32 buflen = 0; - u16 channel = 0; - wl_escan_result_t *escan_result; - - WL_SD(("Enter. type:%d \n", event)); +wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) { - if ((event != WLC_E_P2PO_ADD_DEVICE) && (event != WLC_E_P2PO_DEL_DEVICE)) { - WL_ERR(("Unknown Event\n")); - return -EINVAL; - } + struct net_device *ndev = NULL; + u32 reason = ntoh32(e->reason); + s8 *msg = NULL; ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); - mutex_lock(&cfg->usr_sync); - if (event == WLC_E_P2PO_DEL_DEVICE) { - WL_SD(("DEV_LOST MAC:"MACDBG" \n", MAC2STRDBG(e->addr.octet))); - err = wl_genl_send_msg(ndev, event, (u8 *)e->addr.octet, ETH_ALEN, 0, 0); - } else { - - escan_result = (wl_escan_result_t *) data; - - if (dtoh16(escan_result->bss_count) != 1) { - WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count)); - err = -EINVAL; - goto exit; - } - - bi = escan_result->bss_info; - buflen = dtoh32(bi->length); - if (unlikely(buflen > WL_BSS_INFO_MAX)) { - WL_DBG(("Beacon is larger than buffer. 
Discarding\n")); - err = -EINVAL; - goto exit; + switch (reason) { + case WLC_E_TDLS_PEER_DISCOVERED : + msg = " TDLS PEER DISCOVERD "; + break; + case WLC_E_TDLS_PEER_CONNECTED : +#ifdef PCIE_FULL_DONGLE + dhd_tdls_update_peer_info(ndev, TRUE, (uint8 *)&e->addr.octet[0]); +#endif /* PCIE_FULL_DONGLE */ + if (cfg->tdls_mgmt_frame) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0, + cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, 0); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) + cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0, + cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, + 0, GFP_ATOMIC); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \ + defined(WL_COMPAT_WIRELESS) + cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0, + cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, + GFP_ATOMIC); +#else + cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, + cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, + GFP_ATOMIC); +#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */ } - - /* Update sub-header */ - bzero(&info, sizeof(wl_cfg80211_dev_info_t)); - channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec)); - info.freq = wl_cfg80211_channel_to_freq(channel); - info.rssi = dtoh16(bi->RSSI); -#if defined(RSSIOFFSET) - info.rssi = wl_update_rssi_offset(ndev, info.rssi); -#endif -#if !defined(RSSIAVG) && !defined(RSSIOFFSET) - // terence 20150419: limit the max. 
rssi to -2 or the bss will be filtered out in android OS - info.rssi = MIN(info.rssi, RSSI_MAXVAL); -#endif - memcpy(info.bssid, &bi->BSSID, ETH_ALEN); - info.ie_len = buflen; - - WL_SD(("DEV_FOUND band:%x Freq:%d rssi:%x "MACDBG" \n", - info.band, info.freq, info.rssi, MAC2STRDBG(info.bssid))); - - buf = ((u8 *) bi) + bi->ie_offset; - err = wl_genl_send_msg(ndev, event, buf, - buflen, (u8 *)&info, sizeof(wl_cfg80211_dev_info_t)); + msg = " TDLS PEER CONNECTED "; + break; + case WLC_E_TDLS_PEER_DISCONNECTED : +#ifdef PCIE_FULL_DONGLE + dhd_tdls_update_peer_info(ndev, FALSE, (uint8 *)&e->addr.octet[0]); +#endif /* PCIE_FULL_DONGLE */ + if (cfg->tdls_mgmt_frame) { + kfree(cfg->tdls_mgmt_frame); + cfg->tdls_mgmt_frame = NULL; + cfg->tdls_mgmt_freq = 0; + } + msg = "TDLS PEER DISCONNECTED "; + break; } -exit: - mutex_unlock(&cfg->usr_sync); - return err; + if (msg) { + WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((u8*)(&e->addr)), + (bcmcfg_to_prmry_ndev(cfg) == ndev) ? "primary" : "secondary")); + } + return 0; + } +#endif /* WLTDLS */ -s32 -wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) +static s32 +#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \ + KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) +wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, const u8 *data, size_t len) +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) +wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, const u8 *data, size_t len) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) +wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u8 action_code, u8 dialog_token, 
u16 status_code, + u32 peer_capability, bool initiator, const u8 *data, size_t len) +#else +wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *data, + size_t len) +#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */ { - u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + s32 ret = 0; +#ifdef WLTDLS + struct bcm_cfg80211 *cfg; + tdls_wfd_ie_iovar_t info; + memset(&info, 0, sizeof(tdls_wfd_ie_iovar_t)); + cfg = g_bcm_cfg; - if (cfg->sdo) { - WL_SD(("SDO already initialized\n")); - return 0; - } +#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2) + /* Some customer platform back ported this feature from kernel 3.15 to kernel 3.10 + * and that cuases build error + */ + BCM_REFERENCE(peer_capability); +#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */ - cfg->sdo = kzalloc(sizeof(sd_offload_t), kflags); - if (!cfg->sdo) { - WL_ERR(("malloc failed for SDO \n")); - return -ENOMEM; + switch (action_code) { + /* We need to set TDLS Wifi Display IE to firmware + * using tdls_wfd_ie iovar + */ + case WLAN_TDLS_SET_PROBE_WFD_IE: + WL_ERR(("%s WLAN_TDLS_SET_PROBE_WFD_IE\n", __FUNCTION__)); + info.mode = TDLS_WFD_PROBE_IE_TX; + memcpy(&info.data, data, len); + info.length = len; + break; + case WLAN_TDLS_SET_SETUP_WFD_IE: + WL_ERR(("%s WLAN_TDLS_SET_SETUP_WFD_IE\n", __FUNCTION__)); + info.mode = TDLS_WFD_IE_TX; + memcpy(&info.data, data, len); + info.length = len; + break; + case WLAN_TDLS_SET_WFD_ENABLED: + WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_ENABLED\n", __FUNCTION__)); + dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), true); + goto out; + case WLAN_TDLS_SET_WFD_DISABLED: + WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_DISABLED\n", __FUNCTION__)); + dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), false); + goto out; + default: + WL_ERR(("Unsupported action code : %d\n", action_code)); + goto out; } - return 0; + ret = wldev_iovar_setbuf(dev, "tdls_wfd_ie", &info, sizeof(info), + cfg->ioctl_buf, 
WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + + if (ret) { + WL_ERR(("tdls_wfd_ie error %d\n", ret)); + } +out: +#endif /* WLTDLS */ + return ret; } -s32 -wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg) +static s32 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, enum nl80211_tdls_operation oper) +#else +wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, + u8 *peer, enum nl80211_tdls_operation oper) +#endif { - s32 bssidx; - int ret = 0; - int sdo_pause = 0; - if (!cfg || !cfg->p2p) { - WL_ERR(("Wl %p or cfg->p2p %p is null\n", - cfg, cfg ? cfg->p2p : 0)); - return 0; + s32 ret = 0; +#ifdef WLTDLS + struct bcm_cfg80211 *cfg; + tdls_iovar_t info; + dhd_pub_t *dhdp; + bool tdls_auto_mode = false; + cfg = g_bcm_cfg; + dhdp = (dhd_pub_t *)(cfg->pub); + memset(&info, 0, sizeof(tdls_iovar_t)); + if (peer) { + memcpy(&info.ea, peer, ETHER_ADDR_LEN); + } else { + return -1; } - - bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); - if (!cfg->sdo) { - WL_DBG(("SDO Not Initialized. Do nothing. 
\n")); - return 0; + switch (oper) { + case NL80211_TDLS_DISCOVERY_REQ: + /* If the discovery request is broadcast then we need to set + * info.mode to Tunneled Probe Request + */ + if (memcmp(peer, (const uint8 *)BSSID_BROADCAST, ETHER_ADDR_LEN) == 0) { + info.mode = TDLS_MANUAL_EP_WFD_TPQ; + WL_ERR(("%s TDLS TUNNELED PRBOBE REQUEST\n", __FUNCTION__)); + } else { + info.mode = TDLS_MANUAL_EP_DISCOVERY; + } + break; + case NL80211_TDLS_SETUP: + if (dhdp->tdls_mode == true) { + info.mode = TDLS_MANUAL_EP_CREATE; + tdls_auto_mode = false; + ret = dhd_tdls_enable(dev, false, tdls_auto_mode, NULL); + if (ret < 0) { + return ret; + } + } else { + tdls_auto_mode = true; + } + break; + case NL80211_TDLS_TEARDOWN: + info.mode = TDLS_MANUAL_EP_DELETE; + break; + default: + WL_ERR(("Unsupported operation : %d\n", oper)); + goto out; } - if (cfg->sdo->dd_state && - (ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), - "p2po_stop", (void*)&sdo_pause, sizeof(sdo_pause), - cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL)) < 0) { - WL_ERR(("p2po_stop Failed :%d\n", ret)); + /* turn on TDLS */ + ret = dhd_tdls_enable(dev, true, tdls_auto_mode, NULL); + if (ret < 0) { + return ret; } - kfree(cfg->sdo); - cfg->sdo = NULL; - - WL_SD(("SDO Deinit Done \n")); - - return 0; + if (info.mode) { + ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (ret) { + WL_ERR(("tdls_endpoint error %d\n", ret)); + } + } +out: +#endif /* WLTDLS */ + return ret; } +#endif -s32 -wl_cfg80211_resume_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg) +s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *ndev, char *buf, int len, + enum wl_management_type type) { - wl_sd_listen_t sd_listen; - int ret = 0; - s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); - - WL_DBG(("Enter\n")); + struct bcm_cfg80211 *cfg; + s32 ret = 0; + struct ether_addr primary_mac; + s32 bssidx = 0; + s32 pktflag = 0; + cfg = g_bcm_cfg; - 
if (!cfg->sdo) { - return -EINVAL; + if (wl_get_drv_status(cfg, AP_CREATING, ndev)) { + /* Vendor IEs should be set to FW + * after SoftAP interface is brought up + */ + WL_DBG(("Skipping set IE since AP is not up \n")); + goto exit; + } else if (ndev == bcmcfg_to_prmry_ndev(cfg)) { + /* Either stand alone AP case or P2P discovery */ + if (wl_get_drv_status(cfg, AP_CREATED, ndev)) { + /* Stand alone AP case on primary interface */ + WL_DBG(("Apply IEs for Primary AP Interface \n")); + bssidx = 0; + } else { + /* P2P Discovery case (p2p listen) */ + if (!cfg->p2p->on) { + /* Turn on Discovery interface */ + get_primary_mac(cfg, &primary_mac); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); + p2p_on(cfg) = true; + ret = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0); + if (unlikely(ret)) { + WL_ERR(("Enable discovery failed \n")); + goto exit; + } + } + WL_DBG(("Apply IEs for P2P Discovery Iface \n")); + ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY); + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + } + } else { + /* Virtual AP/ P2P Group Interface */ + WL_DBG(("Apply IEs for iface:%s\n", ndev->name)); + bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); } - if (dev == NULL) - dev = bcmcfg_to_prmry_ndev(cfg); - - /* Disable back the ESCAN events for the offload */ - wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false); - - /* Resume according to the saved state */ - if (cfg->sdo->dd_state == WL_DD_STATE_SEARCH) { - if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_find", NULL, 0, - cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL)) < 0) { - WL_ERR(("p2po_find Failed :%d\n", ret)); + if (ndev != NULL) { + switch (type) { + case WL_BEACON: + pktflag = VNDR_IE_BEACON_FLAG; + break; + case WL_PROBE_RESP: + pktflag = VNDR_IE_PRBRSP_FLAG; + break; + case WL_ASSOC_RESP: + pktflag = VNDR_IE_ASSOCRSP_FLAG; + break; } - } else if (cfg->sdo->dd_state == WL_DD_STATE_LISTEN) { - sd_listen.interval = cfg->sdo->sd_listen.interval; - sd_listen.period = 
cfg->sdo->sd_listen.period; - - if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&sd_listen, - sizeof(wl_sd_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN, - bssidx, NULL)) < 0) { - WL_ERR(("p2po_listen Failed :%d\n", ret)); + if (pktflag) { + ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, + ndev_to_cfgdev(ndev), bssidx, pktflag, buf, len); } - } - - /* p2po_stop clears of the eventmask for GAS. Set it back */ - wl_add_remove_eventmsg(dev, WLC_E_SERVICE_FOUND, true); - wl_add_remove_eventmsg(dev, WLC_E_GAS_FRAGMENT_RX, true); - wl_add_remove_eventmsg(dev, WLC_E_GAS_COMPLETE, true); - - WL_SD(("SDO Resumed \n")); - +exit: return ret; } -s32 wl_cfg80211_pause_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg) +#ifdef WL_SUPPORT_AUTO_CHANNEL +static s32 +wl_cfg80211_set_auto_channel_scan_state(struct net_device *ndev) { + u32 val = 0; + s32 ret = BCME_ERROR; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wiphy *wiphy; + /* Disable mpc, to avoid automatic interface down. */ + val = 0; - int ret = 0; - s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); - int sdo_pause = 1; - - WL_DBG(("Enter \n")); - - if (!cfg->sdo) { - WL_ERR(("SDO not initialized \n")); - return -EINVAL; + wiphy = bcmcfg_to_wiphy(cfg); + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); + return ret; + } + ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val, + sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, + &cfg->ioctl_buf_sync); + if (ret < 0) { + WL_ERR(("set 'mpc' failed, error = %d\n", ret)); + goto done; } - if (dev == NULL) - dev = bcmcfg_to_prmry_ndev(cfg); + /* Set interface up, explicitly. 
*/ + val = 1; - if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", - (void*)&sdo_pause, sizeof(sdo_pause), - cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)) < 0) { - WL_ERR(("p2po_stop Failed :%d\n", ret)); + ret = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true); + if (ret < 0) { + WL_ERR(("set interface up failed, error = %d\n", ret)); + goto done; } - /* Enable back the ESCAN events for the SCAN */ - wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, true); - - WL_SD(("SDO Paused \n")); + /* Stop all scan explicitly, till auto channel selection complete. */ + wl_set_drv_status(cfg, SCANNING, ndev); + if (cfg->escan_info.ndev == NULL) { + ret = BCME_OK; + goto done; + } + ret = wl_notify_escan_complete(cfg, ndev, true, true); + if (ret < 0) { + WL_ERR(("set scan abort failed, error = %d\n", ret)); + ret = BCME_OK; // terence 20140115: fix escan_complete error + goto done; + } +done: return ret; } -static s32 -wl_svc_resp_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, - const wl_event_msg_t *e, void *data) +static bool +wl_cfg80211_valid_channel_p2p(int channel) { - u32 event = ntoh32(e->event_type); - struct net_device *ndev = NULL; - u8 *dst_mac = (u8 *)e->addr.octet; - int ret = 0; - wl_event_sd_t *gas = NULL; - int status = ntoh32(e->status); - sdo_event_t sdo_hdr; - u32 data_len = ntoh32(e->datalen); - u8 *data_ptr = NULL; - u32 tot_len = 0; - - - WL_SD(("Enter event_type:%d status:%d\n", event, status)); + bool valid = false; - if (!cfg->sdo) { - WL_ERR(("SDO Not initialized \n")); - return -EINVAL; + /* channel 1 to 14 */ + if ((channel >= 1) && (channel <= 14)) { + valid = true; } - - if (!(cfg->sdo->sd_state & WL_SD_SEARCH_SVC)) { - /* We are not searching for any service. Drop - * any bogus Event - */ - WL_ERR(("Bogus SDO Event. Do nothing.. 
\n")); - return -1; + /* channel 36 to 48 */ + else if ((channel >= 36) && (channel <= 48)) { + valid = true; + } + /* channel 149 to 161 */ + else if ((channel >= 149) && (channel <= 161)) { + valid = true; + } + else { + valid = false; + WL_INFORM(("invalid P2P chanspec, channel = %d\n", channel)); } - ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); - - mutex_lock(&cfg->usr_sync); - if (event == WLC_E_SERVICE_FOUND) { - - if ((status != WLC_E_STATUS_SUCCESS) && (status != WLC_E_STATUS_PARTIAL)) { - WL_ERR(("WLC_E_SERVICE_FOUND: unknown status \n")); - goto exit; - } + return valid; +} - gas = (wl_event_sd_t *)data; - if (!gas) { - ret = -EINVAL; - goto exit; - } +s32 +wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, void *buf, s32 buflen) +{ + s32 ret = BCME_ERROR; + struct bcm_cfg80211 *cfg = NULL; + wl_uint32_list_t *list = NULL; + chanspec_t chanspec = 0; - bzero(&sdo_hdr, sizeof(sdo_event_t)); - sdo_hdr.freq = wl_cfg80211_channel_to_freq(gas->channel); - sdo_hdr.count = gas->count; - memcpy(sdo_hdr.addr, dst_mac, ETH_ALEN); - data_ptr = (char *)gas->tlv; - tot_len = data_len - (sizeof(wl_event_sd_t) - sizeof(wl_sd_tlv_t)); + memset(buf, 0, buflen); - WL_SD(("WLC_E_SERVICE_FOUND "MACDBG" data_len:%d tlv_count:%d \n", - MAC2STRDBG(dst_mac), data_len, sdo_hdr.count)); + cfg = g_bcm_cfg; + list = (wl_uint32_list_t *)buf; + list->count = htod32(WL_NUMCHANSPECS); - if (tot_len > NLMSG_DEFAULT_SIZE) { - WL_ERR(("size(%u) > %lu not supported \n", tot_len, NLMSG_DEFAULT_SIZE)); - ret = -ENOMEM; - goto exit; - } + /* Restrict channels to 2.4GHz, 20MHz BW, no SB. 
*/ + chanspec |= (WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 | + WL_CHANSPEC_CTL_SB_NONE); + chanspec = wl_chspec_host_to_driver(chanspec); - if (wl_genl_send_msg(ndev, event, data_ptr, - tot_len, (u8 *)&sdo_hdr, sizeof(sdo_event_t)) < 0) - WL_ERR(("Couldn't send up the NETLINK Event \n")); - else - WL_SD(("GAS event sent up \n")); - } else { - WL_ERR(("Unsupported Event: %d \n", event)); + ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec, + sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync); + if (ret < 0) { + WL_ERR(("get 'chanspecs' failed, error = %d\n", ret)); } -exit: - mutex_unlock(&cfg->usr_sync); return ret; } -s32 wl_cfg80211_DsdOffloadParseProto(char* proto_str, u8* proto) +s32 +wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, void *buf, s32 buflen) { - s32 len = -1; - int i = 0; - - for (i = 0; i < MAX_SDO_PROTO; i++) { - if (strncmp(proto_str, wl_sdo_protos[i].str, strlen(wl_sdo_protos[i].str)) == 0) { - WL_SD(("Matching proto (%d) found \n", wl_sdo_protos[i].val)); - *proto = wl_sdo_protos[i].val; - len = strlen(wl_sdo_protos[i].str); - break; - } - } - return len; -} + u32 channel = 0; + s32 ret = BCME_ERROR; + s32 i = 0; + s32 j = 0; + struct bcm_cfg80211 *cfg = NULL; + wl_uint32_list_t *list = NULL; + chanspec_t chanspec = 0; -/* - * register to search for a UPnP service - * ./DRIVER P2P_SD_REQ upnp 0x10urn:schemas-upnporg:device:InternetGatewayDevice:1 - * - * Enable discovery - * ./cfg p2po_find -*/ -#define UPNP_QUERY_VER_OFFSET 3 -s32 wl_sd_handle_sd_req( - struct net_device *dev, - u8 * buf, - int len) -{ - struct bcm_cfg80211 *cfg = g_bcm_cfg; - s32 bssidx = 0; - wl_sd_qr_t *sdreq; - u8 proto = 0; - s32 ret = 0; - u16 kflags = in_atomic() ? 
GFP_ATOMIC : GFP_KERNEL; - u32 tot_len = len + sizeof(wl_sd_qr_t); - u16 version = 0; + memset(buf, 0, buflen); - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("find_idx failed\n")); - return -EINVAL; - } - /* Check for the least arg length expected */ - if (!buf || (len < strlen("all"))) { - WL_ERR(("Wrong Arg\n")); - return -EINVAL; - } + cfg = g_bcm_cfg; + list = (wl_uint32_list_t *)buf; + list->count = htod32(WL_NUMCHANSPECS); - if (tot_len > WLC_IOCTL_MAXLEN) { - WL_ERR(("Length > %lu not supported \n", MAX_QR_LEN)); - return -EINVAL; - } + /* Restrict channels to 5GHz, 20MHz BW, no SB. */ + chanspec |= (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20 | + WL_CHANSPEC_CTL_SB_NONE); + chanspec = wl_chspec_host_to_driver(chanspec); - sdreq = kzalloc(tot_len, kflags); - if (!sdreq) { - WL_ERR(("malloc failed\n")); - return -ENOMEM; + ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec, + sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync); + if (ret < 0) { + WL_ERR(("get 'chanspecs' failed, error = %d\n", ret)); + goto done; } - WL_SD(("%s Len: %d\n", buf, len)); - if ((ret = wl_cfg80211_DsdOffloadParseProto(buf, &proto)) < 0) { - WL_ERR(("Unknown proto \n")); - goto exit; - } + /* Skip DFS and inavlid P2P channel. */ + for (i = 0, j = 0; i < dtoh32(list->count); i++) { + chanspec = (chanspec_t) dtoh32(list->element[i]); + channel = CHSPEC_CHANNEL(chanspec); - sdreq->protocol = proto; - buf += ret; - buf++; /* skip the space */ - sdreq->transaction_id = simple_strtoul(buf, NULL, 16); - WL_SD(("transaction_id:%d\n", sdreq->transaction_id)); - buf += sizeof(sdreq->transaction_id); + ret = wldev_iovar_getint(ndev, "per_chan_info", &channel); + if (ret < 0) { + WL_ERR(("get 'per_chan_info' failed, error = %d\n", ret)); + goto done; + } - if (*buf == '\0') { - WL_SD(("No Query present. 
Proto:%d \n", proto)); - sdreq->query_len = 0; - } else { - buf++; /* skip the space */ - /* UPNP version needs to put as binary val */ - if (sdreq->protocol == SVC_RPOTYPE_UPNP) { - /* Extract UPNP version */ - version = simple_strtoul(buf, NULL, 16); - buf = buf + UPNP_QUERY_VER_OFFSET; - buf[0] = version; - WL_SD(("Upnp version: 0x%x \n", version)); + if (CHANNEL_IS_RADAR(channel) || + !(wl_cfg80211_valid_channel_p2p(CHSPEC_CHANNEL(chanspec)))) { + continue; + } else { + list->element[j] = list->element[i]; } - len = strlen(buf); - WL_SD(("Len after stripping proto: %d Query: %s\n", len, buf)); - /* copy the query part */ - memcpy(sdreq->qrbuf, buf, len); - sdreq->query_len = len; + j++; } - /* Enable discovery */ - if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) { - WL_ERR(("cfgp2p_enable discovery failed")); - goto exit; + list->count = j; + +done: + return ret; +} + +static s32 +wl_cfg80211_get_best_channel(struct net_device *ndev, void *buf, int buflen, + int *channel) +{ + s32 ret = BCME_ERROR; + int chosen = 0; + int retry = 0; + uint chip; + + /* Start auto channel selection scan. */ + ret = wldev_ioctl(ndev, WLC_START_CHANNEL_SEL, buf, buflen, true); + if (ret < 0) { + WL_ERR(("can't start auto channel scan, error = %d\n", ret)); + *channel = 0; + goto done; } - if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_sd_req_resp", (void*)sdreq, - tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, - bssidx, &cfg->ioctl_buf_sync)) < 0) { - WL_ERR(("Find SVC Failed \n")); - goto exit; + /* Wait for auto channel selection, worst case possible delay is 5250ms. 
*/ + retry = CHAN_SEL_RETRY_COUNT; + + while (retry--) { + OSL_SLEEP(CHAN_SEL_IOCTL_DELAY); + + ret = wldev_ioctl(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen), + false); + if ((ret == 0) && (dtoh32(chosen) != 0)) { + chip = dhd_conf_get_chip(dhd_get_pub(ndev)); + if (chip != BCM43362_CHIP_ID && chip != BCM4330_CHIP_ID) { + u32 chanspec = 0; + int ctl_chan; + chanspec = wl_chspec_driver_to_host(chosen); + printf("selected chanspec = 0x%x\n", chanspec); + ctl_chan = wf_chspec_ctlchan(chanspec); + printf("selected ctl_chan = 0x%x\n", ctl_chan); + *channel = (u16)(ctl_chan & 0x00FF); + } else + *channel = (u16)(chosen & 0x00FF); + WL_INFORM(("selected channel = %d\n", *channel)); + break; + } + WL_INFORM(("attempt = %d, ret = %d, chosen = %d\n", + (CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen))); } - cfg->sdo->sd_state |= WL_SD_SEARCH_SVC; + if (retry <= 0) { + WL_ERR(("failure, auto channel selection timed out\n")); + *channel = 0; + ret = BCME_ERROR; + } -exit: - kfree(sdreq); +done: return ret; } -s32 wl_sd_handle_sd_cancel_req( - struct net_device *dev, - u8 *buf) +static s32 +wl_cfg80211_restore_auto_channel_scan_state(struct net_device *ndev) { + u32 val = 0; + s32 ret = BCME_ERROR; struct bcm_cfg80211 *cfg = g_bcm_cfg; - s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); - if (wldev_iovar_setbuf_bsscfg(dev, "p2po_sd_cancel", NULL, - 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, - bssidx, &cfg->ioctl_buf_sync) < 0) { - WL_ERR(("Cancel SD Failed \n")); - return -EINVAL; - } + /* Clear scan stop driver status. */ + wl_clr_drv_status(cfg, SCANNING, ndev); - cfg->sdo->sd_state &= ~WL_SD_SEARCH_SVC; + /* Enable mpc back to 1, irrespective of initial state. 
*/ + val = 1; - return 0; + ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val, + sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, + &cfg->ioctl_buf_sync); + if (ret < 0) { + WL_ERR(("set 'mpc' failed, error = %d\n", ret)); + } + + return ret; } -/* - * register a UPnP service to be discovered - * ./cfg P2P_SD_SVC_ADD upnp 0x10urn:schemas-upnporg:device:InternetGatewayDevice:1 0x10uu - * id:6859dede-8574-59ab-9332-123456789012::urn:schemas-upnporg:device:InternetGate - * wayDevice:1 -*/ -s32 wl_sd_handle_sd_add_svc( - struct net_device *dev, - u8 * buf, - int len) +s32 +wl_cfg80211_get_best_channels(struct net_device *dev, char* cmd, int total_len) { - struct bcm_cfg80211 *cfg = g_bcm_cfg; - s32 bssidx = 0; - wl_sd_qr_t *sdreq; - u8 proto = 0; - u16 version = 0; - s32 ret = 0; - u8 *resp = NULL; - u8 *query = NULL; - u32 tot_len = len + sizeof(wl_sd_qr_t); - u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + int channel = 0, band, band_cur; + s32 ret = BCME_ERROR; + u8 *buf = NULL; + char *pos = cmd; + struct bcm_cfg80211 *cfg = NULL; + struct net_device *ndev = NULL; - if (!buf || !len) - return -EINVAL; + memset(cmd, 0, total_len); - WL_SD(("%s Len: %d\n", buf, len)); - if (tot_len > WLC_IOCTL_MAXLEN) { - WL_ERR(("Query-Resp length > %d not supported \n", WLC_IOCTL_MAXLEN)); + buf = kmalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL); + if (buf == NULL) { + WL_ERR(("failed to allocate chanspec buffer\n")); return -ENOMEM; } - sdreq = kzalloc(tot_len, kflags); - if (!sdreq) { - WL_ERR(("malloc failed\n")); - return -ENOMEM; - } + /* + * Always use primary interface, irrespective of interface on which + * command came. + */ + cfg = g_bcm_cfg; + ndev = bcmcfg_to_prmry_ndev(cfg); - if ((ret = wl_cfg80211_DsdOffloadParseProto(buf, &proto)) < 0) { - WL_ERR(("Unknown Proto \n")); - goto exit; + /* + * Make sure that FW and driver are in right state to do auto channel + * selection scan. 
+ */ + ret = wl_cfg80211_set_auto_channel_scan_state(ndev); + if (ret < 0) { + WL_ERR(("can't set auto channel scan state, error = %d\n", ret)); + goto done; } - sdreq->protocol = proto; - buf += ret; + ret = wldev_ioctl(dev, WLC_GET_BAND, &band_cur, sizeof(band_cur), false); + if (band_cur != WLC_BAND_5G) { + /* Best channel selection in 2.4GHz band. */ + ret = wl_cfg80211_get_chanspecs_2g(ndev, (void *)buf, CHANSPEC_BUF_SIZE); + if (ret < 0) { + WL_ERR(("can't get chanspecs in 2.4GHz, error = %d\n", ret)); + goto done; + } - if (*buf == '\0') { - WL_ERR(("No Query Resp pair present \n")); - ret = -EINVAL; - goto exit; - } + ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE, + &channel); + if (ret < 0) { + WL_ERR(("can't select best channel scan in 2.4GHz, error = %d\n", ret)); + goto done; + } - buf++; /* Skip the space */ - len = strlen(buf); - query = strsep((char **)&buf, " "); - if (!query || !buf) { - WL_ERR(("No Query RESP Present\n")); - ret = -EINVAL; - goto exit; + if (CHANNEL_IS_2G(channel)) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS) + channel = ieee80211_channel_to_frequency(channel); +#else + channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ); +#endif + } else { + WL_ERR(("invalid 2.4GHz channel, channel = %d\n", channel)); + channel = 0; + } } - resp = buf; + pos += snprintf(pos, total_len, "%04d ", channel); - if (sdreq->protocol == SVC_RPOTYPE_UPNP) { - /* Extract UPNP version */ - version = simple_strtoul(query, NULL, 16); - query = query + UPNP_QUERY_VER_OFFSET; - resp = resp + UPNP_QUERY_VER_OFFSET; - query[0] = version; - resp[0] = version; - WL_SD(("Upnp version: 0x%x \n", version)); - } + if (band_cur != WLC_BAND_2G) { + // terence 20140120: fix for some chipsets only return 2.4GHz channel (4330b2/43341b0/4339a0) + band = band_cur==WLC_BAND_2G ? 
band_cur : WLC_BAND_5G; + ret = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), true); + if (ret < 0) + WL_ERR(("WLC_SET_BAND error %d\n", ret)); - sdreq->query_len = strlen(query); - sdreq->response_len = strlen(buf); - WL_SD(("query:%s len:%u \n", query, sdreq->query_len)); - WL_SD(("resp:%s len:%u \n", buf, sdreq->response_len)); + /* Best channel selection in 5GHz band. */ + ret = wl_cfg80211_get_chanspecs_5g(ndev, (void *)buf, CHANSPEC_BUF_SIZE); + if (ret < 0) { + WL_ERR(("can't get chanspecs in 5GHz, error = %d\n", ret)); + goto done; + } - memcpy(sdreq->qrbuf, query, sdreq->query_len); - memcpy((sdreq->qrbuf + sdreq->query_len), resp, sdreq->response_len); + ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE, + &channel); + if (ret < 0) { + WL_ERR(("can't select best channel scan in 5GHz, error = %d\n", ret)); + goto done; + } - /* Enable discovery */ - if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) { - WL_ERR(("cfgp2p_enable discovery failed")); - goto exit; + if (CHANNEL_IS_5G(channel)) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS) + channel = ieee80211_channel_to_frequency(channel); +#else + channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ); +#endif + } else { + WL_ERR(("invalid 5GHz channel, channel = %d\n", channel)); + channel = 0; + } + + ret = wldev_ioctl(dev, WLC_SET_BAND, &band_cur, sizeof(band_cur), true); + if (ret < 0) + WL_ERR(("WLC_SET_BAND error %d\n", ret)); } + pos += snprintf(pos, total_len, "%04d ", channel); - if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_addsvc", (void*)sdreq, - tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, - bssidx, &cfg->ioctl_buf_sync)) < 0) { - WL_ERR(("FW Failed in doing p2po_addsvc. RET:%d \n", ret)); - goto exit; + /* Set overall best channel same as 5GHz best channel. 
*/ + pos += snprintf(pos, total_len, "%04d ", channel); + +done: + if (NULL != buf) { + kfree(buf); + } + + /* Restore FW and driver back to normal state. */ + ret = wl_cfg80211_restore_auto_channel_scan_state(ndev); + if (ret < 0) { + WL_ERR(("can't restore auto channel scan state, error = %d\n", ret)); } - cfg->sdo->sd_state |= WL_SD_ADV_SVC; + printf("%s: channel %s\n", __FUNCTION__, cmd); -exit: - kfree(sdreq); - return ret; + return (pos - cmd); } +#endif /* WL_SUPPORT_AUTO_CHANNEL */ -s32 wl_sd_handle_sd_del_svc( - struct net_device *dev, - u8 * buf, - int len) -{ - struct bcm_cfg80211 *cfg = g_bcm_cfg; - s32 bssidx = 0; - wl_sd_qr_t *sdreq; - u8 proto = 0; - s32 ret = 0; - u32 tot_len = len + sizeof(wl_sd_qr_t); - u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; - u16 version = 0; - - if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("find_idx failed\n")); - return -EINVAL; - } - - sdreq = (wl_sd_qr_t *)kzalloc(tot_len, kflags); - if (!sdreq) { - WL_ERR(("malloc failed\n")); - ret = -ENOMEM; - goto exit; - } - - /* Check for the least arg length expected */ - if (buf && len >= strlen("all")) { - WL_DBG(("%s Len: %d\n", buf, len)); - if ((ret = wl_cfg80211_DsdOffloadParseProto(buf, &proto)) < 0) { - WL_ERR(("Unknown Proto \n")); - goto exit; - } - sdreq->protocol = proto; - buf += ret; - - if (*buf == ' ') { - /* Query present */ - buf++; /* Skip the space */ - /* UPNP version needs to put as binary val */ - if (sdreq->protocol == SVC_RPOTYPE_UPNP) { - /* Extract UPNP version */ - version = simple_strtoul(buf, NULL, 16); - buf = buf + UPNP_QUERY_VER_OFFSET; - buf[0] = version; - WL_SD(("Upnp version: 0x%x \n", version)); - } - memcpy(sdreq->qrbuf, buf, strlen(buf)); - sdreq->query_len = strlen(buf); - WL_SD(("Query to be deleted:%s len:%d\n", buf, sdreq->query_len)); - } - } else { - /* ALL */ - proto = 0; - } +static const struct rfkill_ops wl_rfkill_ops = { + .set_block = wl_rfkill_set +}; - sdreq->protocol = proto; - WL_SD(("Proto: %d 
\n", proto)); +static int wl_rfkill_set(void *data, bool blocked) +{ + struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data; - if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_delsvc", (void*)sdreq, - tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, - bssidx, &cfg->ioctl_buf_sync)) < 0) { - WL_ERR(("FW Failed in doing sd_delsvc. ret=%d \n", ret)); - goto exit; - } + WL_DBG(("Enter \n")); + WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked")); - cfg->sdo->sd_state &= ~WL_SD_ADV_SVC; + if (!cfg) + return -EINVAL; -exit: - if (sdreq) - kfree(sdreq); + cfg->rf_blocked = blocked; - return ret; + return 0; } -s32 wl_sd_handle_sd_stop_discovery( - struct net_device *dev, - u8 * buf, - int len) +static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup) { - struct bcm_cfg80211 *cfg = g_bcm_cfg; - s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); - int ret = 0; - int sdo_pause = 0; + s32 err = 0; - if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", (void*)&sdo_pause, - sizeof(sdo_pause), cfg->ioctl_buf, WLC_IOCTL_SMLEN, - bssidx, &cfg->ioctl_buf_sync)) < 0) { - WL_ERR(("p2po_stop Failed :%d\n", ret)); - return -1; - } + WL_DBG(("Enter \n")); + if (!cfg) + return -EINVAL; + if (setup) { + cfg->rfkill = rfkill_alloc("brcmfmac-wifi", + wl_cfg80211_get_parent_dev(), + RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg); - if (wldev_iovar_setint(dev, "mpc", 1) < 0) { - /* Setting of MPC failed */ - WL_ERR(("mpc enabling back failed\n")); - return -1; - } + if (!cfg->rfkill) { + err = -ENOMEM; + goto err_out; + } - /* clear the states */ - cfg->sdo->dd_state = WL_DD_STATE_IDLE; - wl_clr_p2p_status(cfg, DISC_IN_PROGRESS); + err = rfkill_register(cfg->rfkill); - bzero(&cfg->sdo->sd_listen, sizeof(wl_sd_listen_t)); + if (err) + rfkill_destroy(cfg->rfkill); + } else { + if (!cfg->rfkill) { + err = -ENOMEM; + goto err_out; + } - /* Remove ESCAN from waking up the host if ofind/olisten is enabled */ - wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, true); + 
rfkill_unregister(cfg->rfkill); + rfkill_destroy(cfg->rfkill); + } - return ret; +err_out: + return err; } -s32 wl_sd_handle_sd_find( - struct net_device *dev, - u8 * buf, - int len) +#ifdef DEBUGFS_CFG80211 +/** +* Format : echo "SCAN:1 DBG:1" > /sys/kernel/debug/dhd/debug_level +* to turn on SCAN and DBG log. +* To turn off SCAN partially, echo "SCAN:0" > /sys/kernel/debug/dhd/debug_level +* To see current setting of debug level, +* cat /sys/kernel/debug/dhd/debug_level +*/ +static ssize_t +wl_debuglevel_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) { - struct bcm_cfg80211 *cfg = g_bcm_cfg; - s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); - int ret = 0; - s32 disc_bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); - vndr_ie_setbuf_t *ie_setbuf; - vndr_ie_t *vndrie; - vndr_ie_buf_t *vndriebuf; - u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; - int tot_len = 0; - uint channel = 0; - - u8 p2pie_buf[] = { - 0x09, 0x02, 0x02, 0x00, 0x27, 0x0c, 0x06, 0x05, 0x00, - 0x55, 0x53, 0x04, 0x51, 0x0b, 0x11, 0x05, 0x00, 0x55, - 0x53, 0x04, 0x51, 0x0b - }; - - /* Enable discovery */ - if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) { - WL_ERR(("cfgp2p_enable discovery failed")); - return -1; - } + char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)], sublog[S_SUBLOGLEVEL]; + char *params, *token, *colon; + uint i, tokens, log_on = 0; + memset(tbuf, 0, sizeof(tbuf)); + memset(sublog, 0, sizeof(sublog)); + if (copy_from_user(&tbuf, userbuf, min_t(size_t, (sizeof(tbuf) - 1), count))) + return -EFAULT; - if (buf && strncmp(buf, "chan=", strlen("chan=")) == 0) { - buf += strlen("chan="); - channel = simple_strtol(buf, NULL, 10); - WL_SD(("listen_chan to be set:%d\n", channel)); - if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel, - sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN, - bssidx, &cfg->ioctl_buf_sync)) < 0) { - WL_ERR(("p2po_listen_channel Failed :%d\n", ret)); - 
return -1; + params = &tbuf[0]; + colon = strchr(params, '\n'); + if (colon != NULL) + *colon = '\0'; + while ((token = strsep(¶ms, " ")) != NULL) { + memset(sublog, 0, sizeof(sublog)); + if (token == NULL || !*token) + break; + if (*token == '\0') + continue; + colon = strchr(token, ':'); + if (colon != NULL) { + *colon = ' '; } - } - - tot_len = sizeof(vndr_ie_setbuf_t) + sizeof(p2pie_buf); - ie_setbuf = (vndr_ie_setbuf_t *) kzalloc(tot_len, kflags); - if (!ie_setbuf) { - WL_ERR(("IE memory alloc failed\n")); - return -ENOMEM; - } + tokens = sscanf(token, "%s %u", sublog, &log_on); + if (colon != NULL) + *colon = ':'; - /* Apply the p2p_ie for p2po_find */ - strcpy(ie_setbuf->cmd, "add"); + if (tokens == 2) { + for (i = 0; i < ARRAYSIZE(sublogname_map); i++) { + if (!strncmp(sublog, sublogname_map[i].sublogname, + strlen(sublogname_map[i].sublogname))) { + if (log_on) + wl_dbg_level |= + (sublogname_map[i].log_level); + else + wl_dbg_level &= + ~(sublogname_map[i].log_level); + } + } + } else + WL_ERR(("%s: can't parse '%s' as a " + "SUBMODULE:LEVEL (%d tokens)\n", + tbuf, token, tokens)); - vndriebuf = &ie_setbuf->vndr_ie_buffer; - vndriebuf->iecount = htod32(1); - vndriebuf->vndr_ie_list[0].pktflag = htod32(16); - vndrie = &vndriebuf->vndr_ie_list[0].vndr_ie_data; + } + return count; +} - vndrie->id = (uchar) DOT11_MNG_PROPR_ID; - vndrie->len = sizeof(p2pie_buf); - memcpy(vndrie->oui, WFA_OUI, WFA_OUI_LEN); - memcpy(vndrie->data, p2pie_buf, sizeof(p2pie_buf)); +static ssize_t +wl_debuglevel_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *param; + char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)]; + uint i; + memset(tbuf, 0, sizeof(tbuf)); + param = &tbuf[0]; + for (i = 0; i < ARRAYSIZE(sublogname_map); i++) { + param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ", + sublogname_map[i].sublogname, + (wl_dbg_level & sublogname_map[i].log_level) ? 
1 : 0); + } + *param = '\n'; + return simple_read_from_buffer(user_buf, count, ppos, tbuf, strlen(&tbuf[0])); - /* Remove ESCAN from waking up the host if SDO is enabled */ - wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false); +} +static const struct file_operations fops_debuglevel = { + .open = NULL, + .write = wl_debuglevel_write, + .read = wl_debuglevel_read, + .owner = THIS_MODULE, + .llseek = NULL, +}; - if (wldev_iovar_setbuf_bsscfg(dev, "ie", (void*)ie_setbuf, - tot_len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, - disc_bssidx, &cfg->ioctl_buf_sync) < 0) { - WL_ERR(("p2p add_ie failed \n")); - ret = -EINVAL; +static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg) +{ + s32 err = 0; + struct dentry *_dentry; + if (!cfg) + return -EINVAL; + cfg->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!cfg->debugfs || IS_ERR(cfg->debugfs)) { + if (cfg->debugfs == ERR_PTR(-ENODEV)) + WL_ERR(("Debugfs is not enabled on this kernel\n")); + else + WL_ERR(("Can not create debugfs directory\n")); + cfg->debugfs = NULL; goto exit; - } else - WL_SD(("p2p add_ie applied successfully len:%d \n", tot_len)); - if (wldev_iovar_setint(dev, "mpc", 0) < 0) { - /* Setting of MPC failed */ - WL_ERR(("mpc disabling faild\n")); - ret = -1; - goto exit; } - - if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_find", NULL, 0, - cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)) < 0) { - WL_ERR(("p2po_find Failed :%d\n", ret)); - ret = -1; - goto exit; + _dentry = debugfs_create_file("debug_level", S_IRUSR | S_IWUSR, + cfg->debugfs, cfg, &fops_debuglevel); + if (!_dentry || IS_ERR(_dentry)) { + WL_ERR(("failed to create debug_level debug file\n")); + wl_free_debugfs(cfg); } - - /* set the states */ - cfg->sdo->dd_state = WL_DD_STATE_SEARCH; - wl_set_p2p_status(cfg, DISC_IN_PROGRESS); - exit: - if (ie_setbuf) - kfree(ie_setbuf); + return err; +} +static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg) +{ + if (!cfg) + return -EINVAL; + if (cfg->debugfs) + 
debugfs_remove_recursive(cfg->debugfs); + cfg->debugfs = NULL; + return 0; +} +#endif /* DEBUGFS_CFG80211 */ - /* Incase of failure enable back the ESCAN event */ - if (ret) - wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, true); +struct device *wl_cfg80211_get_parent_dev(void) +{ + return cfg80211_parent_dev; +} - return ret; +void wl_cfg80211_set_parent_dev(void *dev) +{ + cfg80211_parent_dev = dev; } -s32 wl_sd_handle_sd_listen( - struct net_device *dev, - u8 *buf, - int len) +static void wl_cfg80211_clear_parent_dev(void) { - struct bcm_cfg80211 *cfg = g_bcm_cfg; - s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); - wl_sd_listen_t sd_listen; - int ret = 0; - u8 * ptr = NULL; - uint channel = 0; + cfg80211_parent_dev = NULL; +} - /* Just in case if it is not enabled */ - if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) { - WL_ERR(("cfgp2p_enable discovery failed")); - return -1; +void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac) +{ + wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr", NULL, + 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + memcpy(mac->octet, cfg->ioctl_buf, ETHER_ADDR_LEN); +} +static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + if (((dev_role == NL80211_IFTYPE_AP) && + !(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) || + ((dev_role == NL80211_IFTYPE_P2P_GO) && + !(dhd->op_mode & DHD_FLAG_P2P_GO_MODE))) + { + WL_ERR(("device role select failed role:%d op_mode:%d \n", dev_role, dhd->op_mode)); + return false; } + return true; +} + +int wl_cfg80211_do_driver_init(struct net_device *net) +{ + struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net); - if (wldev_iovar_setint(dev, "mpc", 0) < 0) { - /* Setting of MPC failed */ - WL_ERR(("mpc disabling faild\n")); + if (!cfg || !cfg->wdev) + return -EINVAL; + + if (dhd_do_driver_init(cfg->wdev->netdev) < 0) return -1; - } - bzero(&sd_listen, 
sizeof(wl_sd_listen_t)); + return 0; +} - if (len) { - ptr = strsep((char **)&buf, " "); - if (ptr == NULL) { - /* period and duration given wrongly */ - WL_ERR(("Arguments in wrong format \n")); - return -EINVAL; - } - else if (strncmp(ptr, "chan=", strlen("chan=")) == 0) { - sd_listen.interval = 65535; - sd_listen.period = 65535; - ptr += strlen("chan="); - channel = simple_strtol(ptr, NULL, 10); - } - else { - sd_listen.period = simple_strtol(ptr, NULL, 10); - ptr = strsep((char **)&buf, " "); - if (ptr == NULL) { - WL_ERR(("Arguments in wrong format \n")); - return -EINVAL; - } - sd_listen.interval = simple_strtol(ptr, NULL, 10); - if (buf && strncmp(buf, "chan=", strlen("chan=")) == 0) { - buf += strlen("chan="); - channel = simple_strtol(buf, NULL, 10); - } - } - WL_SD(("listen_period:%d, listen_interval:%d and listen_channel:%d\n", - sd_listen.period, sd_listen.interval, channel)); - } - if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel, - sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN, - bssidx, &cfg->ioctl_buf_sync)) < 0) { - WL_ERR(("p2po_listen_channel Failed :%d\n", ret)); - return -1; +void wl_cfg80211_enable_trace(u32 level) +{ + wl_dbg_level = level; + printf("%s: wl_dbg_level = 0x%x\n", __FUNCTION__, wl_dbg_level); +} + +#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \ + 2, 0)) +static s32 +wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, + bcm_struct_cfgdev *cfgdev, u64 cookie) +{ + /* CFG80211 checks for tx_cancel_wait callback when ATTR_DURATION + * is passed with CMD_FRAME. This callback is supposed to cancel + * the OFFCHANNEL Wait. Since we are already taking care of that + * with the tx_mgmt logic, do nothing here. 
+ */ + + return 0; +} +#endif /* WL_SUPPORT_BACKPORTED_PATCHES || KERNEL >= 3.2.0 */ + +#ifdef WL11U +bcm_tlv_t * +wl_cfg80211_find_interworking_ie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_INTERWORKING_ID))) { + return (bcm_tlv_t *)ie; } + return NULL; +} - WL_SD(("p2po_listen period:%d interval:%d \n", - sd_listen.period, sd_listen.interval)); - if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&sd_listen, - sizeof(wl_sd_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN, - bssidx, &cfg->ioctl_buf_sync)) < 0) { - WL_ERR(("p2po_listen Failed :%d\n", ret)); + +static s32 +wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag, + uint8 ie_id, uint8 *data, uint8 data_len) +{ + s32 err = BCME_OK; + s32 buf_len; + s32 iecount; + ie_setbuf_t *ie_setbuf; + + if (ie_id != DOT11_MNG_INTERWORKING_ID) + return BCME_UNSUPPORTED; + + /* Validate the pktflag parameter */ + if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG | + VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG | + VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG| + VNDR_IE_CUSTOM_FLAG))) { + WL_ERR(("cfg80211 Add IE: Invalid packet flag 0x%x\n", pktflag)); return -1; } - /* Remove ESCAN from waking up the host if ofind/olisten is enabled */ - wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false); + /* use VNDR_IE_CUSTOM_FLAG flags for none vendor IE . 
currently fixed value */ + pktflag = htod32(pktflag); - /* Store the extended listen values for use in sdo_resume */ - cfg->sdo->sd_listen.interval = sd_listen.interval; - cfg->sdo->sd_listen.period = sd_listen.period; + buf_len = sizeof(ie_setbuf_t) + data_len - 1; + ie_setbuf = (ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL); - /* set the states */ - cfg->sdo->dd_state = WL_DD_STATE_LISTEN; - wl_set_p2p_status(cfg, DISC_IN_PROGRESS); + if (!ie_setbuf) { + WL_ERR(("Error allocating buffer for IE\n")); + return -ENOMEM; + } - return 0; -} + if (cfg->iw_ie_len == data_len && !memcmp(cfg->iw_ie, data, data_len)) { + WL_ERR(("Previous IW IE is equals to current IE\n")); + err = BCME_OK; + goto exit; + } -s32 wl_cfg80211_sd_offload(struct net_device *dev, char *cmd, char* buf, int len) -{ - int ret = 0; - struct bcm_cfg80211 *cfg = g_bcm_cfg; + strncpy(ie_setbuf->cmd, "add", VNDR_IE_CMD_LEN - 1); + ie_setbuf->cmd[VNDR_IE_CMD_LEN - 1] = '\0'; - WL_SD(("Entry cmd:%s arg_len:%d \n", cmd, len)); + /* Buffer contains only 1 IE */ + iecount = htod32(1); + memcpy((void *)&ie_setbuf->ie_buffer.iecount, &iecount, sizeof(int)); + memcpy((void *)&ie_setbuf->ie_buffer.ie_list[0].pktflag, &pktflag, sizeof(uint32)); + + /* Now, add the IE to the buffer */ + ie_setbuf->ie_buffer.ie_list[0].ie_data.id = ie_id; + + /* if already set with previous values, delete it first */ + if (cfg->iw_ie_len != 0) { + WL_DBG(("Different IW_IE was already set. 
clear first\n")); - if (!cfg->sdo) { - WL_SD(("Initializing SDO \n")); - if ((ret = wl_cfg80211_sdo_init(cfg)) < 0) + ie_setbuf->ie_buffer.ie_list[0].ie_data.len = 0; + + err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len, + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + + if (err != BCME_OK) goto exit; } - if (strncmp(cmd, "P2P_SD_REQ", strlen("P2P_SD_REQ")) == 0) { - ret = wl_sd_handle_sd_req(dev, buf, len); - } else if (strncmp(cmd, "P2P_SD_CANCEL_REQ", strlen("P2P_SD_CANCEL_REQ")) == 0) { - ret = wl_sd_handle_sd_cancel_req(dev, buf); - } else if (strncmp(cmd, "P2P_SD_SVC_ADD", strlen("P2P_SD_SVC_ADD")) == 0) { - ret = wl_sd_handle_sd_add_svc(dev, buf, len); - } else if (strncmp(cmd, "P2P_SD_SVC_DEL", strlen("P2P_SD_SVC_DEL")) == 0) { - ret = wl_sd_handle_sd_del_svc(dev, buf, len); - } else if (strncmp(cmd, "P2P_SD_FIND", strlen("P2P_SD_FIND")) == 0) { - ret = wl_sd_handle_sd_find(dev, buf, len); - } else if (strncmp(cmd, "P2P_SD_LISTEN", strlen("P2P_SD_LISTEN")) == 0) { - ret = wl_sd_handle_sd_listen(dev, buf, len); - } else if (strncmp(cmd, "P2P_SD_STOP", strlen("P2P_STOP")) == 0) { - ret = wl_sd_handle_sd_stop_discovery(dev, buf, len); - } else { - WL_ERR(("Request for Unsupported CMD:%s \n", buf)); - ret = -EINVAL; + ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len; + memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len); + + err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len, + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + + if (err == BCME_OK) { + memcpy(cfg->iw_ie, data, data_len); + cfg->iw_ie_len = data_len; + cfg->wl11u = TRUE; + + err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx); } exit: - return ret; + if (ie_setbuf) + kfree(ie_setbuf); + return err; } -#endif /* WL_SDO */ +#endif /* WL11U */ -#ifdef WLTDLS -static s32 -wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, - const wl_event_msg_t *e, void *data) { +s32 
+wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data, char *command, int total_len) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + char ioctl_buf[50]; + int err = 0; + uint32 val = 0; + chanspec_t chanspec = 0; + int abort; + int bytes_written = 0; + wl_dfs_ap_move_status_t *status; + char chanbuf[CHANSPEC_STR_LEN]; + const char *dfs_state_str[DFS_SCAN_S_MAX] = { + "Radar Free On Channel", + "Radar Found On Channel", + "Radar Scan In Progress", + "Radar Scan Aborted", + "RSDB Mode switch in Progress For Scan" + }; + if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) { + bytes_written = snprintf(command, total_len, "AP is not UP\n"); + return bytes_written; + } + if (!*data) { + if ((err = wldev_iovar_getbuf(ndev, "dfs_ap_move", NULL, 0, + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("setting dfs_ap_move failed with err=%d \n", err)); + return err; + } + status = (wl_dfs_ap_move_status_t *)cfg->ioctl_buf; - struct net_device *ndev = NULL; - u32 reason = ntoh32(e->reason); - s8 *msg = NULL; + if (status->version != WL_DFS_AP_MOVE_VERSION) { + err = BCME_UNSUPPORTED; + WL_ERR(("err=%d version=%d\n", err, status->version)); + return err; + } - ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + if (status->move_status != (int8) DFS_SCAN_S_IDLE) { + chanspec = wl_chspec_driver_to_host(status->chanspec); + if (chanspec != 0 && chanspec != INVCHANSPEC) { + wf_chspec_ntoa(chanspec, chanbuf); + bytes_written = snprintf(command, total_len, + "AP Target Chanspec %s (0x%x)\n", chanbuf, chanspec); + } + bytes_written += snprintf(command + bytes_written, total_len, + "%s\n", dfs_state_str[status->move_status]); + return bytes_written; + } else { + bytes_written = snprintf(command, total_len, "dfs AP move in IDLE state\n"); + return bytes_written; + } - switch (reason) { - case WLC_E_TDLS_PEER_DISCOVERED : - msg = " TDLS PEER DISCOVERD "; - break; - case WLC_E_TDLS_PEER_CONNECTED : -#ifdef PCIE_FULL_DONGLE - dhd_tdls_update_peer_info(ndev, TRUE, (uint8 
*)&e->addr.octet[0]); -#endif /* PCIE_FULL_DONGLE */ - if (cfg->tdls_mgmt_frame) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) - cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0, - cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, 0); -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) - cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0, - cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, - 0, GFP_ATOMIC); -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \ - defined(WL_COMPAT_WIRELESS) - cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0, - cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, - GFP_ATOMIC); -#else - cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, - cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, - GFP_ATOMIC); -#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */ + } + + abort = bcm_atoi(data); + if (abort == -1) { + if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &abort, + sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) { + WL_ERR(("seting dfs_ap_move failed with err %d\n", err)); + return err; } - msg = " TDLS PEER CONNECTED "; - break; - case WLC_E_TDLS_PEER_DISCONNECTED : -#ifdef PCIE_FULL_DONGLE - dhd_tdls_update_peer_info(ndev, FALSE, (uint8 *)&e->addr.octet[0]); -#endif /* PCIE_FULL_DONGLE */ - if (cfg->tdls_mgmt_frame) { - kfree(cfg->tdls_mgmt_frame); - cfg->tdls_mgmt_frame = NULL; - cfg->tdls_mgmt_freq = 0; + } else { + chanspec = wf_chspec_aton(data); + if (chanspec != 0) { + val = wl_chspec_host_to_driver(chanspec); + if (val != INVCHANSPEC) { + if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &val, + sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) { + WL_ERR(("seting dfs_ap_move failed with err %d\n", err)); + return err; + } + WL_DBG((" set dfs_ap_move successfull")); + } else { + err = BCME_USAGE_ERROR; + } } - msg = "TDLS PEER DISCONNECTED "; - break; } - if (msg) { - WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((u8*)(&e->addr)), - (bcmcfg_to_prmry_ndev(cfg) == ndev) ? 
"primary" : "secondary")); + return err; +} + +s32 +wl_cfg80211_wbtext_config(struct net_device *ndev, char *data, char *command, int total_len) +{ + uint i = 0; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + wl_roam_prof_band_t *rp; + int err = -EINVAL, bytes_written = 0; + size_t len = strlen(data); + int rp_len = 0; + data[len] = '\0'; + rp = (wl_roam_prof_band_t *) kzalloc(sizeof(*rp) + * WL_MAX_ROAM_PROF_BRACKETS, GFP_KERNEL); + if (unlikely(!rp)) { + WL_ERR(("%s: failed to allocate memory\n", __func__)); + err = -ENOMEM; + goto exit; + } + + rp->ver = WL_MAX_ROAM_PROF_VER; + if (*data && (!strncmp(data, "b", 1))) { + rp->band = WLC_BAND_2G; + } else if (*data && (!strncmp(data, "a", 1))) { + rp->band = WLC_BAND_5G; + } else { + err = snprintf(command, total_len, "Missing band\n"); + goto exit; + } + data++; + rp->len = 0; + /* Getting roam profile from fw */ + if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("Getting roam_profile failed with err=%d \n", err)); + goto exit; + } + memcpy(rp, cfg->ioctl_buf, sizeof(*rp) * WL_MAX_ROAM_PROF_BRACKETS); + /* roam_prof version get */ + if (rp->ver != WL_MAX_ROAM_PROF_VER) { + WL_ERR(("bad version (=%d) in return data\n", rp->ver)); + err = -EINVAL; + goto exit; + } + if ((rp->len % sizeof(wl_roam_prof_t)) != 0) { + WL_ERR(("bad length (=%d) in return data\n", rp->len)); + err = -EINVAL; + goto exit; } - return 0; + if (!*data) { + for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) { + /* printing contents of roam profile data from fw and exits + * if code hits any of one of the below condtion. If remaining + * length of buffer is less than roam profile size or + * if there is no valid entry. 
+ */ + if (((i * sizeof(wl_roam_prof_t)) > rp->len) || + (rp->roam_prof[i].fullscan_period == 0)) { + break; + } + bytes_written += snprintf(command+bytes_written, + total_len, "RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)\n", + rp->roam_prof[i].roam_trigger, rp->roam_prof[i].rssi_lower, + rp->roam_prof[i].channel_usage, + rp->roam_prof[i].cu_avg_calc_dur); + } + err = bytes_written; + goto exit; + } else { + for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) { + /* reading contents of roam profile data from fw and exits + * if code hits any of one of the below condtion, If remaining + * length of buffer is less than roam profile size or if there + * is no valid entry. + */ + if (((i * sizeof(wl_roam_prof_t)) > rp->len) || + (rp->roam_prof[i].fullscan_period == 0)) { + break; + } + } + /* Do not set roam_prof from upper layer if fw doesn't have 2 rows */ + if (i != 2) { + WL_ERR(("FW must have 2 rows to fill roam_prof\n")); + err = -EINVAL; + goto exit; + } + /* setting roam profile to fw */ + data++; + for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) { + rp->roam_prof[i].roam_trigger = simple_strtol(data, &data, 10); + data++; + rp->roam_prof[i].rssi_lower = simple_strtol(data, &data, 10); + data++; + rp->roam_prof[i].channel_usage = simple_strtol(data, &data, 10); + data++; + rp->roam_prof[i].cu_avg_calc_dur = simple_strtol(data, &data, 10); + + rp_len += sizeof(wl_roam_prof_t); + if (*data == '\0') { + break; + } + data++; + } + if (i != 1) { + WL_ERR(("Only two roam_prof rows supported.\n")); + err = -EINVAL; + goto exit; + } + rp->len = rp_len; + if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp, + sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL)) < 0) { + WL_ERR(("seting roam_profile failed with err %d\n", err)); + } + } +exit: + if (rp) { + kfree(rp); + } + return err; } -#endif /* WLTDLS */ -#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS) -static s32 -#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2) 
-wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, - u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, - u32 peer_capability, const u8 *data, size_t len) -#else -wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, - u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *data, - size_t len) -#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */ +int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data, + char *command, int total_len) { - s32 ret = 0; -#ifdef WLTDLS - struct bcm_cfg80211 *cfg; - tdls_wfd_ie_iovar_t info; - memset(&info, 0, sizeof(tdls_wfd_ie_iovar_t)); - cfg = g_bcm_cfg; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int bytes_written = 0, err = -EINVAL, argc = 0; + char rssi[5], band[5], weight[5]; + char *endptr = NULL; + wnm_bss_select_weight_cfg_t *bwcfg; + + bwcfg = kzalloc(sizeof(*bwcfg), GFP_KERNEL); + if (unlikely(!bwcfg)) { + WL_ERR(("%s: failed to allocate memory\n", __func__)); + err = -ENOMEM; + goto exit; + } + bwcfg->version = WNM_BSSLOAD_MONITOR_VERSION; + bwcfg->type = 0; + bwcfg->weight = 0; -#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2) - /* Some customer platform back ported this feature from kernel 3.15 to kernel 3.10 - * and that cuases build error - */ - BCM_REFERENCE(peer_capability); -#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */ + argc = sscanf(data, "%s %s %s", rssi, band, weight); - switch (action_code) { - /* We need to set TDLS Wifi Display IE to firmware - * using tdls_wfd_ie iovar - */ - case WLAN_TDLS_SET_PROBE_WFD_IE: - info.mode = TDLS_WFD_PROBE_IE_TX; - memcpy(&info.data, data, len); - info.length = len; - break; - case WLAN_TDLS_SET_SETUP_WFD_IE: - info.mode = TDLS_WFD_IE_TX; - memcpy(&info.data, data, len); - info.length = len; - break; - default: - WL_ERR(("Unsupported action code : %d\n", action_code)); - goto out; + if (!strcasecmp(rssi, "rssi")) + bwcfg->type = WNM_BSS_SELECT_TYPE_RSSI; + else if (!strcasecmp(rssi, "cu")) + 
bwcfg->type = WNM_BSS_SELECT_TYPE_CU; + else { + /* Usage DRIVER WBTEXT_WEIGHT_CONFIG */ + WL_ERR(("%s: Command usage error\n", __func__)); + goto exit; } - ret = wldev_iovar_setbuf(dev, "tdls_wfd_ie", &info, sizeof(info), - cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); - - if (ret) { - WL_ERR(("tdls_wfd_ie error %d\n", ret)); + if (!strcasecmp(band, "a")) + bwcfg->band = WLC_BAND_5G; + else if (!strcasecmp(band, "b")) + bwcfg->band = WLC_BAND_2G; + else if (!strcasecmp(band, "all")) + bwcfg->band = WLC_BAND_ALL; + else { + WL_ERR(("%s: Command usage error\n", __func__)); + goto exit; } -out: -#endif /* WLTDLS */ - return ret; -} -static s32 -wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, - u8 *peer, enum nl80211_tdls_operation oper) -{ - s32 ret = 0; -#ifdef WLTDLS - struct bcm_cfg80211 *cfg; - tdls_iovar_t info; - cfg = g_bcm_cfg; - memset(&info, 0, sizeof(tdls_iovar_t)); - if (peer) - memcpy(&info.ea, peer, ETHER_ADDR_LEN); - switch (oper) { - case NL80211_TDLS_DISCOVERY_REQ: - /* turn on TDLS */ - ret = dhd_tdls_enable(dev, true, false, NULL); - if (ret < 0) - return ret; - /* If the discovery request is broadcast then we need to set - * info.mode to Tunneled Probe Request - */ - if (memcmp(peer, (const uint8 *)BSSID_BROADCAST, ETHER_ADDR_LEN) == 0) { - info.mode = TDLS_MANUAL_EP_WFD_TPQ; + if (argc == 2) { + /* If there is no data after band, getting wnm_bss_select_weight from fw */ + if (bwcfg->band == WLC_BAND_ALL) { + WL_ERR(("band option \"all\" is for set only, not get\n")); + goto exit; } - else { - info.mode = TDLS_MANUAL_EP_DISCOVERY; + if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_weight", bwcfg, + sizeof(*bwcfg), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("Getting wnm_bss_select_weight failed with err=%d \n", err)); + goto exit; } - break; - case NL80211_TDLS_SETUP: - /* auto mode on */ - ret = dhd_tdls_enable(dev, true, true, (struct ether_addr *)peer); - if (ret < 0) - return ret; - 
break; - case NL80211_TDLS_TEARDOWN: - info.mode = TDLS_MANUAL_EP_DELETE; - /* auto mode off */ - ret = dhd_tdls_enable(dev, true, false, (struct ether_addr *)peer); - if (ret < 0) - return ret; - break; - default: - WL_ERR(("Unsupported operation : %d\n", oper)); - goto out; - } - if (info.mode) { - ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info), - cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); - if (ret) { - WL_ERR(("tdls_endpoint error %d\n", ret)); + memcpy(bwcfg, cfg->ioctl_buf, sizeof(*bwcfg)); + bytes_written = snprintf(command, total_len, "%s %s weight = %d\n", + (bwcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU", + (bwcfg->band == WLC_BAND_2G) ? "2G" : "5G", bwcfg->weight); + err = bytes_written; + goto exit; + } else { + /* if weight is non integer returns command usage error */ + bwcfg->weight = simple_strtol(weight, &endptr, 0); + if (*endptr != '\0') { + WL_ERR(("%s: Command usage error", __func__)); + goto exit; + } + /* setting weight for iovar wnm_bss_select_weight to fw */ + if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_weight", bwcfg, + sizeof(*bwcfg), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("Getting wnm_bss_select_weight failed with err=%d\n", err)); } } -out: -#endif /* WLTDLS */ - return ret; +exit: + if (bwcfg) { + kfree(bwcfg); + } + return err; } -#endif /* LINUX_VERSION > VERSION(3,2,0) || WL_COMPAT_WIRELESS */ -s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len, - enum wl_management_type type) +/* WBTEXT_TUPLE_MIN_LEN_CHECK :strlen(low)+" "+strlen(high)+" "+strlen(factor) */ +#define WBTEXT_TUPLE_MIN_LEN_CHECK 5 + +int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data, + char *command, int total_len) { - struct bcm_cfg80211 *cfg; - struct net_device *ndev = NULL; - struct ether_addr primary_mac; - s32 ret = 0; - s32 bssidx = 0; - s32 pktflag = 0; - cfg = g_bcm_cfg; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int bytes_written = 
0, err = -EINVAL; + char rssi[5], band[5]; + int btcfg_len = 0, i = 0, parsed_len = 0; + wnm_bss_select_factor_cfg_t *btcfg; + size_t slen = strlen(data); + char *start_addr = NULL; + data[slen] = '\0'; + + btcfg = kzalloc((sizeof(*btcfg) + sizeof(*btcfg) * + WL_FACTOR_TABLE_MAX_LIMIT), GFP_KERNEL); + if (unlikely(!btcfg)) { + WL_ERR(("%s: failed to allocate memory\n", __func__)); + err = -ENOMEM; + goto exit; + } - if (wl_get_drv_status(cfg, AP_CREATING, net)) { - /* Vendor IEs should be set to FW - * after SoftAP interface is brought up - */ + btcfg->version = WNM_BSS_SELECT_FACTOR_VERSION; + btcfg->band = WLC_BAND_AUTO; + btcfg->type = 0; + btcfg->count = 0; + + sscanf(data, "%s %s", rssi, band); + + if (!strcasecmp(rssi, "rssi")) { + btcfg->type = WNM_BSS_SELECT_TYPE_RSSI; + } + else if (!strcasecmp(rssi, "cu")) { + btcfg->type = WNM_BSS_SELECT_TYPE_CU; + } + else { + WL_ERR(("%s: Command usage error\n", __func__)); goto exit; - } else if (wl_get_drv_status(cfg, AP_CREATED, net)) { - ndev = net; - bssidx = 0; - } else if (cfg->p2p) { - net = ndev_to_wlc_ndev(net, cfg); - if (!cfg->p2p->on) { - get_primary_mac(cfg, &primary_mac); - wl_cfgp2p_generate_bss_mac(&primary_mac, &cfg->p2p->dev_addr, - &cfg->p2p->int_addr); - /* In case of p2p_listen command, supplicant send remain_on_channel - * without turning on P2P - */ + } - p2p_on(cfg) = true; - ret = wl_cfgp2p_enable_discovery(cfg, net, NULL, 0); + if (!strcasecmp(band, "a")) { + btcfg->band = WLC_BAND_5G; + } + else if (!strcasecmp(band, "b")) { + btcfg->band = WLC_BAND_2G; + } + else if (!strcasecmp(band, "all")) { + btcfg->band = WLC_BAND_ALL; + } + else { + WL_ERR(("%s: Command usage, Wrong band\n", __func__)); + goto exit; + } - if (unlikely(ret)) { - goto exit; - } + if ((slen - 1) == (strlen(rssi) + strlen(band))) { + /* Getting factor table using iovar 'wnm_bss_select_table' from fw */ + if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_table", btcfg, + sizeof(*btcfg), + cfg->ioctl_buf, 
WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("Getting wnm_bss_select_table failed with err=%d \n", err)); + goto exit; } - if (net != bcmcfg_to_prmry_ndev(cfg)) { - if (wl_get_mode_by_netdev(cfg, net) == WL_MODE_AP) { - ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION); - bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION); + memcpy(btcfg, cfg->ioctl_buf, sizeof(*btcfg)); + memcpy(btcfg, cfg->ioctl_buf, (btcfg->count+1) * sizeof(*btcfg)); + + bytes_written += snprintf(command + bytes_written, total_len, + "No of entries in table: %d\n", btcfg->count); + bytes_written += snprintf(command + bytes_written, total_len, "%s factor table\n", + (btcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU"); + bytes_written += snprintf(command + bytes_written, total_len, + "low\thigh\tfactor\n"); + for (i = 0; i <= btcfg->count-1; i++) { + bytes_written += snprintf(command + bytes_written, total_len, + "%d\t%d\t%d\n", btcfg->params[i].low, btcfg->params[i].high, + btcfg->params[i].factor); + } + err = bytes_written; + goto exit; + } else { + memset(btcfg->params, 0, sizeof(*btcfg) * WL_FACTOR_TABLE_MAX_LIMIT); + data += (strlen(rssi) + strlen(band) + 2); + start_addr = data; + slen = slen - (strlen(rssi) + strlen(band) + 2); + for (i = 0; i < WL_FACTOR_TABLE_MAX_LIMIT; i++) { + if (parsed_len + WBTEXT_TUPLE_MIN_LEN_CHECK <= slen) { + btcfg->params[i].low = simple_strtol(data, &data, 10); + data++; + btcfg->params[i].high = simple_strtol(data, &data, 10); + data++; + btcfg->params[i].factor = simple_strtol(data, &data, 10); + btcfg->count++; + if (*data == '\0') { + break; + } + data++; + parsed_len = data - start_addr; + } else { + WL_ERR(("%s:Command usage:less no of args\n", __func__)); + goto exit; } - } else { - ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY); - bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); } - } - if (ndev != NULL) { - switch (type) { - case WL_BEACON: - pktflag = VNDR_IE_BEACON_FLAG; - break; - case 
WL_PROBE_RESP: - pktflag = VNDR_IE_PRBRSP_FLAG; - break; - case WL_ASSOC_RESP: - pktflag = VNDR_IE_ASSOCRSP_FLAG; - break; + btcfg_len = sizeof(*btcfg) + ((btcfg->count) * sizeof(*btcfg)); + if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_table", btcfg, btcfg_len, + cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL)) < 0) { + WL_ERR(("seting wnm_bss_select_table failed with err %d\n", err)); + goto exit; } - if (pktflag) - ret = wl_cfgp2p_set_management_ie(cfg, ndev, bssidx, pktflag, buf, len); } exit: - return ret; + if (btcfg) { + kfree(btcfg); + } + return err; } -#ifdef WL_SUPPORT_AUTO_CHANNEL -static s32 -wl_cfg80211_set_auto_channel_scan_state(struct net_device *ndev) +s32 +wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data, char *command, int total_len) { - u32 val = 0; - s32 ret = BCME_ERROR; + uint i = 0; struct bcm_cfg80211 *cfg = g_bcm_cfg; + int err = -EINVAL, bytes_written = 0, argc = 0, val, len = 0; + char delta[5], band[5], *endptr = NULL; + wl_roam_prof_band_t *rp; + + rp = (wl_roam_prof_band_t *) kzalloc(sizeof(*rp) + * WL_MAX_ROAM_PROF_BRACKETS, GFP_KERNEL); + if (unlikely(!rp)) { + WL_ERR(("%s: failed to allocate memory\n", __func__)); + err = -ENOMEM; + goto exit; + } - /* Disable mpc, to avoid automatic interface down. 
*/ - val = 0; + argc = sscanf(data, "%s %s", band, delta); + if (!strcasecmp(band, "a")) + rp->band = WLC_BAND_5G; + else if (!strcasecmp(band, "b")) + rp->band = WLC_BAND_2G; + else { + WL_ERR(("%s: Missing band\n", __func__)); + goto exit; + } + /* Getting roam profile from fw */ + if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("Getting roam_profile failed with err=%d \n", err)); + goto exit; + } + memcpy(rp, cfg->ioctl_buf, sizeof(wl_roam_prof_band_t)); + if (rp->ver != WL_MAX_ROAM_PROF_VER) { + WL_ERR(("bad version (=%d) in return data\n", rp->ver)); + err = -EINVAL; + goto exit; + } + if ((rp->len % sizeof(wl_roam_prof_t)) != 0) { + WL_ERR(("bad length (=%d) in return data\n", rp->len)); + err = -EINVAL; + goto exit; + } - ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val, - sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, - &cfg->ioctl_buf_sync); - if (ret < 0) { - WL_ERR(("set 'mpc' failed, error = %d\n", ret)); - goto done; + if (argc == 2) { + /* if delta is non integer returns command usage error */ + val = simple_strtol(delta, &endptr, 0); + if (*endptr != '\0') { + WL_ERR(("%s: Command usage error", __func__)); + goto exit; + } + for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) { + /* + * Checking contents of roam profile data from fw and exits + * if code hits below condtion. If remaining length of buffer is + * less than roam profile size or if there is no valid entry. + */ + if (((i * sizeof(wl_roam_prof_t)) > rp->len) || + (rp->roam_prof[i].fullscan_period == 0)) { + break; + } + if (rp->roam_prof[i].channel_usage != 0) { + rp->roam_prof[i].roam_delta = val; + } + len += sizeof(wl_roam_prof_t); + } + } + else { + if (rp->roam_prof[i].channel_usage != 0) { + bytes_written = snprintf(command, total_len, + "%s Delta %d\n", (rp->band == WLC_BAND_2G) ? 
"2G" : "5G", + rp->roam_prof[0].roam_delta); + } + err = bytes_written; + goto exit; + } + rp->len = len; + if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp, + sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL)) < 0) { + WL_ERR(("seting roam_profile failed with err %d\n", err)); } +exit : + if (rp) { + kfree(rp); + } + return err; +} - /* Set interface up, explicitly. */ - val = 1; - ret = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true); - if (ret < 0) { - WL_ERR(("set interface up failed, error = %d\n", ret)); - goto done; - } +int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev) +{ + struct bcm_cfg80211 *cfg = NULL; + struct net_device *ndev = NULL; + unsigned long flags; + int clear_flag = 0; + int ret = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + struct cfg80211_scan_info info; +#endif - /* Stop all scan explicitly, till auto channel selection complete. */ - wl_set_drv_status(cfg, SCANNING, ndev); - if (cfg->escan_info.ndev == NULL) { - ret = BCME_OK; - goto done; - } - ret = wl_notify_escan_complete(cfg, ndev, true, true); - if (ret < 0) { - WL_ERR(("set scan abort failed, error = %d\n", ret)); - ret = BCME_OK; // terence 20140115: fix escan_complete error - goto done; + WL_TRACE(("Enter\n")); + + cfg = g_bcm_cfg; + if (!cfg) + return -EINVAL; + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); +#ifdef WL_CFG80211_P2P_DEV_IF + if (cfg->scan_request && cfg->scan_request->wdev == cfgdev) +#else + if (cfg->scan_request && cfg->scan_request->dev == cfgdev) +#endif + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + info.aborted = true; + cfg80211_scan_done(cfg->scan_request, &info); +#else + cfg80211_scan_done(cfg->scan_request, true); +#endif + cfg->scan_request = NULL; + clear_flag = 1; } + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); + + if (clear_flag) + wl_clr_drv_status(cfg, SCANNING, ndev); -done: return ret; } -static bool -wl_cfg80211_valid_chanspec_p2p(chanspec_t chanspec) 
+bool wl_cfg80211_is_concurrent_mode(void) { - bool valid = false; - char chanbuf[CHANSPEC_STR_LEN]; - - /* channel 1 to 14 */ - if ((chanspec >= 0x2b01) && (chanspec <= 0x2b0e)) { - valid = true; - } - /* channel 36 to 48 */ - else if ((chanspec >= 0x1b24) && (chanspec <= 0x1b30)) { - valid = true; + if ((g_bcm_cfg) && (wl_get_drv_status_all(g_bcm_cfg, CONNECTED) > 1)) { + return true; + } else { + return false; } - /* channel 149 to 161 */ - else if ((chanspec >= 0x1b95) && (chanspec <= 0x1ba1)) { - valid = true; +} + +void* wl_cfg80211_get_dhdp() +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + return cfg->pub; +} + +bool wl_cfg80211_is_p2p_active(void) +{ + return (g_bcm_cfg && g_bcm_cfg->p2p); +} + +bool wl_cfg80211_is_roam_offload(void) +{ + return (g_bcm_cfg && g_bcm_cfg->roam_offload); +} + +bool wl_cfg80211_is_event_from_connected_bssid(const wl_event_msg_t *e, int ifidx) +{ + dhd_pub_t *dhd = NULL; + struct net_device *ndev = NULL; + u8 *curbssid = NULL; + + dhd = (dhd_pub_t *)(g_bcm_cfg->pub); + + if (dhd) { + ndev = dhd_idx2net(dhd, ifidx); } - else { - valid = false; - WL_INFORM(("invalid P2P chanspec, chanspec = %s\n", - wf_chspec_ntoa_ex(chanspec, chanbuf))); + + if (!dhd || !ndev) { + return false; } - return valid; + curbssid = wl_read_prof(g_bcm_cfg, ndev, WL_PROF_BSSID); + + return memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) == 0; } -static s32 -wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, void *buf, s32 buflen) +static void wl_cfg80211_work_handler(struct work_struct * work) { - s32 ret = BCME_ERROR; struct bcm_cfg80211 *cfg = NULL; - wl_uint32_list_t *list = NULL; - chanspec_t chanspec = 0; + struct net_info *iter, *next; + s32 err = BCME_OK; + s32 pm = PM_FAST; + dhd_pub_t *dhd; + BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, pm_enable_work.work); + WL_DBG(("Enter \n")); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") 
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + /* p2p discovery iface ndev could be null */ + if (iter->ndev) { + if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) || + (wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS && + wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_IBSS)) + continue; + if (iter->ndev) { + dhd = (dhd_pub_t *)(cfg->pub); + if (dhd_conf_get_pm(dhd) >= 0) + pm = dhd_conf_get_pm(dhd); + if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, + &pm, sizeof(pm), true)) != 0) { + if (err == -ENODEV) + WL_DBG(("%s:netdev not ready\n", + iter->ndev->name)); + else + WL_ERR(("%s:error (%d)\n", + iter->ndev->name, err)); + } else + wl_cfg80211_update_power_mode(iter->ndev); + } + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) + _Pragma("GCC diagnostic pop") +#endif + DHD_OS_WAKE_UNLOCK(cfg->pub); +} - memset(buf, 0, buflen); +u8 +wl_get_action_category(void *frame, u32 frame_len) +{ + u8 category; + u8 *ptr = (u8 *)frame; + if (frame == NULL) + return DOT11_ACTION_CAT_ERR_MASK; + if (frame_len < DOT11_ACTION_HDR_LEN) + return DOT11_ACTION_CAT_ERR_MASK; + category = ptr[DOT11_ACTION_CAT_OFF]; + WL_INFORM(("Action Category: %d\n", category)); + return category; +} + +int +wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action) +{ + u8 *ptr = (u8 *)frame; + if (frame == NULL || ret_action == NULL) + return BCME_ERROR; + if (frame_len < DOT11_ACTION_HDR_LEN) + return BCME_ERROR; + if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len)) + return BCME_ERROR; + *ret_action = ptr[DOT11_ACTION_ACT_OFF]; + WL_INFORM(("Public Action : %d\n", *ret_action)); + return BCME_OK; +} - cfg = g_bcm_cfg; - list = (wl_uint32_list_t *)buf; - list->count = htod32(WL_NUMCHANSPECS); - /* Restrict channels to 2.4GHz, 20MHz BW, no SB. 
*/ - chanspec |= (WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 | - WL_CHANSPEC_CTL_SB_NONE); - chanspec = wl_chspec_host_to_driver(chanspec); +static int +wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const struct ether_addr *bssid) +{ + s32 err; + wl_event_msg_t e; - ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec, - sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync); - if (ret < 0) { - WL_ERR(("get 'chanspecs' failed, error = %d\n", ret)); - } + bzero(&e, sizeof(e)); + e.event_type = cpu_to_be32(WLC_E_BSSID); + memcpy(&e.addr, bssid, ETHER_ADDR_LEN); + /* trigger the roam event handler */ + WL_INFORM(("Delayed roam to " MACDBG "\n", MAC2STRDBG((u8*)(bssid)))); + err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL); - return ret; + return err; } static s32 -wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, void *buf, s32 buflen) +wl_cfg80211_parse_vndr_ies(u8 *parse, u32 len, + struct parsed_vndr_ies *vndr_ies) { - u32 channel = 0; - s32 ret = BCME_ERROR; - s32 i = 0; - s32 j = 0; - struct bcm_cfg80211 *cfg = NULL; - wl_uint32_list_t *list = NULL; - chanspec_t chanspec = 0; - - memset(buf, 0, buflen); - - cfg = g_bcm_cfg; - list = (wl_uint32_list_t *)buf; - list->count = htod32(WL_NUMCHANSPECS); - - /* Restrict channels to 5GHz, 20MHz BW, no SB. 
*/ - chanspec |= (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20 | - WL_CHANSPEC_CTL_SB_NONE); - chanspec = wl_chspec_host_to_driver(chanspec); - - ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec, - sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync); - if (ret < 0) { - WL_ERR(("get 'chanspecs' failed, error = %d\n", ret)); - goto done; - } + s32 err = BCME_OK; + vndr_ie_t *vndrie; + bcm_tlv_t *ie; + struct parsed_vndr_ie_info *parsed_info; + u32 count = 0; + s32 remained_len; + + remained_len = (s32)len; + memset(vndr_ies, 0, sizeof(*vndr_ies)); + + WL_INFORM(("---> len %d\n", len)); + ie = (bcm_tlv_t *) parse; + if (!bcm_valid_tlv(ie, remained_len)) + ie = NULL; + while (ie) { + if (count >= MAX_VNDR_IE_NUMBER) + break; + if (ie->id == DOT11_MNG_VS_ID) { + vndrie = (vndr_ie_t *) ie; + /* len should be bigger than OUI length + one data length at least */ + if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) { + WL_ERR(("%s: invalid vndr ie. length is too small %d\n", + __FUNCTION__, vndrie->len)); + goto end; + } + /* if wpa or wme ie, do not add ie */ + if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) && + ((vndrie->data[0] == WPA_OUI_TYPE) || + (vndrie->data[0] == WME_OUI_TYPE))) { + CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n")); + goto end; + } - /* Skip DFS and inavlid P2P channel. 
*/ - for (i = 0, j = 0; i < dtoh32(list->count); i++) { - chanspec = (chanspec_t) dtoh32(list->element[i]); - channel = CHSPEC_CHANNEL(chanspec); + parsed_info = &vndr_ies->ie_info[count++]; - ret = wldev_iovar_getint(ndev, "per_chan_info", &channel); - if (ret < 0) { - WL_ERR(("get 'per_chan_info' failed, error = %d\n", ret)); - goto done; - } + /* save vndr ie information */ + parsed_info->ie_ptr = (char *)vndrie; + parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN); + memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t)); + vndr_ies->count = count; - if (CHANNEL_IS_RADAR(channel) || - !(wl_cfg80211_valid_chanspec_p2p(chanspec))) { - continue; - } else { - list->element[j] = list->element[i]; + WL_DBG(("\t ** OUI %02x %02x %02x, type 0x%02x len:%d\n", + parsed_info->vndrie.oui[0], parsed_info->vndrie.oui[1], + parsed_info->vndrie.oui[2], parsed_info->vndrie.data[0], + parsed_info->ie_len)); } - - j++; +end: + ie = bcm_next_tlv(ie, &remained_len); } - - list->count = j; - -done: - return ret; + return err; } -static s32 -wl_cfg80211_get_best_channel(struct net_device *ndev, void *buf, int buflen, - int *channel) +s32 +wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, s32 bssidx) { - s32 ret = BCME_ERROR; - int chosen = 0; - int retry = 0; - uint chip; - - /* Start auto channel selection scan. */ - ret = wldev_ioctl(ndev, WLC_START_CHANNEL_SEL, buf, buflen, true); - if (ret < 0) { - WL_ERR(("can't start auto channel scan, error = %d\n", ret)); - *channel = 0; - goto done; - } - - /* Wait for auto channel selection, worst case possible delay is 5250ms. 
*/ - retry = CHAN_SEL_RETRY_COUNT; + s32 index; + struct net_info *netinfo; + s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG, + VNDR_IE_ASSOCRSP_FLAG, VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG}; - while (retry--) { - OSL_SLEEP(CHAN_SEL_IOCTL_DELAY); - - ret = wldev_ioctl(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen), - false); - if ((ret == 0) && (dtoh32(chosen) != 0)) { - chip = dhd_conf_get_chip(dhd_get_pub(ndev)); - if (chip != BCM43362_CHIP_ID && chip != BCM4330_CHIP_ID) { - u32 chanspec = 0; - int ctl_chan; - chanspec = wl_chspec_driver_to_host(chosen); - printf("selected chanspec = 0x%x\n", chanspec); - ctl_chan = wf_chspec_ctlchan(chanspec); - printf("selected ctl_chan = 0x%x\n", ctl_chan); - *channel = (u16)(ctl_chan & 0x00FF); - } else - *channel = (u16)(chosen & 0x00FF); - WL_INFORM(("selected channel = %d\n", *channel)); - break; - } - WL_INFORM(("attempt = %d, ret = %d, chosen = %d\n", - (CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen))); + netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx); + if (!netinfo || !netinfo->wdev) { + WL_ERR(("netinfo or netinfo->wdev is NULL\n")); + return -1; } - if (retry <= 0) { - WL_ERR(("failure, auto channel selection timed out\n")); - *channel = 0; - ret = BCME_ERROR; + WL_DBG(("clear management vendor IEs for bssidx:%d \n", bssidx)); + /* Clear the IEs set in the firmware so that host is in sync with firmware */ + for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) { + if (wl_cfg80211_set_mgmt_vndr_ies(cfg, wdev_to_cfgdev(netinfo->wdev), + bssidx, vndrie_flag[index], NULL, 0) < 0) + WL_ERR(("vndr_ies clear failed. Ignoring.. \n")); } -done: - return ret; + return 0; } -static s32 -wl_cfg80211_restore_auto_channel_scan_state(struct net_device *ndev) +s32 +wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg) { - u32 val = 0; - s32 ret = BCME_ERROR; - struct bcm_cfg80211 *cfg = g_bcm_cfg; - - /* Clear scan stop driver status. 
*/ - wl_clr_drv_status(cfg, SCANNING, ndev); - - /* Enable mpc back to 1, irrespective of initial state. */ - val = 1; + struct net_info *iter, *next; - ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val, - sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, - &cfg->ioctl_buf_sync); - if (ret < 0) { - WL_ERR(("set 'mpc' failed, error = %d\n", ret)); + WL_DBG(("clear management vendor IEs \n")); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + wl_cfg80211_clear_per_bss_ies(cfg, iter->bssidx); } - - return ret; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + return 0; } -s32 -wl_cfg80211_get_best_channels(struct net_device *dev, char* cmd, int total_len) +#define WL_VNDR_IE_MAXLEN 2048 +static s8 g_mgmt_ie_buf[WL_VNDR_IE_MAXLEN]; +int +wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + s32 bssidx, s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len) { - int channel = 0, band, band_cur; - s32 ret = BCME_ERROR; - u8 *buf = NULL; - char *pos = cmd; - struct bcm_cfg80211 *cfg = NULL; struct net_device *ndev = NULL; + s32 ret = BCME_OK; + u8 *curr_ie_buf = NULL; + u8 *mgmt_ie_buf = NULL; + u32 mgmt_ie_buf_len = 0; + u32 *mgmt_ie_len = 0; + u32 del_add_ie_buf_len = 0; + u32 total_ie_buf_len = 0; + u32 parsed_ie_buf_len = 0; + struct parsed_vndr_ies old_vndr_ies; + struct parsed_vndr_ies new_vndr_ies; + s32 i; + u8 *ptr; + s32 remained_buf_len; + wl_bss_vndr_ies_t *ies = NULL; + struct net_info *netinfo; - memset(cmd, 0, total_len); - - buf = kmalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL); - if (buf == NULL) { - WL_ERR(("failed to allocate chanspec buffer\n")); - return -ENOMEM; - } + WL_DBG(("Enter. 
pktflag:0x%x bssidx:%x vnd_ie_len:%d \n", + pktflag, bssidx, vndr_ie_len)); - /* - * Always use primary interface, irrespective of interface on which - * command came. - */ - cfg = g_bcm_cfg; - ndev = bcmcfg_to_prmry_ndev(cfg); + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); - /* - * Make sure that FW and driver are in right state to do auto channel - * selection scan. - */ - ret = wl_cfg80211_set_auto_channel_scan_state(ndev); - if (ret < 0) { - WL_ERR(("can't set auto channel scan state, error = %d\n", ret)); - goto done; + if (bssidx > WL_MAX_IFS) { + WL_ERR(("bssidx > supported concurrent Ifaces \n")); + return -EINVAL; } - /* Best channel selection in 2.4GHz band. */ - ret = wl_cfg80211_get_chanspecs_2g(ndev, (void *)buf, CHANSPEC_BUF_SIZE); - if (ret < 0) { - WL_ERR(("can't get chanspecs in 2.4GHz, error = %d\n", ret)); - goto done; + netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx); + if (!netinfo) { + WL_ERR(("net_info ptr is NULL \n")); + return -EINVAL; } - ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE, - &channel); - if (ret < 0) { - WL_ERR(("can't select best channel scan in 2.4GHz, error = %d\n", ret)); - goto done; + /* Clear the global buffer */ + memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf)); + curr_ie_buf = g_mgmt_ie_buf; + ies = &netinfo->bss.ies; + + switch (pktflag) { + case VNDR_IE_PRBRSP_FLAG : + mgmt_ie_buf = ies->probe_res_ie; + mgmt_ie_len = &ies->probe_res_ie_len; + mgmt_ie_buf_len = sizeof(ies->probe_res_ie); + break; + case VNDR_IE_ASSOCRSP_FLAG : + mgmt_ie_buf = ies->assoc_res_ie; + mgmt_ie_len = &ies->assoc_res_ie_len; + mgmt_ie_buf_len = sizeof(ies->assoc_res_ie); + break; + case VNDR_IE_BEACON_FLAG : + mgmt_ie_buf = ies->beacon_ie; + mgmt_ie_len = &ies->beacon_ie_len; + mgmt_ie_buf_len = sizeof(ies->beacon_ie); + break; + case VNDR_IE_PRBREQ_FLAG : + mgmt_ie_buf = ies->probe_req_ie; + mgmt_ie_len = &ies->probe_req_ie_len; + mgmt_ie_buf_len = sizeof(ies->probe_req_ie); + break; + case VNDR_IE_ASSOCREQ_FLAG : + 
mgmt_ie_buf = ies->assoc_req_ie; + mgmt_ie_len = &ies->assoc_req_ie_len; + mgmt_ie_buf_len = sizeof(ies->assoc_req_ie); + break; + default: + mgmt_ie_buf = NULL; + mgmt_ie_len = NULL; + WL_ERR(("not suitable packet type (%d)\n", pktflag)); + return BCME_ERROR; } - if (CHANNEL_IS_2G(channel)) { - channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ); + if (vndr_ie_len > mgmt_ie_buf_len) { + WL_ERR(("extra IE size too big\n")); + ret = -ENOMEM; } else { - WL_ERR(("invalid 2.4GHz channel, channel = %d\n", channel)); - channel = 0; - } + /* parse and save new vndr_ie in curr_ie_buff before comparing it */ + if (vndr_ie && vndr_ie_len && curr_ie_buf) { + ptr = curr_ie_buf; +/* must discard vndr_ie constness, attempt to change vndr_ie arg to non-const + * causes cascade of errors in other places, fix involves const casts there + */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + if ((ret = wl_cfg80211_parse_vndr_ies((u8 *)vndr_ie, + vndr_ie_len, &new_vndr_ies)) < 0) { + WL_ERR(("parse vndr ie failed \n")); + goto exit; + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + for (i = 0; i < new_vndr_ies.count; i++) { + struct parsed_vndr_ie_info *vndrie_info = + &new_vndr_ies.ie_info[i]; + + if ((parsed_ie_buf_len + vndrie_info->ie_len) > WL_VNDR_IE_MAXLEN) { + WL_ERR(("IE size is too big (%d > %d)\n", + parsed_ie_buf_len, WL_VNDR_IE_MAXLEN)); + ret = -EINVAL; + goto exit; + } - sprintf(pos, "%04d ", channel); - pos += 5; + memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr, + vndrie_info->ie_len); + parsed_ie_buf_len += vndrie_info->ie_len; + } + } - // terence 20140120: fix for some chipsets only return 2.4GHz channel (4330b2/43341b0/4339a0) - ret = wldev_ioctl(dev, WLC_GET_BAND, 
&band_cur, sizeof(band_cur), false); - band = band_cur==WLC_BAND_2G ? band_cur : WLC_BAND_5G; - ret = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), true); - if (ret < 0) - WL_ERR(("WLC_SET_BAND error %d\n", ret)); + if (mgmt_ie_buf != NULL) { + if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) && + (memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) { + WL_INFORM(("Previous mgmt IE is equals to current IE")); + goto exit; + } - /* Best channel selection in 5GHz band. */ - ret = wl_cfg80211_get_chanspecs_5g(ndev, (void *)buf, CHANSPEC_BUF_SIZE); - if (ret < 0) { - WL_ERR(("can't get chanspecs in 5GHz, error = %d\n", ret)); - goto done; - } + /* parse old vndr_ie */ + if ((ret = wl_cfg80211_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len, + &old_vndr_ies)) < 0) { + WL_ERR(("parse vndr ie failed \n")); + goto exit; + } + /* make a command to delete old ie */ + for (i = 0; i < old_vndr_ies.count; i++) { + struct parsed_vndr_ie_info *vndrie_info = + &old_vndr_ies.ie_info[i]; + + WL_INFORM(("DELETED ID : %d, Len: %d , OUI:%02x:%02x:%02x\n", + vndrie_info->vndrie.id, vndrie_info->vndrie.len, + vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1], + vndrie_info->vndrie.oui[2])); + + del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf, + pktflag, vndrie_info->vndrie.oui, + vndrie_info->vndrie.id, + vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN, + vndrie_info->ie_len - VNDR_IE_FIXED_LEN, + "del"); + + curr_ie_buf += del_add_ie_buf_len; + total_ie_buf_len += del_add_ie_buf_len; + } + } - ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE, - &channel); - if (ret < 0) { - WL_ERR(("can't select best channel scan in 5GHz, error = %d\n", ret)); - goto done; + *mgmt_ie_len = 0; + /* Add if there is any extra IE */ + if (mgmt_ie_buf && parsed_ie_buf_len) { + ptr = mgmt_ie_buf; + + remained_buf_len = mgmt_ie_buf_len; + + /* make a command to add new ie */ + for (i = 0; i < new_vndr_ies.count; i++) { + struct parsed_vndr_ie_info *vndrie_info = + 
&new_vndr_ies.ie_info[i]; + + WL_INFORM(("ADDED ID : %d, Len: %d(%d), OUI:%02x:%02x:%02x\n", + vndrie_info->vndrie.id, vndrie_info->vndrie.len, + vndrie_info->ie_len - 2, + vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1], + vndrie_info->vndrie.oui[2])); + + del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf, + pktflag, vndrie_info->vndrie.oui, + vndrie_info->vndrie.id, + vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN, + vndrie_info->ie_len - VNDR_IE_FIXED_LEN, + "add"); + + /* verify remained buf size before copy data */ + if (remained_buf_len >= vndrie_info->ie_len) { + remained_buf_len -= vndrie_info->ie_len; + } else { + WL_ERR(("no space in mgmt_ie_buf: pktflag = %d, " + "found vndr ies # = %d(cur %d), remained len %d, " + "cur mgmt_ie_len %d, new ie len = %d\n", + pktflag, new_vndr_ies.count, i, remained_buf_len, + *mgmt_ie_len, vndrie_info->ie_len)); + break; + } + + /* save the parsed IE in cfg struct */ + memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr, + vndrie_info->ie_len); + *mgmt_ie_len += vndrie_info->ie_len; + curr_ie_buf += del_add_ie_buf_len; + total_ie_buf_len += del_add_ie_buf_len; + } + } + + if (total_ie_buf_len && cfg->ioctl_buf != NULL) { + ret = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf, + total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, + bssidx, &cfg->ioctl_buf_sync); + if (ret) + WL_ERR(("vndr ie set error : %d\n", ret)); + } } +exit: - if (CHANNEL_IS_5G(channel)) { - channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ); - } else { - WL_ERR(("invalid 5GHz channel, channel = %d\n", channel)); - channel = 0; +return ret; +} + +#ifdef WL_CFG80211_ACL +static int +wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev, + const struct cfg80211_acl_data *acl) +{ + int i; + int ret = 0; + int macnum = 0; + int macmode = MACLIST_MODE_DISABLED; + struct maclist *list; + + /* get the MAC filter mode */ + if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) { + macmode = 
MACLIST_MODE_ALLOW; + } else if (acl && acl->acl_policy == NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED && + acl->n_acl_entries) { + macmode = MACLIST_MODE_DENY; } - ret = wldev_ioctl(dev, WLC_SET_BAND, &band_cur, sizeof(band_cur), true); - if (ret < 0) - WL_ERR(("WLC_SET_BAND error %d\n", ret)); + /* if acl == NULL, macmode is still disabled.. */ + if (macmode == MACLIST_MODE_DISABLED) { + if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0) + WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret)); - sprintf(pos, "%04d ", channel); - pos += 5; + return ret; + } - /* Set overall best channel same as 5GHz best channel. */ - sprintf(pos, "%04d ", channel); - pos += 5; + macnum = acl->n_acl_entries; + if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) { + WL_ERR(("%s : invalid number of MAC address entries %d\n", + __FUNCTION__, macnum)); + return -1; + } -done: - if (NULL != buf) { - kfree(buf); + /* allocate memory for the MAC list */ + list = (struct maclist*)kmalloc(sizeof(int) + + sizeof(struct ether_addr) * macnum, GFP_KERNEL); + if (!list) { + WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__)); + return -1; } - /* Restore FW and driver back to normal state. 
*/ - ret = wl_cfg80211_restore_auto_channel_scan_state(ndev); - if (ret < 0) { - WL_ERR(("can't restore auto channel scan state, error = %d\n", ret)); + /* prepare the MAC list */ + list->count = htod32(macnum); + for (i = 0; i < macnum; i++) { + memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN); } + /* set the list */ + if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0) + WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret)); - printf("%s: channel %s\n", __FUNCTION__, cmd); + kfree(list); - return (pos - cmd); + return ret; } -#endif /* WL_SUPPORT_AUTO_CHANNEL */ +#endif /* WL_CFG80211_ACL */ -static const struct rfkill_ops wl_rfkill_ops = { - .set_block = wl_rfkill_set -}; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) +int wl_chspec_chandef(chanspec_t chanspec, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + struct cfg80211_chan_def *chandef, +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0))) + struct chan_info *chaninfo, +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) */ +struct wiphy *wiphy) -static int wl_rfkill_set(void *data, bool blocked) { - struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data; - - WL_DBG(("Enter \n")); - WL_DBG(("RF %s\n", blocked ? 
"blocked" : "unblocked")); - - if (!cfg) - return -EINVAL; - - cfg->rf_blocked = blocked; + uint16 freq = 0; + int chan_type = 0; + int channel = 0; + struct ieee80211_channel *chan; - return 0; -} + if (!chandef) { + return -1; + } + channel = CHSPEC_CHANNEL(chanspec); -static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup) -{ - s32 err = 0; + switch (CHSPEC_BW(chanspec)) { + case WL_CHANSPEC_BW_20: + chan_type = NL80211_CHAN_HT20; + break; + case WL_CHANSPEC_BW_40: + { + if (CHSPEC_SB_UPPER(chanspec)) { + channel += CH_10MHZ_APART; + } else { + channel -= CH_10MHZ_APART; + } + } + chan_type = NL80211_CHAN_HT40PLUS; + break; - WL_DBG(("Enter \n")); - if (!cfg) - return -EINVAL; - if (setup) { - cfg->rfkill = rfkill_alloc("brcmfmac-wifi", - wl_cfg80211_get_parent_dev(), - RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) + case WL_CHANSPEC_BW_80: + case WL_CHANSPEC_BW_8080: + { + uint16 sb = CHSPEC_CTL_SB(chanspec); + + if (sb == WL_CHANSPEC_CTL_SB_LL) { + channel -= (CH_10MHZ_APART + CH_20MHZ_APART); + } else if (sb == WL_CHANSPEC_CTL_SB_LU) { + channel -= CH_10MHZ_APART; + } else if (sb == WL_CHANSPEC_CTL_SB_UL) { + channel += CH_10MHZ_APART; + } else { + /* WL_CHANSPEC_CTL_SB_UU */ + channel += (CH_10MHZ_APART + CH_20MHZ_APART); + } - if (!cfg->rfkill) { - err = -ENOMEM; - goto err_out; + if (sb == WL_CHANSPEC_CTL_SB_LL || sb == WL_CHANSPEC_CTL_SB_LU) + chan_type = NL80211_CHAN_HT40MINUS; + else if (sb == WL_CHANSPEC_CTL_SB_UL || sb == WL_CHANSPEC_CTL_SB_UU) + chan_type = NL80211_CHAN_HT40PLUS; } + break; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */ + default: + chan_type = NL80211_CHAN_HT20; + break; - err = rfkill_register(cfg->rfkill); + } - if (err) - rfkill_destroy(cfg->rfkill); - } else { - if (!cfg->rfkill) { - err = -ENOMEM; - goto err_out; - } + if (CHSPEC_IS5G(chanspec)) + freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_5GHZ); + else + freq = 
ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ); - rfkill_unregister(cfg->rfkill); - rfkill_destroy(cfg->rfkill); + chan = ieee80211_get_channel(wiphy, freq); + WL_DBG(("channel:%d freq:%d chan_type: %d chan_ptr:%p \n", + channel, freq, chan_type, chan)); + + if (unlikely(!chan)) { + /* fw and cfg80211 channel lists are not in sync */ + WL_ERR(("Couldn't find matching channel in wiphy channel list \n")); + ASSERT(0); + return -EINVAL; } -err_out: - return err; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) + cfg80211_chandef_create(chandef, chan, chan_type); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, \ + \ + \ + \ + 0))) + chaninfo->freq = freq; + chaninfo->chan_type = chan_type; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */ + return 0; } -#ifdef DEBUGFS_CFG80211 -/** -* Format : echo "SCAN:1 DBG:1" > /sys/kernel/debug/dhd/debug_level -* to turn on SCAN and DBG log. -* To turn off SCAN partially, echo "SCAN:0" > /sys/kernel/debug/dhd/debug_level -* To see current setting of debug level, -* cat /sys/kernel/debug/dhd/debug_level -*/ -static ssize_t -wl_debuglevel_write(struct file *file, const char __user *userbuf, - size_t count, loff_t *ppos) +void +wl_cfg80211_ch_switch_notify(struct net_device *dev, uint16 chanspec, struct wiphy *wiphy) { - char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)], sublog[S_SUBLOGLEVEL]; - char *params, *token, *colon; - uint i, tokens, log_on = 0; - memset(tbuf, 0, sizeof(tbuf)); - memset(sublog, 0, sizeof(sublog)); - if (copy_from_user(&tbuf, userbuf, min_t(size_t, (sizeof(tbuf) - 1), count))) - return -EFAULT; - - params = &tbuf[0]; - colon = strchr(params, '\n'); - if (colon != NULL) - *colon = '\0'; - while ((token = strsep(¶ms, " ")) != NULL) { - memset(sublog, 0, sizeof(sublog)); - if (token == NULL || !*token) - break; - if (*token == '\0') - continue; - colon = strchr(token, ':'); - if (colon != NULL) { - *colon = ' '; - } - tokens = sscanf(token, 
"%s %u", sublog, &log_on); - if (colon != NULL) - *colon = ':'; + u32 freq; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) + struct cfg80211_chan_def chandef; +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, \ + \ + \ + \ + 0))) + struct chan_info chaninfo; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */ + + if (!wiphy) { + printf("wiphy is null\n"); + return; + } +#ifndef ALLOW_CHSW_EVT + /* Channel switch support is only for AP/GO/ADHOC/MESH */ + if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION || + dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) { + WL_ERR(("No channel switch notify support for STA/GC\n")); + return; + } +#endif /* !ALLOW_CHSW_EVT */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) + if (wl_chspec_chandef(chanspec, &chandef, wiphy)) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0))) + if (wl_chspec_chandef(chanspec, &chaninfo, wiphy)) +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */ + { + WL_ERR(("%s:chspec_chandef failed\n", __FUNCTION__)); + return; + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) + freq = chandef.chan ? 
chandef.chan->center_freq : chandef.center_freq1; + cfg80211_ch_switch_notify(dev, &chandef); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0))) + freq = chan_info.freq; + cfg80211_ch_switch_notify(dev, chan_info.freq, chan_info.chan_type); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */ - if (tokens == 2) { - for (i = 0; i < ARRAYSIZE(sublogname_map); i++) { - if (!strncmp(sublog, sublogname_map[i].sublogname, - strlen(sublogname_map[i].sublogname))) { - if (log_on) - wl_dbg_level |= - (sublogname_map[i].log_level); - else - wl_dbg_level &= - ~(sublogname_map[i].log_level); - } - } - } else - WL_ERR(("%s: can't parse '%s' as a " - "SUBMODULE:LEVEL (%d tokens)\n", - tbuf, token, tokens)); + WL_ERR(("Channel switch notification for freq: %d chanspec: 0x%x\n", freq, chanspec)); + return; +} +#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */ +#ifdef WL11ULB +s32 +wl_cfg80211_set_ulb_mode(struct net_device *dev, int mode) +{ + int ret; + int cur_mode; + ret = wldev_iovar_getint(dev, "ulb_mode", &cur_mode); + if (unlikely(ret)) { + WL_ERR(("[ULB] ulb_mode get failed. ret:%d \n", ret)); + return ret; } - return count; -} -static ssize_t -wl_debuglevel_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - char *param; - char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)]; - uint i; - memset(tbuf, 0, sizeof(tbuf)); - param = &tbuf[0]; - for (i = 0; i < ARRAYSIZE(sublogname_map); i++) { - param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ", - sublogname_map[i].sublogname, - (wl_dbg_level & sublogname_map[i].log_level) ? 1 : 0); + if (cur_mode == mode) { + /* If request mode is same as that of the current mode, then + * do nothing (Avoid unnecessary wl down and up). + */ + WL_INFORM(("[ULB] No change in ulb_mode. 
Do nothing.\n")); + return 0; } - *param = '\n'; - return simple_read_from_buffer(user_buf, count, ppos, tbuf, strlen(&tbuf[0])); -} -static const struct file_operations fops_debuglevel = { - .open = NULL, - .write = wl_debuglevel_write, - .read = wl_debuglevel_read, - .owner = THIS_MODULE, - .llseek = NULL, -}; + /* setting of ulb_mode requires wl to be down */ + ret = wldev_ioctl(dev, WLC_DOWN, NULL, 0, true); + if (unlikely(ret)) { + WL_ERR(("[ULB] WLC_DOWN command failed:[%d]\n", ret)); + return ret; + } -static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg) -{ - s32 err = 0; - struct dentry *_dentry; - if (!cfg) + if (mode >= MAX_SUPP_ULB_MODES) { + WL_ERR(("[ULB] unsupported ulb_mode :[%d]\n", mode)); return -EINVAL; - cfg->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (!cfg->debugfs || IS_ERR(cfg->debugfs)) { - if (cfg->debugfs == ERR_PTR(-ENODEV)) - WL_ERR(("Debugfs is not enabled on this kernel\n")); - else - WL_ERR(("Can not create debugfs directory\n")); - cfg->debugfs = NULL; - goto exit; + } + ret = wldev_iovar_setint(dev, "ulb_mode", mode); + if (unlikely(ret)) { + WL_ERR(("[ULB] ulb_mode set failed. 
ret:%d \n", ret)); + return ret; } - _dentry = debugfs_create_file("debug_level", S_IRUSR | S_IWUSR, - cfg->debugfs, cfg, &fops_debuglevel); - if (!_dentry || IS_ERR(_dentry)) { - WL_ERR(("failed to create debug_level debug file\n")); - wl_free_debugfs(cfg); + + ret = wldev_ioctl(dev, WLC_UP, NULL, 0, true); + if (unlikely(ret)) { + WL_ERR(("[ULB] WLC_DOWN command failed:[%d]\n", ret)); + return ret; } -exit: - return err; + + WL_DBG(("[ULB] ulb_mode set to %d successfully \n", mode)); + + return ret; } -static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg) -{ - if (!cfg) + +static s32 +wl_cfg80211_ulbbw_to_ulbchspec(u32 bw) +{ + if (bw == ULB_BW_DISABLED) { + return WL_CHANSPEC_BW_20; + } else if (bw == ULB_BW_10MHZ) { + return WL_CHANSPEC_BW_10; + } else if (bw == ULB_BW_5MHZ) { + return WL_CHANSPEC_BW_5; + } else if (bw == ULB_BW_2P5MHZ) { + return WL_CHANSPEC_BW_2P5; + } else { + WL_ERR(("[ULB] unsupported value for ulb_bw \n")); return -EINVAL; - if (cfg->debugfs) - debugfs_remove_recursive(cfg->debugfs); - cfg->debugfs = NULL; - return 0; + } } -#endif /* DEBUGFS_CFG80211 */ -struct device *wl_cfg80211_get_parent_dev(void) +static chanspec_t +wl_cfg80211_ulb_get_min_bw_chspec(struct wireless_dev *wdev, s32 bssidx) { - return cfg80211_parent_dev; -} + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *_netinfo; + + /* + * Return the chspec value corresponding to the + * BW setting for a particular interface + */ + if (wdev) { + /* if wdev is provided, use it */ + _netinfo = wl_get_netinfo_by_wdev(cfg, wdev); + } else if (bssidx >= 0) { + /* if wdev is not provided, use it */ + _netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx); + } else { + WL_ERR(("[ULB] wdev/bssidx not provided\n")); + return INVCHANSPEC; + } + + if (unlikely(!_netinfo)) { + WL_ERR(("[ULB] net_info is null \n")); + return INVCHANSPEC; + } -void wl_cfg80211_set_parent_dev(void *dev) -{ - cfg80211_parent_dev = dev; + if (_netinfo->ulb_bw) { + WL_DBG(("[ULB] wdev_ptr:%p ulb_bw:0x%x \n", 
_netinfo->wdev, _netinfo->ulb_bw)); + return wl_cfg80211_ulbbw_to_ulbchspec(_netinfo->ulb_bw); + } else { + return WL_CHANSPEC_BW_20; + } } -static void wl_cfg80211_clear_parent_dev(void) +static s32 +wl_cfg80211_get_ulb_bw(struct wireless_dev *wdev) { - cfg80211_parent_dev = NULL; -} + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *_netinfo = wl_get_netinfo_by_wdev(cfg, wdev); -void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac) -{ - wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr", NULL, - 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); - memcpy(mac->octet, cfg->ioctl_buf, ETHER_ADDR_LEN); -} -static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role) -{ - dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); - if (((dev_role == NL80211_IFTYPE_AP) && - !(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) || - ((dev_role == NL80211_IFTYPE_P2P_GO) && - !(dhd->op_mode & DHD_FLAG_P2P_GO_MODE))) - { - WL_ERR(("device role select failed\n")); - return false; + /* + * Return the ulb_bw setting for a + * particular interface + */ + if (unlikely(!_netinfo)) { + WL_ERR(("[ULB] net_info is null \n")); + return -1; } - return true; + + return _netinfo->ulb_bw; } -int wl_cfg80211_do_driver_init(struct net_device *net) +s32 +wl_cfg80211_set_ulb_bw(struct net_device *dev, + u32 ulb_bw, char *ifname) { - struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net); + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int ret; + int mode; + struct net_info *_netinfo = NULL, *iter, *next; + u32 bssidx; + enum nl80211_iftype iftype; - if (!cfg || !cfg->wdev) + if (!ifname) return -EINVAL; -#if !defined(P2PONEINT) - if (dhd_do_driver_init(cfg->wdev->netdev) < 0) - return -1; -#endif /* BCMDONGLEHOST */ + WL_DBG(("[ULB] Enter. 
bw_type:%d \n", ulb_bw)); - return 0; + ret = wldev_iovar_getint(dev, "ulb_mode", &mode); + if (unlikely(ret)) { + WL_ERR(("[ULB] ulb_mode not supported \n")); + return ret; + } + + if (mode != ULB_MODE_STD_ALONE_MODE) { + WL_ERR(("[ULB] ulb bw modification allowed only in stand-alone mode\n")); + return -EINVAL; + } + + if (ulb_bw >= MAX_SUPP_ULB_BW) { + WL_ERR(("[ULB] unsupported value (%d) for ulb_bw \n", ulb_bw)); + return -EINVAL; + } + +#ifdef WL_CFG80211_P2P_DEV_IF + if (strcmp(ifname, "p2p-dev-wlan0") == 0) { + iftype = NL80211_IFTYPE_P2P_DEVICE; + /* Use wdev corresponding to the dedicated p2p discovery interface */ + if (likely(cfg->p2p_wdev)) { + _netinfo = wl_get_netinfo_by_wdev(cfg, cfg->p2p_wdev); + } else { + return -ENODEV; + } + } +#endif /* WL_CFG80211_P2P_DEV_IF */ + if (!_netinfo) { + for_each_ndev(cfg, iter, next) { + if (iter->ndev) { + if (strncmp(iter->ndev->name, ifname, strlen(ifname)) == 0) { + _netinfo = wl_get_netinfo_by_netdev(cfg, iter->ndev); + iftype = NL80211_IFTYPE_STATION; + } + } + } + } + + if (!_netinfo) + return -ENODEV; + bssidx = _netinfo->bssidx; + _netinfo->ulb_bw = ulb_bw; + + + WL_DBG(("[ULB] Applying ulb_bw:%d for bssidx:%d \n", ulb_bw, bssidx)); + ret = wldev_iovar_setbuf_bsscfg(dev, "ulb_bw", (void *)&ulb_bw, 4, + cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, + &cfg->ioctl_buf_sync); + if (unlikely(ret)) { + WL_ERR(("[ULB] ulb_bw set failed. 
ret:%d \n", ret)); + return ret; + } + + return ret; } +#endif /* WL11ULB */ -void wl_cfg80211_enable_trace(u32 level) +static void +wl_ap_channel_ind(struct bcm_cfg80211 *cfg, + struct net_device *ndev, + chanspec_t chanspec) { - wl_dbg_level = level; - printf("%s: wl_dbg_level = 0x%x\n", __FUNCTION__, wl_dbg_level); + u32 channel = LCHSPEC_CHANNEL(chanspec); + + WL_DBG(("(%s) AP channel:%d chspec:0x%x \n", + ndev->name, channel, chanspec)); + if (cfg->ap_oper_channel && (cfg->ap_oper_channel != channel)) { + /* + * If cached channel is different from the channel indicated + * by the event, notify user space about the channel switch. + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg)); +#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */ + cfg->ap_oper_channel = channel; + } } -#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \ - 2, 0)) static s32 -wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, - bcm_struct_cfgdev *cfgdev, u64 cookie) +wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, +const wl_event_msg_t *e, void *data) { - /* CFG80211 checks for tx_cancel_wait callback when ATTR_DURATION - * is passed with CMD_FRAME. This callback is supposed to cancel - * the OFFCHANNEL Wait. Since we are already taking care of that - * with the tx_mgmt logic, do nothing here. 
- */ + struct net_device *ndev = NULL; + chanspec_t chanspec; + u32 channel; - return 0; -} -#endif /* WL_SUPPORT_BACKPORTED_PATCHES || KERNEL >= 3.2.0 */ + WL_DBG(("Enter\n")); + if (unlikely(e->status)) { + WL_ERR(("status:0x%x \n", e->status)); + return -1; + } -#ifdef WL11U -bcm_tlv_t * -wl_cfg80211_find_interworking_ie(u8 *parse, u32 len) -{ - bcm_tlv_t *ie; + if (!data) { + return -EINVAL; + } - while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_INTERWORKING_ID))) { - return (bcm_tlv_t *)ie; + if (likely(cfgdev)) { + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + chanspec = *((chanspec_t *)data); + channel = LCHSPEC_CHANNEL(chanspec); + + if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) { + /* For AP/GO role */ + wl_ap_channel_ind(cfg, ndev, chanspec); + } } - return NULL; -} + return 0; +} static s32 -wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag, - uint8 ie_id, uint8 *data, uint8 data_len) +wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, +const wl_event_msg_t *e, void *data) { - s32 err = BCME_OK; - s32 buf_len; - s32 iecount; - ie_setbuf_t *ie_setbuf; - - if (ie_id != DOT11_MNG_INTERWORKING_ID) - return BCME_UNSUPPORTED; + int error = 0; + u32 chanspec = 0; + struct net_device *ndev = NULL; + struct wiphy *wiphy = NULL; - /* Validate the pktflag parameter */ - if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG | - VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG | - VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG| - VNDR_IE_CUSTOM_FLAG))) { - WL_ERR(("cfg80211 Add IE: Invalid packet flag 0x%x\n", pktflag)); + WL_DBG(("Enter\n")); + if (unlikely(e->status)) { + WL_ERR(("status:0x%x \n", e->status)); return -1; } - /* use VNDR_IE_CUSTOM_FLAG flags for none vendor IE . 
currently fixed value */ - pktflag = htod32(pktflag); - - buf_len = sizeof(ie_setbuf_t) + data_len - 1; - ie_setbuf = (ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL); + if (likely(cfgdev)) { + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + wiphy = bcmcfg_to_wiphy(cfg); + error = wldev_iovar_getint(ndev, "chanspec", &chanspec); + if (unlikely(error)) { + WL_ERR(("Get chanspec error: %d \n", error)); + return -1; + } - if (!ie_setbuf) { - WL_ERR(("Error allocating buffer for IE\n")); - return -ENOMEM; - } + if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) { + /* For AP/GO role */ + wl_ap_channel_ind(cfg, ndev, chanspec); + } else { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + wl_cfg80211_ch_switch_notify(ndev, chanspec, wiphy); +#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */ + } - if (cfg->iw_ie_len == data_len && !memcmp(cfg->iw_ie, data, data_len)) { - WL_ERR(("Previous IW IE is equals to current IE\n")); - err = BCME_OK; - goto exit; } - strncpy(ie_setbuf->cmd, "add", VNDR_IE_CMD_LEN - 1); - ie_setbuf->cmd[VNDR_IE_CMD_LEN - 1] = '\0'; + return 0; +} - /* Buffer contains only 1 IE */ - iecount = htod32(1); - memcpy((void *)&ie_setbuf->ie_buffer.iecount, &iecount, sizeof(int)); - memcpy((void *)&ie_setbuf->ie_buffer.ie_list[0].pktflag, &pktflag, sizeof(uint32)); +#ifdef WL_NAN +int +wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd, int cmd_len) +{ + return wl_cfgnan_cmd_handler(ndev, g_bcm_cfg, cmd, cmd_len); +} +#endif /* WL_NAN */ - /* Now, add the IE to the buffer */ - ie_setbuf->ie_buffer.ie_list[0].ie_data.id = ie_id; +void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg) +{ + struct net_device *dev = bcmcfg_to_prmry_ndev(cfg); + int err; - /* if already set with previous values, delete it first */ - if (cfg->iw_ie_len != 0) { - WL_DBG(("Different IW_IE was already set. 
clear first\n")); + /* Clear the security settings on the primary Interface */ + err = wldev_iovar_setint(dev, "wsec", 0); + if (unlikely(err)) { + WL_ERR(("wsec clear failed \n")); + } + err = wldev_iovar_setint(dev, "auth", 0); + if (unlikely(err)) { + WL_ERR(("auth clear failed \n")); + } + err = wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED); + if (unlikely(err)) { + WL_ERR(("wpa_auth clear failed \n")); + } +} - ie_setbuf->ie_buffer.ie_list[0].ie_data.len = 0; +#ifdef WL_CFG80211_P2P_DEV_IF +void wl_cfg80211_del_p2p_wdev(void) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wireless_dev *wdev = NULL; - err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len, - cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + WL_DBG(("Enter \n")); + if (!cfg) { + WL_ERR(("Invalid Ptr\n")); + return; + } else { + wdev = cfg->p2p_wdev; + } - if (err != BCME_OK) - goto exit; + if (wdev && cfg->down_disc_if) { + wl_cfgp2p_del_p2p_disc_if(wdev, cfg); + cfg->down_disc_if = FALSE; } +} +#endif /* WL_CFG80211_P2P_DEV_IF */ - ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len; - memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len); +#if defined(WL_SUPPORT_AUTO_CHANNEL) +int +wl_cfg80211_set_spect(struct net_device *dev, int spect) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int down = 1; + int up = 1; + int err = BCME_OK; - err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len, - cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + if (!wl_get_drv_status_all(cfg, CONNECTED)) { + err = wldev_ioctl(dev, WLC_DOWN, &down, sizeof(down), true); + if (err) { + WL_ERR(("%s: WLC_DOWN failed: code: %d\n", __func__, err)); + return err; + } - if (err == BCME_OK) { - memcpy(cfg->iw_ie, data, data_len); - cfg->iw_ie_len = data_len; - cfg->wl11u = TRUE; + err = wldev_ioctl(dev, WLC_SET_SPECT_MANAGMENT, &spect, sizeof(spect), true); + if (err) { + WL_ERR(("%s: error setting spect: code: %d\n", __func__, 
err)); + return err; + } - err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx); + err = wldev_ioctl(dev, WLC_UP, &up, sizeof(up), true); + if (err) { + WL_ERR(("%s: WLC_UP failed: code: %d\n", __func__, err)); + return err; + } } - -exit: - if (ie_setbuf) - kfree(ie_setbuf); return err; } -#endif /* WL11U */ -#ifdef WL_HOST_BAND_MGMT -s32 -wl_cfg80211_set_band(struct net_device *ndev, int band) +int +wl_cfg80211_get_sta_channel(void) { - struct bcm_cfg80211 *cfg = g_bcm_cfg; - int ret = 0; - s32 roam_off; - char ioctl_buf[50]; + struct net_device *ndev = bcmcfg_to_prmry_ndev(g_bcm_cfg); + int channel = 0; - if ((band < WLC_BAND_AUTO) || (band > WLC_BAND_2G)) { - WL_ERR(("Invalid band\n")); - return -EINVAL; + if (wl_get_drv_status(g_bcm_cfg, CONNECTED, ndev)) { + channel = g_bcm_cfg->channel; } - - if ((ret = wldev_iovar_getint(ndev, "roam_off", &roam_off)) < 0) { - WL_ERR(("geting roam_off failed code=%d\n", ret)); - return ret; - } else if (roam_off == 1) { - WL_DBG(("Roaming off, no need to set roam_band\n")); - cfg->curr_band = band; + return channel; +} +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +#ifdef P2P_LISTEN_OFFLOADING +s32 +wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg) +{ + s32 bssidx; + int ret = 0; + int p2plo_pause = 0; + if (!cfg || !cfg->p2p) { + WL_ERR(("Wl %p or cfg->p2p %p is null\n", + cfg, cfg ? 
cfg->p2p : 0)); return 0; } - if ((ret = wldev_iovar_setbuf(ndev, "roam_band", &band, - sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) { - WL_ERR(("seting roam_band failed code=%d\n", ret)); - return ret; + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), + "p2po_stop", (void*)&p2plo_pause, sizeof(p2plo_pause), + cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL); + if (ret < 0) { + WL_ERR(("p2po_stop Failed :%d\n", ret)); } - WL_DBG(("Setting band to %d\n", band)); - cfg->curr_band = band; - - return 0; + return ret; } -#endif /* WL_HOST_BAND_MGMT */ - -#if defined(DHCP_SCAN_SUPPRESS) -static void wl_cfg80211_scan_supp_timerfunc(ulong data) +s32 +wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len) { - struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + wl_p2plo_listen_t p2plo_listen; + int ret = -EAGAIN; + int channel = 0; + int period = 0; + int interval = 0; + int count = 0; - WL_DBG(("Enter \n")); - schedule_work(&cfg->wlan_work); -} + if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) { + WL_ERR(("Sending Action Frames. 
Try it again.\n")); + goto exit; + } -int wl_cfg80211_scan_suppress(struct net_device *dev, int suppress) -{ - int ret = 0; - struct wireless_dev *wdev; - struct bcm_cfg80211 *cfg; - if (!dev || ((suppress != 0) && (suppress != 1))) { - ret = -EINVAL; + if (wl_get_drv_status_all(cfg, SCANNING)) { + WL_ERR(("Scanning already\n")); goto exit; } - wdev = ndev_to_wdev(dev); - if (!wdev) { - ret = -EINVAL; + + if (wl_get_drv_status(cfg, SCAN_ABORTING, dev)) { + WL_ERR(("Scanning being aborted\n")); goto exit; } - cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy); - if (!cfg) { - ret = -EINVAL; + + if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { + WL_ERR(("p2p listen offloading already running\n")); goto exit; } - if (suppress == cfg->scan_suppressed) { - WL_DBG(("No change in scan_suppress state. Ignoring cmd..\n")); - return 0; + /* Just in case if it is not enabled */ + if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) { + WL_ERR(("cfgp2p_enable discovery failed")); + goto exit; } - if (timer_pending(&cfg->scan_supp_timer)) - del_timer_sync(&cfg->scan_supp_timer); + bzero(&p2plo_listen, sizeof(wl_p2plo_listen_t)); + + if (len) { + sscanf(buf, " %10d %10d %10d %10d", &channel, &period, &interval, &count); + if ((channel == 0) || (period == 0) || + (interval == 0) || (count == 0)) { + WL_ERR(("Wrong argument %d/%d/%d/%d \n", + channel, period, interval, count)); + ret = -EAGAIN; + goto exit; + } + p2plo_listen.period = period; + p2plo_listen.interval = interval; + p2plo_listen.count = count; - if ((ret = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS, - &suppress, sizeof(int), true)) < 0) { - WL_ERR(("Scan suppress setting failed ret:%d \n", ret)); + WL_ERR(("channel:%d period:%d, interval:%d count:%d\n", + channel, period, interval, count)); } else { - WL_DBG(("Scan suppress %s \n", suppress ? 
"Enabled" : "Disabled")); - cfg->scan_suppressed = suppress; + WL_ERR(("Argument len is wrong.\n")); + ret = -EAGAIN; + goto exit; } - /* If scan_suppress is set, Start a timer to monitor it (just incase) */ - if (cfg->scan_suppressed) { - if (ret) { - WL_ERR(("Retry scan_suppress reset at a later time \n")); - mod_timer(&cfg->scan_supp_timer, - jiffies + msecs_to_jiffies(WL_SCAN_SUPPRESS_RETRY)); - } else { - WL_DBG(("Start wlan_timer to clear of scan_suppress \n")); - mod_timer(&cfg->scan_supp_timer, - jiffies + msecs_to_jiffies(WL_SCAN_SUPPRESS_TIMEOUT)); - } + if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel, + sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN, + bssidx, &cfg->ioctl_buf_sync)) < 0) { + WL_ERR(("p2po_listen_channel Failed :%d\n", ret)); + goto exit; } -exit: + + if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&p2plo_listen, + sizeof(wl_p2plo_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN, + bssidx, &cfg->ioctl_buf_sync)) < 0) { + WL_ERR(("p2po_listen Failed :%d\n", ret)); + goto exit; + } + + wl_set_p2p_status(cfg, DISC_IN_PROGRESS); + cfg->last_roc_id = P2PO_COOKIE; +exit : return ret; } -#endif /* DHCP_SCAN_SUPPRESS */ - -int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev) +s32 +wl_cfg80211_p2plo_listen_stop(struct net_device *dev) { - struct bcm_cfg80211 *cfg = NULL; - struct net_device *ndev = NULL; - unsigned long flags; - int clear_flag = 0; - int ret = 0; - - WL_TRACE(("Enter\n")); - - cfg = g_bcm_cfg; - if (!cfg) - return -EINVAL; - - ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + struct bcm_cfg80211 *cfg = g_bcm_cfg; + s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + int ret = -EAGAIN; - spin_lock_irqsave(&cfg->cfgdrv_lock, flags); -#ifdef WL_CFG80211_P2P_DEV_IF - if (cfg->scan_request && cfg->scan_request->wdev == cfgdev) -#else - if (cfg->scan_request && cfg->scan_request->dev == cfgdev) -#endif - { - cfg80211_scan_done(cfg->scan_request, true); - cfg->scan_request = NULL; - 
clear_flag = 1; + if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", NULL, + 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, + bssidx, &cfg->ioctl_buf_sync)) < 0) { + WL_ERR(("p2po_stop Failed :%d\n", ret)); + goto exit; } - spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); - - if (clear_flag) - wl_clr_drv_status(cfg, SCANNING, ndev); +exit: return ret; } - -bool wl_cfg80211_is_vsdb_mode(void) +#endif /* P2P_LISTEN_OFFLOADING */ +u64 +wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg) { - return (g_bcm_cfg && g_bcm_cfg->vsdb_mode); + u64 id = 0; + id = ++cfg->last_roc_id; +#ifdef P2P_LISTEN_OFFLOADING + if (id == P2PO_COOKIE) { + id = ++cfg->last_roc_id; + } +#endif /* P2P_LISTEN_OFFLOADING */ + if (id == 0) + id = ++cfg->last_roc_id; + return id; } -void* wl_cfg80211_get_dhdp() +#if defined(SUPPORT_RANDOM_MAC_SCAN) +int +wl_cfg80211_set_random_mac(struct net_device *dev, bool enable) { struct bcm_cfg80211 *cfg = g_bcm_cfg; + int ret; - return cfg->pub; -} + if (cfg->random_mac_enabled == enable) { + WL_ERR(("Random MAC already %s\n", enable ? 
"Enabled" : "Disabled")); + return BCME_OK; + } -bool wl_cfg80211_is_p2p_active(void) -{ - return (g_bcm_cfg && g_bcm_cfg->p2p); + if (enable) { + ret = wl_cfg80211_random_mac_enable(dev); + } else { + ret = wl_cfg80211_random_mac_disable(dev); + } + + if (!ret) { + cfg->random_mac_enabled = enable; + } + + return ret; } -static void wl_cfg80211_work_handler(struct work_struct * work) -{ - struct bcm_cfg80211 *cfg = NULL; - struct net_info *iter, *next; - s32 err = BCME_OK; - s32 pm = PM_FAST; - dhd_pub_t *dhd; +int +wl_cfg80211_random_mac_enable(struct net_device *dev) +{ + u8 current_mac[ETH_ALEN] = {0, }; + s32 err = BCME_ERROR; + uint8 buffer[20] = {0, }; + wl_scanmac_t *sm = NULL; + int len = 0; + wl_scanmac_enable_t *sm_enable = NULL; + wl_scanmac_config_t *sm_config = NULL; + struct bcm_cfg80211 *cfg = g_bcm_cfg; - cfg = container_of(work, struct bcm_cfg80211, pm_enable_work.work); - WL_DBG(("Enter \n")); - if (cfg->pm_enable_work_on) { - cfg->pm_enable_work_on = false; - for_each_ndev(cfg, iter, next) { - if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) || - (wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS)) - continue; - if (iter->ndev) { - dhd = (dhd_pub_t *)(cfg->pub); - if (pm != PM_OFF && dhd_conf_get_pm(dhd) >= 0) - pm = dhd_conf_get_pm(dhd); - if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, - &pm, sizeof(pm), true)) != 0) { - if (err == -ENODEV) - WL_DBG(("%s:netdev not ready\n", iter->ndev->name)); - else - WL_ERR(("%s:error (%d)\n", iter->ndev->name, err)); - } else - wl_cfg80211_update_power_mode(iter->ndev); - } - } + if (wl_get_drv_status_all(cfg, CONNECTED) || wl_get_drv_status_all(cfg, CONNECTING) || + wl_get_drv_status_all(cfg, AP_CREATED) || wl_get_drv_status_all(cfg, AP_CREATING)) { + WL_ERR(("Fail to Set random mac, current state is wrong\n")); + return err; } -#if defined(DHCP_SCAN_SUPPRESS) - else if (cfg->scan_suppressed) { - /* There is pending scan_suppress. 
Clean it */ - WL_ERR(("Clean up from timer after %d msec\n", WL_SCAN_SUPPRESS_TIMEOUT)); - wl_cfg80211_scan_suppress(bcmcfg_to_prmry_ndev(cfg), 0); + + /* Read current mac address */ + err = wldev_iovar_getbuf_bsscfg(dev, "cur_etheraddr", + NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + + if (err != BCME_OK) { + WL_ERR(("failed to get current dongle mac address\n")); + return err; } -#endif /* DHCP_SCAN_SUPPRESS */ -} -u8 -wl_get_action_category(void *frame, u32 frame_len) -{ - u8 category; - u8 *ptr = (u8 *)frame; - if (frame == NULL) - return DOT11_ACTION_CAT_ERR_MASK; - if (frame_len < DOT11_ACTION_HDR_LEN) - return DOT11_ACTION_CAT_ERR_MASK; - category = ptr[DOT11_ACTION_CAT_OFF]; - WL_INFORM(("Action Category: %d\n", category)); - return category; -} + memcpy(current_mac, cfg->ioctl_buf, ETH_ALEN); -int -wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action) -{ - u8 *ptr = (u8 *)frame; - if (frame == NULL || ret_action == NULL) - return BCME_ERROR; - if (frame_len < DOT11_ACTION_HDR_LEN) - return BCME_ERROR; - if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len)) - return BCME_ERROR; - *ret_action = ptr[DOT11_ACTION_ACT_OFF]; - WL_INFORM(("Public Action : %d\n", *ret_action)); - return BCME_OK; -} + /* Enable scan mac */ + sm = (wl_scanmac_t *)buffer; + sm_enable = (wl_scanmac_enable_t *)sm->data; + sm->len = sizeof(*sm_enable); + sm_enable->enable = 1; + len = OFFSETOF(wl_scanmac_t, data) + sm->len; + sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE; -#ifdef WLFBT -void -wl_cfg80211_get_fbt_key(uint8 *key) -{ - memcpy(key, g_bcm_cfg->fbt_key, FBT_KEYLEN); -} -#endif /* WLFBT */ + err = wldev_iovar_setbuf_bsscfg(dev, "scanmac", + sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); -static int -wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev, - const struct ether_addr *bssid) -{ - s32 err; - wl_event_msg_t e; + if (err != BCME_OK) { + WL_ERR(("failed to enable scanmac, 
err=%d\n", err)); + return err; + } - bzero(&e, sizeof(e)); - e.event_type = cpu_to_be32(WLC_E_BSSID); - memcpy(&e.addr, bssid, ETHER_ADDR_LEN); - /* trigger the roam event handler */ - err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL); + /* Configure scanmac */ + memset(buffer, 0x0, sizeof(buffer)); + sm_config = (wl_scanmac_config_t *)sm->data; + sm->len = sizeof(*sm_config); + sm->subcmd_id = WL_SCANMAC_SUBCMD_CONFIG; + sm_config->scan_bitmap = WL_SCANMAC_SCAN_UNASSOC; + + /* Set current mac address */ + memcpy(&sm_config->mac.octet, current_mac, ETH_ALEN); + sm_config->mac.octet[3] = 0x0; + sm_config->mac.octet[4] = 0x0; + sm_config->mac.octet[5] = 0x0; + + /* Set randomize mac address(last 3bytes) */ + memset(&sm_config->random_mask.octet, 0x0, ETH_ALEN); + sm_config->random_mask.octet[3] = 0xff; + sm_config->random_mask.octet[4] = 0xff; + sm_config->random_mask.octet[5] = 0xff; + + len = OFFSETOF(wl_scanmac_t, data) + sm->len; + + err = wldev_iovar_setbuf_bsscfg(dev, "scanmac", + sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + + if (err != BCME_OK) { + WL_ERR(("failed scanmac configuration\n")); + + /* Disable scan mac for clean-up */ + wl_cfg80211_random_mac_disable(dev); + return err; + } + WL_ERR(("random MAC enable done")); return err; } -#ifdef WL_CFG80211_ACL -static int -wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev, - const struct cfg80211_acl_data *acl) +int +wl_cfg80211_random_mac_disable(struct net_device *dev) { - int i; - int ret = 0; - int macnum = 0; - int macmode = MACLIST_MODE_DISABLED; - struct maclist *list; + s32 err = BCME_ERROR; + uint8 buffer[20] = {0, }; + wl_scanmac_t *sm = NULL; + int len = 0; + wl_scanmac_enable_t *sm_enable = NULL; + struct bcm_cfg80211 *cfg = g_bcm_cfg; - /* get the MAC filter mode */ - if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) { - macmode = MACLIST_MODE_ALLOW; - } else if (acl && acl->acl_policy == 
NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED && - acl->n_acl_entries) { - macmode = MACLIST_MODE_DENY; - } + sm = (wl_scanmac_t *)buffer; + sm_enable = (wl_scanmac_enable_t *)sm->data; + sm->len = sizeof(*sm_enable); + sm_enable->enable = 0; + len = OFFSETOF(wl_scanmac_t, data) + sm->len; - /* if acl == NULL, macmode is still disabled.. */ - if (macmode == MACLIST_MODE_DISABLED) { - if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0) - WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret)); + sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE; - return ret; - } + err = wldev_iovar_setbuf_bsscfg(dev, "scanmac", + sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); - macnum = acl->n_acl_entries; - if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) { - WL_ERR(("%s : invalid number of MAC address entries %d\n", - __FUNCTION__, macnum)); - return -1; + if (err != BCME_OK) { + WL_ERR(("failed to disable scanmac, err=%d\n", err)); + return err; } - /* allocate memory for the MAC list */ - list = (struct maclist*)kmalloc(sizeof(int) + - sizeof(struct ether_addr) * macnum, GFP_KERNEL); - if (!list) { - WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__)); - return -1; - } + WL_ERR(("random MAC disable done\n")); + return err; +} +#endif /* SUPPORT_RANDOM_MAC_SCAN */ - /* prepare the MAC list */ - list->count = htod32(macnum); - for (i = 0; i < macnum; i++) { - memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN); - } - /* set the list */ - if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0) - WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret)); +int +wl_cfg80211_iface_count(void) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *iter, *next; + int iface_count = 0; - kfree(list); + for_each_ndev(cfg, iter, next) { + if (iter->ndev) { + iface_count++; + } + } + return iface_count; +} - return ret; +#ifdef DHD_LOG_DUMP +struct bcm_cfg80211* +wl_get_bcm_cfg80211_ptr(void) +{ + return g_bcm_cfg; 
} -#endif /* WL_CFG80211_ACL */ +#endif /* DHD_LOG_DUMP */ -#ifdef WL_NAN +#define CHECK_DONGLE_IDLE_TIME 50 +#define CHECK_DONGLE_IDLE_CNT 100 int -wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd, int cmd_len) +wl_check_dongle_idle(struct wiphy *wiphy) { - return wl_cfgnan_cmd_handler(ndev, g_bcm_cfg, cmd, cmd_len); + int error = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *primary_ndev; + int retry = 0; + struct channel_info ci; + if (!cfg) + return FALSE; + return TRUE; // terence 20160426 + /* Use primary I/F for sending cmds down to firmware */ + primary_ndev = bcmcfg_to_prmry_ndev(cfg); + + while (retry++ < CHECK_DONGLE_IDLE_CNT) { + error = wldev_ioctl(primary_ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false); + if (error != BCME_OK || ci.scan_channel != 0) { + WL_ERR(("Firmware is busy(err:%d scan channel:%d). wait %dms\n", + error, ci.scan_channel, CHECK_DONGLE_IDLE_TIME)); + } else { + break; + } + wl_delay(CHECK_DONGLE_IDLE_TIME); + } + if (retry >= CHECK_DONGLE_IDLE_CNT) { + WL_ERR(("DONGLE is BUSY too long\n")); + return FALSE; + } + WL_DBG(("DONGLE is idle\n")); + return TRUE; } -#endif /* WL_NAN */ diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/drivers/amlogic/wifi/bcmdhd/wl_cfg80211.h similarity index 58% rename from drivers/net/wireless/bcmdhd/wl_cfg80211.h rename to drivers/amlogic/wifi/bcmdhd/wl_cfg80211.h index 6d4cb3d0c692a..66d0e595f1308 100644 --- a/drivers/net/wireless/bcmdhd/wl_cfg80211.h +++ b/drivers/amlogic/wifi/bcmdhd/wl_cfg80211.h @@ -1,9 +1,30 @@ /* * Linux cfg80211 driver * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to 
such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: wl_cfg80211.h 505096 2014-09-26 12:49:04Z $ + * + * <> + * + * $Id: wl_cfg80211.h 608788 2015-12-29 10:59:33Z $ */ /** @@ -24,6 +45,7 @@ #include #include #include +#include struct wl_conf; struct wl_iface; @@ -34,6 +56,7 @@ struct wl_ibss; #define htod32(i) (i) #define htod16(i) (i) +#define dtoh64(i) (i) #define dtoh32(i) (i) #define dtoh16(i) (i) #define htodchanspec(i) (i) @@ -47,18 +70,33 @@ struct wl_ibss; #define WL_DBG_INFO (1 << 1) #define WL_DBG_ERR (1 << 0) +#ifdef DHD_LOG_DUMP +extern void dhd_log_dump_print(const char *fmt, ...); +extern char *dhd_log_dump_get_timestamp(void); +struct bcm_cfg80211 *wl_get_bcm_cfg80211_ptr(void); +#endif /* DHD_LOG_DUMP */ + /* 0 invalidates all debug messages. 
default is 1 */ #define WL_DBG_LEVEL 0xFF +#ifdef CUSTOMER_HW4_DEBUG +#define CFG80211_ERROR_TEXT "CFG80211-INFO2) " +#else #define CFG80211_ERROR_TEXT "CFG80211-ERROR) " - -#define MAX_WAIT_TIME 1500 -#define DNGL_FUNC(func, parameters) func parameters; - -#define PM_BLOCK 1 -#define PM_ENABLE 0 +#endif /* CUSTOMER_HW4_DEBUG */ #if defined(DHD_DEBUG) +#ifdef DHD_LOG_DUMP +#define WL_ERR(args) \ +do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \ + printk args; \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else #define WL_ERR(args) \ do { \ if (wl_dbg_level & WL_DBG_ERR) { \ @@ -66,6 +104,7 @@ do { \ printk args; \ } \ } while (0) +#endif /* DHD_LOG_DUMP */ #else /* defined(DHD_DEBUG) */ #define WL_ERR(args) \ do { \ @@ -112,7 +151,17 @@ do { \ #ifdef WL_TRACE_HW4 #undef WL_TRACE_HW4 #endif +#ifdef CUSTOMER_HW4_DEBUG +#define WL_TRACE_HW4(args) \ +do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_INFO "CFG80211-TRACE) %s : ", __func__); \ + printk args; \ + } \ +} while (0) +#else #define WL_TRACE_HW4 WL_TRACE +#endif /* CUSTOMER_HW4_DEBUG */ #if (WL_DBG_LEVEL > 0) #define WL_DBG(args) \ do { \ @@ -138,29 +187,44 @@ do { \ #define WL_IOCTL_LEN_MAX 2048 #define WL_EXTRA_BUF_MAX 2048 #define WL_SCAN_ERSULTS_LAST (WL_SCAN_RESULTS_NO_MEM+1) -#define WL_AP_MAX 256 +#define WL_AP_MAX 256 #define WL_FILE_NAME_MAX 256 -#define WL_DWELL_TIME 200 -#define WL_MED_DWELL_TIME 400 +#define WL_DWELL_TIME 200 +#define WL_MED_DWELL_TIME 400 #define WL_MIN_DWELL_TIME 100 -#define WL_LONG_DWELL_TIME 1000 -#define IFACE_MAX_CNT 2 -#define WL_SCAN_CONNECT_DWELL_TIME_MS 200 -#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20 -#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320 -#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400 -#define WL_AF_TX_MAX_RETRY 5 +#define WL_LONG_DWELL_TIME 1000 +#define IFACE_MAX_CNT 4 +#define WL_SCAN_CONNECT_DWELL_TIME_MS 200 
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20 +#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320 +#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400 +#define WL_AF_TX_MAX_RETRY 5 -#define WL_AF_SEARCH_TIME_MAX 450 -#define WL_AF_TX_EXTRA_TIME_MAX 200 +#define WL_AF_SEARCH_TIME_MAX 450 +#define WL_AF_TX_EXTRA_TIME_MAX 200 #define WL_SCAN_TIMER_INTERVAL_MS 10000 /* Scan timeout */ #define WL_CHANNEL_SYNC_RETRY 5 #define WL_INVALID -1 +#ifdef DHD_LOSSLESS_ROAMING +#define WL_ROAM_TIMEOUT_MS 1000 /* Roam timeout */ +#endif /* Bring down SCB Timeout to 20secs from 60secs default */ #ifndef WL_SCB_TIMEOUT -#define WL_SCB_TIMEOUT 20 +#define WL_SCB_TIMEOUT 20 +#endif + +#ifndef WL_SCB_ACTIVITY_TIME +#define WL_SCB_ACTIVITY_TIME 5 +#endif + +#ifndef WL_SCB_MAX_PROBE +#define WL_SCB_MAX_PROBE 3 +#endif + +#ifndef WL_MIN_PSPRETEND_THRESHOLD +#define WL_MIN_PSPRETEND_THRESHOLD 2 #endif /* SCAN_SUPPRESS timer values in ms */ @@ -169,12 +233,19 @@ do { \ #define WL_PM_ENABLE_TIMEOUT 10000 -#ifdef WLAIBSS -/* Custom AIBSS beacon parameters */ -#define AIBSS_INITIAL_MIN_BCN_DUR 500 -#define AIBSS_MIN_BCN_DUR 5000 -#define AIBSS_BCN_FLOOD_DUR 5000 -#endif /* WLAIBSS */ +/* cfg80211 wowlan definitions */ +#define WL_WOWLAN_MAX_PATTERNS 8 +#define WL_WOWLAN_MIN_PATTERN_LEN 1 +#define WL_WOWLAN_MAX_PATTERN_LEN 255 +#define WL_WOWLAN_PKT_FILTER_ID_FIRST 201 +#define WL_WOWLAN_PKT_FILTER_ID_LAST (WL_WOWLAN_PKT_FILTER_ID_FIRST + \ + WL_WOWLAN_MAX_PATTERNS - 1) + +#ifdef WLTDLS +#define TDLS_TUNNELED_PRB_REQ "\x7f\x50\x6f\x9a\04" +#define TDLS_TUNNELED_PRB_RESP "\x7f\x50\x6f\x9a\05" +#endif /* WLTDLS */ + /* driver status */ enum wl_status { @@ -238,8 +309,8 @@ enum wl_prof_list { /* donlge escan state */ enum wl_escan_state { - WL_ESCAN_STATE_IDLE, - WL_ESCAN_STATE_SCANING + WL_ESCAN_STATE_IDLE, + WL_ESCAN_STATE_SCANING }; /* fw downloading status */ enum wl_fw_status { @@ -253,11 +324,10 @@ enum wl_management_type { WL_ASSOC_RESP = 0x4 }; -enum wl_handler_del_type { - WL_HANDLER_NOTUSE, - 
WL_HANDLER_DEL, - WL_HANDLER_MAINTAIN, - WL_HANDLER_PEND +enum wl_pm_workq_act_type { + WL_PM_WORKQ_SHORT, + WL_PM_WORKQ_LONG, + WL_PM_WORKQ_DEL }; /* beacon / probe_response */ @@ -328,6 +398,27 @@ struct wl_ibss { u8 channel; }; +typedef struct wl_bss_vndr_ies { + u8 probe_req_ie[VNDR_IES_BUF_LEN]; + u8 probe_res_ie[VNDR_IES_MAX_BUF_LEN]; + u8 assoc_req_ie[VNDR_IES_BUF_LEN]; + u8 assoc_res_ie[VNDR_IES_BUF_LEN]; + u8 beacon_ie[VNDR_IES_MAX_BUF_LEN]; + u32 probe_req_ie_len; + u32 probe_res_ie_len; + u32 assoc_req_ie_len; + u32 assoc_res_ie_len; + u32 beacon_ie_len; +} wl_bss_vndr_ies_t; + +typedef struct wl_cfgbss { + u8 *wpa_ie; + u8 *rsn_ie; + u8 *wps_ie; + bool security_mode; + struct wl_bss_vndr_ies ies; /* Common for STA, P2P GC, GO, AP, P2P Disc Interface */ +} wl_cfgbss_t; + /* cfg driver profile */ struct wl_profile { u32 mode; @@ -352,6 +443,9 @@ struct net_info { bool pm_restore; bool pm_block; s32 pm; + s32 bssidx; + wl_cfgbss_t bss; + u32 ulb_bw; struct list_head list; /* list of all net_info structure */ }; @@ -385,6 +479,11 @@ struct wl_pmk_list { pmkid_t foo[MAXPMKID - 1]; }; +#ifdef DHD_MAX_IFS +#define WL_MAX_IFS DHD_MAX_IFS +#else +#define WL_MAX_IFS 16 +#endif #define ESCAN_BUF_SIZE (64 * 1024) @@ -411,34 +510,12 @@ typedef struct { } removal_element_t; #endif /* ESCAN_BUF_OVERFLOW_MGMT */ -struct ap_info { -/* Structure to hold WPS, WPA IEs for a AP */ - u8 probe_res_ie[VNDR_IES_MAX_BUF_LEN]; - u8 beacon_ie[VNDR_IES_MAX_BUF_LEN]; - u8 assoc_res_ie[VNDR_IES_MAX_BUF_LEN]; - u32 probe_res_ie_len; - u32 beacon_ie_len; - u32 assoc_res_ie_len; - u8 *wpa_ie; - u8 *rsn_ie; - u8 *wps_ie; - bool security_mode; -}; - -struct sta_info { - /* Structure to hold WPS IE for a STA */ - u8 probe_req_ie[VNDR_IES_BUF_LEN]; - u8 assoc_req_ie[VNDR_IES_BUF_LEN]; - u32 probe_req_ie_len; - u32 assoc_req_ie_len; -}; - struct afx_hdl { wl_af_params_t *pending_tx_act_frm; struct ether_addr tx_dst_addr; struct net_device *dev; struct work_struct work; - u32 bssidx; + s32 
bssidx; u32 retry; s32 peer_chan; s32 peer_listen_chan; /* search channel: configured by upper layer */ @@ -457,62 +534,24 @@ struct parsed_ies { u32 wpa2_ie_len; }; -#ifdef WL_SDO -/* Service discovery */ -typedef struct { - uint8 transaction_id; /* Transaction ID */ - uint8 protocol; /* Service protocol type */ - uint16 query_len; /* Length of query */ - uint16 response_len; /* Length of response */ - uint8 qrbuf[1]; -} wl_sd_qr_t; +#ifdef P2P_LISTEN_OFFLOADING typedef struct { - uint16 period; /* extended listen period */ - uint16 interval; /* extended listen interval */ -} wl_sd_listen_t; - -#define WL_SD_STATE_IDLE 0x0000 -#define WL_SD_SEARCH_SVC 0x0001 -#define WL_SD_ADV_SVC 0x0002 - -enum wl_dd_state { - WL_DD_STATE_IDLE, - WL_DD_STATE_SEARCH, - WL_DD_STATE_LISTEN -}; - -#define MAX_SDO_PROTO_STR_LEN 20 -typedef struct wl_sdo_proto { - char str[MAX_SDO_PROTO_STR_LEN]; - u32 val; -} wl_sdo_proto_t; - -typedef struct sd_offload { - u32 sd_state; - enum wl_dd_state dd_state; - wl_sd_listen_t sd_listen; -} sd_offload_t; - -typedef struct sdo_event { - u8 addr[ETH_ALEN]; - uint16 freq; /* channel Freq */ - uint8 count; /* Tlv count */ - uint16 update_ind; -} sdo_event_t; -#endif /* WL_SDO */ + uint16 period; /* listen offload period */ + uint16 interval; /* listen offload interval */ + uint16 count; /* listen offload count */ + uint16 pad; /* pad for 32bit align */ +} wl_p2plo_listen_t; +#endif /* P2P_LISTEN_OFFLOADING */ #ifdef WL11U /* Max length of Interworking element */ #define IW_IES_MAX_BUF_LEN 9 #endif -#ifdef WLFBT -#define FBT_KEYLEN 32 -#endif #define MAX_EVENT_BUF_NUM 16 typedef struct wl_eventmsg_buf { - u16 num; - struct { + u16 num; + struct { u16 type; bool set; } event [MAX_EVENT_BUF_NUM]; @@ -526,6 +565,10 @@ typedef struct wl_if_event_info { char name[IFNAMSIZ+1]; } wl_if_event_info; +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +#define GET_BSS_INFO_LEN 90 +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + /* private data of cfg80211 interface */ struct 
bcm_cfg80211 { struct wireless_dev *wdev; /* representing cfg cfg80211 device */ @@ -538,12 +581,14 @@ struct bcm_cfg80211 { EVENT_HANDLER evt_handler[WLC_E_LAST]; struct list_head eq_list; /* used for event queue */ struct list_head net_list; /* used for struct net_info */ + spinlock_t net_list_sync; /* to protect scan status (and others if needed) */ spinlock_t eq_lock; /* for event queue synchronization */ spinlock_t cfgdrv_lock; /* to protect scan status (and others if needed) */ struct completion act_frm_scan; struct completion iface_disable; struct completion wait_next_af; struct mutex usr_sync; /* maily for up/down synchronization */ + struct mutex scan_complete; /* serialize scan_complete call */ struct wl_scan_results *bss_list; struct wl_scan_results *scan_results; @@ -587,7 +632,7 @@ struct bcm_cfg80211 { bool scan_tried; /* indicates if first scan attempted */ #if defined(BCMSDIO) || defined(BCMPCIE) bool wlfc_on; -#endif +#endif bool vsdb_mode; bool roamoff_on_concurrent; u8 *ioctl_buf; /* ioctl buffer */ @@ -605,19 +650,17 @@ struct bcm_cfg80211 { wl_if_event_info if_event_info; struct completion send_af_done; struct afx_hdl *afx_hdl; - struct ap_info *ap_info; - struct sta_info *sta_info; struct p2p_info *p2p; bool p2p_supported; void *btcoex_info; struct timer_list scan_timeout; /* Timer for catch scan event timeout */ +#if defined(P2P_IE_MISSING_FIX) + bool p2p_prb_noti; +#endif s32(*state_notifier) (struct bcm_cfg80211 *cfg, struct net_info *_net_info, enum wl_status state, bool set); unsigned long interrested_state; wlc_ssid_t hostapd_ssid; -#ifdef WL_SDO - sd_offload_t *sdo; -#endif #ifdef WL11U bool wl11u; u8 iw_ie[IW_IES_MAX_BUF_LEN]; @@ -627,22 +670,16 @@ struct bcm_cfg80211 { #ifdef WL_SCHED_SCAN struct cfg80211_sched_scan_request *sched_scan_req; /* scheduled scan req */ #endif /* WL_SCHED_SCAN */ -#ifdef WL_HOST_BAND_MGMT - u8 curr_band; -#endif /* WL_HOST_BAND_MGMT */ bool scan_suppressed; struct timer_list scan_supp_timer; struct 
work_struct wlan_work; struct mutex event_sync; /* maily for up/down synchronization */ bool disable_roam_event; - bool pm_enable_work_on; struct delayed_work pm_enable_work; + struct mutex pm_sync; /* mainly for pm work synchronization */ + vndr_ie_setbuf_t *ibss_vsie; /* keep the VSIE for IBSS */ int ibss_vsie_len; -#ifdef WLAIBSS - u32 aibss_txfail_pid; - u32 aibss_txfail_seq; -#endif /* WLAIBSS */ u32 rmc_event_pid; u32 rmc_event_seq; #ifdef WLAIBSS_MCHAN @@ -652,35 +689,160 @@ struct bcm_cfg80211 { bcm_struct_cfgdev *bss_cfgdev; /* For DUAL STA/STA+AP */ s32 cfgdev_bssidx; bool bss_pending_op; /* indicate where there is a pending IF operation */ -#ifdef WLFBT - uint8 fbt_key[FBT_KEYLEN]; -#endif int roam_offload; +#ifdef WL_NAN + bool nan_enable; bool nan_running; +#endif /* WL_NAN */ +#ifdef WL_CFG80211_P2P_DEV_IF + bool down_disc_if; +#endif /* WL_CFG80211_P2P_DEV_IF */ #ifdef P2PLISTEN_AP_SAMECHN bool p2p_resp_apchn_status; #endif /* P2PLISTEN_AP_SAMECHN */ + struct wl_wsec_key wep_key; #ifdef WLTDLS u8 *tdls_mgmt_frame; u32 tdls_mgmt_frame_len; s32 tdls_mgmt_freq; #endif /* WLTDLS */ + bool need_wait_afrx; +#ifdef QOS_MAP_SET + uint8 *up_table; /* user priority table, size is UP_TABLE_MAX */ +#endif /* QOS_MAP_SET */ + struct ether_addr last_roamed_addr; +#ifdef DHD_LOSSLESS_ROAMING + struct timer_list roam_timeout; /* Timer for catch roam timeout */ +#endif + bool rcc_enabled; /* flag for Roam channel cache feature */ +#if defined(DHD_ENABLE_BIGDATA_LOGGING) + char bss_info[GET_BSS_INFO_LEN]; + wl_event_msg_t event_auth_assoc; + u32 assoc_reject_status; + u32 roam_count; +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + u16 ap_oper_channel; + bool revert_ndo_disable; +#if defined(SUPPORT_RANDOM_MAC_SCAN) + bool random_mac_enabled; +#endif /* SUPPORT_RANDOM_MAC_SCAN */ int p2p_disconnected; // terence 20130703: Fix for wrong group_capab (timing issue) struct ether_addr disconnected_bssid; }; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 
|| (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) + +#define BCM_LIST_FOR_EACH_ENTRY_SAFE(pos, next, head, member) \ +_Pragma("GCC diagnostic push") \ +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \ +list_for_each_entry_safe((pos), (next), (head), member) \ +_Pragma("GCC diagnostic pop") \ + +#else +#define BCM_LIST_FOR_EACH_ENTRY_SAFE(pos, next, head, member) \ +list_for_each_entry_safe((pos), (next), (head), member) \ + +#endif /* STRICT_GCC_WARNINGS */ static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss) { return bss = bss ? (struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info; } + +static inline void +wl_probe_wdev_all(struct bcm_cfg80211 *cfg) +{ + struct net_info *_net_info, *next; + unsigned long int flags; + int idx = 0; + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, + &cfg->net_list, list) { + WL_ERR(("%s: net_list[%d] bssidx: %d, " + "ndev: %p, wdev: %p \n", __FUNCTION__, + idx++, _net_info->bssidx, + _net_info->ndev, _net_info->wdev)); + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return; +} + +static inline struct net_info * +wl_get_netinfo_by_bssidx(struct bcm_cfg80211 *cfg, s32 bssidx) +{ + struct net_info *_net_info, *next, *info = NULL; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if ((bssidx >= 0) && (_net_info->bssidx == bssidx)) { + info = _net_info; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return info; +} + +static inline void +wl_dealloc_netinfo_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev) +{ + struct net_info *_net_info, *next; + unsigned long int flags; + +#ifdef DHD_IFDEBUG + WL_ERR(("dealloc_netinfo enter wdev=%p \n", wdev)); +#endif + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + 
if (wdev && (_net_info->wdev == wdev)) { + wl_cfgbss_t *bss = &_net_info->bss; + + kfree(bss->wpa_ie); + bss->wpa_ie = NULL; + kfree(bss->rsn_ie); + bss->rsn_ie = NULL; + kfree(bss->wps_ie); + bss->wps_ie = NULL; + list_del(&_net_info->list); + cfg->iface_cnt--; + kfree(_net_info); + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); +#ifdef DHD_IFDEBUG + WL_ERR(("dealloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt)); +#endif +} + static inline s32 wl_alloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev, - struct wireless_dev * wdev, s32 mode, bool pm_block) + struct wireless_dev * wdev, s32 mode, bool pm_block, u8 bssidx) { struct net_info *_net_info; s32 err = 0; + unsigned long int flags; +#ifdef DHD_IFDEBUG + WL_ERR(("alloc_netinfo enter bssidx=%d wdev=%p ndev=%p\n", bssidx, wdev, ndev)); +#endif + /* Check whether there is any duplicate entry for the + * same bssidx * + */ + if ((_net_info = wl_get_netinfo_by_bssidx(cfg, bssidx))) { + /* We have a duplicate entry for the same bssidx + * already present which shouldn't have been the case. + * Attempt recovery. 
+ */ + WL_ERR(("Duplicate entry for bssidx=%d present\n", bssidx)); + wl_probe_wdev_all(cfg); +#ifdef DHD_DEBUG + ASSERT(0); +#endif /* DHD_DEBUG */ + WL_ERR(("Removing the Dup entry for bssidx=%d \n", bssidx)); + wl_dealloc_netinfo_by_wdev(cfg, _net_info->wdev); + } if (cfg->iface_cnt == IFACE_MAX_CNT) return -ENOMEM; _net_info = kzalloc(sizeof(struct net_info), GFP_KERNEL); @@ -694,37 +856,41 @@ wl_alloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev, _net_info->pm = 0; _net_info->pm_block = pm_block; _net_info->roam_off = WL_INVALID; + _net_info->bssidx = bssidx; + spin_lock_irqsave(&cfg->net_list_sync, flags); cfg->iface_cnt++; list_add(&_net_info->list, &cfg->net_list); + spin_unlock_irqrestore(&cfg->net_list_sync, flags); } +#ifdef DHD_IFDEBUG + WL_ERR(("alloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt)); +#endif return err; } -static inline void -wl_dealloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev) -{ - struct net_info *_net_info, *next; - list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) { - if (ndev && (_net_info->ndev == ndev)) { - list_del(&_net_info->list); - cfg->iface_cnt--; - kfree(_net_info); - } - } - -} static inline void wl_delete_all_netinfo(struct bcm_cfg80211 *cfg) { struct net_info *_net_info, *next; - - list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) { + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + wl_cfgbss_t *bss = &_net_info->bss; + + kfree(bss->wpa_ie); + bss->wpa_ie = NULL; + kfree(bss->rsn_ie); + bss->rsn_ie = NULL; + kfree(bss->wps_ie); + bss->wps_ie = NULL; list_del(&_net_info->list); - if (_net_info->wdev) - kfree(_net_info->wdev); - kfree(_net_info); + if (_net_info->wdev) + kfree(_net_info->wdev); + kfree(_net_info); } cfg->iface_cnt = 0; + spin_unlock_irqrestore(&cfg->net_list_sync, flags); } static inline u32 wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status) 
@@ -732,33 +898,46 @@ wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status) { struct net_info *_net_info, *next; u32 cnt = 0; - list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) { + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { if (_net_info->ndev && test_bit(status, &_net_info->sme_state)) cnt++; } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); return cnt; } static inline void wl_set_status_all(struct bcm_cfg80211 *cfg, s32 status, u32 op) { struct net_info *_net_info, *next; - list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) { + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { switch (op) { case 1: - return; /* set all status is not allowed */ + break; /* set all status is not allowed */ case 2: + /* + * Release the spinlock before calling notifier. Else there + * will be nested calls + */ + spin_unlock_irqrestore(&cfg->net_list_sync, flags); clear_bit(status, &_net_info->sme_state); if (cfg->state_notifier && test_bit(status, &(cfg->interrested_state))) cfg->state_notifier(cfg, _net_info, status, false); - break; + return; case 4: - return; /* change all status is not allowed */ + break; /* change all status is not allowed */ default: - return; /* unknown operation */ + break; /* unknown operation */ } } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); } static inline void wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status, @@ -766,22 +945,34 @@ wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status, { struct net_info *_net_info, *next; + unsigned long int flags; - list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) { + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { if (ndev && (_net_info->ndev == ndev)) { switch (op) { case 
1: + /* + * Release the spinlock before calling notifier. Else there + * will be nested calls + */ + spin_unlock_irqrestore(&cfg->net_list_sync, flags); set_bit(status, &_net_info->sme_state); if (cfg->state_notifier && test_bit(status, &(cfg->interrested_state))) cfg->state_notifier(cfg, _net_info, status, true); - break; + return; case 2: + /* + * Release the spinlock before calling notifier. Else there + * will be nested calls + */ + spin_unlock_irqrestore(&cfg->net_list_sync, flags); clear_bit(status, &_net_info->sme_state); if (cfg->state_notifier && test_bit(status, &(cfg->interrested_state))) cfg->state_notifier(cfg, _net_info, status, false); - break; + return; case 4: change_bit(status, &_net_info->sme_state); break; @@ -789,7 +980,28 @@ wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status, } } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + +} + +static inline wl_cfgbss_t * +wl_get_cfgbss_by_wdev(struct bcm_cfg80211 *cfg, + struct wireless_dev *wdev) +{ + struct net_info *_net_info, *next; + wl_cfgbss_t *bss = NULL; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (wdev && (_net_info->wdev == wdev)) { + bss = &_net_info->bss; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return bss; } static inline u32 @@ -797,60 +1009,144 @@ wl_get_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status, struct net_device *ndev) { struct net_info *_net_info, *next; + u32 stat = 0; + unsigned long int flags; - list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) { - if (ndev && (_net_info->ndev == ndev)) - return test_bit(status, &_net_info->sme_state); + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + stat = test_bit(status, &_net_info->sme_state); + break; + } } - return 0; + 
spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return stat; } static inline s32 wl_get_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) { struct net_info *_net_info, *next; + s32 mode = -1; + unsigned long int flags; - list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) { - if (ndev && (_net_info->ndev == ndev)) - return _net_info->mode; + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + mode = _net_info->mode; + break; + } } - return -1; + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return mode; } - static inline void wl_set_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 mode) { struct net_info *_net_info, *next; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) + _net_info->mode = mode; + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); +} + +static inline s32 +wl_get_bssidx_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev) +{ + struct net_info *_net_info, *next; + s32 bssidx = -1; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (_net_info->wdev && (_net_info->wdev == wdev)) { + bssidx = _net_info->bssidx; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return bssidx; +} - list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) { - if (ndev && (_net_info->ndev == ndev)) - _net_info->mode = mode; +static inline struct wireless_dev * +wl_get_wdev_by_bssidx(struct bcm_cfg80211 *cfg, s32 bssidx) +{ + struct net_info *_net_info, *next; + struct wireless_dev *wdev = NULL; + unsigned long int flags; + + if (bssidx < 0) + return NULL; + spin_lock_irqsave(&cfg->net_list_sync, flags); + 
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (_net_info->bssidx == bssidx) { + wdev = _net_info->wdev; + break; + } } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return wdev; } + static inline struct wl_profile * wl_get_profile_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) { struct net_info *_net_info, *next; + struct wl_profile *prof = NULL; + unsigned long int flags; - list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) { - if (ndev && (_net_info->ndev == ndev)) - return &_net_info->profile; + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + prof = &_net_info->profile; + break; + } } - return NULL; + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return prof; } static inline struct net_info * wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) { - struct net_info *_net_info, *next; + struct net_info *_net_info, *next, *info = NULL; + unsigned long int flags; - list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) { - if (ndev && (_net_info->ndev == ndev)) - return _net_info; + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + info = _net_info; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return info; +} + +static inline struct net_info * +wl_get_netinfo_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev) +{ + struct net_info *_net_info, *next, *info = NULL; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (wdev && (_net_info->wdev == wdev)) { + info = _net_info; + break; + } } - return NULL; + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return info; } + +#define 
is_p2p_group_iface(wdev) (((wdev->iftype == NL80211_IFTYPE_P2P_GO) || \ + (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) ? 1 : 0) #define bcmcfg_to_wiphy(cfg) (cfg->wdev->wiphy) #define bcmcfg_to_prmry_ndev(cfg) (cfg->wdev->netdev) #define bcmcfg_to_prmry_wdev(cfg) (cfg->wdev) @@ -866,10 +1162,10 @@ wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) #define ndev_to_wlc_ndev(ndev, cfg) (ndev) #endif /* WL_ENABLE_P2P_IF */ -#if defined(WL_CFG80211_P2P_DEV_IF) #define wdev_to_wlc_ndev(wdev, cfg) \ - ((wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) ? \ - bcmcfg_to_prmry_ndev(cfg) : wdev_to_ndev(wdev)) + (wdev_to_ndev(wdev) ? \ + wdev_to_ndev(wdev) : bcmcfg_to_prmry_ndev(cfg)) +#if defined(WL_CFG80211_P2P_DEV_IF) #define cfgdev_to_wlc_ndev(cfgdev, cfg) wdev_to_wlc_ndev(cfgdev, cfg) #define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_wdev(cfg) #elif defined(WL_ENABLE_P2P_IF) @@ -881,10 +1177,14 @@ wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) #endif /* WL_CFG80211_P2P_DEV_IF */ #if defined(WL_CFG80211_P2P_DEV_IF) +#define cfgdev_to_wdev(cfgdev) (cfgdev) #define ndev_to_cfgdev(ndev) ndev_to_wdev(ndev) -#define cfgdev_to_ndev(cfgdev) cfgdev ? (cfgdev->netdev) : NULL +#define cfgdev_to_ndev(cfgdev) (cfgdev ? (cfgdev->netdev) : NULL) +#define wdev_to_cfgdev(cfgdev) (cfgdev) #define discover_cfgdev(cfgdev, cfg) (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) #else +#define cfgdev_to_wdev(cfgdev) (cfgdev->ieee80211_ptr) +#define wdev_to_cfgdev(cfgdev) cfgdev ? 
(cfgdev->netdev) : NULL #define ndev_to_cfgdev(ndev) (ndev) #define cfgdev_to_ndev(cfgdev) (cfgdev) #define discover_cfgdev(cfgdev, cfg) (cfgdev == cfg->p2p_net) @@ -901,6 +1201,12 @@ wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) true : false) #endif /* WL_CFG80211_P2P_DEV_IF */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) +#define scan_req_iftype(req) (req->dev->ieee80211_ptr->iftype) +#else +#define scan_req_iftype(req) (req->wdev->iftype) +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) */ + #define wl_to_sr(w) (w->scan_req_int) #if defined(STATIC_WL_PRIV_STRUCT) #define wl_to_ie(w) (w->ie) @@ -929,7 +1235,6 @@ wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) #define for_each_ndev(cfg, iter, next) \ list_for_each_entry_safe(iter, next, &cfg->net_list, list) - /* In case of WPS from wpa_supplicant, pairwise siute and group suite is 0. * In addtion to that, wpa_version is WPA_VERSION_1 */ @@ -937,7 +1242,19 @@ wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) ((wl_cfgp2p_find_wpsie((u8 *)_sme->ie, _sme->ie_len) != NULL) && \ (!_sme->crypto.n_ciphers_pairwise) && \ (!_sme->crypto.cipher_group)) -extern s32 wl_cfg80211_attach(struct net_device *ndev, dhd_pub_t *context); + +#define IS_AKM_SUITE_FT(sec) false + +#define IS_AKM_SUITE_CCKM(sec) false + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) +#define STA_INFO_BIT(info) (1ul << NL80211_STA_ ## info) +#define strnicmp(str1, str2, len) strncasecmp((str1), (str2), (len)) +#else +#define STA_INFO_BIT(info) (STATION_ ## info) +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) */ + +extern s32 wl_cfg80211_attach(struct net_device *ndev, void *context); extern s32 wl_cfg80211_attach_post(struct net_device *ndev); extern void wl_cfg80211_detach(void *para); @@ -946,19 +1263,25 @@ extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e, void wl_cfg80211_set_parent_dev(void *dev); struct 
device *wl_cfg80211_get_parent_dev(void); +/* clear IEs */ +extern s32 wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg); +extern s32 wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, s32 bssidx); + extern s32 wl_cfg80211_up(void *para); extern s32 wl_cfg80211_down(void *para); extern s32 wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx); extern s32 wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx); extern s32 wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx); extern struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name, - uint8 *mac, uint8 bssidx); + uint8 *mac, uint8 bssidx, char *dngl_name); extern int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev); extern int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev); extern int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev); -extern bool wl_cfg80211_is_vsdb_mode(void); +extern bool wl_cfg80211_is_concurrent_mode(void); extern void* wl_cfg80211_get_dhdp(void); extern bool wl_cfg80211_is_p2p_active(void); +extern bool wl_cfg80211_is_roam_offload(void); +extern bool wl_cfg80211_is_event_from_connected_bssid(const wl_event_msg_t *e, int ifidx); extern void wl_cfg80211_dbg_level(u32 level); extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr); extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len); @@ -966,6 +1289,12 @@ extern s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len); extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len, enum wl_management_type type); extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len); +extern s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len); +#ifdef WL11ULB +extern s32 wl_cfg80211_set_ulb_mode(struct net_device *dev, int mode); +extern 
s32 wl_cfg80211_set_ulb_bw(struct net_device *dev, + u32 ulb_bw, char *ifname); +#endif /* WL11ULB */ #ifdef P2PLISTEN_AP_SAMECHN extern s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable); #endif /* P2PLISTEN_AP_SAMECHN */ @@ -974,15 +1303,6 @@ extern s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable); void* wl_cfg80211_btcoex_init(struct net_device *ndev); void wl_cfg80211_btcoex_deinit(void); -#ifdef WL_SDO -extern s32 wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg); -extern s32 wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg); -extern s32 wl_cfg80211_sd_offload(struct net_device *net, char *cmd, char* buf, int len); -extern s32 wl_cfg80211_pause_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg); -extern s32 wl_cfg80211_resume_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg); - -#endif - #ifdef WL_SUPPORT_AUTO_CHANNEL #define CHANSPEC_BUF_SIZE 1024 #define CHAN_SEL_IOCTL_DELAY 300 @@ -996,33 +1316,32 @@ extern s32 wl_cfg80211_resume_sdo(struct net_device *dev, struct bcm_cfg80211 *c extern s32 wl_cfg80211_get_best_channels(struct net_device *dev, char* command, int total_len); #endif /* WL_SUPPORT_AUTO_CHANNEL */ - extern int wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n); -extern int wl_cfg80211_hex_str_to_bin(unsigned char *data, int dlen, char *str); extern int wl_cfg80211_hang(struct net_device *dev, u16 reason); extern s32 wl_mode_to_nl80211_iftype(s32 mode); int wl_cfg80211_do_driver_init(struct net_device *net); void wl_cfg80211_enable_trace(u32 level); extern s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify); extern s32 wl_cfg80211_if_is_group_owner(void); -extern chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec); -extern chanspec_t wl_ch_host_to_driver(u16 channel); +extern chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec); +extern chanspec_t wl_ch_host_to_driver(s32 bssidx, u16 channel); extern s32 wl_set_tx_power(struct net_device *dev, enum 
nl80211_tx_power_setting type, s32 dbm); extern s32 wl_get_tx_power(struct net_device *dev, s32 *dbm); extern s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add); extern void wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev); -#ifdef WL_HOST_BAND_MGMT -extern s32 wl_cfg80211_set_band(struct net_device *ndev, int band); -#endif /* WL_HOST_BAND_MGMT */ -#if defined(DHCP_SCAN_SUPPRESS) -extern int wl_cfg80211_scan_suppress(struct net_device *dev, int suppress); -#endif /* OEM_ANDROID */ extern void wl_cfg80211_add_to_eventbuffer(wl_eventmsg_buf_t *ev, u16 event, bool set); extern s32 wl_cfg80211_apply_eventbuffer(struct net_device *ndev, struct bcm_cfg80211 *cfg, wl_eventmsg_buf_t *ev); extern void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac); extern void wl_cfg80211_update_power_mode(struct net_device *dev); +extern void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command); +extern void wl_terminate_event_handler(void); +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +extern s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len); +extern s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len); +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + #define SCAN_BUF_CNT 2 #define SCAN_BUF_NEXT 1 #define WL_SCANTYPE_LEGACY 0x1 @@ -1036,14 +1355,11 @@ extern void wl_cfg80211_update_power_mode(struct net_device *dev); #define wl_escan_init_sync_id(a) extern void wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len); extern s32 wl_cfg80211_ibss_vsie_delete(struct net_device *dev); -#ifdef WLAIBSS -extern void wl_cfg80211_set_txfail_pid(int pid); -#endif /* WLAIBSS */ extern void wl_cfg80211_set_rmc_pid(int pid); +extern int wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg, + bcm_struct_cfgdev *cfgdev, s32 bssidx, s32 pktflag, + const u8 *vndr_ie, u32 vndr_ie_len); -#ifdef WLFBT -extern void 
wl_cfg80211_get_fbt_key(uint8 *key); -#endif /* Action frame specific functions */ extern u8 wl_get_action_category(void *frame, u32 frame_len); @@ -1067,6 +1383,24 @@ struct net_device *wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *c extern int wl_cfg80211_get_ioctl_version(void); extern int wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable); +extern s32 wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data, + char *command, int total_len); +extern s32 wl_cfg80211_wbtext_config(struct net_device *ndev, char *data, + char *command, int total_len); +extern int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data, + char *command, int total_len); +extern int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data, + char *command, int total_len); +extern s32 wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data, + char *command, int total_len); +extern s32 wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, + void *buf, s32 buflen); +extern s32 wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, + void *buf, s32 buflen); +#if defined(WL_VIRTUAL_APSTA) +extern int wl_cfg80211_interface_create(struct net_device *dev, char *name); +extern int wl_cfg80211_interface_delete(struct net_device *dev, char *name); +#endif /* defined (WL_VIRTUAL_APSTA) */ #ifdef WL_NAN extern int wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd, @@ -1077,4 +1411,36 @@ extern int wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd, extern void wl_cfg80211_del_p2p_wdev(void); #endif /* WL_CFG80211_P2P_DEV_IF */ +#if defined(WL_SUPPORT_AUTO_CHANNEL) +extern int wl_cfg80211_set_spect(struct net_device *dev, int spect); +extern int wl_cfg80211_get_sta_channel(void); +#endif /* WL_SUPPORT_AUTO_CHANNEL */ + +#ifdef P2P_LISTEN_OFFLOADING +extern s32 wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len); +extern s32 wl_cfg80211_p2plo_listen_stop(struct net_device *dev); +#endif 
/* P2P_LISTEN_OFFLOADING */ + +#define RETURN_EIO_IF_NOT_UP(wlpriv) \ +do { \ + struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv); \ + if (unlikely(!wl_get_drv_status(wlpriv, READY, checkSysUpNDev))) { \ + WL_INFORM(("device is not ready\n")); \ + return -EIO; \ + } \ +} while (0) + +#ifdef QOS_MAP_SET +extern uint8 *wl_get_up_table(void); +#endif /* QOS_MAP_SET */ + +#define P2PO_COOKIE 65535 +u64 wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg); +#if defined(SUPPORT_RANDOM_MAC_SCAN) +int wl_cfg80211_set_random_mac(struct net_device *dev, bool enable); +int wl_cfg80211_random_mac_enable(struct net_device *dev); +int wl_cfg80211_random_mac_disable(struct net_device *dev); +#endif /* SUPPORT_RANDOM_MAC_SCAN */ +int wl_cfg80211_iface_count(void); +int wl_check_dongle_idle(struct wiphy *wiphy); #endif /* _wl_cfg80211_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c b/drivers/amlogic/wifi/bcmdhd/wl_cfg_btcoex.c similarity index 92% rename from drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c rename to drivers/amlogic/wifi/bcmdhd/wl_cfg_btcoex.c index c8a16ce551feb..81e42ab3b15ca 100644 --- a/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c +++ b/drivers/amlogic/wifi/bcmdhd/wl_cfg_btcoex.c @@ -1,9 +1,30 @@ /* * Linux cfg80211 driver - Dongle Host Driver (DHD) related * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, 
for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: wl_cfg_btcoex.c 467328 2014-04-03 01:23:40Z $ + * + * <> + * + * $Id: wl_cfg_btcoex.c 514727 2014-11-12 03:02:48Z $ */ #include @@ -419,10 +440,6 @@ int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *co if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) { WL_TRACE_HW4(("DHCP session starts\n")); -#if defined(DHCP_SCAN_SUPPRESS) - /* Suppress scan during the DHCP */ - wl_cfg80211_scan_suppress(dev, 1); -#endif /* OEM_ANDROID */ #ifdef PKT_FILTER_SUPPORT dhd->dhcp_in_progress = 1; @@ -474,10 +491,6 @@ int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *co else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) { -#if defined(DHCP_SCAN_SUPPRESS) - /* Since DHCP is complete, enable the scan back */ - wl_cfg80211_scan_suppress(dev, 0); -#endif /* OEM_ANDROID */ #ifdef PKT_FILTER_SUPPORT dhd->dhcp_in_progress = 0; diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/drivers/amlogic/wifi/bcmdhd/wl_cfgp2p.c similarity index 71% rename from drivers/net/wireless/bcmdhd/wl_cfgp2p.c rename to drivers/amlogic/wifi/bcmdhd/wl_cfgp2p.c index 3aaa668bc4cb4..fec0fa26e9f31 100644 --- a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c +++ b/drivers/amlogic/wifi/bcmdhd/wl_cfgp2p.c @@ -1,9 +1,30 @@ /* * Linux cfgp2p driver * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this 
software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: wl_cfgp2p.c 504573 2014-09-24 15:21:25Z $ + * + * <> + * + * $Id: wl_cfgp2p.c 604795 2015-12-08 13:45:42Z $ * */ #include @@ -29,63 +50,34 @@ #include #include #include - -#if defined(P2PONEINT) #include #include -#endif +#include +#include +#include +#include static s8 scanparambuf[WLC_IOCTL_SMLEN]; -static s8 g_mgmt_ie_buf[2048]; static bool wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type); -static u32 -wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag, - s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd); static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev, struct wireless_dev *wdev, bool notify); -#ifdef P2PONEINT -void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg); -chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy); -s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val); -int wl_cfgp2p_if_open(struct net_device *net); -int wl_cfgp2p_if_stop(struct net_device *net); 
-#endif - #if defined(WL_ENABLE_P2P_IF) static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev); static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd); -int wl_cfgp2p_if_open(struct net_device *net); -int wl_cfgp2p_if_stop(struct net_device *net); +static int wl_cfgp2p_if_open(struct net_device *net); +static int wl_cfgp2p_if_stop(struct net_device *net); static const struct net_device_ops wl_cfgp2p_if_ops = { .ndo_open = wl_cfgp2p_if_open, .ndo_stop = wl_cfgp2p_if_stop, .ndo_do_ioctl = wl_cfgp2p_do_ioctl, -#ifndef P2PONEINT .ndo_start_xmit = wl_cfgp2p_start_xmit, -#endif }; #endif /* WL_ENABLE_P2P_IF */ -#if defined(WL_NEWCFG_PRIVCMD_SUPPORT) -static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev); -static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd); - -static int wl_cfgp2p_if_dummy(struct net_device *net) -{ - return 0; -} - -static const struct net_device_ops wl_cfgp2p_if_ops = { - .ndo_open = wl_cfgp2p_if_dummy, - .ndo_stop = wl_cfgp2p_if_dummy, - .ndo_do_ioctl = wl_cfgp2p_do_ioctl, - .ndo_start_xmit = wl_cfgp2p_start_xmit, -}; -#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */ bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len) { @@ -172,6 +164,22 @@ bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len) if (sd_act_frm->category != P2PSD_ACTION_CATEGORY) return false; +#ifdef WL11U + if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP) + return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE, + (u8 *)sd_act_frm->query_data + GAS_RESP_OFFSET, + frame_len); + + else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP) + return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE, + (u8 *)sd_act_frm->query_data + GAS_CRESP_OFFSET, + frame_len); + else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ) + return true; + else + return false; +#else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ || 
sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP || sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ || @@ -179,7 +187,31 @@ bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len) return true; else return false; +#endif /* WL11U */ } + +bool wl_cfgp2p_is_p2p_gas_action(void *frame, u32 frame_len) +{ + + wifi_p2psd_gas_pub_act_frame_t *sd_act_frm; + + if (frame == NULL) + return false; + + sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame; + if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1)) + return false; + if (sd_act_frm->category != P2PSD_ACTION_CATEGORY) + return false; + + if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ) + return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE, + (u8 *)sd_act_frm->query_data, + frame_len); + else + return false; +} + void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel) { wifi_p2p_pub_act_frame_t *pact_frm; @@ -228,7 +260,7 @@ void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel) " channel=%d\n", (tx)? "TX": "RX", channel)); break; default: - CFGP2P_ACTION(("%s Unknown P2P Public Action Frame," + CFGP2P_ACTION(("%s Unknown Public Action Frame," " channel=%d\n", (tx)? "TX": "RX", channel)); } @@ -261,23 +293,23 @@ void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel) sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame; switch (sd_act_frm->action) { case P2PSD_ACTION_ID_GAS_IREQ: - CFGP2P_ACTION(("%s P2P GAS Initial Request," + CFGP2P_ACTION(("%s GAS Initial Request," " channel=%d\n", (tx)? "TX" : "RX", channel)); break; case P2PSD_ACTION_ID_GAS_IRESP: - CFGP2P_ACTION(("%s P2P GAS Initial Response," + CFGP2P_ACTION(("%s GAS Initial Response," " channel=%d\n", (tx)? "TX" : "RX", channel)); break; case P2PSD_ACTION_ID_GAS_CREQ: - CFGP2P_ACTION(("%s P2P GAS Comback Request," + CFGP2P_ACTION(("%s GAS Comback Request," " channel=%d\n", (tx)? 
"TX" : "RX", channel)); break; case P2PSD_ACTION_ID_GAS_CRESP: - CFGP2P_ACTION(("%s P2P GAS Comback Response," + CFGP2P_ACTION(("%s GAS Comback Response," " channel=%d\n", (tx)? "TX" : "RX", channel)); break; default: - CFGP2P_ACTION(("%s Unknown P2P GAS Frame," + CFGP2P_ACTION(("%s Unknown GAS Frame," " channel=%d\n", (tx)? "TX" : "RX", channel)); } @@ -296,35 +328,15 @@ wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg) CFGP2P_ERR(("struct p2p_info allocation failed\n")); return -ENOMEM; } -#define INIT_IE(IE_TYPE, BSS_TYPE) \ - do { \ - memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \ - sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \ - wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \ - } while (0); - - INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY); - INIT_IE(probe_res, P2PAPI_BSSCFG_PRIMARY); - INIT_IE(assoc_req, P2PAPI_BSSCFG_PRIMARY); - INIT_IE(assoc_res, P2PAPI_BSSCFG_PRIMARY); - INIT_IE(beacon, P2PAPI_BSSCFG_PRIMARY); - INIT_IE(probe_req, P2PAPI_BSSCFG_DEVICE); - INIT_IE(probe_res, P2PAPI_BSSCFG_DEVICE); - INIT_IE(assoc_req, P2PAPI_BSSCFG_DEVICE); - INIT_IE(assoc_res, P2PAPI_BSSCFG_DEVICE); - INIT_IE(beacon, P2PAPI_BSSCFG_DEVICE); - INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION); - INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION); - INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION); - INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION); - INIT_IE(beacon, P2PAPI_BSSCFG_CONNECTION); -#undef INIT_IE + wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY) = bcmcfg_to_prmry_ndev(cfg); wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY) = 0; wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL; wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0; - wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = NULL; - wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = 0; + wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1) = NULL; + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) = -1; + wl_to_p2p_bss_ndev(cfg, 
P2PAPI_BSSCFG_CONNECTION2) = NULL; + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) = -1; return BCME_OK; } @@ -335,7 +347,7 @@ wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg) void wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg) { - CFGP2P_DBG(("In\n")); + CFGP2P_ERR(("In\n")); if (cfg->p2p) { kfree(cfg->p2p); cfg->p2p = NULL; @@ -365,7 +377,14 @@ wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg) CFGP2P_ERR(("WLC_DOWN error %d\n", ret)); return ret; } - wldev_iovar_setint(ndev, "apsta", val); + + ret = wldev_iovar_setint(ndev, "apsta", val); + if (ret < 0) { + /* return error and fail the initialization */ + CFGP2P_ERR(("wl apsta %d set error. ret: %d\n", val, ret)); + return ret; + } + ret = wldev_ioctl(ndev, WLC_UP, &val, sizeof(s32), true); if (ret < 0) { CFGP2P_ERR(("WLC_UP error %d\n", ret)); @@ -385,6 +404,20 @@ wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg) return ret; } +int wl_cfg_multip2p_operational(struct bcm_cfg80211 *cfg) +{ + if (!cfg->p2p) { + CFGP2P_DBG(("p2p not enabled! \n")); + return false; + } + + if ((wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) != -1) && + (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) != -1)) + return true; + else + return false; +} + /* Create a new P2P BSS. 
* Parameters: * @mac : MAC address of the BSS to create @@ -398,7 +431,6 @@ wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, { wl_p2p_if_t ifreq; s32 err; - u32 scb_timeout = WL_SCB_TIMEOUT; struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); ifreq.type = if_type; @@ -412,14 +444,11 @@ wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); - - if (unlikely(err < 0)) + if (unlikely(err < 0)) { printk("'cfg p2p_ifadd' error %d\n", err); - else if (if_type == WL_P2P_IF_GO) { - err = wldev_ioctl(ndev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true); - if (unlikely(err < 0)) - printk("'cfg scb_timeout' error %d\n", err); + return err; } + return err; } @@ -472,13 +501,12 @@ wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac) */ s32 wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, - chanspec_t chspec) + chanspec_t chspec, s32 conn_idx) { wl_p2p_if_t ifreq; s32 err; - u32 scb_timeout = WL_SCB_TIMEOUT; - struct net_device *netdev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION); + struct net_device *netdev = wl_to_p2p_bss_ndev(cfg, conn_idx); ifreq.type = if_type; ifreq.chspec = chspec; @@ -492,13 +520,10 @@ wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); - if (unlikely(err < 0)) { printk("'cfg p2p_ifupd' error %d\n", err); } else if (if_type == WL_P2P_IF_GO) { - err = wldev_ioctl(netdev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true); - if (unlikely(err < 0)) - printk("'cfg scb_timeout' error %d\n", err); + cfg->p2p->p2p_go_count++; } return err; } @@ -594,21 +619,9 @@ wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode, u32 channel, u16 liste } #endif /* P2PLISTEN_AP_SAMECHN 
*/ -#if defined(CUSTOM_PLATFORM_NV_TEGRA) -#if defined(P2P_DISCOVERY_WAR) - if (mode == WL_P2P_DISC_ST_LISTEN || mode == WL_P2P_DISC_ST_SEARCH) { - if (!cfg->p2p->vif_created) { - if (wldev_iovar_setint(wl_to_prmry_ndev(cfg), "mpc", 0) < 0) { - WL_ERR(("mpc disabling failed\n")); - } - } - } -#endif /* defined(P2P_DISCOVERY_WAR) */ -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ - /* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */ discovery_mode.state = mode; - discovery_mode.chspec = wl_ch_host_to_driver(channel); + discovery_mode.chspec = wl_ch_host_to_driver(bssidx, channel); discovery_mode.dwell = listen_ms; ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode, sizeof(discovery_mode), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, @@ -634,11 +647,37 @@ wl_cfgp2p_get_disc_idx(struct bcm_cfg80211 *cfg, s32 *index) return ret; } +int wl_cfgp2p_get_conn_idx(struct bcm_cfg80211 *cfg) +{ + int i; + s32 connected_cnt; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + if (!dhd) + return (-ENODEV); + for (i = P2PAPI_BSSCFG_CONNECTION1; i < P2PAPI_BSSCFG_MAX; i++) { + if (wl_to_p2p_bss_bssidx(cfg, i) == -1) { + if (i == P2PAPI_BSSCFG_CONNECTION2) { + if (!(dhd->op_mode & DHD_FLAG_MP2P_MODE)) { + CFGP2P_ERR(("Multi p2p not supported")); + return BCME_ERROR; + } + if ((connected_cnt = wl_get_drv_status_all(cfg, CONNECTED)) > 1) { + CFGP2P_ERR(("Failed to create second p2p interface" + "Already one connection exists")); + return BCME_ERROR; + } + } + return i; + } + } + return BCME_ERROR; +} + s32 wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg) { - s32 index = 0; + s32 bssidx = 0; s32 ret = BCME_OK; CFGP2P_DBG(("enter\n")); @@ -654,14 +693,22 @@ wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg) return ret; } /* Enable P2P Discovery in the WL Driver */ - ret = wl_cfgp2p_get_disc_idx(cfg, &index); + ret = wl_cfgp2p_get_disc_idx(cfg, &bssidx); if (ret < 0) { return ret; } + /* In case of CFG80211 case, check if p2p_discovery interface has allocated 
p2p_wdev */ + if (!cfg->p2p_wdev) { + CFGP2P_ERR(("p2p_wdev is NULL.\n")); + return BCME_NODEVICE; + } + /* Make an entry in the netinfo */ + wl_alloc_netinfo(cfg, NULL, cfg->p2p_wdev, WL_MODE_BSS, 0, bssidx); + wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY); - wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = index; + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = bssidx; /* Set the initial discovery state to SCAN */ ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, @@ -686,26 +733,30 @@ static s32 wl_cfgp2p_deinit_discovery(struct bcm_cfg80211 *cfg) { s32 ret = BCME_OK; - CFGP2P_DBG(("enter\n")); + s32 bssidx; - if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) <= 0) { + CFGP2P_DBG(("enter\n")); + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + if (bssidx <= 0) { CFGP2P_ERR(("do nothing, not initialized\n")); return -1; } + + /* Clear our saved WPS and P2P IEs for the discovery BSS */ + wl_cfg80211_clear_per_bss_ies(cfg, bssidx); + /* Set the discovery state to SCAN */ - ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, - wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE)); + wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, + bssidx); /* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */ ret = wl_cfgp2p_set_discovery(cfg, 0); - /* Clear our saved WPS and P2P IEs for the discovery BSS. The driver - * deleted these IEs when wl_cfgp2p_set_discovery() deleted the discovery - * BSS. - */ + /* Remove the p2p disc entry in the netinfo */ +#ifdef DHD_IFDEBUG + WL_ERR(("dealloc_net_info by wdev=%p\n", cfg->p2p_wdev)); +#endif + wl_dealloc_netinfo_by_wdev(cfg, cfg->p2p_wdev); - /* Clear the saved bsscfg index of the discovery BSSCFG to indicate we - * have no discovery BSS. 
- */ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = WL_INVALID; wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL; @@ -726,20 +777,19 @@ wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev, s32 ret = BCME_OK; s32 bssidx; + CFGP2P_DBG(("enter\n")); if (wl_get_p2p_status(cfg, DISCOVERY_ON)) { CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n")); goto set_ie; } - wl_set_p2p_status(cfg, DISCOVERY_ON); - - CFGP2P_DBG(("enter\n")); - ret = wl_cfgp2p_init_discovery(cfg); if (unlikely(ret < 0)) { CFGP2P_ERR((" init discovery error %d\n", ret)); goto exit; } + + wl_set_p2p_status(cfg, DISCOVERY_ON); /* Set wsec to any non-zero value in the discovery bsscfg to ensure our * P2P probe responses have the privacy bit set in the 802.11 WPA IE. * Some peer devices may not initiate WPS with us if this bit is not set. @@ -751,14 +801,15 @@ wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev, } set_ie: if (ie_len) { + if (bcmcfg_to_prmry_ndev(cfg) == dev) { bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); - } else if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) { - WL_ERR(("Find p2p index from dev(%p) failed\n", dev)); + } else if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfg->p2p_wdev)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", cfg->p2p_wdev)); return BCME_ERROR; } - ret = wl_cfgp2p_set_management_ie(cfg, dev, + ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, VNDR_IE_PRBREQ_FLAG, ie, ie_len); @@ -780,6 +831,8 @@ s32 wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg) { s32 ret = BCME_OK; + s32 bssidx; + CFGP2P_DBG((" enter\n")); wl_clr_p2p_status(cfg, DISCOVERY_ON); @@ -789,16 +842,19 @@ wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg) goto exit; } - if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) == 0) { +#ifdef DHD_IFDEBUG + WL_ERR(("%s: (cfg)->p2p->bss[type].bssidx: %d\n", + __FUNCTION__, (cfg)->p2p->bss[P2PAPI_BSSCFG_DEVICE].bssidx)); +#endif + 
bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + if (bssidx <= 0) { CFGP2P_ERR((" do nothing, not initialized\n")); - goto exit; + return 0; } ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, - wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE)); - + bssidx); if (unlikely(ret < 0)) { - CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n")); } /* Do a scan abort to stop the driver's scan engine in case it is still @@ -932,7 +988,7 @@ wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active, (num_chans & WL_SCAN_PARAMS_COUNT_MASK)); for (i = 0; i < num_chans; i++) { - eparams->params.channel_list[i] = wl_ch_host_to_driver(channels[i]); + eparams->params.channel_list[i] = wl_ch_host_to_driver(bssidx, channels[i]); } eparams->version = htod32(ESCAN_REQ_VERSION); eparams->action = htod16(action); @@ -949,6 +1005,7 @@ wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active, ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan", memblk, memsize, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + WL_SCAN(("P2P_SEARCH sync ID: %d, bssidx: %d\n", eparams->sync_id, bssidx)); if (ret == BCME_OK) wl_set_p2p_status(cfg, SCANNING); return ret; @@ -970,7 +1027,7 @@ wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev, u16 *default_chan_list = NULL; p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_AFX_PEER_NORMAL; if (!p2p_is_on(cfg) || ndev == NULL || bssidx == WL_INVALID) - return -BCME_ERROR; + return -EINVAL; WL_TRACE_HW4((" Enter\n")); if (bssidx == wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY)) bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); @@ -978,6 +1035,16 @@ wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev, chan_cnt = AF_PEER_SEARCH_CNT; else chan_cnt = SOCIAL_CHAN_CNT; + + if (cfg->afx_hdl->pending_tx_act_frm && cfg->afx_hdl->is_active) { + wl_action_frame_t *action_frame; + action_frame = &(cfg->afx_hdl->pending_tx_act_frm->action_frame); + 
if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) { + chan_cnt = 1; + p2p_scan_purpose = P2P_SCAN_AFX_PEER_REDUCED; + } + } + default_chan_list = kzalloc(chan_cnt * sizeof(*default_chan_list), GFP_KERNEL); if (default_chan_list == NULL) { CFGP2P_ERR(("channel list allocation failed \n")); @@ -1019,354 +1086,6 @@ wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev, #define wl_cfgp2p_is_wfd_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD) -static s32 -wl_cfgp2p_parse_vndr_ies(u8 *parse, u32 len, - struct parsed_vndr_ies *vndr_ies) -{ - s32 err = BCME_OK; - vndr_ie_t *vndrie; - bcm_tlv_t *ie; - struct parsed_vndr_ie_info *parsed_info; - u32 count = 0; - s32 remained_len; - - remained_len = (s32)len; - memset(vndr_ies, 0, sizeof(*vndr_ies)); - - WL_INFORM(("---> len %d\n", len)); - ie = (bcm_tlv_t *) parse; - if (!bcm_valid_tlv(ie, remained_len)) - ie = NULL; - while (ie) { - if (count >= MAX_VNDR_IE_NUMBER) - break; - if (ie->id == DOT11_MNG_VS_ID) { - vndrie = (vndr_ie_t *) ie; - /* len should be bigger than OUI length + one data length at least */ - if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) { - CFGP2P_ERR(("%s: invalid vndr ie. length is too small %d\n", - __FUNCTION__, vndrie->len)); - goto end; - } - /* if wpa or wme ie, do not add ie */ - if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) && - ((vndrie->data[0] == WPA_OUI_TYPE) || - (vndrie->data[0] == WME_OUI_TYPE))) { - CFGP2P_DBG(("Found WPA/WME oui. 
Do not add it\n")); - goto end; - } - - parsed_info = &vndr_ies->ie_info[count++]; - - /* save vndr ie information */ - parsed_info->ie_ptr = (char *)vndrie; - parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN); - memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t)); - - vndr_ies->count = count; - - CFGP2P_DBG(("\t ** OUI %02x %02x %02x, type 0x%02x \n", - parsed_info->vndrie.oui[0], parsed_info->vndrie.oui[1], - parsed_info->vndrie.oui[2], parsed_info->vndrie.data[0])); - } -end: - ie = bcm_next_tlv(ie, &remained_len); - } - return err; -} - - -/* Delete and Set a management vndr ie to firmware - * Parameters: - * @cfg : wl_private data - * @ndev : net device for bssidx - * @bssidx : bssidx for BSS - * @pktflag : packet flag for IE (VNDR_IE_PRBREQ_FLAG,VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG, - * VNDR_IE_ASSOCREQ_FLAG) - * @ie : VNDR IE (such as P2P IE , WPS IE) - * @ie_len : VNDR IE Length - * Returns 0 if success. - */ - -s32 -wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, - s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len) -{ - s32 ret = BCME_OK; - u8 *curr_ie_buf = NULL; - u8 *mgmt_ie_buf = NULL; - u32 mgmt_ie_buf_len = 0; - u32 *mgmt_ie_len = 0; - u32 del_add_ie_buf_len = 0; - u32 total_ie_buf_len = 0; - u32 parsed_ie_buf_len = 0; - struct parsed_vndr_ies old_vndr_ies; - struct parsed_vndr_ies new_vndr_ies; - s32 i; - u8 *ptr; - s32 type = -1; - s32 remained_buf_len; -#define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(cfg, bsstype).p2p_ ## type ## _ie) -#define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(cfg, bsstype).p2p_ ## type ## _ie_len) - memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf)); - curr_ie_buf = g_mgmt_ie_buf; - CFGP2P_DBG((" bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag)); - -#ifdef DUAL_STA - if ((cfg->p2p != NULL) && ((bssidx == 0) || (bssidx != cfg->cfgdev_bssidx))) -#else - if (cfg->p2p != NULL) -#endif - { - if (wl_cfgp2p_find_type(cfg, bssidx, &type)) { - CFGP2P_ERR(("cannot find 
type from bssidx : %d\n", bssidx)); - return BCME_ERROR; - } - - switch (pktflag) { - case VNDR_IE_PRBREQ_FLAG : - mgmt_ie_buf = IE_TYPE(probe_req, type); - mgmt_ie_len = &IE_TYPE_LEN(probe_req, type); - mgmt_ie_buf_len = sizeof(IE_TYPE(probe_req, type)); - break; - case VNDR_IE_PRBRSP_FLAG : - mgmt_ie_buf = IE_TYPE(probe_res, type); - mgmt_ie_len = &IE_TYPE_LEN(probe_res, type); - mgmt_ie_buf_len = sizeof(IE_TYPE(probe_res, type)); - break; - case VNDR_IE_ASSOCREQ_FLAG : - mgmt_ie_buf = IE_TYPE(assoc_req, type); - mgmt_ie_len = &IE_TYPE_LEN(assoc_req, type); - mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_req, type)); - break; - case VNDR_IE_ASSOCRSP_FLAG : - mgmt_ie_buf = IE_TYPE(assoc_res, type); - mgmt_ie_len = &IE_TYPE_LEN(assoc_res, type); - mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_res, type)); - break; - case VNDR_IE_BEACON_FLAG : - mgmt_ie_buf = IE_TYPE(beacon, type); - mgmt_ie_len = &IE_TYPE_LEN(beacon, type); - mgmt_ie_buf_len = sizeof(IE_TYPE(beacon, type)); - break; - default: - mgmt_ie_buf = NULL; - mgmt_ie_len = NULL; - CFGP2P_ERR(("not suitable type\n")); - return BCME_ERROR; - } - } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) { - if (cfg->ap_info == NULL) { - CFGP2P_ERR(("hostapd ap_info null ptr refrence while setting IE\n")); - return BCME_ERROR; - - } - switch (pktflag) { - case VNDR_IE_PRBRSP_FLAG : - mgmt_ie_buf = cfg->ap_info->probe_res_ie; - mgmt_ie_len = &cfg->ap_info->probe_res_ie_len; - mgmt_ie_buf_len = sizeof(cfg->ap_info->probe_res_ie); - break; - case VNDR_IE_BEACON_FLAG : - mgmt_ie_buf = cfg->ap_info->beacon_ie; - mgmt_ie_len = &cfg->ap_info->beacon_ie_len; - mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie); - break; - case VNDR_IE_ASSOCRSP_FLAG : - /* WPS-AP WSC2.0 assoc res includes wps_ie */ - mgmt_ie_buf = cfg->ap_info->assoc_res_ie; - mgmt_ie_len = &cfg->ap_info->assoc_res_ie_len; - mgmt_ie_buf_len = sizeof(cfg->ap_info->assoc_res_ie); - break; - default: - mgmt_ie_buf = NULL; - mgmt_ie_len = NULL; - CFGP2P_ERR(("not 
suitable type\n")); - return BCME_ERROR; - } - bssidx = 0; - } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) { - switch (pktflag) { - case VNDR_IE_PRBREQ_FLAG : - mgmt_ie_buf = cfg->sta_info->probe_req_ie; - mgmt_ie_len = &cfg->sta_info->probe_req_ie_len; - mgmt_ie_buf_len = sizeof(cfg->sta_info->probe_req_ie); - break; - case VNDR_IE_ASSOCREQ_FLAG : - mgmt_ie_buf = cfg->sta_info->assoc_req_ie; - mgmt_ie_len = &cfg->sta_info->assoc_req_ie_len; - mgmt_ie_buf_len = sizeof(cfg->sta_info->assoc_req_ie); - break; - default: - mgmt_ie_buf = NULL; - mgmt_ie_len = NULL; - CFGP2P_ERR(("not suitable type\n")); - return BCME_ERROR; - } - bssidx = 0; - } else { - CFGP2P_ERR(("not suitable type\n")); - return BCME_ERROR; - } - - if (vndr_ie_len > mgmt_ie_buf_len) { - CFGP2P_ERR(("extra IE size too big\n")); - ret = -ENOMEM; - } else { - /* parse and save new vndr_ie in curr_ie_buff before comparing it */ - if (vndr_ie && vndr_ie_len && curr_ie_buf) { - ptr = curr_ie_buf; - - wl_cfgp2p_parse_vndr_ies((u8*)vndr_ie, - vndr_ie_len, &new_vndr_ies); - - for (i = 0; i < new_vndr_ies.count; i++) { - struct parsed_vndr_ie_info *vndrie_info = - &new_vndr_ies.ie_info[i]; - - memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr, - vndrie_info->ie_len); - parsed_ie_buf_len += vndrie_info->ie_len; - } - } - - if (mgmt_ie_buf != NULL) { - if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) && - (memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) { - CFGP2P_INFO(("Previous mgmt IE is equals to current IE\n")); - goto exit; - } - - /* parse old vndr_ie */ - wl_cfgp2p_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len, - &old_vndr_ies); - - /* make a command to delete old ie */ - for (i = 0; i < old_vndr_ies.count; i++) { - struct parsed_vndr_ie_info *vndrie_info = - &old_vndr_ies.ie_info[i]; - - CFGP2P_INFO(("DELETED ID : %d, Len: %d , OUI:%02x:%02x:%02x\n", - vndrie_info->vndrie.id, vndrie_info->vndrie.len, - vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1], - 
vndrie_info->vndrie.oui[2])); - - del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf, - pktflag, vndrie_info->vndrie.oui, - vndrie_info->vndrie.id, - vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN, - vndrie_info->ie_len - VNDR_IE_FIXED_LEN, - "del"); - - curr_ie_buf += del_add_ie_buf_len; - total_ie_buf_len += del_add_ie_buf_len; - } - } - - *mgmt_ie_len = 0; - /* Add if there is any extra IE */ - if (mgmt_ie_buf && parsed_ie_buf_len) { - ptr = mgmt_ie_buf; - - remained_buf_len = mgmt_ie_buf_len; - - /* make a command to add new ie */ - for (i = 0; i < new_vndr_ies.count; i++) { - struct parsed_vndr_ie_info *vndrie_info = - &new_vndr_ies.ie_info[i]; - - CFGP2P_INFO(("ADDED ID : %d, Len: %d(%d), OUI:%02x:%02x:%02x\n", - vndrie_info->vndrie.id, vndrie_info->vndrie.len, - vndrie_info->ie_len - 2, - vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1], - vndrie_info->vndrie.oui[2])); - - del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf, - pktflag, vndrie_info->vndrie.oui, - vndrie_info->vndrie.id, - vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN, - vndrie_info->ie_len - VNDR_IE_FIXED_LEN, - "add"); - - /* verify remained buf size before copy data */ - if (remained_buf_len >= vndrie_info->ie_len) { - remained_buf_len -= vndrie_info->ie_len; - } else { - CFGP2P_ERR(("no space in mgmt_ie_buf: pktflag = %d, " - "found vndr ies # = %d(cur %d), remained len %d, " - "cur mgmt_ie_len %d, new ie len = %d\n", - pktflag, new_vndr_ies.count, i, remained_buf_len, - *mgmt_ie_len, vndrie_info->ie_len)); - break; - } - - /* save the parsed IE in cfg struct */ - memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr, - vndrie_info->ie_len); - *mgmt_ie_len += vndrie_info->ie_len; - - curr_ie_buf += del_add_ie_buf_len; - total_ie_buf_len += del_add_ie_buf_len; - } - } - if (total_ie_buf_len) { - ret = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf, - total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, - bssidx, &cfg->ioctl_buf_sync); - if (ret) - CFGP2P_ERR(("vndr ie set error : 
%d\n", ret)); - } - } -#undef IE_TYPE -#undef IE_TYPE_LEN -exit: - return ret; -} - -/* Clear the manament IE buffer of BSSCFG - * Parameters: - * @cfg : wl_private data - * @bssidx : bssidx for BSS - * - * Returns 0 if success. - */ -s32 -wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx) -{ - - s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG, - VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG}; - s32 index = -1; - s32 type = -1; - struct net_device *ndev = wl_cfgp2p_find_ndev(cfg, bssidx); -#define INIT_IE(IE_TYPE, BSS_TYPE) \ - do { \ - memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \ - sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \ - wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \ - } while (0); - - if (bssidx < 0 || ndev == NULL) { - CFGP2P_ERR(("invalid %s\n", (bssidx < 0) ? "bssidx" : "ndev")); - return BCME_BADARG; - } - - if (wl_cfgp2p_find_type(cfg, bssidx, &type)) { - CFGP2P_ERR(("invalid argument\n")); - return BCME_BADARG; - } - for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) { - /* clean up vndr ies in dongle */ - wl_cfgp2p_set_management_ie(cfg, ndev, bssidx, vndrie_flag[index], NULL, 0); - } - INIT_IE(probe_req, type); - INIT_IE(probe_res, type); - INIT_IE(assoc_req, type); - INIT_IE(assoc_res, type); - INIT_IE(beacon, type); - return BCME_OK; -} - /* Is any of the tlvs the expected entry? If * not update the tlvs buffer pointer/length. 
@@ -1444,7 +1163,7 @@ wl_cfgp2p_find_wfdie(u8 *parse, u32 len) } return NULL; } -static u32 +u32 wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag, s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd) { @@ -1497,47 +1216,6 @@ wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag, } -/* - * Search the bssidx based on dev argument - * Parameters: - * @cfg : wl_private data - * @ndev : net device to search bssidx - * @bssidx : output arg to store bssidx of the bsscfg of firmware. - * Returns error - */ -s32 -wl_cfgp2p_find_idx(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 *bssidx) -{ - u32 i; - if (ndev == NULL || bssidx == NULL) { - CFGP2P_ERR((" argument is invalid\n")); - return BCME_BADARG; - } - if (!cfg->p2p_supported) { - *bssidx = P2PAPI_BSSCFG_PRIMARY; - return BCME_OK; - } - /* we cannot find the bssidx of DISCOVERY BSS - * because the ndev is same with ndev of PRIMARY BSS. - */ - for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) { - if (ndev == wl_to_p2p_bss_ndev(cfg, i)) { - *bssidx = wl_to_p2p_bss_bssidx(cfg, i); - return BCME_OK; - } - } - -#ifdef DUAL_STA - if (cfg->bss_cfgdev && (cfg->bss_cfgdev == ndev_to_cfgdev(ndev))) { - CFGP2P_INFO(("cfgdev is present, return the bssidx")); - *bssidx = cfg->cfgdev_bssidx; - return BCME_OK; - } -#endif - - return BCME_BADARG; - -} struct net_device * wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx) { @@ -1560,7 +1238,9 @@ wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx) } /* * Search the driver array idx based on bssidx argument - * Parameters: + * Parameters: Note that this idx is applicable only + * for primary and P2P interfaces. The virtual AP/STA is not + * covered here. * @cfg : wl_private data * @bssidx : bssidx which indicate bsscfg->idx of firmware. * @type : output arg to store array idx of p2p->bss. 
@@ -1583,14 +1263,6 @@ wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type) } } -#ifdef DUAL_STA - if (bssidx == cfg->cfgdev_bssidx) { - CFGP2P_DBG(("bssidx matching with the virtual I/F \n")); - *type = 1; - return BCME_OK; - } -#endif - exit: return BCME_BADARG; } @@ -1605,22 +1277,37 @@ wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, s32 ret = BCME_OK; struct net_device *ndev = NULL; - if (!cfg || !cfg->p2p) + if (!cfg || !cfg->p2p || !cfgdev) return BCME_ERROR; CFGP2P_DBG((" Enter\n")); +#ifdef DHD_IFDEBUG + WL_ERR(("%s: cfg: %p, cfgdev: %p, cfg->wdev: %p, cfg->p2p_wdev: %p\n", + __FUNCTION__, cfg, cfgdev, cfg->wdev, cfg->p2p_wdev)); +#endif ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); -#if defined(CUSTOM_PLATFORM_NV_TEGRA) -#if defined(P2P_DISCOVERY_WAR) - if (!cfg->p2p->vif_created) { - if (wldev_iovar_setint(ndev, "mpc", 1) < 0) { - WL_ERR(("mpc enabling back failed\n")); +#ifdef P2P_LISTEN_OFFLOADING + if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { + wl_clr_p2p_status(cfg, DISC_IN_PROGRESS); + CFGP2P_ERR(("DISC_IN_PROGRESS cleared\n")); + if (ndev && (ndev->ieee80211_ptr != NULL)) { +#if defined(WL_CFG80211_P2P_DEV_IF) + if (cfgdev && ((struct wireless_dev *)cfgdev)->wiphy) { + cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id, + &cfg->remain_on_chan, GFP_KERNEL); + } else { + CFGP2P_ERR(("Invalid cfgdev. 
Dropping the" + "remain_on_channel_expired event.\n")); + } +#else + cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id, + &cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL); +#endif /* WL_CFG80211_P2P_DEV_IF */ } } -#endif /* defined(P2P_DISCOVERY_WAR) */ -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ +#endif /* P2P_LISTEN_OFFLOADING */ if (wl_get_p2p_status(cfg, LISTEN_EXPIRED) == 0) { wl_set_p2p_status(cfg, LISTEN_EXPIRED); @@ -1701,6 +1388,7 @@ wl_cfgp2p_listen_expired(unsigned long data) CFGP2P_DBG((" Enter\n")); bzero(&msg, sizeof(wl_event_msg_t)); msg.event_type = hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE); + msg.bsscfgidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); #if defined(WL_ENABLE_P2P_IF) wl_cfg80211_event(cfg->p2p_net ? cfg->p2p_net : wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg, NULL); @@ -1724,13 +1412,9 @@ wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev, del_timer_sync(&cfg->p2p->listen_timer); if (notify) { #if defined(WL_CFG80211_P2P_DEV_IF) -#ifdef P2PONEINT - if (wdev == NULL) - wdev = bcmcfg_to_p2p_wdev(cfg); -#endif if (wdev) - cfg80211_remain_on_channel_expired(bcmcfg_to_p2p_wdev(cfg), - cfg->last_roc_id, &cfg->remain_on_chan, GFP_KERNEL); + cfg80211_remain_on_channel_expired(wdev, cfg->last_roc_id, + &cfg->remain_on_chan, GFP_KERNEL); #else if (ndev && ndev->ieee80211_ptr) cfg80211_remain_on_channel_expired(ndev, cfg->last_roc_id, @@ -1857,6 +1541,10 @@ wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev if (status == WLC_E_STATUS_SUCCESS) { wl_set_p2p_status(cfg, ACTION_TX_COMPLETED); CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : ACK\n")); + if (!cfg->need_wait_afrx && cfg->af_sent_channel) { + CFGP2P_DBG(("no need to wait next AF.\n")); + wl_stop_wait_next_action_frame(cfg, ndev); + } } else if (!wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) { wl_set_p2p_status(cfg, ACTION_TX_NOACK); @@ -1952,27 +1640,23 @@ wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, 
struct net_device *dev, * MAC address. */ void -wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr, - struct ether_addr *out_dev_addr, struct ether_addr *out_int_addr) +wl_cfgp2p_generate_bss_mac(struct bcm_cfg80211 *cfg, struct ether_addr *primary_addr) { - memset(out_dev_addr, 0, sizeof(*out_dev_addr)); - memset(out_int_addr, 0, sizeof(*out_int_addr)); + struct ether_addr *mac_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE); + struct ether_addr *int_addr; - /* Generate the P2P Device Address. This consists of the device's - * primary MAC address with the locally administered bit set. - */ - memcpy(out_dev_addr, primary_addr, sizeof(*out_dev_addr)); - out_dev_addr->octet[0] |= 0x02; + memcpy(mac_addr, primary_addr, sizeof(struct ether_addr)); + mac_addr->octet[0] |= 0x02; + WL_DBG(("P2P Discovery address:"MACDBG "\n", MAC2STRDBG(mac_addr->octet))); - /* Generate the P2P Interface Address. If the discovery and connection - * BSSCFGs need to simultaneously co-exist, then this address must be - * different from the P2P Device Address. - */ - memcpy(out_int_addr, out_dev_addr, sizeof(*out_int_addr)); -#ifndef P2PONEINT - out_int_addr->octet[4] ^= 0x80; -#endif + int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION1); + memcpy(int_addr, mac_addr, sizeof(struct ether_addr)); + int_addr->octet[4] ^= 0x80; + WL_DBG(("Primary P2P Interface address:"MACDBG "\n", MAC2STRDBG(int_addr->octet))); + int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION2); + memcpy(int_addr, mac_addr, sizeof(struct ether_addr)); + int_addr->octet[4] ^= 0x90; } /* P2P IF Address change to Virtual Interface MAC Address */ @@ -2059,10 +1743,10 @@ wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx) /* Bring up or down a BSS */ s32 -wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 up) +wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 is_up) { s32 ret = BCME_OK; - s32 val = up ? 
1 : 0; + s32 val = is_up ? 1 : 0; struct { s32 cfg; @@ -2071,12 +1755,12 @@ wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, bss_setbuf.cfg = htod32(bsscfg_idx); bss_setbuf.val = htod32(val); - CFGP2P_INFO(("---cfg bss -C %d %s\n", bsscfg_idx, up ? "up" : "down")); + CFGP2P_INFO(("---cfg bss -C %d %s\n", bsscfg_idx, is_up ? "up" : "down")); ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); if (ret != 0) { - CFGP2P_ERR(("'bss %d' failed with %d\n", up, ret)); + CFGP2P_ERR(("'bss %d' failed with %d\n", is_up, ret)); } return ret; @@ -2116,39 +1800,56 @@ wl_cfgp2p_down(struct bcm_cfg80211 *cfg) s32 i = 0, index = -1; #if defined(WL_CFG80211_P2P_DEV_IF) - wdev = bcmcfg_to_p2p_wdev(cfg); -#ifdef P2PONEINT - ndev = wdev_to_ndev(wdev); -#else ndev = bcmcfg_to_prmry_ndev(cfg); -#endif + wdev = bcmcfg_to_p2p_wdev(cfg); #elif defined(WL_ENABLE_P2P_IF) ndev = cfg->p2p_net ? cfg->p2p_net : bcmcfg_to_prmry_ndev(cfg); wdev = ndev_to_wdev(ndev); #endif /* WL_CFG80211_P2P_DEV_IF */ wl_cfgp2p_cancel_listen(cfg, ndev, wdev, TRUE); + wl_cfgp2p_disable_discovery(cfg); + +#if defined(WL_CFG80211_P2P_DEV_IF) && !defined(KEEP_WIFION_OPTION) + if (cfg->p2p_wdev) { + /* If p2p wdev is left out, clean it up */ + WL_ERR(("Clean up the p2p discovery IF\n")); + wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg); + } +#endif /* WL_CFG80211_P2P_DEV_IF !defined(KEEP_WIFION_OPTION) */ + for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) { index = wl_to_p2p_bss_bssidx(cfg, i); if (index != WL_INVALID) - wl_cfgp2p_clear_management_ie(cfg, index); + wl_cfg80211_clear_per_bss_ies(cfg, index); } wl_cfgp2p_deinit_priv(cfg); return 0; } + +int wl_cfgp2p_vif_created(struct bcm_cfg80211 *cfg) +{ + if (cfg->p2p && ((wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) != -1) || + (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) != -1))) + return true; + else + return false; + +} + s32 
wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len) { s32 ret = -1; int count, start, duration; wl_p2p_sched_t dongle_noa; - + s32 bssidx, type; + int iovar_len = sizeof(dongle_noa); CFGP2P_DBG((" Enter\n")); memset(&dongle_noa, 0, sizeof(dongle_noa)); - if (cfg->p2p && cfg->p2p->vif_created) { - + if (wl_cfgp2p_vif_created(cfg)) { cfg->p2p->noa.desc[0].start = 0; sscanf(buf, "%10d %10d %10d", &count, &start, &duration); @@ -2178,7 +1879,7 @@ wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* b } else { /* Continuous NoA interval. */ - dongle_noa.action = WL_P2P_SCHED_ACTION_NONE; + dongle_noa.action = WL_P2P_SCHED_ACTION_DOZE; dongle_noa.type = WL_P2P_SCHED_TYPE_ABS; if ((cfg->p2p->noa.desc[0].interval == 102) || (cfg->p2p->noa.desc[0].interval == 100)) { @@ -2201,9 +1902,16 @@ wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* b dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration*1000); } dongle_noa.desc[0].interval = htod32(cfg->p2p->noa.desc[0].interval*1000); + bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + if (wl_cfgp2p_find_type(cfg, bssidx, &type) != BCME_OK) + return BCME_ERROR; - ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION), - "p2p_noa", &dongle_noa, sizeof(dongle_noa), cfg->ioctl_buf, + if (dongle_noa.action == WL_P2P_SCHED_ACTION_RESET) { + iovar_len -= sizeof(wl_p2p_sched_desc_t); + } + + ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(cfg, type), + "p2p_noa", &dongle_noa, iovar_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); if (ret < 0) { @@ -2225,7 +1933,7 @@ wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* b CFGP2P_DBG((" Enter\n")); buf[0] = '\0'; - if (cfg->p2p && cfg->p2p->vif_created) { + if (wl_cfgp2p_vif_created(cfg)) { if (cfg->p2p->noa.desc[0].count || cfg->p2p->ops.ops) { _buf[0] = 1; /* noa index */ _buf[1] = (cfg->p2p->ops.ops ? 
0x80: 0) | @@ -2264,13 +1972,19 @@ wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* bu int ps, ctw; int ret = -1; s32 legacy_ps; + s32 conn_idx; + s32 bssidx; struct net_device *dev; CFGP2P_DBG((" Enter\n")); - if (cfg->p2p && cfg->p2p->vif_created) { + if (wl_cfgp2p_vif_created(cfg)) { sscanf(buf, "%10d %10d %10d", &legacy_ps, &ps, &ctw); CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw)); - dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION); + + bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + if (wl_cfgp2p_find_type(cfg, bssidx, &conn_idx) != BCME_OK) + return BCME_ERROR; + dev = wl_to_p2p_bss_ndev(cfg, conn_idx); if (ctw != -1) { cfg->p2p->ops.ctw = ctw; ret = 0; @@ -2302,6 +2016,59 @@ wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* bu return ret; } +s32 +wl_cfgp2p_set_p2p_ecsa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len) +{ + int ch, bw; + s32 conn_idx; + s32 bssidx; + struct net_device *dev; + char smbuf[WLC_IOCTL_SMLEN]; + wl_chan_switch_t csa_arg; + u32 chnsp = 0; + int err = 0; + + CFGP2P_DBG((" Enter\n")); + if (wl_cfgp2p_vif_created(cfg)) { + sscanf(buf, "%10d %10d", &ch, &bw); + CFGP2P_DBG(("Enter ch %d bw %d\n", ch, bw)); + + bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + if (wl_cfgp2p_find_type(cfg, bssidx, &conn_idx) != BCME_OK) { + return BCME_ERROR; + } + dev = wl_to_p2p_bss_ndev(cfg, conn_idx); + if (ch <= 0 || bw <= 0) { + CFGP2P_ERR(("Negative value not permitted!\n")); + return BCME_ERROR; + } + + csa_arg.mode = DOT11_CSA_MODE_ADVISORY; + csa_arg.count = P2P_ECSA_CNT; + csa_arg.reg = 0; + + sprintf(buf, "%d/%d", ch, bw); + chnsp = wf_chspec_aton(buf); + if (chnsp == 0) { + CFGP2P_ERR(("%s:chsp is not correct\n", __FUNCTION__)); + return BCME_ERROR; + } + chnsp = wl_chspec_host_to_driver(chnsp); + csa_arg.chspec = chnsp; + + err = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg), + smbuf, sizeof(smbuf), 
NULL); + if (err) { + CFGP2P_ERR(("%s:set p2p_ecsa failed:%d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + } else { + CFGP2P_ERR(("ERROR: set_p2p_ecsa in non-p2p mode\n")); + return BCME_ERROR; + } + return BCME_OK; +} + u8 * wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id) { @@ -2434,161 +2201,13 @@ struct ethtool_ops cfgp2p_ethtool_ops = { }; #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ -#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT) || \ - defined(P2PONEINT) -#ifdef P2PONEINT -s32 -wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg) -{ - - struct net_device *_ndev; - struct ether_addr primary_mac; - struct net_device *new_ndev; - chanspec_t chspec; - uint8 name[IFNAMSIZ]; - s32 mode = 0; - s32 val = 0; - - - s32 wlif_type = -1; - s32 err, timeout = -1; - - memset(name, 0, IFNAMSIZ); - strncpy(name, "p2p0", 4); - name[IFNAMSIZ - 1] = '\0'; - - if (cfg->p2p_net) { - CFGP2P_ERR(("p2p_net defined already.\n")); - return -EINVAL; - } - - if (!cfg->p2p) - return -EINVAL; - - if (cfg->p2p && !cfg->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) { - p2p_on(cfg) = true; - wl_cfgp2p_set_firm_p2p(cfg); - wl_cfgp2p_init_discovery(cfg); - get_primary_mac(cfg, &primary_mac); - wl_cfgp2p_generate_bss_mac(&primary_mac, - &cfg->p2p->dev_addr, &cfg->p2p->int_addr); - } - - _ndev = bcmcfg_to_prmry_ndev(cfg); - memset(cfg->p2p->vir_ifname, 0, IFNAMSIZ); - strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1); - - wl_cfg80211_scan_abort(cfg); - - - /* In concurrency case, STA may be already associated in a particular channel. - * so retrieve the current channel of primary interface and then start the virtual - * interface on that. 
- */ - chspec = wl_cfg80211_get_shared_freq(cfg->wdev->wiphy); - - /* For P2P mode, use P2P-specific driver features to create the - * bss: "cfg p2p_ifadd" - */ - wl_set_p2p_status(cfg, IF_ADDING); - memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info)); - wlif_type = WL_P2P_IF_CLIENT; - - - err = wl_cfgp2p_ifadd(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec); - if (unlikely(err)) { - wl_clr_p2p_status(cfg, IF_ADDING); - WL_ERR((" virtual iface add failed (%d) \n", err)); - return -ENOMEM; - } - - timeout = wait_event_interruptible_timeout(cfg->netif_change_event, - (wl_get_p2p_status(cfg, IF_ADDING) == false), - msecs_to_jiffies(MAX_WAIT_TIME)); - - - if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) { - struct wireless_dev *vwdev; - int pm_mode = PM_ENABLE; - wl_if_event_info *event = &cfg->if_event_info; - - /* IF_ADD event has come back, we can proceed to to register - * the new interface now, use the interface name provided by caller (thus - * ignore the one from wlc) - */ - strncpy(cfg->if_event_info.name, name, IFNAMSIZ - 1); - new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, cfg->p2p->vir_ifname, - event->mac, event->bssidx); - if (new_ndev == NULL) - goto fail; - - wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = new_ndev; - wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = event->bssidx; - - vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL); - if (unlikely(!vwdev)) { - WL_ERR(("Could not allocate wireless device\n")); - goto fail; - } - vwdev->wiphy = cfg->wdev->wiphy; - WL_TRACE(("virtual interface(%s) is created\n", cfg->p2p->vir_ifname)); - vwdev->iftype = NL80211_IFTYPE_P2P_DEVICE; - vwdev->netdev = new_ndev; - new_ndev->ieee80211_ptr = vwdev; - SET_NETDEV_DEV(new_ndev, wiphy_dev(vwdev->wiphy)); - wl_set_drv_status(cfg, READY, new_ndev); - cfg->p2p->vif_created = true; - wl_set_mode_by_netdev(cfg, new_ndev, mode); - - if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) { - 
wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev); - goto fail; - } - - wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode); - val = 1; - /* Disable firmware roaming for P2P interface */ - wldev_iovar_setint(new_ndev, "roam_off", val); - - if (mode != WL_MODE_AP) - wldev_iovar_setint(new_ndev, "buf_key_b4_m4", 1); - - WL_ERR((" virtual interface(%s) is " - "created net attach done\n", cfg->p2p->vir_ifname)); - - /* reinitialize completion to clear previous count */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) - INIT_COMPLETION(cfg->iface_disable); -#else - init_completion(&cfg->iface_disable); -#endif - cfg->p2p_net = new_ndev; - cfg->p2p_wdev = vwdev; - - return 0; - } else { - wl_clr_p2p_status(cfg, IF_ADDING); - WL_ERR((" virtual interface(%s) is not created \n", cfg->p2p->vir_ifname)); - memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ); - cfg->p2p->vif_created = false; - } - - -fail: - if (wlif_type == WL_P2P_IF_GO) - wldev_iovar_setint(_ndev, "mpc", 1); - return -ENODEV; - -} -#else +#if defined(WL_ENABLE_P2P_IF) s32 wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg) { int ret = 0; struct net_device* net = NULL; -#ifndef WL_NEWCFG_PRIVCMD_SUPPORT struct wireless_dev *wdev = NULL; -#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */ uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 }; if (cfg->p2p_net) { @@ -2602,14 +2221,12 @@ wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg) return -ENODEV; } -#ifndef WL_NEWCFG_PRIVCMD_SUPPORT wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); if (unlikely(!wdev)) { WL_ERR(("Could not allocate wireless device\n")); free_netdev(net); return -ENOMEM; } -#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */ strncpy(net->name, "p2p%d", sizeof(net->name) - 1); net->name[IFNAMSIZ - 1] = '\0'; @@ -2631,48 +2248,39 @@ wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg) /* Register with a dummy MAC addr */ memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN); -#ifndef WL_NEWCFG_PRIVCMD_SUPPORT wdev->wiphy = cfg->wdev->wiphy; wdev->iftype = 
wl_mode_to_nl80211_iftype(WL_MODE_BSS); net->ieee80211_ptr = wdev; -#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) net->ethtool_ops = &cfgp2p_ethtool_ops; #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ -#ifndef WL_NEWCFG_PRIVCMD_SUPPORT SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy)); /* Associate p2p0 network interface with new wdev */ wdev->netdev = net; -#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */ ret = register_netdev(net); if (ret) { CFGP2P_ERR((" register_netdevice failed (%d)\n", ret)); free_netdev(net); -#ifndef WL_NEWCFG_PRIVCMD_SUPPORT kfree(wdev); -#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */ return -ENODEV; } /* store p2p net ptr for further reference. Note that iflist won't have this * entry as there corresponding firmware interface is a "Hidden" interface. */ -#ifndef WL_NEWCFG_PRIVCMD_SUPPORT cfg->p2p_wdev = wdev; -#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */ cfg->p2p_net = net; - printf("%s: P2P Interface Registered\n", net->name); + printk("%s: P2P Interface Registered\n", net->name); return ret; } -#endif /* P2PONEINT */ s32 wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg) @@ -2688,8 +2296,6 @@ wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg) return 0; } - -#ifndef P2PONEINT static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev) { @@ -2724,16 +2330,10 @@ static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd return ret; } -#endif /* P2PONEINT */ -#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT || defined(P2PONEINT) */ +#endif -#if defined(WL_ENABLE_P2P_IF) || defined(P2PONEINT) -int -#ifdef P2PONEINT -wl_cfgp2p_if_open(struct net_device *net) -#else -wl_cfgp2p_if_open(struct net_device *net) -#endif +#if defined(WL_ENABLE_P2P_IF) +static int wl_cfgp2p_if_open(struct net_device *net) { struct wireless_dev *wdev = net->ieee80211_ptr; @@ -2755,26 +2355,14 @@ wl_cfgp2p_if_open(struct net_device *net) return 0; } -int -#ifdef P2PONEINT 
-wl_cfgp2p_if_stop(struct net_device *net) -#else -wl_cfgp2p_if_stop(struct net_device *net) -#endif +static int wl_cfgp2p_if_stop(struct net_device *net) { struct wireless_dev *wdev = net->ieee80211_ptr; -#ifdef P2PONEINT - bcm_struct_cfgdev *cfgdev; -#endif + if (!wdev) return -EINVAL; -#ifdef P2PONEINT - cfgdev = ndev_to_cfgdev(net); - wl_cfg80211_scan_stop(cfgdev); -#else wl_cfg80211_scan_stop(net); -#endif #if !defined(WL_IFACE_COMB_NUM_CHANNELS) wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes) @@ -2783,9 +2371,7 @@ wl_cfgp2p_if_stop(struct net_device *net) #endif /* !WL_IFACE_COMB_NUM_CHANNELS */ return 0; } -#endif /* defined(WL_ENABLE_P2P_IF) || defined(P2PONEINT) */ -#if defined(WL_ENABLE_P2P_IF) bool wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops) { return (if_ops == &wl_cfgp2p_if_ops); @@ -2805,13 +2391,34 @@ wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg) WL_TRACE(("Enter\n")); if (cfg->p2p_wdev) { +#ifndef EXPLICIT_DISCIF_CLEANUP + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); +#endif /* EXPLICIT_DISCIF_CLEANUP */ + /* + * This is not expected. This can happen due to + * supplicant crash/unclean de-initialization which + * didn't free the p2p discovery interface. Indicate + * driver hang to user space so that the framework + * can rei-init the Wi-Fi. + */ CFGP2P_ERR(("p2p_wdev defined already.\n")); -#if (defined(CUSTOMER_HW10) && defined(CONFIG_ARCH_ODIN)) + wl_probe_wdev_all(cfg); +#ifdef EXPLICIT_DISCIF_CLEANUP + /* + * CUSTOMER_HW4 design doesn't delete the p2p discovery + * interface on ifconfig wlan0 down context which comes + * without a preceeding NL80211_CMD_DEL_INTERFACE for p2p + * discovery. But during supplicant crash the DEL_IFACE + * command will not happen and will cause a left over iface + * even after ifconfig wlan0 down. 
So delete the iface + * first and then indicate the HANG event + */ wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg); - CFGP2P_ERR(("p2p_wdev deleted.\n")); #else - return ERR_PTR(-ENFILE); -#endif + dhd->hang_reason = HANG_REASON_P2P_IFACE_DEL_FAILURE; + net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg)); + return ERR_PTR(-ENODEV); +#endif /* EXPLICIT_DISCIF_CLEANUP */ } wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); @@ -2822,22 +2429,18 @@ wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg) memset(&primary_mac, 0, sizeof(primary_mac)); get_primary_mac(cfg, &primary_mac); - wl_cfgp2p_generate_bss_mac(&primary_mac, - &cfg->p2p->dev_addr, &cfg->p2p->int_addr); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); wdev->wiphy = cfg->wdev->wiphy; wdev->iftype = NL80211_IFTYPE_P2P_DEVICE; - memcpy(wdev->address, &cfg->p2p->dev_addr, ETHER_ADDR_LEN); + memcpy(wdev->address, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE), ETHER_ADDR_LEN); -#if defined(WL_NEWCFG_PRIVCMD_SUPPORT) - if (cfg->p2p_net) - memcpy(cfg->p2p_net->dev_addr, &cfg->p2p->dev_addr, ETHER_ADDR_LEN); -#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */ /* store p2p wdev ptr for further reference. */ cfg->p2p_wdev = wdev; printf("P2P interface registered\n"); + printf("%s: wdev: %p, wdev->net: %p\n", __FUNCTION__, wdev, wdev->netdev); return wdev; } @@ -2866,6 +2469,9 @@ wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) } p2p_on(cfg) = true; +#if defined(P2P_IE_MISSING_FIX) + cfg->p2p_prb_noti = false; +#endif printf("P2P interface started\n"); @@ -2882,7 +2488,7 @@ wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) if (!cfg) return; - WL_TRACE(("Enter\n")); + CFGP2P_DBG(("Enter\n")); ret = wl_cfg80211_scan_stop(wdev); if (unlikely(ret < 0)) { @@ -2899,7 +2505,7 @@ wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) p2p_on(cfg) = false; - printf("P2P interface stopped\n"); + printf("Exit. 
P2P interface stopped\n"); return; } @@ -2912,11 +2518,8 @@ wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg) if (!wdev) return -EINVAL; -#ifdef P2PONEINT - return -EINVAL; -#endif - WL_TRACE(("Enter\n")); + printf("%s: wdev: %p, wdev->net: %p\n", __FUNCTION__, wdev, wdev->netdev); if (!rtnl_is_locked()) { rtnl_lock(); @@ -2928,6 +2531,8 @@ wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg) if (rollback_lock) rtnl_unlock(); + synchronize_rcu(); + kfree(wdev); if (cfg) @@ -2938,3 +2543,41 @@ wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg) return 0; } #endif /* WL_CFG80211_P2P_DEV_IF */ + +void +wl_cfgp2p_need_wait_actfrmae(struct bcm_cfg80211 *cfg, void *frame, u32 frame_len, bool tx) +{ + wifi_p2p_pub_act_frame_t *pact_frm; + int status = 0; + + if (!frame || (frame_len < (sizeof(*pact_frm) + WL_P2P_AF_STATUS_OFFSET - 1))) { + return; + } + + if (wl_cfgp2p_is_pub_action(frame, frame_len)) { + pact_frm = (wifi_p2p_pub_act_frame_t *)frame; + if (pact_frm->subtype == P2P_PAF_GON_RSP && tx) { + CFGP2P_ACTION(("Check TX P2P Group Owner Negotiation Rsp Frame status\n")); + status = pact_frm->elts[WL_P2P_AF_STATUS_OFFSET]; + if (status) { + cfg->need_wait_afrx = false; + return; + } + } + } + + cfg->need_wait_afrx = true; + return; +} + +int +wl_cfgp2p_is_p2p_specific_scan(struct cfg80211_scan_request *request) +{ + if (request && (request->n_ssids == 1) && + (request->n_channels == 1) && + IS_P2P_SSID(request->ssids[0].ssid, WL_P2P_WILDCARD_SSID_LEN) && + (request->ssids[0].ssid_len > WL_P2P_WILDCARD_SSID_LEN)) { + return true; + } + return false; +} diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h b/drivers/amlogic/wifi/bcmdhd/wl_cfgp2p.h similarity index 78% rename from drivers/net/wireless/bcmdhd/wl_cfgp2p.h rename to drivers/amlogic/wifi/bcmdhd/wl_cfgp2p.h index bcabc2038597e..9e970400175a3 100644 --- a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h +++ 
b/drivers/amlogic/wifi/bcmdhd/wl_cfgp2p.h @@ -1,9 +1,30 @@ /* * Linux cfgp2p driver * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: wl_cfgp2p.h 497431 2014-08-19 11:03:27Z $ + * + * <> + * + * $Id: wl_cfgp2p.h 608203 2015-12-24 05:30:44Z $ */ #ifndef _wl_cfgp2p_h_ #define _wl_cfgp2p_h_ @@ -21,7 +42,8 @@ typedef struct wifi_p2p_ie wifi_wfd_ie_t; typedef enum { P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */ P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */ - P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */ + P2PAPI_BSSCFG_CONNECTION1, /* maps to driver's P2P connection bsscfg */ + P2PAPI_BSSCFG_CONNECTION2, P2PAPI_BSSCFG_MAX } p2p_bsscfg_type_t; @@ -41,44 +63,28 @@ typedef enum { /* normal vendor ies buffer length */ #define VNDR_IES_BUF_LEN 512 -/* Structure to hold all saved P2P and WPS IEs for a BSSCFG */ -struct p2p_saved_ie { - u8 p2p_probe_req_ie[VNDR_IES_BUF_LEN]; - u8 p2p_probe_res_ie[VNDR_IES_MAX_BUF_LEN]; - u8 p2p_assoc_req_ie[VNDR_IES_BUF_LEN]; - u8 p2p_assoc_res_ie[VNDR_IES_BUF_LEN]; - u8 p2p_beacon_ie[VNDR_IES_MAX_BUF_LEN]; - u32 p2p_probe_req_ie_len; - u32 p2p_probe_res_ie_len; - u32 p2p_assoc_req_ie_len; - u32 p2p_assoc_res_ie_len; - u32 p2p_beacon_ie_len; -}; - struct p2p_bss { s32 bssidx; struct net_device *dev; - struct p2p_saved_ie saved_ie; void *private_data; + struct ether_addr mac_addr; }; struct p2p_info { bool on; /* p2p on/off switch */ bool scan; int16 search_state; - bool vif_created; s8 vir_ifname[IFNAMSIZ]; unsigned long status; - struct ether_addr dev_addr; - struct ether_addr int_addr; struct p2p_bss bss[P2PAPI_BSSCFG_MAX]; struct timer_list listen_timer; wl_p2p_sched_t noa; wl_p2p_ops_t ops; wlc_ssid_t ssid; + s8 p2p_go_count; }; -#define MAX_VNDR_IE_NUMBER 5 +#define MAX_VNDR_IE_NUMBER 10 struct parsed_vndr_ie_info { char *ie_ptr; @@ -110,6 +116,7 @@ enum wl_cfgp2p_status { #define wl_to_p2p_bss_ndev(cfg, type) ((cfg)->p2p->bss[type].dev) #define wl_to_p2p_bss_bssidx(cfg, type) ((cfg)->p2p->bss[type].bssidx) +#define wl_to_p2p_bss_macaddr(cfg, type) &((cfg)->p2p->bss[type].mac_addr) #define 
wl_to_p2p_bss_saved_ie(cfg, type) ((cfg)->p2p->bss[type].saved_ie) #define wl_to_p2p_bss_private(cfg, type) ((cfg)->p2p->bss[type].private_data) #define wl_to_p2p_bss(cfg, type) ((cfg)->p2p->bss[type]) @@ -128,16 +135,32 @@ enum wl_cfgp2p_status { /* dword align allocation */ #define WLC_IOCTL_MAXLEN 8192 +#ifdef CUSTOMER_HW4_DEBUG +#define CFGP2P_ERROR_TEXT "CFGP2P-INFO2) " +#else #define CFGP2P_ERROR_TEXT "CFGP2P-ERROR) " +#endif /* CUSTOMER_HW4_DEBUG */ - +#ifdef DHD_LOG_DUMP #define CFGP2P_ERR(args) \ do { \ if (wl_dbg_level & WL_DBG_ERR) { \ printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__); \ printk args; \ + dhd_log_dump_print("[%s] %s: ", \ + dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ } \ } while (0) +#else +#define CFGP2P_ERR(args) \ + do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__); \ + printk args; \ + } \ + } while (0) +#endif /* DHD_LOG_DUMP */ #define CFGP2P_INFO(args) \ do { \ if (wl_dbg_level & WL_DBG_INFO) { \ @@ -169,6 +192,18 @@ enum wl_cfgp2p_status { add_timer(timer); \ } while (0); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8)) +#ifdef WL_SUPPORT_BACKPORTED_KPATCHES +#undef WL_SUPPORT_BACKPORTED_KPATCHES +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) +#ifdef WL_CFG80211_STA_EVENT +#undef WL_CFG80211_STA_EVENT +#endif +#endif + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) && !defined(WL_CFG80211_P2P_DEV_IF) #define WL_CFG80211_P2P_DEV_IF @@ -189,9 +224,6 @@ enum wl_cfgp2p_status { #endif /* (LINUX_VERSION >= VERSION(3, 8, 0)) */ #ifndef WL_CFG80211_P2P_DEV_IF -#ifdef WL_NEWCFG_PRIVCMD_SUPPORT -#undef WL_NEWCFG_PRIVCMD_SUPPORT -#endif #endif /* WL_CFG80211_P2P_DEV_IF */ #if defined(WL_ENABLE_P2P_IF) && (defined(WL_CFG80211_P2P_DEV_IF) || \ @@ -210,6 +242,8 @@ enum wl_cfgp2p_status { #define bcm_struct_cfgdev struct net_device #endif /* WL_CFG80211_P2P_DEV_IF */ +#define P2P_ECSA_CNT 50 + extern void 
wl_cfgp2p_listen_expired(unsigned long data); extern bool @@ -220,6 +254,8 @@ extern bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len); extern bool wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len); +extern bool +wl_cfgp2p_is_p2p_gas_action(void *frame, u32 frame_len); extern void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel); extern s32 @@ -239,7 +275,8 @@ wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac); extern s32 wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac); extern s32 -wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, chanspec_t chspec); +wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, + chanspec_t chspec, s32 conn_idx); extern s32 wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index); @@ -278,8 +315,6 @@ wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s extern s32 wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx); -extern s32 -wl_cfgp2p_find_idx(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 *index); extern struct net_device * wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx); extern s32 @@ -304,8 +339,7 @@ wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev, wl_af_params_t *af_params, s32 bssidx); extern void -wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr, struct ether_addr *out_dev_addr, - struct ether_addr *out_int_addr); +wl_cfgp2p_generate_bss_mac(struct bcm_cfg80211 *cfg, struct ether_addr *primary_addr); extern void wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id); @@ -331,6 +365,9 @@ wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* b extern s32 wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len); +extern s32 +wl_cfgp2p_set_p2p_ecsa(struct bcm_cfg80211 *cfg, struct net_device 
*ndev, char* buf, int len); + extern u8 * wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id); @@ -349,6 +386,18 @@ wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg); extern bool wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops); +extern u32 +wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag, + s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd); + +extern int wl_cfgp2p_get_conn_idx(struct bcm_cfg80211 *cfg); + +extern +int wl_cfg_multip2p_operational(struct bcm_cfg80211 *cfg); + +extern +int wl_cfgp2p_vif_created(struct bcm_cfg80211 *cfg); + #if defined(WL_CFG80211_P2P_DEV_IF) extern struct wireless_dev * wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg); @@ -361,8 +410,15 @@ wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev); extern int wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg); + #endif /* WL_CFG80211_P2P_DEV_IF */ +extern void +wl_cfgp2p_need_wait_actfrmae(struct bcm_cfg80211 *cfg, void *frame, u32 frame_len, bool tx); + +extern int +wl_cfgp2p_is_p2p_specific_scan(struct cfg80211_scan_request *request); + /* WiFi Direct */ #define SOCIAL_CHAN_1 1 #define SOCIAL_CHAN_2 6 @@ -376,6 +432,7 @@ wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg); #define WL_P2P_WILDCARD_SSID_LEN 7 #define WL_P2P_INTERFACE_PREFIX "p2p" #define WL_P2P_TEMP_CHAN 11 +#define WL_P2P_AF_STATUS_OFFSET 9 /* If the provision discovery is for JOIN operations, * or the device discoverablity frame is destined to GO diff --git a/drivers/amlogic/wifi/bcmdhd/wl_cfgvendor.c b/drivers/amlogic/wifi/bcmdhd/wl_cfgvendor.c new file mode 100644 index 0000000000000..75dd87722f17c --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/wl_cfgvendor.c @@ -0,0 +1,1548 @@ +/* + * Linux cfg80211 Vendor Extension Code + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, 
this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_cfgvendor.c 605796 2015-12-11 13:45:36Z $ + */ + +/* + * New vendor interface additon to nl80211/cfg80211 to allow vendors + * to implement proprietary features over the cfg80211 stack. +*/ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef PNO_SUPPORT +#include +#endif /* PNO_SUPPORT */ +#ifdef RTT_SUPPORT +#include +#endif /* RTT_SUPPORT */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef PROP_TXSTATUS +#include +#endif +#include + +#if defined(WL_VENDOR_EXT_SUPPORT) +/* + * This API is to be used for asynchronous vendor events. This + * shouldn't be used in response to a vendor command from its + * do_it handler context (instead wl_cfgvendor_send_cmd_reply should + * be used). 
+ */ +int wl_cfgvendor_send_async_event(struct wiphy *wiphy, + struct net_device *dev, int event_id, const void *data, int len) +{ + u16 kflags; + struct sk_buff *skb; + + kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + + /* Alloc the SKB for vendor_event */ +#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC) + skb = cfg80211_vendor_event_alloc(wiphy, NULL, len, event_id, kflags); +#else + skb = cfg80211_vendor_event_alloc(wiphy, len, event_id, kflags); +#endif /* CONFIG_ARCH_MSM && SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC */ + if (!skb) { + WL_ERR(("skb alloc failed")); + return -ENOMEM; + } + + /* Push the data to the skb */ + nla_put_nohdr(skb, len, data); + + cfg80211_vendor_event(skb, kflags); + + return 0; +} + +static int +wl_cfgvendor_send_cmd_reply(struct wiphy *wiphy, + struct net_device *dev, const void *data, int len) +{ + struct sk_buff *skb; + + /* Alloc the SKB for vendor_event */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len); + if (unlikely(!skb)) { + WL_ERR(("skb alloc failed")); + return -ENOMEM; + } + + /* Push the data to the skb */ + nla_put_nohdr(skb, len, data); + + return cfg80211_vendor_cmd_reply(skb); +} + +static int +wl_cfgvendor_get_feature_set(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int reply; + + reply = dhd_dev_get_feature_set(bcmcfg_to_prmry_ndev(cfg)); + + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + &reply, sizeof(int)); + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + + return err; +} + +static int +wl_cfgvendor_get_feature_set_matrix(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct sk_buff *skb; + int *reply; + int num, mem_needed, i; + + reply = dhd_dev_get_feature_set_matrix(bcmcfg_to_prmry_ndev(cfg), &num); + + if (!reply) 
{ + WL_ERR(("Could not get feature list matrix\n")); + err = -EINVAL; + return err; + } + mem_needed = VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * num) + + ATTRIBUTE_U32_LEN; + + /* Alloc the SKB for vendor_event */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed); + if (unlikely(!skb)) { + WL_ERR(("skb alloc failed")); + err = -ENOMEM; + goto exit; + } + + nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, num); + for (i = 0; i < num; i++) { + nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_FEATURE_SET, reply[i]); + } + + err = cfg80211_vendor_cmd_reply(skb); + + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + +exit: + kfree(reply); + return err; +} + +static int +wl_cfgvendor_set_pno_mac_oui(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int type; + uint8 pno_random_mac_oui[DOT11_OUI_LEN]; + + type = nla_type(data); + + if (type == ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI) { + memcpy(pno_random_mac_oui, nla_data(data), DOT11_OUI_LEN); + + err = dhd_dev_pno_set_mac_oui(bcmcfg_to_prmry_ndev(cfg), pno_random_mac_oui); + + if (unlikely(err)) + WL_ERR(("Bad OUI, could not set:%d \n", err)); + + + } else { + err = -1; + } + + return err; +} + +#ifdef CUSTOM_FORCE_NODFS_FLAG +static int +wl_cfgvendor_set_nodfs_flag(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int type; + u32 nodfs; + + type = nla_type(data); + if (type == ANDR_WIFI_ATTRIBUTE_NODFS_SET) { + nodfs = nla_get_u32(data); + err = dhd_dev_set_nodfs(bcmcfg_to_prmry_ndev(cfg), nodfs); + } else { + err = -1; + } + return err; +} +#endif /* CUSTOM_FORCE_NODFS_FLAG */ + +#ifdef GSCAN_SUPPORT +int +wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy, + struct net_device *dev, void *data, int len, wl_vendor_event_t event) +{ + u16 kflags; + const void *ptr; + struct sk_buff *skb; + int 
malloc_len, total, iter_cnt_to_send, cnt; + gscan_results_cache_t *cache = (gscan_results_cache_t *)data; + total = len/sizeof(wifi_gscan_result_t); + while (total > 0) { + malloc_len = (total * sizeof(wifi_gscan_result_t)) + VENDOR_DATA_OVERHEAD; + if (malloc_len > NLMSG_DEFAULT_SIZE) { + malloc_len = NLMSG_DEFAULT_SIZE; + } + iter_cnt_to_send = + (malloc_len - VENDOR_DATA_OVERHEAD)/sizeof(wifi_gscan_result_t); + total = total - iter_cnt_to_send; + + kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + + /* Alloc the SKB for vendor_event */ +#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC) + skb = cfg80211_vendor_event_alloc(wiphy, NULL, malloc_len, event, kflags); +#else + skb = cfg80211_vendor_event_alloc(wiphy, malloc_len, event, kflags); +#endif /* CONFIG_ARCH_MSM && SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC */ + if (!skb) { + WL_ERR(("skb alloc failed")); + return -ENOMEM; + } + + while (cache && iter_cnt_to_send) { + ptr = (const void *) &cache->results[cache->tot_consumed]; + + if (iter_cnt_to_send < (cache->tot_count - cache->tot_consumed)) { + cnt = iter_cnt_to_send; + } else { + cnt = (cache->tot_count - cache->tot_consumed); + } + + iter_cnt_to_send -= cnt; + cache->tot_consumed += cnt; + /* Push the data to the skb */ + nla_append(skb, cnt * sizeof(wifi_gscan_result_t), ptr); + if (cache->tot_consumed == cache->tot_count) { + cache = cache->next; + } + + } + + cfg80211_vendor_event(skb, kflags); + } + + return 0; +} + + +static int +wl_cfgvendor_gscan_get_capabilities(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + dhd_pno_gscan_capabilities_t *reply = NULL; + uint32 reply_len = 0; + + + reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_GET_CAPABILITIES, NULL, &reply_len); + if (!reply) { + WL_ERR(("Could not get capabilities\n")); + err = -EINVAL; + return err; + } + + err = wl_cfgvendor_send_cmd_reply(wiphy, 
bcmcfg_to_prmry_ndev(cfg), + reply, reply_len); + + if (unlikely(err)) { + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + } + + kfree(reply); + return err; +} + +static int +wl_cfgvendor_gscan_get_channel_list(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0, type, band; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + uint16 *reply = NULL; + uint32 reply_len = 0, num_channels, mem_needed; + struct sk_buff *skb; + + type = nla_type(data); + + if (type == GSCAN_ATTRIBUTE_BAND) { + band = nla_get_u32(data); + } else { + return -EINVAL; + } + + reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_GET_CHANNEL_LIST, &band, &reply_len); + + if (!reply) { + WL_ERR(("Could not get channel list\n")); + err = -EINVAL; + return err; + } + num_channels = reply_len/ sizeof(uint32); + mem_needed = reply_len + VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * 2); + + /* Alloc the SKB for vendor_event */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed); + if (unlikely(!skb)) { + WL_ERR(("skb alloc failed")); + err = -ENOMEM; + goto exit; + } + + nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_CHANNELS, num_channels); + nla_put(skb, GSCAN_ATTRIBUTE_CHANNEL_LIST, reply_len, reply); + + err = cfg80211_vendor_cmd_reply(skb); + + if (unlikely(err)) { + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + } +exit: + kfree(reply); + return err; +} + +static int +wl_cfgvendor_gscan_get_batch_results(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_results_cache_t *results, *iter; + uint32 reply_len, complete = 0, num_results_iter; + int32 mem_needed; + wifi_gscan_result_t *ptr; + uint16 num_scan_ids, num_results; + struct sk_buff *skb; + struct nlattr *scan_hdr; + + dhd_dev_wait_batch_results_complete(bcmcfg_to_prmry_ndev(cfg)); + dhd_dev_pno_lock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); + results = 
dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len); + + if (!results) { + WL_ERR(("No results to send %d\n", err)); + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + results, 0); + + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); + return err; + } + num_scan_ids = reply_len & 0xFFFF; + num_results = (reply_len & 0xFFFF0000) >> 16; + mem_needed = (num_results * sizeof(wifi_gscan_result_t)) + + (num_scan_ids * GSCAN_BATCH_RESULT_HDR_LEN) + + VENDOR_REPLY_OVERHEAD + SCAN_RESULTS_COMPLETE_FLAG_LEN; + + if (mem_needed > (int32)NLMSG_DEFAULT_SIZE) { + mem_needed = (int32)NLMSG_DEFAULT_SIZE; + complete = 0; + } else { + complete = 1; + } + + WL_TRACE(("complete %d mem_needed %d max_mem %d\n", complete, mem_needed, + (int)NLMSG_DEFAULT_SIZE)); + /* Alloc the SKB for vendor_event */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed); + if (unlikely(!skb)) { + WL_ERR(("skb alloc failed")); + dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); + return -ENOMEM; + } + iter = results; + + nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, complete); + mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN + VENDOR_REPLY_OVERHEAD); + while (iter && ((mem_needed - GSCAN_BATCH_RESULT_HDR_LEN) > 0)) { + + scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS); + nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id); + nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag); + + num_results_iter = + (mem_needed - GSCAN_BATCH_RESULT_HDR_LEN)/sizeof(wifi_gscan_result_t); + + if ((iter->tot_count - iter->tot_consumed) < num_results_iter) + num_results_iter = iter->tot_count - iter->tot_consumed; + nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter); + if (num_results_iter) { + ptr = &iter->results[iter->tot_consumed]; + iter->tot_consumed += num_results_iter; + 
nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS, + num_results_iter * sizeof(wifi_gscan_result_t), ptr); + } + nla_nest_end(skb, scan_hdr); + mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN + + (num_results_iter * sizeof(wifi_gscan_result_t)); + iter = iter->next; + } + + dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg)); + dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); + + return cfg80211_vendor_cmd_reply(skb); +} + +static int +wl_cfgvendor_initiate_gscan(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int type, tmp = len; + int run = 0xFF; + int flush = 0; + const struct nlattr *iter; + + nla_for_each_attr(iter, data, len, tmp) { + type = nla_type(iter); + if (type == GSCAN_ATTRIBUTE_ENABLE_FEATURE) + run = nla_get_u32(iter); + else if (type == GSCAN_ATTRIBUTE_FLUSH_FEATURE) + flush = nla_get_u32(iter); + } + + if (run != 0xFF) { + err = dhd_dev_pno_run_gscan(bcmcfg_to_prmry_ndev(cfg), run, flush); + + if (unlikely(err)) { + WL_ERR(("Could not run gscan:%d \n", err)); + } + return err; + } else { + return -EINVAL; + } + + +} + +static int +wl_cfgvendor_enable_full_scan_result(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int type; + bool real_time = FALSE; + + type = nla_type(data); + + if (type == GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS) { + real_time = nla_get_u32(data); + + err = dhd_dev_pno_enable_full_scan_result(bcmcfg_to_prmry_ndev(cfg), real_time); + + if (unlikely(err)) { + WL_ERR(("Could not run gscan:%d \n", err)); + } + + } else { + err = -EINVAL; + } + + return err; +} + +static int +wl_cfgvendor_set_scan_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_scan_params_t *scan_param; + int j = 0; + int type, tmp, tmp1, tmp2, k = 0; + 
const struct nlattr *iter, *iter1, *iter2; + struct dhd_pno_gscan_channel_bucket *ch_bucket; + + scan_param = kzalloc(sizeof(gscan_scan_params_t), GFP_KERNEL); + if (!scan_param) { + WL_ERR(("Could not set GSCAN scan cfg, mem alloc failure\n")); + err = -EINVAL; + return err; + + } + + scan_param->scan_fr = PNO_SCAN_MIN_FW_SEC; + nla_for_each_attr(iter, data, len, tmp) { + type = nla_type(iter); + + if (j >= GSCAN_MAX_CH_BUCKETS) { + break; + } + + switch (type) { + case GSCAN_ATTRIBUTE_BASE_PERIOD: + scan_param->scan_fr = nla_get_u32(iter)/1000; + break; + case GSCAN_ATTRIBUTE_NUM_BUCKETS: + scan_param->nchannel_buckets = nla_get_u32(iter); + break; + case GSCAN_ATTRIBUTE_CH_BUCKET_1: + case GSCAN_ATTRIBUTE_CH_BUCKET_2: + case GSCAN_ATTRIBUTE_CH_BUCKET_3: + case GSCAN_ATTRIBUTE_CH_BUCKET_4: + case GSCAN_ATTRIBUTE_CH_BUCKET_5: + case GSCAN_ATTRIBUTE_CH_BUCKET_6: + case GSCAN_ATTRIBUTE_CH_BUCKET_7: + nla_for_each_nested(iter1, iter, tmp1) { + type = nla_type(iter1); + ch_bucket = + scan_param->channel_bucket; + + switch (type) { + case GSCAN_ATTRIBUTE_BUCKET_ID: + break; + case GSCAN_ATTRIBUTE_BUCKET_PERIOD: + ch_bucket[j].bucket_freq_multiple = + nla_get_u32(iter1)/1000; + break; + case GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS: + ch_bucket[j].num_channels = + nla_get_u32(iter1); + break; + case GSCAN_ATTRIBUTE_BUCKET_CHANNELS: + nla_for_each_nested(iter2, iter1, tmp2) { + if (k >= PFN_SWC_RSSI_WINDOW_MAX) + break; + ch_bucket[j].chan_list[k] = + nla_get_u32(iter2); + k++; + } + k = 0; + break; + case GSCAN_ATTRIBUTE_BUCKETS_BAND: + ch_bucket[j].band = (uint16) + nla_get_u32(iter1); + break; + case GSCAN_ATTRIBUTE_REPORT_EVENTS: + ch_bucket[j].report_flag = (uint8) + nla_get_u32(iter1); + break; + default: + WL_ERR(("bucket attribute type error %d\n", + type)); + break; + } + } + j++; + break; + default: + WL_ERR(("Unknown type %d\n", type)); + break; + } + } + + if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_SCAN_CFG_ID, scan_param, 0) < 0) { + 
WL_ERR(("Could not set GSCAN scan cfg\n")); + err = -EINVAL; + } + + kfree(scan_param); + return err; + +} + +static int +wl_cfgvendor_hotlist_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_hotlist_scan_params_t *hotlist_params; + int tmp, tmp1, tmp2, type, j = 0, dummy; + const struct nlattr *outer, *inner, *iter; + uint8 flush = 0; + struct bssid_t *pbssid; + + hotlist_params = (gscan_hotlist_scan_params_t *)kzalloc(len, GFP_KERNEL); + if (!hotlist_params) { + WL_ERR(("Cannot Malloc mem to parse config commands size - %d bytes \n", len)); + return -ENOMEM; + } + + hotlist_params->lost_ap_window = GSCAN_LOST_AP_WINDOW_DEFAULT; + + nla_for_each_attr(iter, data, len, tmp2) { + type = nla_type(iter); + switch (type) { + case GSCAN_ATTRIBUTE_HOTLIST_BSSIDS: + pbssid = hotlist_params->bssid; + nla_for_each_nested(outer, iter, tmp) { + nla_for_each_nested(inner, outer, tmp1) { + type = nla_type(inner); + + switch (type) { + case GSCAN_ATTRIBUTE_BSSID: + memcpy(&(pbssid[j].macaddr), + nla_data(inner), ETHER_ADDR_LEN); + break; + case GSCAN_ATTRIBUTE_RSSI_LOW: + pbssid[j].rssi_reporting_threshold = + (int8) nla_get_u8(inner); + break; + case GSCAN_ATTRIBUTE_RSSI_HIGH: + dummy = (int8) nla_get_u8(inner); + break; + default: + WL_ERR(("ATTR unknown %d\n", + type)); + break; + } + } + j++; + } + hotlist_params->nbssid = j; + break; + case GSCAN_ATTRIBUTE_HOTLIST_FLUSH: + flush = nla_get_u8(iter); + break; + case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE: + hotlist_params->lost_ap_window = nla_get_u32(iter); + break; + default: + WL_ERR(("Unknown type %d\n", type)); + break; + } + + } + + if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_GEOFENCE_SCAN_CFG_ID, + hotlist_params, flush) < 0) { + WL_ERR(("Could not set GSCAN HOTLIST cfg\n")); + err = -EINVAL; + goto exit; + } +exit: + kfree(hotlist_params); + return err; +} +static int 
+wl_cfgvendor_set_batch_scan_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0, tmp, type; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_batch_params_t batch_param; + const struct nlattr *iter; + + batch_param.mscan = batch_param.bestn = 0; + batch_param.buffer_threshold = GSCAN_BATCH_NO_THR_SET; + + nla_for_each_attr(iter, data, len, tmp) { + type = nla_type(iter); + + switch (type) { + case GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN: + batch_param.bestn = nla_get_u32(iter); + break; + case GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE: + batch_param.mscan = nla_get_u32(iter); + break; + case GSCAN_ATTRIBUTE_REPORT_THRESHOLD: + batch_param.buffer_threshold = nla_get_u32(iter); + break; + default: + WL_ERR(("Unknown type %d\n", type)); + break; + } + } + + if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_BATCH_SCAN_CFG_ID, + &batch_param, 0) < 0) { + WL_ERR(("Could not set batch cfg\n")); + err = -EINVAL; + return err; + } + + return err; +} + +static int +wl_cfgvendor_significant_change_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_swc_params_t *significant_params; + int tmp, tmp1, tmp2, type, j = 0; + const struct nlattr *outer, *inner, *iter; + uint8 flush = 0; + wl_pfn_significant_bssid_t *bssid; + + significant_params = (gscan_swc_params_t *) kzalloc(len, GFP_KERNEL); + if (!significant_params) { + WL_ERR(("Cannot Malloc mem to parse config commands size - %d bytes \n", len)); + return -ENOMEM; + } + + nla_for_each_attr(iter, data, len, tmp2) { + type = nla_type(iter); + + switch (type) { + case GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH: + flush = nla_get_u8(iter); + break; + case GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE: + significant_params->rssi_window = nla_get_u16(iter); + break; + case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE: + significant_params->lost_ap_window = nla_get_u16(iter); + break; + case 
GSCAN_ATTRIBUTE_MIN_BREACHING: + significant_params->swc_threshold = nla_get_u16(iter); + break; + case GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS: + bssid = significant_params->bssid_elem_list; + nla_for_each_nested(outer, iter, tmp) { + nla_for_each_nested(inner, outer, tmp1) { + switch (nla_type(inner)) { + case GSCAN_ATTRIBUTE_BSSID: + memcpy(&(bssid[j].macaddr), + nla_data(inner), + ETHER_ADDR_LEN); + break; + case GSCAN_ATTRIBUTE_RSSI_HIGH: + bssid[j].rssi_high_threshold + = (int8) nla_get_u8(inner); + break; + case GSCAN_ATTRIBUTE_RSSI_LOW: + bssid[j].rssi_low_threshold + = (int8) nla_get_u8(inner); + break; + default: + WL_ERR(("ATTR unknown %d\n", + type)); + break; + } + } + j++; + } + break; + default: + WL_ERR(("Unknown type %d\n", type)); + break; + } + } + significant_params->nbssid = j; + + if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_SIGNIFICANT_SCAN_CFG_ID, + significant_params, flush) < 0) { + WL_ERR(("Could not set GSCAN significant cfg\n")); + err = -EINVAL; + goto exit; + } +exit: + kfree(significant_params); + return err; +} +#endif /* GSCAN_SUPPORT */ + +#ifdef RTT_SUPPORT +void +wl_cfgvendor_rtt_evt(void *ctx, void *rtt_data) +{ + struct wireless_dev *wdev = (struct wireless_dev *)ctx; + struct wiphy *wiphy; + struct sk_buff *skb; + uint32 tot_len = NLMSG_DEFAULT_SIZE, entry_len = 0; + gfp_t kflags; + rtt_report_t *rtt_report = NULL; + rtt_result_t *rtt_result = NULL; + struct list_head *rtt_list; + wiphy = wdev->wiphy; + + WL_DBG(("In\n")); + /* Push the data to the skb */ + if (!rtt_data) { + WL_ERR(("rtt_data is NULL\n")); + goto exit; + } + rtt_list = (struct list_head *)rtt_data; + kflags = in_atomic() ? 
GFP_ATOMIC : GFP_KERNEL; + /* Alloc the SKB for vendor_event */ +#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC) + skb = cfg80211_vendor_event_alloc(wiphy, NULL, tot_len, GOOGLE_RTT_COMPLETE_EVENT, kflags); +#else + skb = cfg80211_vendor_event_alloc(wiphy, tot_len, GOOGLE_RTT_COMPLETE_EVENT, kflags); +#endif /* CONFIG_ARCH_MSM && SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC */ + if (!skb) { + WL_ERR(("skb alloc failed")); + goto exit; + } + /* fill in the rtt results on each entry */ + list_for_each_entry(rtt_result, rtt_list, list) { + entry_len = 0; + entry_len = sizeof(rtt_report_t); + rtt_report = kzalloc(entry_len, kflags); + if (!rtt_report) { + WL_ERR(("rtt_report alloc failed")); + kfree_skb(skb); + goto exit; + } + rtt_report->addr = rtt_result->peer_mac; + rtt_report->num_measurement = 1; /* ONE SHOT */ + rtt_report->status = rtt_result->err_code; + rtt_report->type = + (rtt_result->TOF_type == TOF_TYPE_ONE_WAY) ? RTT_ONE_WAY: RTT_TWO_WAY; + rtt_report->peer = rtt_result->target_info->peer; + rtt_report->channel = rtt_result->target_info->channel; + rtt_report->rssi = rtt_result->avg_rssi; + /* tx_rate */ + rtt_report->tx_rate = rtt_result->tx_rate; + /* RTT */ + rtt_report->rtt = rtt_result->meanrtt; + rtt_report->rtt_sd = rtt_result->sdrtt/10; + /* convert to centi meter */ + if (rtt_result->distance != 0xffffffff) + rtt_report->distance = (rtt_result->distance >> 2) * 25; + else /* invalid distance */ + rtt_report->distance = -1; + rtt_report->ts = rtt_result->ts; + nla_append(skb, entry_len, rtt_report); + kfree(rtt_report); + } + cfg80211_vendor_event(skb, kflags); +exit: + return; +} + +static int +wl_cfgvendor_rtt_set_config(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) { + int err = 0, rem, rem1, rem2, type; + rtt_config_params_t rtt_param; + rtt_target_info_t* rtt_target = NULL; + const struct nlattr *iter, *iter1, *iter2; + int8 eabuf[ETHER_ADDR_STR_LEN]; + int8 
chanbuf[CHANSPEC_STR_LEN]; + int32 feature_set = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + feature_set = dhd_dev_get_feature_set(bcmcfg_to_prmry_ndev(cfg)); + + WL_DBG(("In\n")); + err = dhd_dev_rtt_register_noti_callback(wdev->netdev, wdev, wl_cfgvendor_rtt_evt); + if (err < 0) { + WL_ERR(("failed to register rtt_noti_callback\n")); + goto exit; + } + memset(&rtt_param, 0, sizeof(rtt_param)); + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case RTT_ATTRIBUTE_TARGET_CNT: + rtt_param.rtt_target_cnt = nla_get_u8(iter); + if (rtt_param.rtt_target_cnt > RTT_MAX_TARGET_CNT) { + WL_ERR(("exceed max target count : %d\n", + rtt_param.rtt_target_cnt)); + err = BCME_RANGE; + goto exit; + } + break; + case RTT_ATTRIBUTE_TARGET_INFO: + rtt_target = rtt_param.target_info; + nla_for_each_nested(iter1, iter, rem1) { + nla_for_each_nested(iter2, iter1, rem2) { + type = nla_type(iter2); + switch (type) { + case RTT_ATTRIBUTE_TARGET_MAC: + memcpy(&rtt_target->addr, nla_data(iter2), + ETHER_ADDR_LEN); + break; + case RTT_ATTRIBUTE_TARGET_TYPE: + rtt_target->type = nla_get_u8(iter2); + if (!(feature_set & WIFI_FEATURE_D2D_RTT)) { + if (rtt_target->type == RTT_TWO_WAY || + rtt_target->type == RTT_INVALID) { + WL_ERR(("doesn't support RTT type" + " : %d\n", + rtt_target->type)); + err = -EINVAL; + goto exit; + } else if (rtt_target->type == RTT_AUTO) { + rtt_target->type = RTT_ONE_WAY; + } + } else if (rtt_target->type == RTT_INVALID) { + WL_ERR(("doesn't support RTT type" + " : %d\n", + rtt_target->type)); + err = -EINVAL; + goto exit; + } + break; + case RTT_ATTRIBUTE_TARGET_PEER: + rtt_target->peer = nla_get_u8(iter2); + if (rtt_target->peer != RTT_PEER_AP) { + WL_ERR(("doesn't support peer type : %d\n", + rtt_target->peer)); + err = -EINVAL; + goto exit; + } + break; + case RTT_ATTRIBUTE_TARGET_CHAN: + memcpy(&rtt_target->channel, nla_data(iter2), + sizeof(rtt_target->channel)); + break; + case RTT_ATTRIBUTE_TARGET_MODE: + 
rtt_target->continuous = nla_get_u8(iter2); + break; + case RTT_ATTRIBUTE_TARGET_INTERVAL: + rtt_target->interval = nla_get_u32(iter2); + break; + case RTT_ATTRIBUTE_TARGET_NUM_MEASUREMENT: + rtt_target->measure_cnt = nla_get_u32(iter2); + break; + case RTT_ATTRIBUTE_TARGET_NUM_PKT: + rtt_target->ftm_cnt = nla_get_u32(iter2); + break; + case RTT_ATTRIBUTE_TARGET_NUM_RETRY: + rtt_target->retry_cnt = nla_get_u32(iter2); + } + } + /* convert to chanspec value */ + rtt_target->chanspec = + dhd_rtt_convert_to_chspec(rtt_target->channel); + if (rtt_target->chanspec == 0) { + WL_ERR(("Channel is not valid \n")); + goto exit; + } + WL_INFORM(("Target addr %s, Channel : %s for RTT \n", + bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr, + eabuf), + wf_chspec_ntoa(rtt_target->chanspec, chanbuf))); + rtt_target++; + } + break; + } + } + WL_DBG(("leave :target_cnt : %d\n", rtt_param.rtt_target_cnt)); + if (dhd_dev_rtt_set_cfg(bcmcfg_to_prmry_ndev(cfg), &rtt_param) < 0) { + WL_ERR(("Could not set RTT configuration\n")); + err = -EINVAL; + } +exit: + return err; +} + +static int +wl_cfgvendor_rtt_cancel_config(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int err = 0, rem, type, target_cnt = 0; + int target_cnt_chk = 0; + const struct nlattr *iter; + struct ether_addr *mac_list = NULL, *mac_addr = NULL; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case RTT_ATTRIBUTE_TARGET_CNT: + if (mac_list != NULL) { + WL_ERR(("mac_list is not NULL\n")); + goto exit; + } + target_cnt = nla_get_u8(iter); + mac_list = (struct ether_addr *)kzalloc(target_cnt * ETHER_ADDR_LEN, + GFP_KERNEL); + if (mac_list == NULL) { + WL_ERR(("failed to allocate mem for mac list\n")); + goto exit; + } + mac_addr = &mac_list[0]; + break; + case RTT_ATTRIBUTE_TARGET_MAC: + if (mac_addr) { + memcpy(mac_addr++, nla_data(iter), ETHER_ADDR_LEN); + target_cnt_chk++; + if 
(target_cnt_chk > target_cnt) { + WL_ERR(("over target count\n")); + goto exit; + } + break; + } else { + WL_ERR(("mac_list is NULL\n")); + goto exit; + } + } + } + if (dhd_dev_rtt_cancel_cfg(bcmcfg_to_prmry_ndev(cfg), mac_list, target_cnt) < 0) { + WL_ERR(("Could not cancel RTT configuration\n")); + err = -EINVAL; + goto exit; + } + +exit: + if (mac_list) { + kfree(mac_list); + } + return err; +} +static int +wl_cfgvendor_rtt_get_capability(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + rtt_capabilities_t capability; + + err = dhd_dev_rtt_capability(bcmcfg_to_prmry_ndev(cfg), &capability); + if (unlikely(err)) { + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + goto exit; + } + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + &capability, sizeof(capability)); + + if (unlikely(err)) { + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + } +exit: + return err; +} + +#endif /* RTT_SUPPORT */ + +static int +wl_cfgvendor_priv_string_handler(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int ret = 0; + int ret_len = 0, payload = 0, msglen; + const struct bcm_nlmsg_hdr *nlioc = data; + void *buf = NULL, *cur; + int maxmsglen = PAGE_SIZE - 0x100; + struct sk_buff *reply; + + WL_ERR(("entry: cmd = %d\n", nlioc->cmd)); + + len -= sizeof(struct bcm_nlmsg_hdr); + ret_len = nlioc->len; + if (ret_len > 0 || len > 0) { + if (len > DHD_IOCTL_MAXLEN) { + WL_ERR(("oversize input buffer %d\n", len)); + len = DHD_IOCTL_MAXLEN; + } + if (ret_len > DHD_IOCTL_MAXLEN) { + WL_ERR(("oversize return buffer %d\n", ret_len)); + ret_len = DHD_IOCTL_MAXLEN; + } + payload = max(ret_len, len) + 1; + buf = vzalloc(payload); + if (!buf) { + return -ENOMEM; + } + memcpy(buf, (void *)nlioc + nlioc->offset, len); + *(char *)(buf + len) = '\0'; + } + + ret = 
dhd_cfgvendor_priv_string_handler(cfg, wdev, nlioc, buf); + if (ret) { + WL_ERR(("dhd_cfgvendor returned error %d", ret)); + vfree(buf); + return ret; + } + cur = buf; + while (ret_len > 0) { + msglen = nlioc->len > maxmsglen ? maxmsglen : ret_len; + ret_len -= msglen; + payload = msglen + sizeof(msglen); + reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload); + if (!reply) { + WL_ERR(("Failed to allocate reply msg\n")); + ret = -ENOMEM; + break; + } + + if (nla_put(reply, BCM_NLATTR_DATA, msglen, cur) || + nla_put_u16(reply, BCM_NLATTR_LEN, msglen)) { + kfree_skb(reply); + ret = -ENOBUFS; + break; + } + + ret = cfg80211_vendor_cmd_reply(reply); + if (ret) { + WL_ERR(("testmode reply failed:%d\n", ret)); + break; + } + cur += msglen; + } + + return ret; +} + +static int +wl_cfgvendor_priv_bcm_handler(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int err = 0; + int data_len = 0; + + WL_INFORM(("%s: Enter \n", __func__)); + + bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN); + + if (strncmp((char *)data, BRCM_VENDOR_SCMD_CAPA, strlen(BRCM_VENDOR_SCMD_CAPA)) == 0) { + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "cap", NULL, 0, + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + data_len = strlen(cfg->ioctl_buf); + cfg->ioctl_buf[data_len] = '\0'; + } + + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + cfg->ioctl_buf, data_len+1); + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + else + WL_INFORM(("Vendor Command reply sent successfully!\n")); + + return err; +} + +#ifdef LINKSTAT_SUPPORT +#define NUM_RATE 32 +#define NUM_PEER 1 +#define NUM_CHAN 11 +#define HEADER_SIZE sizeof(ver_len) +static int wl_cfgvendor_lstats_get_info(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + static char iovar_buf[WLC_IOCTL_MAXLEN]; 
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int err = 0, i; + wifi_iface_stat *iface; + wifi_radio_stat *radio; + wl_wme_cnt_t *wl_wme_cnt; + wl_cnt_v_le10_mcst_t *macstat_cnt; + wl_cnt_wlc_t *wlc_cnt; + scb_val_t scbval; + char *output; + + WL_INFORM(("%s: Enter \n", __func__)); + RETURN_EIO_IF_NOT_UP(cfg); + + bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN); + bzero(iovar_buf, WLC_IOCTL_MAXLEN); + + output = cfg->ioctl_buf; + + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "radiostat", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (err != BCME_OK && err != BCME_UNSUPPORTED) { + WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wifi_radio_stat))); + return err; + } + radio = (wifi_radio_stat *)iovar_buf; + radio->num_channels = NUM_CHAN; + memcpy(output, iovar_buf+HEADER_SIZE, sizeof(wifi_radio_stat)-HEADER_SIZE); + + output += (sizeof(wifi_radio_stat) - HEADER_SIZE); + output += (NUM_CHAN*sizeof(wifi_channel_stat)); + + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "wme_counters", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + wl_wme_cnt = (wl_wme_cnt_t *)iovar_buf; + iface = (wifi_iface_stat *)output; + + iface->ac[WIFI_AC_VO].ac = WIFI_AC_VO; + iface->ac[WIFI_AC_VO].tx_mpdu = wl_wme_cnt->tx[AC_VO].packets; + iface->ac[WIFI_AC_VO].rx_mpdu = wl_wme_cnt->rx[AC_VO].packets; + iface->ac[WIFI_AC_VO].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_VO].packets; + + iface->ac[WIFI_AC_VI].ac = WIFI_AC_VI; + iface->ac[WIFI_AC_VI].tx_mpdu = wl_wme_cnt->tx[AC_VI].packets; + iface->ac[WIFI_AC_VI].rx_mpdu = wl_wme_cnt->rx[AC_VI].packets; + iface->ac[WIFI_AC_VI].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_VI].packets; + + iface->ac[WIFI_AC_BE].ac = WIFI_AC_BE; + iface->ac[WIFI_AC_BE].tx_mpdu = wl_wme_cnt->tx[AC_BE].packets; + iface->ac[WIFI_AC_BE].rx_mpdu = wl_wme_cnt->rx[AC_BE].packets; + iface->ac[WIFI_AC_BE].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_BE].packets; + + iface->ac[WIFI_AC_BK].ac = 
WIFI_AC_BK; + iface->ac[WIFI_AC_BK].tx_mpdu = wl_wme_cnt->tx[AC_BK].packets; + iface->ac[WIFI_AC_BK].rx_mpdu = wl_wme_cnt->rx[AC_BK].packets; + iface->ac[WIFI_AC_BK].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_BK].packets; + bzero(iovar_buf, WLC_IOCTL_MAXLEN); + + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "counters", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (unlikely(err)) { + WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wl_cnt_wlc_t))); + return err; + } + + /* Translate traditional (ver <= 10) counters struct to new xtlv type struct */ + err = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0); + if (err != BCME_OK) { + WL_ERR(("%s wl_cntbuf_to_xtlv_format ERR %d\n", __FUNCTION__, err)); + return err; + } + + if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) { + WL_ERR(("%s wlc_cnt NULL!\n", __FUNCTION__)); + return BCME_ERROR; + } + + iface->ac[WIFI_AC_BE].retries = wlc_cnt->txretry; + + if ((macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data, + ((wl_cnt_info_t *)iovar_buf)->datalen, + WL_CNT_XTLV_CNTV_LE10_UCODE, NULL, + BCM_XTLV_OPTION_ALIGN32)) == NULL) { + macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data, + ((wl_cnt_info_t *)iovar_buf)->datalen, + WL_CNT_XTLV_LT40_UCODE_V1, NULL, + BCM_XTLV_OPTION_ALIGN32); + } + + if (macstat_cnt == NULL) { + printf("wlmTxGetAckedPackets: macstat_cnt NULL!\n"); + return FALSE; + } + + iface->beacon_rx = macstat_cnt->rxbeaconmbss; + + err = wldev_get_rssi(bcmcfg_to_prmry_ndev(cfg), &scbval); + if (unlikely(err)) { + WL_ERR(("get_rssi error (%d)\n", err)); + return err; + } + iface->rssi_mgmt = scbval.val; + + iface->num_peers = NUM_PEER; + iface->peer_info->num_rate = NUM_RATE; + + bzero(iovar_buf, WLC_IOCTL_MAXLEN); + output = (char *)iface + sizeof(wifi_iface_stat) + NUM_PEER*sizeof(wifi_peer_info); + + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "ratestat", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (err != BCME_OK 
&& err != BCME_UNSUPPORTED) { + WL_ERR(("error (%d) - size = %zu\n", err, NUM_RATE*sizeof(wifi_rate_stat))); + return err; + } + for (i = 0; i < NUM_RATE; i++) + memcpy(output, iovar_buf+HEADER_SIZE+i*sizeof(wifi_rate_stat), + sizeof(wifi_rate_stat)-HEADER_SIZE); + + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + cfg->ioctl_buf, + sizeof(wifi_radio_stat)-HEADER_SIZE + + NUM_CHAN*sizeof(wifi_channel_stat) + + sizeof(wifi_iface_stat)+NUM_PEER*sizeof(wifi_peer_info) + + NUM_RATE*(sizeof(wifi_rate_stat)-HEADER_SIZE)); + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + + return err; +} +#endif /* LINKSTAT_SUPPORT */ + +static const struct wiphy_vendor_command wl_vendor_cmds [] = { + { + { + .vendor_id = OUI_BRCM, + .subcmd = BRCM_VENDOR_SCMD_PRIV_STR + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_priv_string_handler + }, + { + { + .vendor_id = OUI_BRCM, + .subcmd = BRCM_VENDOR_SCMD_BCM_STR + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_priv_bcm_handler + }, +#ifdef GSCAN_SUPPORT + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_GET_CAPABILITIES + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_gscan_get_capabilities + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_SET_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_set_scan_cfg + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_SET_SCAN_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_set_batch_scan_cfg + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_ENABLE_GSCAN + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_initiate_gscan + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = 
GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_enable_full_scan_result + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_SET_HOTLIST + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_hotlist_cfg + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_significant_change_cfg + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_GET_SCAN_RESULTS + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_gscan_get_batch_results + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_GET_CHANNEL_LIST + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_gscan_get_channel_list + }, +#endif /* GSCAN_SUPPORT */ +#ifdef RTT_SUPPORT + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = RTT_SUBCMD_SET_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_rtt_set_config + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = RTT_SUBCMD_CANCEL_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_rtt_cancel_config + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = RTT_SUBCMD_GETCAPABILITY + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_rtt_get_capability + }, +#endif /* RTT_SUPPORT */ + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_get_feature_set + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = 
wl_cfgvendor_get_feature_set_matrix + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = ANDR_WIFI_PNO_RANDOM_MAC_OUI + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_set_pno_mac_oui + }, +#ifdef CUSTOM_FORCE_NODFS_FLAG + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = ANDR_WIFI_NODFS_CHANNELS + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_set_nodfs_flag + + }, +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +#ifdef LINKSTAT_SUPPORT + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = LSTATS_SUBCMD_GET_INFO + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_lstats_get_info + }, +#endif /* LINKSTAT_SUPPORT */ +}; + +static const struct nl80211_vendor_cmd_info wl_vendor_events [] = { + { OUI_BRCM, BRCM_VENDOR_EVENT_UNSPEC }, + { OUI_BRCM, BRCM_VENDOR_EVENT_PRIV_STR }, +#ifdef GSCAN_SUPPORT + { OUI_GOOGLE, GOOGLE_GSCAN_SIGNIFICANT_EVENT }, + { OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT }, + { OUI_GOOGLE, GOOGLE_GSCAN_BATCH_SCAN_EVENT }, + { OUI_GOOGLE, GOOGLE_SCAN_FULL_RESULTS_EVENT }, +#endif /* GSCAN_SUPPORT */ +#ifdef RTT_SUPPORT + { OUI_GOOGLE, GOOGLE_RTT_COMPLETE_EVENT }, +#endif /* RTT_SUPPORT */ +#ifdef GSCAN_SUPPORT + { OUI_GOOGLE, GOOGLE_SCAN_COMPLETE_EVENT }, + { OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT }, +#endif /* GSCAN_SUPPORT */ + { OUI_BRCM, BRCM_VENDOR_EVENT_IDSUP_STATUS } +}; + +int wl_cfgvendor_attach(struct wiphy *wiphy) +{ + + WL_INFORM(("Vendor: Register BRCM cfg80211 vendor cmd(0x%x) interface \n", + NL80211_CMD_VENDOR)); + + wiphy->vendor_commands = wl_vendor_cmds; + wiphy->n_vendor_commands = ARRAY_SIZE(wl_vendor_cmds); + wiphy->vendor_events = wl_vendor_events; + wiphy->n_vendor_events = ARRAY_SIZE(wl_vendor_events); + + return 0; +} + +int wl_cfgvendor_detach(struct wiphy *wiphy) +{ + WL_INFORM(("Vendor: Unregister BRCM cfg80211 vendor interface \n")); + + wiphy->vendor_commands = NULL; + wiphy->vendor_events 
= NULL; + wiphy->n_vendor_commands = 0; + wiphy->n_vendor_events = 0; + + return 0; +} +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ diff --git a/drivers/amlogic/wifi/bcmdhd/wl_cfgvendor.h b/drivers/amlogic/wifi/bcmdhd/wl_cfgvendor.h new file mode 100644 index 0000000000000..f3e464e725dfa --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/wl_cfgvendor.h @@ -0,0 +1,270 @@ +/* + * Linux cfg80211 Vendor Extension Code + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: wl_cfgvendor.h 605796 2015-12-11 13:45:36Z $ + */ + + +#ifndef _wl_cfgvendor_h_ +#define _wl_cfgvendor_h_ + +#define OUI_BRCM 0x001018 +#define OUI_GOOGLE 0x001A11 +#define BRCM_VENDOR_SUBCMD_PRIV_STR 1 +#define ATTRIBUTE_U32_LEN (NLA_HDRLEN + 4) +#define VENDOR_ID_OVERHEAD ATTRIBUTE_U32_LEN +#define VENDOR_SUBCMD_OVERHEAD ATTRIBUTE_U32_LEN +#define VENDOR_DATA_OVERHEAD (NLA_HDRLEN) + +#define SCAN_RESULTS_COMPLETE_FLAG_LEN ATTRIBUTE_U32_LEN +#define SCAN_INDEX_HDR_LEN (NLA_HDRLEN) +#define SCAN_ID_HDR_LEN ATTRIBUTE_U32_LEN +#define SCAN_FLAGS_HDR_LEN ATTRIBUTE_U32_LEN +#define GSCAN_NUM_RESULTS_HDR_LEN ATTRIBUTE_U32_LEN +#define GSCAN_RESULTS_HDR_LEN (NLA_HDRLEN) +#define GSCAN_BATCH_RESULT_HDR_LEN (SCAN_INDEX_HDR_LEN + SCAN_ID_HDR_LEN + \ + SCAN_FLAGS_HDR_LEN + \ + GSCAN_NUM_RESULTS_HDR_LEN + \ + GSCAN_RESULTS_HDR_LEN) + +#define VENDOR_REPLY_OVERHEAD (VENDOR_ID_OVERHEAD + \ + VENDOR_SUBCMD_OVERHEAD + \ + VENDOR_DATA_OVERHEAD) + +#define GSCAN_ATTR_SET1 10 +#define GSCAN_ATTR_SET2 20 +#define GSCAN_ATTR_SET3 30 +#define GSCAN_ATTR_SET4 40 +#define GSCAN_ATTR_SET5 50 +#define GSCAN_ATTR_SET6 60 + +typedef enum { + /* don't use 0 as a valid subcommand */ + VENDOR_NL80211_SUBCMD_UNSPECIFIED, + + /* define all vendor startup commands between 0x0 and 0x0FFF */ + VENDOR_NL80211_SUBCMD_RANGE_START = 0x0001, + VENDOR_NL80211_SUBCMD_RANGE_END = 0x0FFF, + + /* define all GScan related commands between 0x1000 and 0x10FF */ + ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START = 0x1000, + ANDROID_NL80211_SUBCMD_GSCAN_RANGE_END = 0x10FF, + + /* define all NearbyDiscovery related commands between 0x1100 and 0x11FF */ + ANDROID_NL80211_SUBCMD_NBD_RANGE_START = 0x1100, + ANDROID_NL80211_SUBCMD_NBD_RANGE_END = 0x11FF, + + /* define all RTT related commands between 0x1100 and 0x11FF */ + ANDROID_NL80211_SUBCMD_RTT_RANGE_START = 0x1100, + ANDROID_NL80211_SUBCMD_RTT_RANGE_END = 0x11FF, + + ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START = 0x1200, + 
ANDROID_NL80211_SUBCMD_LSTATS_RANGE_END = 0x12FF, + + ANDROID_NL80211_SUBCMD_TDLS_RANGE_START = 0x1300, + ANDROID_NL80211_SUBCMD_TDLS_RANGE_END = 0x13FF, + /* This is reserved for future usage */ + +} ANDROID_VENDOR_SUB_COMMAND; + +enum andr_vendor_subcmd { + GSCAN_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START, + GSCAN_SUBCMD_SET_CONFIG, + GSCAN_SUBCMD_SET_SCAN_CONFIG, + GSCAN_SUBCMD_ENABLE_GSCAN, + GSCAN_SUBCMD_GET_SCAN_RESULTS, + GSCAN_SUBCMD_SCAN_RESULTS, + GSCAN_SUBCMD_SET_HOTLIST, + GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG, + GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS, + GSCAN_SUBCMD_GET_CHANNEL_LIST, + /* ANDR_WIFI_XXX although not related to gscan are defined here */ + ANDR_WIFI_SUBCMD_GET_FEATURE_SET, + ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX, + ANDR_WIFI_PNO_RANDOM_MAC_OUI, + ANDR_WIFI_NODFS_CHANNELS, + RTT_SUBCMD_SET_CONFIG = ANDROID_NL80211_SUBCMD_RTT_RANGE_START, + RTT_SUBCMD_CANCEL_CONFIG, + RTT_SUBCMD_GETCAPABILITY, + + LSTATS_SUBCMD_GET_INFO = ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START, + /* Add more sub commands here */ + VENDOR_SUBCMD_MAX +}; + +enum gscan_attributes { + GSCAN_ATTRIBUTE_NUM_BUCKETS = GSCAN_ATTR_SET1, + GSCAN_ATTRIBUTE_BASE_PERIOD, + GSCAN_ATTRIBUTE_BUCKETS_BAND, + GSCAN_ATTRIBUTE_BUCKET_ID, + GSCAN_ATTRIBUTE_BUCKET_PERIOD, + GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS, + GSCAN_ATTRIBUTE_BUCKET_CHANNELS, + GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN, + GSCAN_ATTRIBUTE_REPORT_THRESHOLD, + GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE, + GSCAN_ATTRIBUTE_BAND = GSCAN_ATTRIBUTE_BUCKETS_BAND, + + GSCAN_ATTRIBUTE_ENABLE_FEATURE = GSCAN_ATTR_SET2, + GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, + GSCAN_ATTRIBUTE_FLUSH_FEATURE, + GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS, + GSCAN_ATTRIBUTE_REPORT_EVENTS, + /* remaining reserved for additional attributes */ + GSCAN_ATTRIBUTE_NUM_OF_RESULTS = GSCAN_ATTR_SET3, + GSCAN_ATTRIBUTE_FLUSH_RESULTS, + GSCAN_ATTRIBUTE_SCAN_RESULTS, /* flat array of wifi_scan_result */ + GSCAN_ATTRIBUTE_SCAN_ID, /* indicates scan number */ 
+ GSCAN_ATTRIBUTE_SCAN_FLAGS, /* indicates if scan was aborted */ + GSCAN_ATTRIBUTE_AP_FLAGS, /* flags on significant change event */ + GSCAN_ATTRIBUTE_NUM_CHANNELS, + GSCAN_ATTRIBUTE_CHANNEL_LIST, + + /* remaining reserved for additional attributes */ + + GSCAN_ATTRIBUTE_SSID = GSCAN_ATTR_SET4, + GSCAN_ATTRIBUTE_BSSID, + GSCAN_ATTRIBUTE_CHANNEL, + GSCAN_ATTRIBUTE_RSSI, + GSCAN_ATTRIBUTE_TIMESTAMP, + GSCAN_ATTRIBUTE_RTT, + GSCAN_ATTRIBUTE_RTTSD, + + /* remaining reserved for additional attributes */ + + GSCAN_ATTRIBUTE_HOTLIST_BSSIDS = GSCAN_ATTR_SET5, + GSCAN_ATTRIBUTE_RSSI_LOW, + GSCAN_ATTRIBUTE_RSSI_HIGH, + GSCAN_ATTRIBUTE_HOSTLIST_BSSID_ELEM, + GSCAN_ATTRIBUTE_HOTLIST_FLUSH, + + /* remaining reserved for additional attributes */ + GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE = GSCAN_ATTR_SET6, + GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE, + GSCAN_ATTRIBUTE_MIN_BREACHING, + GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS, + GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH, + GSCAN_ATTRIBUTE_MAX +}; + +enum gscan_bucket_attributes { + GSCAN_ATTRIBUTE_CH_BUCKET_1, + GSCAN_ATTRIBUTE_CH_BUCKET_2, + GSCAN_ATTRIBUTE_CH_BUCKET_3, + GSCAN_ATTRIBUTE_CH_BUCKET_4, + GSCAN_ATTRIBUTE_CH_BUCKET_5, + GSCAN_ATTRIBUTE_CH_BUCKET_6, + GSCAN_ATTRIBUTE_CH_BUCKET_7 +}; + +enum gscan_ch_attributes { + GSCAN_ATTRIBUTE_CH_ID_1, + GSCAN_ATTRIBUTE_CH_ID_2, + GSCAN_ATTRIBUTE_CH_ID_3, + GSCAN_ATTRIBUTE_CH_ID_4, + GSCAN_ATTRIBUTE_CH_ID_5, + GSCAN_ATTRIBUTE_CH_ID_6, + GSCAN_ATTRIBUTE_CH_ID_7 +}; + +enum rtt_attributes { + RTT_ATTRIBUTE_TARGET_CNT, + RTT_ATTRIBUTE_TARGET_INFO, + RTT_ATTRIBUTE_TARGET_MAC, + RTT_ATTRIBUTE_TARGET_TYPE, + RTT_ATTRIBUTE_TARGET_PEER, + RTT_ATTRIBUTE_TARGET_CHAN, + RTT_ATTRIBUTE_TARGET_MODE, + RTT_ATTRIBUTE_TARGET_INTERVAL, + RTT_ATTRIBUTE_TARGET_NUM_MEASUREMENT, + RTT_ATTRIBUTE_TARGET_NUM_PKT, + RTT_ATTRIBUTE_TARGET_NUM_RETRY +}; + +typedef enum wl_vendor_event { + BRCM_VENDOR_EVENT_UNSPEC, + BRCM_VENDOR_EVENT_PRIV_STR, + GOOGLE_GSCAN_SIGNIFICANT_EVENT, + GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT, + 
GOOGLE_GSCAN_BATCH_SCAN_EVENT, + GOOGLE_SCAN_FULL_RESULTS_EVENT, + GOOGLE_RTT_COMPLETE_EVENT, + GOOGLE_SCAN_COMPLETE_EVENT, + GOOGLE_GSCAN_GEOFENCE_LOST_EVENT, + BRCM_VENDOR_EVENT_IDSUP_STATUS +} wl_vendor_event_t; + +enum andr_wifi_attr { + ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, + ANDR_WIFI_ATTRIBUTE_FEATURE_SET, + ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI, + ANDR_WIFI_ATTRIBUTE_NODFS_SET +}; + +typedef enum wl_vendor_gscan_attribute { + ATTR_START_GSCAN, + ATTR_STOP_GSCAN, + ATTR_SET_SCAN_BATCH_CFG_ID, /* set batch scan params */ + ATTR_SET_SCAN_GEOFENCE_CFG_ID, /* set list of bssids to track */ + ATTR_SET_SCAN_SIGNIFICANT_CFG_ID, /* set list of bssids, rssi threshold etc.. */ + ATTR_SET_SCAN_CFG_ID, /* set common scan config params here */ + ATTR_GET_GSCAN_CAPABILITIES_ID, + /* Add more sub commands here */ + ATTR_GSCAN_MAX +} wl_vendor_gscan_attribute_t; + +typedef enum gscan_batch_attribute { + ATTR_GSCAN_BATCH_BESTN, + ATTR_GSCAN_BATCH_MSCAN, + ATTR_GSCAN_BATCH_BUFFER_THRESHOLD +} gscan_batch_attribute_t; + +typedef enum gscan_geofence_attribute { + ATTR_GSCAN_NUM_HOTLIST_BSSID, + ATTR_GSCAN_HOTLIST_BSSID +} gscan_geofence_attribute_t; + +typedef enum gscan_complete_event { + WIFI_SCAN_BUFFER_FULL, + WIFI_SCAN_COMPLETE +} gscan_complete_event_t; + +/* Capture the BRCM_VENDOR_SUBCMD_PRIV_STRINGS* here */ +#define BRCM_VENDOR_SCMD_CAPA "cap" + +#if defined(WL_VENDOR_EXT_SUPPORT) || defined(CONFIG_BCMDHD_VENDOR_EXT) +extern int wl_cfgvendor_attach(struct wiphy *wiphy); +extern int wl_cfgvendor_detach(struct wiphy *wiphy); +extern int wl_cfgvendor_send_async_event(struct wiphy *wiphy, + struct net_device *dev, int event_id, const void *data, int len); +extern int wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy, + struct net_device *dev, void *data, int len, wl_vendor_event_t event); +#else +static INLINE int cfgvendor_attach(struct wiphy *wiphy) { return 0; } +static INLINE int cfgvendor_detach(struct wiphy *wiphy) { return 0; } +#endif /* 
defined(WL_VENDOR_EXT_SUPPORT) */ + +#endif /* _wl_cfgvendor_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_dbg.h b/drivers/amlogic/wifi/bcmdhd/wl_dbg.h similarity index 51% rename from drivers/net/wireless/bcmdhd/wl_dbg.h rename to drivers/amlogic/wifi/bcmdhd/wl_dbg.h index 82d61e192d7a3..291911611daaa 100644 --- a/drivers/net/wireless/bcmdhd/wl_dbg.h +++ b/drivers/amlogic/wifi/bcmdhd/wl_dbg.h @@ -2,9 +2,30 @@ * Minimal debug/trace/assert driver definitions for * Broadcom 802.11 Networking Adapter. * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: wl_dbg.h 472390 2014-04-23 23:32:01Z $ + * + * <> + * + * $Id: wl_dbg.h 519338 2014-12-05 21:23:30Z $ */ @@ -17,14 +38,7 @@ extern uint32 wl_msg_level2; #define WL_TIMESTAMP() -#if 0 && (VERSION_MAJOR > 9) -extern int osl_printf(const char *fmt, ...); -#include -#define WL_PRINT(args) do { osl_printf args; } while (0) -#define RELEASE_PRINT(args) do { WL_PRINT(args); IO8Log args; } while (0) -#else #define WL_PRINT(args) do { WL_TIMESTAMP(); printf args; } while (0) -#endif #if defined(EVENT_LOG_COMPILE) && defined(WLMSG_SRSCAN) #define _WL_SRSCAN(fmt, ...) EVENT_LOG(EVENT_LOG_TAG_SRSCAN, fmt, ##__VA_ARGS__) @@ -93,12 +107,12 @@ extern int osl_printf(const char *fmt, ...); #define WL_MCNX(args) #define WL_PROT(args) #define WL_PSTA(args) +#define WL_WFDS(m, b, n) #define WL_TRF_MGMT(args) #define WL_L2FILTER(args) #define WL_MQ(args) #define WL_TXBF(args) #define WL_P2PO(args) -#define WL_NET_DETECT(args) #define WL_ROAM(args) #define WL_WNM(args) @@ -121,7 +135,8 @@ extern int osl_printf(const char *fmt, ...); #define WL_WSEC(args) #define WL_WSEC_DUMP(args) #define WL_PCIE(args) -#define WL_CHANLOG(w, s, i, j) +#define WL_TSLOG(w, s, i, j) +#define WL_FBT(args) #define WL_ERROR_ON() (wl_msg_level & WL_ERROR_VAL) #define WL_TRACE_ON() 0 @@ -160,8 +175,7 @@ extern int osl_printf(const char *fmt, ...); #define WL_L2FILTER_ON() 0 #define WL_TXBF_ON() 0 #define WL_P2PO_ON() 0 -#define WL_CHANLOG_ON() 0 -#define WL_NET_DETECT_ON() 0 +#define WL_TSLOG_ON() 0 #define WL_WNM_ON() 0 #define WL_PCIE_ON() 0 @@ -177,203 +191,6 @@ extern int osl_printf(const char *fmt, ...); #define WL_ERROR(args) #define WL_TRACE(args) -#ifndef LINUX_POSTMOGRIFY_REMOVAL -#ifdef WLMSG_PRHDRS -#define WL_PRHDRS_MSG(args) WL_PRINT(args) -#define WL_PRHDRS(i, p, f, t, r, l) wlc_print_hdrs(i, p, f, t, r, l) -#else -#define WL_PRHDRS_MSG(args) -#define WL_PRHDRS(i, p, f, t, r, l) -#endif -#ifdef WLMSG_PRPKT -#define WL_PRPKT(m, b, n) prhex(m, b, n) -#else -#define WL_PRPKT(m, b, n) 
-#endif -#ifdef WLMSG_INFORM -#define WL_INFORM(args) WL_PRINT(args) -#else -#define WL_INFORM(args) -#endif -#define WL_TMP(args) -#ifdef WLMSG_OID -#define WL_OID(args) WL_PRINT(args) -#else -#define WL_OID(args) -#endif -#define WL_RATE(args) -#ifdef WLMSG_ASSOC -#define WL_ASSOC(args) WL_PRINT(args) -#else -#define WL_ASSOC(args) -#endif -#define WL_PRUSR(m, b, n) -#ifdef WLMSG_PS -#define WL_PS(args) WL_PRINT(args) -#else -#define WL_PS(args) -#endif -#ifdef WLMSG_ROAM -#define WL_ROAM(args) WL_PRINT(args) -#else -#define WL_ROAM(args) -#endif -#define WL_PORT(args) -#define WL_DUAL(args) -#define WL_REGULATORY(args) - -#ifdef WLMSG_MPC -#define WL_MPC(args) WL_PRINT(args) -#else -#define WL_MPC(args) -#endif -#define WL_APSTA(args) -#define WL_APSTA_BCN(args) -#define WL_APSTA_TX(args) -#define WL_APSTA_TSF(args) -#define WL_APSTA_BSSID(args) -#define WL_BA(args) -#define WL_MBSS(args) -#define WL_MODE_SWITCH(args) -#define WL_PROTO(args) - -#define WL_CAC(args) -#define WL_AMSDU(args) -#define WL_AMPDU(args) -#define WL_FFPLD(args) -#define WL_MCHAN(args) - -/* Define WLMSG_DFS automatically for WLTEST builds */ - -#ifdef WLMSG_DFS -#define WL_DFS(args) do {if (wl_msg_level & WL_DFS_VAL) WL_PRINT(args);} while (0) -#else /* WLMSG_DFS */ -#define WL_DFS(args) -#endif /* WLMSG_DFS */ -#define WL_WOWL(args) -#ifdef WLMSG_SCAN -#define WL_SCAN(args) WL_PRINT(args) -#else -#define WL_SCAN(args) -#endif -#define WL_COEX(args) -#define WL_RTDC(w, s, i, j) -#define WL_RTDC2(w, s, i, j) -#define WL_CHANINT(args) -#ifdef WLMSG_BTA -#define WL_BTA(args) WL_PRINT(args) -#else -#define WL_BTA(args) -#endif -#define WL_WMF(args) -#define WL_P2P(args) -#define WL_ITFR(args) -#define WL_TDLS(args) -#define WL_MCNX(args) -#define WL_PROT(args) -#define WL_PSTA(args) -#define WL_TBTT(args) -#define WL_TRF_MGMT(args) -#define WL_L2FILTER(args) -#define WL_MQ(args) -#define WL_P2PO(args) -#define WL_WNM(args) -#define WL_TXBF(args) -#define WL_CHANLOG(w, s, i, j) -#define 
WL_NET_DETECT(args) - -#define WL_ERROR_ON() 0 -#define WL_TRACE_ON() 0 -#ifdef WLMSG_PRHDRS -#define WL_PRHDRS_ON() 1 -#else -#define WL_PRHDRS_ON() 0 -#endif -#ifdef WLMSG_PRPKT -#define WL_PRPKT_ON() 1 -#else -#define WL_PRPKT_ON() 0 -#endif -#ifdef WLMSG_INFORM -#define WL_INFORM_ON() 1 -#else -#define WL_INFORM_ON() 0 -#endif -#ifdef WLMSG_OID -#define WL_OID_ON() 1 -#else -#define WL_OID_ON() 0 -#endif -#define WL_TMP_ON() 0 -#define WL_RATE_ON() 0 -#ifdef WLMSG_ASSOC -#define WL_ASSOC_ON() 1 -#else -#define WL_ASSOC_ON() 0 -#endif -#define WL_PORT_ON() 0 -#ifdef WLMSG_WSEC -#define WL_WSEC_ON() 1 -#define WL_WSEC_DUMP_ON() 1 -#else -#define WL_WSEC_ON() 0 -#define WL_WSEC_DUMP_ON() 0 -#endif -#ifdef WLMSG_MPC -#define WL_MPC_ON() 1 -#else -#define WL_MPC_ON() 0 -#endif -#define WL_REGULATORY_ON() 0 - -#define WL_APSTA_ON() 0 -#define WL_BA_ON() 0 -#define WL_MBSS_ON() 0 -#define WL_MODE_SWITCH_ON() 0 -#ifdef WLMSG_DFS -#define WL_DFS_ON() 1 -#else /* WLMSG_DFS */ -#define WL_DFS_ON() 0 -#endif /* WLMSG_DFS */ -#ifdef WLMSG_SCAN -#define WL_SCAN_ON() 1 -#else -#define WL_SCAN_ON() 0 -#endif -#ifdef WLMSG_BTA -#define WL_BTA_ON() 1 -#else -#define WL_BTA_ON() 0 -#endif -#define WL_WMF_ON() 0 -#define WL_P2P_ON() 0 -#define WL_MCHAN_ON() 0 -#define WL_TDLS_ON() 0 -#define WL_MCNX_ON() 0 -#define WL_PROT_ON() 0 -#define WL_TBTT_ON() 0 -#define WL_PWRSEL_ON() 0 -#define WL_L2FILTER_ON() 0 -#define WL_MQ_ON() 0 -#define WL_P2PO_ON() 0 -#define WL_TXBF_ON() 0 -#define WL_CHANLOG_ON() 0 - -#define WL_AMPDU_UPDN(args) -#define WL_AMPDU_RX(args) -#define WL_AMPDU_ERR(args) -#define WL_AMPDU_TX(args) -#define WL_AMPDU_CTL(args) -#define WL_AMPDU_HW(args) -#define WL_AMPDU_HWTXS(args) -#define WL_AMPDU_HWDBG(args) -#define WL_AMPDU_STAT(args) -#define WL_AMPDU_ERR_ON() 0 -#define WL_AMPDU_HW_ON() 0 -#define WL_AMPDU_HWTXS_ON() 0 - -#define WL_WNM_ON() 0 -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ #define WL_APSTA_UPDN(args) #define WL_APSTA_RX(args) #ifdef WLMSG_WSEC @@ 
-385,7 +202,9 @@ extern int osl_printf(const char *fmt, ...); #endif #define WL_PCIE(args) do {if (wl_msg_level2 & WL_PCIE_VAL) WL_PRINT(args);} while (0) #define WL_PCIE_ON() (wl_msg_level2 & WL_PCIE_VAL) -#endif +#define WL_PFN(args) do {if (wl_msg_level & WL_PFN_VAL) WL_PRINT(args);} while (0) +#define WL_PFN_ON() (wl_msg_level & WL_PFN_VAL) +#endif extern uint32 wl_msg_level; extern uint32 wl_msg_level2; diff --git a/drivers/amlogic/wifi/bcmdhd/wl_escan.c b/drivers/amlogic/wifi/bcmdhd/wl_escan.c new file mode 100644 index 0000000000000..259b3d9d7f39d --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/wl_escan.c @@ -0,0 +1,1457 @@ + +#if defined(WL_ESCAN) + +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +/* message levels */ +#define ESCAN_ERROR_LEVEL 0x0001 +#define ESCAN_SCAN_LEVEL 0x0002 +#define ESCAN_TRACE_LEVEL 0x0004 + +#define ESCAN_ERROR(x) \ + do { \ + if (iw_msg_level & ESCAN_ERROR_LEVEL) { \ + printf(KERN_ERR "ESCAN-ERROR) "); \ + printf x; \ + } \ + } while (0) +#define ESCAN_SCAN(x) \ + do { \ + if (iw_msg_level & ESCAN_SCAN_LEVEL) { \ + printf(KERN_ERR "ESCAN-SCAN) "); \ + printf x; \ + } \ + } while (0) +#define ESCAN_TRACE(x) \ + do { \ + if (iw_msg_level & ESCAN_TRACE_LEVEL) { \ + printf(KERN_ERR "ESCAN-TRACE) "); \ + printf x; \ + } \ + } while (0) + +/* IOCTL swapping mode for Big Endian host with Little Endian dongle. 
Default to off */ +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) + +#define wl_escan_get_buf(a) ((wl_scan_results_t *) (a)->escan_buf) + +#define for_each_bss(list, bss, __i) \ + for (__i = 0; __i < list->count && __i < IW_MAX_AP; __i++, bss = next_bss(list, bss)) + +#define wl_escan_set_sync_id(a) ((a) = htod16(0x1234)) + +#ifdef ESCAN_BUF_OVERFLOW_MGMT +#define BUF_OVERFLOW_MGMT_COUNT 3 +typedef struct { + int RSSI; + int length; + struct ether_addr BSSID; +} removal_element_t; +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + +struct wl_escan_info *g_escan = NULL; + +#if defined(RSSIAVG) +static wl_rssi_cache_ctrl_t g_rssi_cache_ctrl; +static wl_rssi_cache_ctrl_t g_connected_rssi_cache_ctrl; +#endif +#if defined(BSSCACHE) +static wl_bss_cache_ctrl_t g_bss_cache_ctrl; +#endif + +/* Return a new chanspec given a legacy chanspec + * Returns INVCHANSPEC on error + */ +static chanspec_t +wl_chspec_from_legacy(chanspec_t legacy_chspec) +{ + chanspec_t chspec; + + /* get the channel number */ + chspec = LCHSPEC_CHANNEL(legacy_chspec); + + /* convert the band */ + if (LCHSPEC_IS2G(legacy_chspec)) { + chspec |= WL_CHANSPEC_BAND_2G; + } else { + chspec |= WL_CHANSPEC_BAND_5G; + } + + /* convert the bw and sideband */ + if (LCHSPEC_IS20(legacy_chspec)) { + chspec |= WL_CHANSPEC_BW_20; + } else { + chspec |= WL_CHANSPEC_BW_40; + if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) { + chspec |= WL_CHANSPEC_CTL_SB_L; + } else { + chspec |= WL_CHANSPEC_CTL_SB_U; + } + } + + if (wf_chspec_malformed(chspec)) { + ESCAN_ERROR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n", + chspec)); + return INVCHANSPEC; + } + + return chspec; +} + +/* Return a legacy chanspec given a new chanspec + * Returns INVCHANSPEC on error + */ +static chanspec_t +wl_chspec_to_legacy(chanspec_t chspec) +{ + chanspec_t lchspec; + + if (wf_chspec_malformed(chspec)) { + 
ESCAN_ERROR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n", + chspec)); + return INVCHANSPEC; + } + + /* get the channel number */ + lchspec = CHSPEC_CHANNEL(chspec); + + /* convert the band */ + if (CHSPEC_IS2G(chspec)) { + lchspec |= WL_LCHANSPEC_BAND_2G; + } else { + lchspec |= WL_LCHANSPEC_BAND_5G; + } + + /* convert the bw and sideband */ + if (CHSPEC_IS20(chspec)) { + lchspec |= WL_LCHANSPEC_BW_20; + lchspec |= WL_LCHANSPEC_CTL_SB_NONE; + } else if (CHSPEC_IS40(chspec)) { + lchspec |= WL_LCHANSPEC_BW_40; + if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) { + lchspec |= WL_LCHANSPEC_CTL_SB_LOWER; + } else { + lchspec |= WL_LCHANSPEC_CTL_SB_UPPER; + } + } else { + /* cannot express the bandwidth */ + char chanbuf[CHANSPEC_STR_LEN]; + ESCAN_ERROR(( + "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) " + "to pre-11ac format\n", + wf_chspec_ntoa(chspec, chanbuf), chspec)); + return INVCHANSPEC; + } + + return lchspec; +} + +/* given a chanspec value from the driver, do the endian and chanspec version conversion to + * a chanspec_t value + * Returns INVCHANSPEC on error + */ +static chanspec_t +wl_chspec_driver_to_host(int ioctl_ver, chanspec_t chanspec) +{ + chanspec = dtohchanspec(chanspec); + if (ioctl_ver == 1) { + chanspec = wl_chspec_from_legacy(chanspec); + } + + return chanspec; +} + +/* given a chanspec value, do the endian and chanspec version conversion to + * a chanspec_t value + * Returns INVCHANSPEC on error + */ +static chanspec_t +wl_chspec_host_to_driver(chanspec_t chanspec) +{ + if (1) { + chanspec = wl_chspec_to_legacy(chanspec); + if (chanspec == INVCHANSPEC) { + return chanspec; + } + } + chanspec = htodchanspec(chanspec); + + return chanspec; +} + +/* given a channel value, do the endian and chanspec version conversion to + * a chanspec_t value + * Returns INVCHANSPEC on error + */ +static chanspec_t +wl_ch_host_to_driver(s32 bssidx, u16 channel) +{ + chanspec_t chanspec; + + chanspec = channel & 
WL_CHANSPEC_CHAN_MASK; + + if (channel <= CH_MAX_2G_CHANNEL) + chanspec |= WL_CHANSPEC_BAND_2G; + else + chanspec |= WL_CHANSPEC_BAND_5G; + + chanspec |= WL_CHANSPEC_BW_20; + + chanspec |= WL_CHANSPEC_CTL_SB_NONE; + + return wl_chspec_host_to_driver(chanspec); +} + +static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss) +{ + return bss = bss ? + (struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info; +} + +#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] + +static int +rssi_to_qual(int rssi) +{ + if (rssi <= WL_IW_RSSI_NO_SIGNAL) + return 0; + else if (rssi <= WL_IW_RSSI_VERY_LOW) + return 1; + else if (rssi <= WL_IW_RSSI_LOW) + return 2; + else if (rssi <= WL_IW_RSSI_GOOD) + return 3; + else if (rssi <= WL_IW_RSSI_VERY_GOOD) + return 4; + else + return 5; +} + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \ +_Pragma("GCC diagnostic push") \ +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \ +(entry) = list_first_entry((ptr), type, member); \ +_Pragma("GCC diagnostic pop") \ + +#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \ +_Pragma("GCC diagnostic push") \ +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \ +entry = container_of((ptr), type, member); \ +_Pragma("GCC diagnostic pop") \ + +#else +#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \ +(entry) = list_first_entry((ptr), type, member); \ + +#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \ +entry = container_of((ptr), type, member); \ + +#endif /* STRICT_GCC_WARNINGS */ + +static unsigned long wl_lock_eq(struct wl_escan_info *escan) +{ + unsigned long flags; + + spin_lock_irqsave(&escan->eq_lock, flags); + return flags; +} + +static void wl_unlock_eq(struct wl_escan_info *escan, unsigned long flags) +{ + spin_unlock_irqrestore(&escan->eq_lock, flags); +} + +static 
void wl_init_eq(struct wl_escan_info *escan) +{ + spin_lock_init(&escan->eq_lock); + INIT_LIST_HEAD(&escan->eq_list); +} + +static void wl_flush_eq(struct wl_escan_info *escan) +{ + struct escan_event_q *e; + unsigned long flags; + + flags = wl_lock_eq(escan); + while (!list_empty_careful(&escan->eq_list)) { + BCM_SET_LIST_FIRST_ENTRY(e, &escan->eq_list, struct escan_event_q, eq_list); + list_del(&e->eq_list); + kfree(e); + } + wl_unlock_eq(escan, flags); +} + +static struct escan_event_q *wl_deq_event(struct wl_escan_info *escan) +{ + struct escan_event_q *e = NULL; + unsigned long flags; + + flags = wl_lock_eq(escan); + if (likely(!list_empty(&escan->eq_list))) { + BCM_SET_LIST_FIRST_ENTRY(e, &escan->eq_list, struct escan_event_q, eq_list); + list_del(&e->eq_list); + } + wl_unlock_eq(escan, flags); + + return e; +} + +/* + * push event to tail of the queue + */ + +static s32 +wl_enq_event(struct wl_escan_info *escan, struct net_device *ndev, u32 event, + const wl_event_msg_t *msg, void *data) +{ + struct escan_event_q *e; + s32 err = 0; + uint32 evtq_size; + uint32 data_len; + unsigned long flags; + gfp_t aflags; + + data_len = 0; + if (data) + data_len = ntoh32(msg->datalen); + evtq_size = sizeof(struct escan_event_q) + data_len; + aflags = (in_atomic()) ? 
GFP_ATOMIC : GFP_KERNEL; + e = kzalloc(evtq_size, aflags); + if (unlikely(!e)) { + ESCAN_ERROR(("event alloc failed\n")); + return -ENOMEM; + } + e->etype = event; + memcpy(&e->emsg, msg, sizeof(wl_event_msg_t)); + if (data) + memcpy(e->edata, data, data_len); + flags = wl_lock_eq(escan); + list_add_tail(&e->eq_list, &escan->eq_list); + wl_unlock_eq(escan, flags); + + return err; +} + +static void wl_put_event(struct escan_event_q *e) +{ + kfree(e); +} + +static void wl_wakeup_event(struct wl_escan_info *escan) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(escan->pub); + + if (dhd->up && (escan->event_tsk.thr_pid >= 0)) { + up(&escan->event_tsk.sema); + } +} + +static s32 wl_escan_event_handler(void *data) +{ + struct wl_escan_info *escan = NULL; + struct escan_event_q *e; + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + + escan = (struct wl_escan_info *)tsk->parent; + + printf("tsk Enter, tsk = 0x%p\n", tsk); + + while (down_interruptible (&tsk->sema) == 0) { + SMP_RD_BARRIER_DEPENDS(); + if (tsk->terminated) { + break; + } + while (escan && (e = wl_deq_event(escan))) { + ESCAN_TRACE(("dev=%p, event type (%d), ifidx: %d bssidx: %d \n", + escan->dev, e->etype, e->emsg.ifidx, e->emsg.bsscfgidx)); + + if (e->emsg.ifidx > WL_MAX_IFS) { + ESCAN_ERROR(("Event ifidx not in range. 
val:%d \n", e->emsg.ifidx)); + goto fail; + } + + if (escan->dev && escan->evt_handler[e->etype]) { + dhd_pub_t *dhd = (struct dhd_pub *)(escan->pub); + if (dhd->busstate == DHD_BUS_DOWN) { + ESCAN_ERROR((": BUS is DOWN.\n")); + } else { + escan->evt_handler[e->etype](escan, &e->emsg, e->edata); + } + } else { + ESCAN_TRACE(("Unknown Event (%d): ignoring\n", e->etype)); + } +fail: + wl_put_event(e); + DHD_EVENT_WAKE_UNLOCK(escan->pub); + } + } + printf("%s: was terminated\n", __FUNCTION__); + complete_and_exit(&tsk->completed, 0); + return 0; +} + +void +wl_escan_event(struct net_device *ndev, const wl_event_msg_t * e, void *data) +{ + u32 event_type = ntoh32(e->event_type); + struct wl_escan_info *escan = g_escan; + + if (!escan || !escan->dev) { + return; + } + + if (escan->event_tsk.thr_pid == -1) { + ESCAN_ERROR(("Event handler is not created\n")); + return; + } + + if (escan == NULL) { + ESCAN_ERROR(("Stale event ignored\n")); + return; + } + + if (event_type == WLC_E_PFN_NET_FOUND) { + ESCAN_TRACE(("PNOEVENT: PNO_NET_FOUND\n")); + } + else if (event_type == WLC_E_PFN_NET_LOST) { + ESCAN_TRACE(("PNOEVENT: PNO_NET_LOST\n")); + } + + DHD_EVENT_WAKE_LOCK(escan->pub); + if (likely(!wl_enq_event(escan, ndev, event_type, e, data))) { + wl_wakeup_event(escan); + } else { + DHD_EVENT_WAKE_UNLOCK(escan->pub); + } +} + +static s32 wl_escan_inform_bss(struct wl_escan_info *escan) +{ + struct wl_scan_results *bss_list; + s32 err = 0; +#if defined(RSSIAVG) + int rssi; +#endif + + bss_list = escan->bss_list; + + /* Delete disconnected cache */ +#if defined(BSSCACHE) + wl_delete_disconnected_bss_cache(&g_bss_cache_ctrl, (u8*)&escan->disconnected_bssid); +#if defined(RSSIAVG) + wl_delete_disconnected_rssi_cache(&g_rssi_cache_ctrl, (u8*)&escan->disconnected_bssid); +#endif +#endif + + /* Update cache */ +#if defined(RSSIAVG) + wl_update_rssi_cache(&g_rssi_cache_ctrl, bss_list); + if (!in_atomic()) + wl_update_connected_rssi_cache(escan->dev, &g_rssi_cache_ctrl, &rssi); +#endif 
+#if defined(BSSCACHE) + wl_update_bss_cache(&g_bss_cache_ctrl, +#if defined(RSSIAVG) + &g_rssi_cache_ctrl, +#endif + bss_list); +#endif + + /* delete dirty cache */ +#if defined(RSSIAVG) + wl_delete_dirty_rssi_cache(&g_rssi_cache_ctrl); + wl_reset_rssi_cache(&g_rssi_cache_ctrl); +#endif +#if defined(BSSCACHE) + wl_delete_dirty_bss_cache(&g_bss_cache_ctrl); + wl_reset_bss_cache(&g_bss_cache_ctrl); +#endif + + ESCAN_TRACE(("scanned AP count (%d)\n", bss_list->count)); + + return err; +} + +static wl_scan_params_t * +wl_escan_alloc_params(int channel, int nprobes, int *out_params_size) +{ + wl_scan_params_t *params; + int params_size; + int num_chans; + int bssidx = 0; + + *out_params_size = 0; + + /* Our scan params only need space for 1 channel and 0 ssids */ + params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16); + params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL); + if (params == NULL) { + ESCAN_ERROR(("mem alloc failed (%d bytes)\n", params_size)); + return params; + } + memset(params, 0, params_size); + params->nprobes = nprobes; + + num_chans = (channel == 0) ? 
0 : 1; + + memcpy(¶ms->bssid, ðer_bcast, ETHER_ADDR_LEN); + params->bss_type = DOT11_BSSTYPE_ANY; + params->scan_type = DOT11_SCANTYPE_ACTIVE; + params->nprobes = htod32(1); + params->active_time = htod32(-1); + params->passive_time = htod32(-1); + params->home_time = htod32(10); + if (channel == -1) + params->channel_list[0] = htodchanspec(channel); + else + params->channel_list[0] = wl_ch_host_to_driver(bssidx, channel); + + /* Our scan params have 1 channel and 0 ssids */ + params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) | + (num_chans & WL_SCAN_PARAMS_COUNT_MASK)); + + *out_params_size = params_size; /* rtn size to the caller */ + return params; +} + +static void wl_escan_abort(struct wl_escan_info *escan) +{ + wl_scan_params_t *params = NULL; + s32 params_size = 0; + s32 err = BCME_OK; + if (!in_atomic()) { + /* Our scan params only need space for 1 channel and 0 ssids */ + params = wl_escan_alloc_params(-1, 0, ¶ms_size); + if (params == NULL) { + ESCAN_ERROR(("scan params allocation failed \n")); + err = -ENOMEM; + } else { + /* Do a scan abort to stop the driver's scan engine */ + err = wldev_ioctl(escan->dev, WLC_SCAN, params, params_size, true); + if (err < 0) { + ESCAN_ERROR(("scan abort failed \n")); + } + kfree(params); + } + } +} + +static s32 wl_notify_escan_complete(struct wl_escan_info *escan, bool fw_abort) +{ + s32 err = BCME_OK; + int cmd = 0; +#if WIRELESS_EXT > 13 + union iwreq_data wrqu; + char extra[IW_CUSTOM_MAX + 1]; + + memset(extra, 0, sizeof(extra)); +#endif + + ESCAN_TRACE(("Enter\n")); + + if (!escan || !escan->dev) { + ESCAN_ERROR(("escan or dev is null\n")); + err = BCME_ERROR; + goto out; + } + if (fw_abort && !in_atomic()) + wl_escan_abort(escan); + + if (timer_pending(&escan->scan_timeout)) + del_timer_sync(&escan->scan_timeout); +#if defined(ESCAN_RESULT_PATCH) + escan->bss_list = wl_escan_get_buf(escan); + wl_escan_inform_bss(escan); +#endif /* ESCAN_RESULT_PATCH */ + +#if WIRELESS_EXT > 13 +#if WIRELESS_EXT > 14 
+ cmd = SIOCGIWSCAN; +#endif + ESCAN_TRACE(("event WLC_E_SCAN_COMPLETE\n")); + // terence 20150224: fix "wlan0: (WE) : Wireless Event too big (65306)" + memset(&wrqu, 0, sizeof(wrqu)); + if (cmd) { + if (cmd == SIOCGIWSCAN) { + wireless_send_event(escan->dev, cmd, &wrqu, NULL); + } else + wireless_send_event(escan->dev, cmd, &wrqu, extra); + } +#endif + +out: + return err; +} + +#ifdef ESCAN_BUF_OVERFLOW_MGMT +static void +wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate) +{ + int idx; + for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) { + int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1; + if (bss->RSSI < candidate[idx].RSSI) { + if (len) + memcpy(&candidate[idx + 1], &candidate[idx], + sizeof(removal_element_t) * len); + candidate[idx].RSSI = bss->RSSI; + candidate[idx].length = bss->length; + memcpy(&candidate[idx].BSSID, &bss->BSSID, ETHER_ADDR_LEN); + return; + } + } +} + +static void +wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate, + wl_bss_info_t *bi) +{ + int idx1, idx2; + int total_delete_len = 0; + for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) { + int cur_len = WL_SCAN_RESULTS_FIXED_SIZE; + wl_bss_info_t *bss = NULL; + if (candidate[idx1].RSSI >= bi->RSSI) + continue; + for (idx2 = 0; idx2 < list->count; idx2++) { + bss = bss ? 
(wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) : + list->bss_info; + if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) && + candidate[idx1].RSSI == bss->RSSI && + candidate[idx1].length == dtoh32(bss->length)) { + u32 delete_len = dtoh32(bss->length); + ESCAN_TRACE(("delete scan info of " MACDBG " to add new AP\n", + MAC2STRDBG(bss->BSSID.octet))); + if (idx2 < list->count -1) { + memmove((u8 *)bss, (u8 *)bss + delete_len, + list->buflen - cur_len - delete_len); + } + list->buflen -= delete_len; + list->count--; + total_delete_len += delete_len; + /* if delete_len is greater than or equal to result length */ + if (total_delete_len >= bi->length) { + return; + } + break; + } + cur_len += dtoh32(bss->length); + } + } +} +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + +static s32 wl_escan_handler(struct wl_escan_info *escan, + const wl_event_msg_t *e, void *data) +{ + s32 err = BCME_OK; + s32 status = ntoh32(e->status); + wl_bss_info_t *bi; + wl_escan_result_t *escan_result; + wl_bss_info_t *bss = NULL; + wl_scan_results_t *list; + u32 bi_length; + u32 i; + u16 channel; + + ESCAN_TRACE(("enter event type : %d, status : %d \n", + ntoh32(e->event_type), ntoh32(e->status))); + + mutex_lock(&escan->usr_sync); + escan_result = (wl_escan_result_t *)data; + + if (escan->escan_state != ESCAN_STATE_SCANING) { + ESCAN_TRACE(("Not my scan\n")); + goto exit; + } + + if (status == WLC_E_STATUS_PARTIAL) { + ESCAN_TRACE(("WLC_E_STATUS_PARTIAL \n")); + if (!escan_result) { + ESCAN_ERROR(("Invalid escan result (NULL pointer)\n")); + goto exit; + } + if (dtoh16(escan_result->bss_count) != 1) { + ESCAN_ERROR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count)); + goto exit; + } + bi = escan_result->bss_info; + if (!bi) { + ESCAN_ERROR(("Invalid escan bss info (NULL pointer)\n")); + goto exit; + } + bi_length = dtoh32(bi->length); + if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) { + ESCAN_ERROR(("Invalid bss_info length %d: 
ignoring\n", bi_length)); + goto exit; + } + + /* +++++ terence 20130524: skip invalid bss */ + channel = + bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec)); + if (!dhd_conf_match_channel(escan->pub, channel)) + goto exit; + /* ----- terence 20130524: skip invalid bss */ + + { + int cur_len = WL_SCAN_RESULTS_FIXED_SIZE; +#ifdef ESCAN_BUF_OVERFLOW_MGMT + removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT]; + int remove_lower_rssi = FALSE; + + bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT); +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + + list = wl_escan_get_buf(escan); +#ifdef ESCAN_BUF_OVERFLOW_MGMT + if (bi_length > ESCAN_BUF_SIZE - list->buflen) + remove_lower_rssi = TRUE; +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + + ESCAN_TRACE(("%s("MACDBG") RSSI %d flags 0x%x length %d\n", bi->SSID, + MAC2STRDBG(bi->BSSID.octet), bi->RSSI, bi->flags, bi->length)); + for (i = 0; i < list->count; i++) { + bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) + : list->bss_info; +#ifdef ESCAN_BUF_OVERFLOW_MGMT + ESCAN_TRACE(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n", + bss->SSID, MAC2STRDBG(bss->BSSID.octet), + i, bss->RSSI, list->count)); + + if (remove_lower_rssi) + wl_cfg80211_find_removal_candidate(bss, candidate); +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) && + (CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec)) + == CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bss->chanspec))) && + bi->SSID_len == bss->SSID_len && + !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) { + + /* do not allow beacon data to update + *the data recd from a probe response + */ + if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) && + (bi->flags & WL_BSS_FLAGS_FROM_BEACON)) + goto exit; + + ESCAN_TRACE(("%s("MACDBG"), i=%d prev: RSSI %d" + " flags 0x%x, new: RSSI %d flags 0x%x\n", + bss->SSID, MAC2STRDBG(bi->BSSID.octet), i, + bss->RSSI, bss->flags, 
bi->RSSI, bi->flags)); + + if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == + (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) { + /* preserve max RSSI if the measurements are + * both on-channel or both off-channel + */ + ESCAN_TRACE(("%s("MACDBG"), same onchan" + ", RSSI: prev %d new %d\n", + bss->SSID, MAC2STRDBG(bi->BSSID.octet), + bss->RSSI, bi->RSSI)); + bi->RSSI = MAX(bss->RSSI, bi->RSSI); + } else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) && + (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) { + /* preserve the on-channel rssi measurement + * if the new measurement is off channel + */ + ESCAN_TRACE(("%s("MACDBG"), prev onchan" + ", RSSI: prev %d new %d\n", + bss->SSID, MAC2STRDBG(bi->BSSID.octet), + bss->RSSI, bi->RSSI)); + bi->RSSI = bss->RSSI; + bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL; + } + if (dtoh32(bss->length) != bi_length) { + u32 prev_len = dtoh32(bss->length); + + ESCAN_TRACE(("bss info replacement" + " is occured(bcast:%d->probresp%d)\n", + bss->ie_length, bi->ie_length)); + ESCAN_TRACE(("%s("MACDBG"), replacement!(%d -> %d)\n", + bss->SSID, MAC2STRDBG(bi->BSSID.octet), + prev_len, bi_length)); + + if (list->buflen - prev_len + bi_length + > ESCAN_BUF_SIZE) { + ESCAN_ERROR(("Buffer is too small: keep the" + " previous result of this AP\n")); + /* Only update RSSI */ + bss->RSSI = bi->RSSI; + bss->flags |= (bi->flags + & WL_BSS_FLAGS_RSSI_ONCHANNEL); + goto exit; + } + + if (i < list->count - 1) { + /* memory copy required by this case only */ + memmove((u8 *)bss + bi_length, + (u8 *)bss + prev_len, + list->buflen - cur_len - prev_len); + } + list->buflen -= prev_len; + list->buflen += bi_length; + } + list->version = dtoh32(bi->version); + memcpy((u8 *)bss, (u8 *)bi, bi_length); + goto exit; + } + cur_len += dtoh32(bss->length); + } + if (bi_length > ESCAN_BUF_SIZE - list->buflen) { +#ifdef ESCAN_BUF_OVERFLOW_MGMT + wl_cfg80211_remove_lowRSSI_info(list, candidate, bi); + if (bi_length > ESCAN_BUF_SIZE - list->buflen) { + ESCAN_TRACE(("RSSI(" 
MACDBG ") is too low(%d) to add Buffer\n", + MAC2STRDBG(bi->BSSID.octet), bi->RSSI)); + goto exit; + } +#else + ESCAN_ERROR(("Buffer is too small: ignoring\n")); + goto exit; +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + } + + if (strlen(bi->SSID) == 0) { // terence: fix for hidden SSID + ESCAN_SCAN(("Skip hidden SSID %pM\n", &bi->BSSID)); + goto exit; + } + + memcpy(&(((char *)list)[list->buflen]), bi, bi_length); + list->version = dtoh32(bi->version); + list->buflen += bi_length; + list->count++; + } + } + else if (status == WLC_E_STATUS_SUCCESS) { + escan->escan_state = ESCAN_STATE_IDLE; + + ESCAN_TRACE(("ESCAN COMPLETED\n")); + escan->bss_list = wl_escan_get_buf(escan); + ESCAN_TRACE(("SCAN COMPLETED: scanned AP count=%d\n", + escan->bss_list->count)); + wl_escan_inform_bss(escan); + wl_notify_escan_complete(escan, false); + + } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) || + (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) || + (status == WLC_E_STATUS_NEWASSOC)) { + /* Handle all cases of scan abort */ + escan->escan_state = ESCAN_STATE_IDLE; + ESCAN_TRACE(("ESCAN ABORT reason: %d\n", status)); + wl_escan_inform_bss(escan); + wl_notify_escan_complete(escan, false); + } else if (status == WLC_E_STATUS_TIMEOUT) { + ESCAN_ERROR(("WLC_E_STATUS_TIMEOUT\n")); + ESCAN_ERROR(("reason[0x%x]\n", e->reason)); + if (e->reason == 0xFFFFFFFF) { + wl_notify_escan_complete(escan, true); + } + } else { + ESCAN_ERROR(("unexpected Escan Event %d : abort\n", status)); + escan->escan_state = ESCAN_STATE_IDLE; + escan->bss_list = wl_escan_get_buf(escan); + ESCAN_TRACE(("SCAN ABORTED(UNEXPECTED): scanned AP count=%d\n", + escan->bss_list->count)); + wl_escan_inform_bss(escan); + wl_notify_escan_complete(escan, false); + } +exit: + mutex_unlock(&escan->usr_sync); + return err; +} + +static int +wl_escan_prep(struct wl_escan_info *escan, wl_uint32_list_t *list, + wl_scan_params_t *params, wlc_ssid_t *ssid) +{ + int err = 0; + 
wl_scan_results_t *results; + s32 offset; + char *ptr; + int i = 0, j = 0; + wlc_ssid_t ssid_tmp; + u32 n_channels = 0; + uint channel; + chanspec_t chanspec; + + results = wl_escan_get_buf(escan); + results->version = 0; + results->count = 0; + results->buflen = WL_SCAN_RESULTS_FIXED_SIZE; + escan->escan_state = ESCAN_STATE_SCANING; + + /* Arm scan timeout timer */ + mod_timer(&escan->scan_timeout, jiffies + msecs_to_jiffies(WL_ESCAN_TIMER_INTERVAL_MS)); + + memcpy(¶ms->bssid, ðer_bcast, ETHER_ADDR_LEN); + params->bss_type = DOT11_BSSTYPE_ANY; + params->scan_type = 0; + params->nprobes = -1; + params->active_time = -1; + params->passive_time = -1; + params->home_time = -1; + params->channel_num = 0; + + params->nprobes = htod32(params->nprobes); + params->active_time = htod32(params->active_time); + params->passive_time = htod32(params->passive_time); + params->home_time = htod32(params->home_time); + + n_channels = dtoh32(list->count); + /* Copy channel array if applicable */ + ESCAN_SCAN(("### List of channelspecs to scan ###\n")); + if (n_channels > 0) { + for (i = 0; i < n_channels; i++) { + channel = dtoh32(list->element[i]); + if (!dhd_conf_match_channel(escan->pub, channel)) + continue; + chanspec = WL_CHANSPEC_BW_20; + if (chanspec == INVCHANSPEC) { + ESCAN_ERROR(("Invalid chanspec! 
Skipping channel\n")); + continue; + } + if (channel <= CH_MAX_2G_CHANNEL) { + chanspec |= WL_CHANSPEC_BAND_2G; + } else { + chanspec |= WL_CHANSPEC_BAND_5G; + } + params->channel_list[j] = channel; + params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK; + params->channel_list[j] |= chanspec; + ESCAN_SCAN(("Chan : %d, Channel spec: %x \n", + channel, params->channel_list[j])); + params->channel_list[j] = wl_chspec_host_to_driver(params->channel_list[j]); + j++; + } + } else { + ESCAN_SCAN(("Scanning all channels\n")); + } + + if (ssid && ssid->SSID_len) { + /* Copy ssid array if applicable */ + ESCAN_SCAN(("### List of SSIDs to scan ###\n")); + offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16); + offset = roundup(offset, sizeof(u32)); + ptr = (char*)params + offset; + + ESCAN_SCAN(("0: Broadcast scan\n")); + memset(&ssid_tmp, 0, sizeof(wlc_ssid_t)); + ssid_tmp.SSID_len = 0; + memcpy(ptr, &ssid_tmp, sizeof(wlc_ssid_t)); + ptr += sizeof(wlc_ssid_t); + + memset(&ssid_tmp, 0, sizeof(wlc_ssid_t)); + ssid_tmp.SSID_len = ssid->SSID_len; + memcpy(ssid_tmp.SSID, ssid->SSID, ssid->SSID_len); + memcpy(ptr, &ssid_tmp, sizeof(wlc_ssid_t)); + ptr += sizeof(wlc_ssid_t); + ESCAN_SCAN(("1: scan for %s size=%d\n", ssid_tmp.SSID, ssid_tmp.SSID_len)); + /* Adding mask to channel numbers */ + params->channel_num = + htod32((2 << WL_SCAN_PARAMS_NSSID_SHIFT) | + (n_channels & WL_SCAN_PARAMS_COUNT_MASK)); + } + else { + ESCAN_SCAN(("Broadcast scan\n")); + } + + return err; +} + +static int wl_escan_reset(void) { + struct wl_escan_info *escan = g_escan; + + if (timer_pending(&escan->scan_timeout)) + del_timer_sync(&escan->scan_timeout); + escan->escan_state = ESCAN_STATE_IDLE; + + return 0; +} + +static void wl_escan_timeout(unsigned long data) +{ + wl_event_msg_t msg; + struct wl_escan_info *escan = (struct wl_escan_info *)data; + struct wl_scan_results *bss_list; + struct wl_bss_info *bi = NULL; + s32 i; + u32 channel; + + bss_list = wl_escan_get_buf(escan); + if 
(!bss_list) { + ESCAN_ERROR(("bss_list is null. Didn't receive any partial scan results\n")); + } else { + ESCAN_ERROR(("%s: scanned AP count (%d)\n", __FUNCTION__, bss_list->count)); + bi = next_bss(bss_list, bi); + for_each_bss(bss_list, bi, i) { + channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec)); + ESCAN_ERROR(("SSID :%s Channel :%d\n", bi->SSID, channel)); + } + } + + if (!escan->dev) { + ESCAN_ERROR(("No dev present\n")); + return; + } + + bzero(&msg, sizeof(wl_event_msg_t)); + ESCAN_ERROR(("timer expired\n")); + + msg.event_type = hton32(WLC_E_ESCAN_RESULT); + msg.status = hton32(WLC_E_STATUS_TIMEOUT); + msg.reason = 0xFFFFFFFF; + wl_escan_event(escan->dev, &msg, NULL); + + // terence 20130729: workaround to fix out of memory in firmware +// if (dhd_conf_get_chip(dhd_get_pub(dev)) == BCM43362_CHIP_ID) { +// ESCAN_ERROR(("Send hang event\n")); +// net_os_send_hang_message(dev); +// } +} + +int +wl_escan_set_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + s32 err = BCME_OK; + s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params)); + wl_escan_params_t *params = NULL; + scb_val_t scbval; + static int cnt = 0; + struct wl_escan_info *escan = NULL; + wlc_ssid_t ssid; + u32 n_channels = 0; + wl_uint32_list_t *list; + u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)]; + s32 val = 0; + + ESCAN_TRACE(("Enter \n")); + + escan = g_escan; + if (!escan) { + ESCAN_ERROR(("device is not ready\n")); \ + return -EIO; + } + mutex_lock(&escan->usr_sync); + + if (!escan->ioctl_ver) { + val = 1; + if ((err = wldev_ioctl(dev, WLC_GET_VERSION, &val, sizeof(int), false) < 0)) { + ANDROID_ERROR(("WLC_GET_VERSION failed, err=%d\n", err)); + goto exit; + } + val = dtoh32(val); + if (val != WLC_IOCTL_VERSION && val != 1) { + ANDROID_ERROR(("Version mismatch, please upgrade. 
Got %d, expected %d or 1\n", + val, WLC_IOCTL_VERSION)); + goto exit; + } + escan->ioctl_ver = val; + printf("%s: ioctl_ver=%d\n", __FUNCTION__, val); + } + + /* default Broadcast scan */ + memset(&ssid, 0, sizeof(ssid)); + +#if WIRELESS_EXT > 17 + /* check for given essid */ + if (wrqu->data.length == sizeof(struct iw_scan_req)) { + if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { + struct iw_scan_req *req = (struct iw_scan_req *)extra; + ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len); + memcpy(ssid.SSID, req->essid, ssid.SSID_len); + ssid.SSID_len = htod32(ssid.SSID_len); + } + } +#endif + if (escan->escan_state == ESCAN_STATE_SCANING) { + ESCAN_ERROR(("Scanning already\n")); + goto exit; + } + + /* if scan request is not empty parse scan request paramters */ + memset(valid_chan_list, 0, sizeof(valid_chan_list)); + list = (wl_uint32_list_t *)(void *) valid_chan_list; + list->count = htod32(WL_NUMCHANNELS); + err = wldev_ioctl(escan->dev, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), false); + if (err != 0) { + ESCAN_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, err)); + goto exit; + } + n_channels = dtoh32(list->count); + /* Allocate space for populating ssids in wl_escan_params_t struct */ + if (dtoh32(list->count) % 2) + /* If n_channels is odd, add a padd of u16 */ + params_size += sizeof(u16) * (n_channels + 1); + else + params_size += sizeof(u16) * n_channels; + if (ssid.SSID_len) { + params_size += sizeof(struct wlc_ssid) * 2; + } + + params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL); + if (params == NULL) { + err = -ENOMEM; + goto exit; + } + wl_escan_prep(escan, list, ¶ms->params, &ssid); + + params->version = htod32(ESCAN_REQ_VERSION); + params->action = htod16(WL_SCAN_ACTION_START); + wl_escan_set_sync_id(params->sync_id); + if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) { + ESCAN_ERROR(("ioctl buffer length not sufficient\n")); + kfree(params); + err = -ENOMEM; + goto exit; + } + 
params->params.scan_type = DOT11_SCANTYPE_ACTIVE; + ESCAN_TRACE(("Passive scan_type %d\n", params->params.scan_type)); + + err = wldev_iovar_setbuf(dev, "escan", params, params_size, + escan->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + if (unlikely(err)) { + if (err == BCME_EPERM) + /* Scan Not permitted at this point of time */ + ESCAN_TRACE(("Escan not permitted at this time (%d)\n", err)); + else + ESCAN_ERROR(("Escan set error (%d)\n", err)); + wl_escan_reset(); + } + kfree(params); + +exit: + if (unlikely(err)) { + /* Don't print Error incase of Scan suppress */ + if ((err == BCME_EPERM)) + ESCAN_TRACE(("Escan failed: Scan Suppressed \n")); + else { + cnt++; + ESCAN_ERROR(("error (%d), cnt=%d\n", err, cnt)); + // terence 20140111: send disassoc to firmware + if (cnt >= 4) { + memset(&scbval, 0, sizeof(scb_val_t)); + wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), true); + ESCAN_ERROR(("Send disassoc to break the busy dev=%p\n", dev)); + cnt = 0; + } + } + } else { + cnt = 0; + } + mutex_unlock(&escan->usr_sync); + return err; +} + +int +wl_escan_get_scan( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + s32 err = BCME_OK; + struct iw_event iwe; + int i, j; + char *event = extra, *end = extra + dwrq->length, *value; + int16 rssi; + int channel; + wl_bss_info_t *bi = NULL; + struct wl_escan_info *escan = g_escan; + struct wl_scan_results *bss_list; +#if defined(BSSCACHE) + wl_bss_cache_t *node; +#endif + + ESCAN_TRACE(("%s: %s SIOCGIWSCAN, len=%d\n", __FUNCTION__, dev->name, dwrq->length)); + + if (!extra) + return -EINVAL; + + mutex_lock(&escan->usr_sync); + + /* Check for scan in progress */ + if (escan->escan_state == ESCAN_STATE_SCANING) { + ESCAN_TRACE(("%s: SIOCGIWSCAN GET still scanning\n", dev->name)); + err = -EAGAIN; + goto exit; + } + +#if defined(BSSCACHE) + bss_list = &g_bss_cache_ctrl.m_cache_head->results; + node = g_bss_cache_ctrl.m_cache_head; + for (i=0; node && ibss_list; + bi 
= next_bss(bss_list, bi); + for_each_bss(bss_list, bi, i) +#endif + { +#if defined(BSSCACHE) + bi = node->results.bss_info; +#endif + /* overflow check cover fields before wpa IEs */ + if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN + + IW_EV_QUAL_LEN >= end) { + err = -E2BIG; + goto exit; + } + +#if defined(RSSIAVG) + rssi = wl_get_avg_rssi(&g_rssi_cache_ctrl, &bi->BSSID); + if (rssi == RSSI_MINVAL) + rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL); +#else + // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS + rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL); +#endif + channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec)); + ESCAN_SCAN(("%s: BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n", + __FUNCTION__, MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID)); + + /* First entry must be the BSSID */ + iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN); + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN); + + /* SSID */ + iwe.u.data.length = dtoh32(bi->SSID_len); + iwe.cmd = SIOCGIWESSID; + iwe.u.data.flags = 1; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID); + + /* Mode */ + if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) { + iwe.cmd = SIOCGIWMODE; + if (dtoh16(bi->capability) & DOT11_CAP_ESS) + iwe.u.mode = IW_MODE_INFRA; + else + iwe.u.mode = IW_MODE_ADHOC; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN); + } + + /* Channel */ + iwe.cmd = SIOCGIWFREQ; +#if 1 + iwe.u.freq.m = wf_channel2mhz(channel, channel <= CH_MAX_2G_CHANNEL ? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); +#else + iwe.u.freq.m = wf_channel2mhz(bi->n_cap ? + bi->ctl_ch : CHSPEC_CHANNEL(bi->chanspec), + CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); +#endif + iwe.u.freq.e = 6; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN); + + /* Channel quality */ + iwe.cmd = IWEVQUAL; + iwe.u.qual.qual = rssi_to_qual(rssi); + iwe.u.qual.level = 0x100 + rssi; + iwe.u.qual.noise = 0x100 + bi->phy_noise; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN); + + wl_iw_handle_scanresults_ies(&event, end, info, bi); + + /* Encryption */ + iwe.cmd = SIOCGIWENCODE; + if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY) + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe.u.data.flags = IW_ENCODE_DISABLED; + iwe.u.data.length = 0; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event); + + /* Rates */ + if (bi->rateset.count <= sizeof(bi->rateset.rates)) { + if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end) { + err = -E2BIG; + goto exit; + } + value = event + IW_EV_LCP_LEN; + iwe.cmd = SIOCGIWRATE; + /* Those two flags are ignored... */ + iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; + for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) { + iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000; + value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe, + IW_EV_PARAM_LEN); + } + event = value; + } +#if defined(BSSCACHE) + node = node->next; +#endif + } + + dwrq->length = event - extra; + dwrq->flags = 0; /* todo */ + ESCAN_SCAN(("scanned AP count (%d)\n", i)); +exit: + mutex_unlock(&escan->usr_sync); + return err; +} + +static s32 wl_create_event_handler(struct wl_escan_info *escan) +{ + int ret = 0; + ESCAN_TRACE(("Enter \n")); + + /* Do not use DHD in cfg driver */ + escan->event_tsk.thr_pid = -1; + + PROC_START(wl_escan_event_handler, escan, &escan->event_tsk, 0, "wl_escan_handler"); + if (escan->event_tsk.thr_pid < 0) + ret = -ENOMEM; + return ret; +} + +static void wl_destroy_event_handler(struct wl_escan_info *escan) +{ + if (escan->event_tsk.thr_pid >= 0) + PROC_STOP(&escan->event_tsk); 
+} + +static void wl_escan_deinit(void) +{ + struct wl_escan_info *escan = g_escan; + + printf("%s: Enter\n", __FUNCTION__); + if (!escan) { + ESCAN_ERROR(("device is not ready\n")); \ + return; + } + wl_destroy_event_handler(escan); + wl_flush_eq(escan); + del_timer_sync(&escan->scan_timeout); + +#if defined(RSSIAVG) + wl_free_rssi_cache(&g_rssi_cache_ctrl); +#endif +#if defined(BSSCACHE) + wl_free_bss_cache(&g_bss_cache_ctrl); +#endif +} + +static s32 wl_escan_init(void) +{ + struct wl_escan_info *escan = g_escan; + int err = 0; + + printf("%s: Enter\n", __FUNCTION__); + if (!escan) { + ESCAN_ERROR(("device is not ready\n")); \ + return -EIO; + } + + /* Init scan_timeout timer */ + init_timer(&escan->scan_timeout); + escan->scan_timeout.data = (unsigned long) escan; + escan->scan_timeout.function = wl_escan_timeout; + + if (wl_create_event_handler(escan)) { + err = -ENOMEM; + goto err; + } + memset(escan->evt_handler, 0, sizeof(escan->evt_handler)); + + escan->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler; + escan->escan_state = ESCAN_STATE_IDLE; + + mutex_init(&escan->usr_sync); + + return 0; +err: + wl_escan_deinit(); + return err; +} + +void wl_escan_detach(void) +{ + struct wl_escan_info *escan = g_escan; + + printf("%s: Enter\n", __FUNCTION__); + + if (!escan) { + ESCAN_ERROR(("device is not ready\n")); \ + return; + } + + wl_escan_deinit(); + + if (escan->escan_ioctl_buf) { + kfree(escan->escan_ioctl_buf); + escan->escan_ioctl_buf = NULL; + } + + kfree(escan); + g_escan = NULL; +} + +int +wl_escan_attach(struct net_device *dev, void * dhdp) +{ + struct wl_escan_info *escan = NULL; + + printf("%s: Enter\n", __FUNCTION__); + + if (!dev) + return 0; + + escan = kmalloc(sizeof(struct wl_escan_info), GFP_KERNEL); + if (!escan) + return -ENOMEM; + memset(escan, 0, sizeof(struct wl_escan_info)); + + /* we only care about main interface so save a global here */ + g_escan = escan; + escan->dev = dev; + escan->pub = dhdp; + escan->escan_state = ESCAN_STATE_IDLE; 
+ + escan->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL); + if (unlikely(!escan->escan_ioctl_buf)) { + ESCAN_ERROR(("Ioctl buf alloc failed\n")); + goto err ; + } + wl_init_eq(escan); +#ifdef WL_ESCAN + wl_escan_init(); +#endif + + return 0; +err: + wl_escan_detach(); + return -ENOMEM; +} + +#endif /* WL_ESCAN */ + diff --git a/drivers/amlogic/wifi/bcmdhd/wl_escan.h b/drivers/amlogic/wifi/bcmdhd/wl_escan.h new file mode 100644 index 0000000000000..6be090acae8ec --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/wl_escan.h @@ -0,0 +1,75 @@ + +#ifndef _wl_escan_ +#define _wl_escan_ + +#include +#include +#include +#include +#include + + +#ifdef DHD_MAX_IFS +#define WL_MAX_IFS DHD_MAX_IFS +#else +#define WL_MAX_IFS 16 +#endif + +#define ESCAN_BUF_SIZE (64 * 1024) + +#define WL_ESCAN_TIMER_INTERVAL_MS 10000 /* Scan timeout */ + +/* event queue for cfg80211 main event */ +struct escan_event_q { + struct list_head eq_list; + u32 etype; + wl_event_msg_t emsg; + s8 edata[1]; +}; + +/* donlge escan state */ +enum escan_state { + ESCAN_STATE_IDLE, + ESCAN_STATE_SCANING +}; + +struct wl_escan_info; + +typedef s32(*ESCAN_EVENT_HANDLER) (struct wl_escan_info *escan, + const wl_event_msg_t *e, void *data); + +typedef struct wl_escan_info { + struct net_device *dev; + dhd_pub_t *pub; + struct timer_list scan_timeout; /* Timer for catch scan event timeout */ + int escan_state; + int ioctl_ver; + + char ioctlbuf[WLC_IOCTL_SMLEN]; + u8 escan_buf[ESCAN_BUF_SIZE]; + struct wl_scan_results *bss_list; + struct wl_scan_results *scan_results; + struct ether_addr disconnected_bssid; + u8 *escan_ioctl_buf; + spinlock_t eq_lock; /* for event queue synchronization */ + struct list_head eq_list; /* used for event queue */ + tsk_ctl_t event_tsk; /* task of main event handler thread */ + ESCAN_EVENT_HANDLER evt_handler[WLC_E_LAST]; + struct mutex usr_sync; /* maily for up/down synchronization */ +} wl_escan_info_t; + +void wl_escan_event(struct net_device *ndev, const 
wl_event_msg_t * e, void *data); + +int wl_escan_set_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +); +int wl_escan_get_scan(struct net_device *dev, struct iw_request_info *info, + struct iw_point *dwrq, char *extra); +int wl_escan_attach(struct net_device *dev, void * dhdp); +void wl_escan_detach(void); + +#endif /* _wl_escan_ */ + diff --git a/drivers/net/wireless/bcmdhd/wl_iw.c b/drivers/amlogic/wifi/bcmdhd/wl_iw.c similarity index 92% rename from drivers/net/wireless/bcmdhd/wl_iw.c rename to drivers/amlogic/wifi/bcmdhd/wl_iw.c index a56579166c5b9..389d8f8f0510b 100644 --- a/drivers/net/wireless/bcmdhd/wl_iw.c +++ b/drivers/amlogic/wifi/bcmdhd/wl_iw.c @@ -1,9 +1,30 @@ /* * Linux Wireless Extensions support * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: wl_iw.c 467328 2014-04-03 01:23:40Z $ + * + * <> + * + * $Id: wl_iw.c 591286 2015-10-07 11:59:26Z $ */ #if defined(USE_IW) @@ -20,10 +41,16 @@ #include #include -typedef const struct si_pub si_t; #include +#ifdef WL_NAN +#include +#endif #include +#ifdef WL_ESCAN +#include +#endif +typedef const struct si_pub si_t; /* message levels */ #define WL_ERROR_LEVEL 0x0001 @@ -50,32 +77,6 @@ uint iw_msg_level = WL_ERROR_LEVEL; #include -#ifdef BCMWAPI_WPI -/* these items should evetually go into wireless.h of the linux system headfile dir */ -#ifndef IW_ENCODE_ALG_SM4 -#define IW_ENCODE_ALG_SM4 0x20 -#endif - -#ifndef IW_AUTH_WAPI_ENABLED -#define IW_AUTH_WAPI_ENABLED 0x20 -#endif - -#ifndef IW_AUTH_WAPI_VERSION_1 -#define IW_AUTH_WAPI_VERSION_1 0x00000008 -#endif - -#ifndef IW_AUTH_CIPHER_SMS4 -#define IW_AUTH_CIPHER_SMS4 0x00000020 -#endif - -#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK -#define IW_AUTH_KEY_MGMT_WAPI_PSK 4 -#endif - -#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT -#define IW_AUTH_KEY_MGMT_WAPI_CERT 8 -#endif -#endif /* BCMWAPI_WPI */ /* Broadcom extensions to WEXT, linux upstream has obsoleted WEXT */ #ifndef IW_AUTH_KEY_MGMT_FT_802_1X @@ -131,7 +132,7 @@ extern int dhd_wait_pend8021x(struct net_device *dev); #define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST) #endif /* WIRELESS_EXT < 19 */ - +#ifndef WL_ESCAN #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) #define DAEMONIZE(a) do { \ allow_signal(SIGKILL); \ @@ -185,6 +186,7 @@ iscan_info_t *g_iscan = NULL; static void wl_iw_timerfunc(ulong data); static void wl_iw_set_event_mask(struct net_device *dev); static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action); +#endif /* WL_ESCAN */ /* priv_link becomes netdev->priv and is the link between netdev and wlif struct */ typedef struct priv_link { @@ -245,7 +247,8 @@ dev_wlc_ioctl( ioc.buf = arg; ioc.len = len; - strcpy(ifr.ifr_name, dev->name); + strncpy(ifr.ifr_name, dev->name, sizeof(ifr.ifr_name)); + ifr.ifr_name[sizeof(ifr.ifr_name) - 1] = '\0'; 
ifr.ifr_data = (caddr_t) &ioc; fs = get_fs(); @@ -281,6 +284,7 @@ dev_wlc_intvar_set( return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len)); } +#ifndef WL_ESCAN static int dev_iw_iovar_setbuf( struct net_device *dev, @@ -316,6 +320,7 @@ dev_iw_iovar_getbuf( return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen)); } +#endif #if WIRELESS_EXT > 17 static int @@ -476,7 +481,8 @@ wl_iw_send_priv_event( if (strlen(flag) > sizeof(extra)) return -1; - strcpy(extra, flag); + strncpy(extra, flag, sizeof(extra)); + extra[sizeof(extra) - 1] = '\0'; wrqu.data.length = strlen(extra); wireless_send_event(dev, cmd, &wrqu, extra); WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra)); @@ -538,27 +544,27 @@ wl_iw_get_name( band[0] = dtoh32(band[0]); switch (phytype) { case WLC_PHY_TYPE_A: - strcpy(cap, "a"); + strncpy(cap, "a", sizeof(cap)); break; case WLC_PHY_TYPE_B: - strcpy(cap, "b"); + strncpy(cap, "b", sizeof(cap)); break; - case WLC_PHY_TYPE_LP: case WLC_PHY_TYPE_G: if (band[0] >= 2) - strcpy(cap, "abg"); + strncpy(cap, "abg", sizeof(cap)); else - strcpy(cap, "bg"); + strncpy(cap, "bg", sizeof(cap)); break; case WLC_PHY_TYPE_N: if (band[0] >= 2) - strcpy(cap, "abgn"); + strncpy(cap, "abgn", sizeof(cap)); else - strcpy(cap, "bgn"); + strncpy(cap, "bgn", sizeof(cap)); break; } done: - snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap); + (void)snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap); + return 0; } @@ -592,15 +598,16 @@ wl_iw_set_freq( fwrq->m /= 10; } /* handle 4.9GHz frequencies as Japan 4 GHz based channelization */ - if (fwrq->m > 4000 && fwrq->m < 5000) + if (fwrq->m > 4000 && fwrq->m < 5000) { sf = WF_CHAN_FACTOR_4_G; /* start factor for 4 GHz */ + } - chan = wf_mhz2channel(fwrq->m, sf); + chan = wf_mhz2channel(fwrq->m, sf); } WL_ERROR(("%s: chan=%d\n", __FUNCTION__, chan)); chan = htod32(chan); if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan)))) { - WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error)); + WL_ERROR(("%s: 
WLC_SET_CHANNEL failed (%d).\n", __FUNCTION__, error)); return error; } @@ -772,8 +779,8 @@ wl_iw_get_range( return error; if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype)))) return error; - if (nmode == 1 && ((phytype == WLC_PHY_TYPE_SSN) || (phytype == WLC_PHY_TYPE_LCN) || - (phytype == WLC_PHY_TYPE_LCN40))) { + if (nmode == 1 && (((phytype == WLC_PHY_TYPE_LCN) || + (phytype == WLC_PHY_TYPE_LCN40)))) { if ((error = dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap))) return error; if ((error = dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx))) @@ -902,6 +909,7 @@ wl_iw_get_range( return 0; } +#ifndef WL_ESCAN static int rssi_to_qual(int rssi) { @@ -918,6 +926,7 @@ rssi_to_qual(int rssi) else return 5; } +#endif /* WL_ESCAN */ static int wl_iw_set_spy( @@ -1074,6 +1083,7 @@ wl_iw_mlme( } #endif /* WIRELESS_EXT > 17 */ +#ifndef WL_ESCAN static int wl_iw_get_aplist( struct net_device *dev, @@ -1221,8 +1231,10 @@ wl_iw_iscan_get_aplist( return 0; } +#endif #if WIRELESS_EXT > 13 +#ifndef WL_ESCAN static int wl_iw_set_scan( struct net_device *dev, @@ -1304,6 +1316,7 @@ wl_iw_iscan_set_scan( return 0; } +#endif /* WL_ESCAN */ #if WIRELESS_EXT > 17 static bool @@ -1351,60 +1364,23 @@ ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len) } #endif /* WIRELESS_EXT > 17 */ -#ifdef BCMWAPI_WPI -static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, - size_t len, int uppercase) -{ - size_t i; - char *pos = buf, *end = buf + buf_size; - int ret; - if (buf_size == 0) - return 0; - for (i = 0; i < len; i++) { - ret = snprintf(pos, end - pos, uppercase ? 
"%02X" : "%02x", - data[i]); - if (ret < 0 || ret >= end - pos) { - end[-1] = '\0'; - return pos - buf; - } - pos += ret; - } - end[-1] = '\0'; - return pos - buf; -} -/** - * wpa_snprintf_hex - Print data as a hex string into a buffer - * @buf: Memory area to use as the output buffer - * @buf_size: Maximum buffer size in bytes (should be at least 2 * len + 1) - * @data: Data to be printed - * @len: Length of data in bytes - * Returns: Number of bytes written - */ -static int -wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len) -{ - return _wpa_snprintf_hex(buf, buf_size, data, len, 0); -} -#endif /* BCMWAPI_WPI */ - -static int +#ifndef WL_ESCAN +static +#endif +int wl_iw_handle_scanresults_ies(char **event_p, char *end, struct iw_request_info *info, wl_bss_info_t *bi) { #if WIRELESS_EXT > 17 struct iw_event iwe; char *event; -#ifdef BCMWAPI_WPI - char *buf; - int custom_event_len; -#endif event = *event_p; if (bi->ie_length) { /* look for wpa/rsn ies in the ie list... 
*/ bcm_tlv_t *ie; - uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); + uint8 *ptr = ((uint8 *)bi) + bi->ie_offset; int ptr_len = bi->ie_length; /* OSEN IE */ @@ -1416,21 +1392,21 @@ wl_iw_handle_scanresults_ies(char **event_p, char *end, iwe.u.data.length = ie->len + 2; event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); } - ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); + ptr = ((uint8 *)bi) + bi->ie_offset; if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) { iwe.cmd = IWEVGENIE; iwe.u.data.length = ie->len + 2; event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); } - ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); + ptr = ((uint8 *)bi) + bi->ie_offset; if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_MDIE_ID))) { iwe.cmd = IWEVGENIE; iwe.u.data.length = ie->len + 2; event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); } - ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); + ptr = ((uint8 *)bi) + bi->ie_offset; while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) { /* look for WPS IE */ @@ -1442,7 +1418,7 @@ wl_iw_handle_scanresults_ies(char **event_p, char *end, } } - ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); + ptr = ((uint8 *)bi) + bi->ie_offset; ptr_len = bi->ie_length; while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) { if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) { @@ -1453,38 +1429,6 @@ wl_iw_handle_scanresults_ies(char **event_p, char *end, } } -#ifdef BCMWAPI_WPI - ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); - ptr_len = bi->ie_length; - - while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) { - WL_TRACE(("%s: found a WAPI IE...\n", __FUNCTION__)); -#ifdef WAPI_IE_USE_GENIE - iwe.cmd = IWEVGENIE; - iwe.u.data.length = ie->len + 2; - event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); -#else /* using CUSTOM event */ - iwe.cmd = IWEVCUSTOM; - custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2); - iwe.u.data.length = custom_event_len; - - buf = 
kmalloc(custom_event_len+1, GFP_KERNEL); - if (buf == NULL) - { - WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len)); - break; - } - - memcpy(buf, "wapi_ie=", 8); - wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1); - wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1); - wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len); - event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf); - kfree(buf); -#endif /* WAPI_IE_USE_GENIE */ - break; - } -#endif /* BCMWAPI_WPI */ *event_p = event; } @@ -1492,6 +1436,7 @@ wl_iw_handle_scanresults_ies(char **event_p, char *end, return 0; } +#ifndef WL_ESCAN static int wl_iw_get_scan( struct net_device *dev, @@ -1546,7 +1491,7 @@ wl_iw_get_scan( // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL); channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch; - WL_SCAN(("%s: BSSID="MACSTR", channel=%d, RSSI=%d, merge broadcast SSID=\"%s\"\n", + WL_SCAN(("%s: BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n", __FUNCTION__, MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID)); /* First entry must be the BSSID */ @@ -1573,7 +1518,8 @@ wl_iw_get_scan( /* Channel */ iwe.cmd = SIOCGIWFREQ; - iwe.u.freq.m = wf_channel2mhz(channel, + + iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec), (CHSPEC_IS2G(bi->chanspec)) ? WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); iwe.u.freq.e = 6; @@ -1586,7 +1532,6 @@ wl_iw_get_scan( iwe.u.qual.noise = 0x100 + bi->phy_noise; event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN); - /* WPA, WPA2, WPS, WAPI IEs */ wl_iw_handle_scanresults_ies(&event, end, info, bi); /* Encryption */ @@ -1680,7 +1625,7 @@ wl_iw_iscan_get_scan( // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL); channel = (bi->ctl_ch == 0) ? 
CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch; - WL_SCAN(("%s: BSSID="MACSTR", channel=%d, RSSI=%d, merge broadcast SSID=\"%s\"\n", + WL_SCAN(("%s: BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n", __FUNCTION__, MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID)); /* First entry must be the BSSID */ @@ -1707,7 +1652,7 @@ wl_iw_iscan_get_scan( /* Channel */ iwe.cmd = SIOCGIWFREQ; - iwe.u.freq.m = wf_channel2mhz(channel, + iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec), (CHSPEC_IS2G(bi->chanspec)) ? WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); iwe.u.freq.e = 6; @@ -1720,7 +1665,6 @@ wl_iw_iscan_get_scan( iwe.u.qual.noise = 0x100 + bi->phy_noise; event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN); - /* WPA, WPA2, WPS, WAPI IEs */ wl_iw_handle_scanresults_ies(&event, end, info, bi); /* Encryption */ @@ -1754,10 +1698,11 @@ wl_iw_iscan_get_scan( dwrq->length = event - extra; dwrq->flags = 0; /* todo */ + WL_SCAN(("%s: apcnt=%d\n", __FUNCTION__, apcnt)); return 0; } - +#endif /* WL_ESCAN */ #endif /* WIRELESS_EXT > 13 */ @@ -2421,21 +2366,6 @@ wl_iw_set_wpaie( char *extra ) { -#if defined(BCMWAPI_WPI) - uchar buf[WLC_IOCTL_SMLEN] = {0}; - uchar *p = buf; - int wapi_ie_size; - - WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name)); - - if (extra[0] == DOT11_MNG_WAPI_ID) - { - wapi_ie_size = iwp->length; - memcpy(p, extra, iwp->length); - dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size); - } - else -#endif dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length); return 0; @@ -2520,7 +2450,7 @@ wl_iw_set_encodeext( /* copy the raw hex key to the appropriate format */ for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) { - sprintf(charptr, "%02x", iwe->key[j]); + (void)snprintf(charptr, 3, "%02x", iwe->key[j]); charptr += 2; } len = strlen(keystring); @@ -2581,14 +2511,6 @@ wl_iw_set_encodeext( case IW_ENCODE_ALG_CCMP: key.algo = CRYPTO_ALGO_AES_CCM; break; -#ifdef BCMWAPI_WPI - case IW_ENCODE_ALG_SM4: - key.algo = CRYPTO_ALGO_SMS4; - if (iwe->ext_flags & 
IW_ENCODE_EXT_GROUP_KEY) { - key.flags &= ~WL_PRIMARY_KEY; - } - break; -#endif default: break; } @@ -2736,10 +2658,6 @@ wl_iw_set_wpaauth( val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED; else if (paramval & IW_AUTH_WPA_VERSION_WPA2) val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED; -#ifdef BCMWAPI_WPI - else if (paramval & IW_AUTH_WAPI_VERSION_1) - val = WAPI_AUTH_UNSPECIFIED; -#endif WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val)); if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) return error; @@ -2770,11 +2688,6 @@ wl_iw_set_wpaauth( val |= TKIP_ENABLED; if (cipher_combined & IW_AUTH_CIPHER_CCMP) val |= AES_ENABLED; -#ifdef BCMWAPI_WPI - val &= ~SMS4_ENABLED; - if (cipher_combined & IW_AUTH_CIPHER_SMS4) - val |= SMS4_ENABLED; -#endif if (iw->privacy_invoked && !val) { WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming " @@ -2842,10 +2755,6 @@ wl_iw_set_wpaauth( if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK)) val |= WPA2_AUTH_FT; } -#ifdef BCMWAPI_WPI - if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT)) - val = WAPI_AUTH_UNSPECIFIED; -#endif WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val)); if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) return error; @@ -2928,29 +2837,6 @@ wl_iw_set_wpaauth( #endif /* WIRELESS_EXT > 17 */ -#ifdef BCMWAPI_WPI - - case IW_AUTH_WAPI_ENABLED: - if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) - return error; - if (paramval) { - val |= SMS4_ENABLED; - if ((error = dev_wlc_intvar_set(dev, "wsec", val))) { - WL_ERROR(("%s: setting wsec to 0x%0x returned error %d\n", - __FUNCTION__, val, error)); - return error; - } - if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WAPI_AUTH_UNSPECIFIED))) { - WL_ERROR(("%s: setting wpa_auth(%d) returned %d\n", - __FUNCTION__, WAPI_AUTH_UNSPECIFIED, - error)); - return error; - } - } - - break; - -#endif /* BCMWAPI_WPI */ default: break; @@ -3085,10 +2971,19 @@ static const 
iw_handler wl_iw_handler[] = #else (iw_handler) NULL, /* -- hole -- */ #endif +#ifdef WL_ESCAN + (iw_handler) NULL, /* SIOCGIWAPLIST */ +#else (iw_handler) wl_iw_iscan_get_aplist, /* SIOCGIWAPLIST */ +#endif #if WIRELESS_EXT > 13 +#ifdef WL_ESCAN + (iw_handler) wl_escan_set_scan, /* SIOCSIWSCAN */ + (iw_handler) wl_escan_get_scan, /* SIOCGIWSCAN */ +#else (iw_handler) wl_iw_iscan_set_scan, /* SIOCSIWSCAN */ (iw_handler) wl_iw_iscan_get_scan, /* SIOCGIWSCAN */ +#endif #else /* WIRELESS_EXT > 13 */ (iw_handler) NULL, /* SIOCSIWSCAN */ (iw_handler) NULL, /* SIOCGIWSCAN */ @@ -3170,7 +3065,7 @@ const struct iw_handler_def wl_iw_handler_def = .num_standard = ARRAYSIZE(wl_iw_handler), .num_private = ARRAY_SIZE(wl_iw_priv_handler), .num_private_args = ARRAY_SIZE(wl_iw_priv_args), - .standard = (iw_handler *) wl_iw_handler, + .standard = (const iw_handler *) wl_iw_handler, .private = wl_iw_priv_handler, .private_args = wl_iw_priv_args, #if WIRELESS_EXT >= 19 @@ -3227,9 +3122,11 @@ wl_iw_ioctl( #if WIRELESS_EXT > 13 case SIOCGIWSCAN: +#ifndef WL_ESCAN if (g_iscan) max_tokens = wrq->u.data.length; else +#endif max_tokens = IW_SCAN_MAX_DATA; break; #endif /* WIRELESS_EXT > 13 */ @@ -3350,8 +3247,7 @@ wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason, /* If found, generate a connection failure string and return TRUE */ if (cause) { memset(stringBuf, 0, buflen); - snprintf(stringBuf, buflen, "%s %s %02d %02d", - name, cause, status, reason); + (void)snprintf(stringBuf, buflen, "%s %s %02d %02d", name, cause, status, reason); WL_TRACE(("Connection status: %s\n", stringBuf)); return TRUE; } else { @@ -3421,7 +3317,6 @@ wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) break; case WLC_E_LINK: - case WLC_E_NDIS_LINK: cmd = SIOCGIWAP; wrqu.data.length = strlen(extra); if (!(flags & WLC_EVENT_MSG_LINK)) { @@ -3511,6 +3406,13 @@ wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) } #endif /* WIRELESS_EXT > 17 */ +#ifdef WL_ESCAN + 
case WLC_E_ESCAN_RESULT: + WL_TRACE(("event WLC_E_ESCAN_RESULT\n")); + wl_escan_event(dev, e, data); + break; +#else + case WLC_E_SCAN_COMPLETE: #if WIRELESS_EXT > 14 cmd = SIOCGIWSCAN; @@ -3522,6 +3424,7 @@ wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) (g_iscan->iscan_state != ISCAN_STATE_IDLE)) up(&g_iscan->sysioc_sem); break; +#endif default: /* Cannot translate event */ @@ -3529,11 +3432,13 @@ wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) } if (cmd) { +#ifndef WL_ESCAN if (cmd == SIOCGIWSCAN) { if ((!g_iscan) || (g_iscan->sysioc_pid < 0)) { wireless_send_event(dev, cmd, &wrqu, NULL); }; } else +#endif wireless_send_event(dev, cmd, &wrqu, extra); } @@ -3552,13 +3457,79 @@ wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) #endif /* WIRELESS_EXT > 13 */ } +#ifdef WL_NAN +static int wl_iw_get_wireless_stats_cbfn(void *ctx, uint8 *data, uint16 type, uint16 len) +{ + struct iw_statistics *wstats = ctx; + int res = BCME_OK; + + switch (type) { + case WL_CNT_XTLV_WLC: { + wl_cnt_wlc_t *cnt = (wl_cnt_wlc_t *)data; + if (len > sizeof(wl_cnt_wlc_t)) { + printf("counter structure length invalid! 
%d > %d\n", + len, (int)sizeof(wl_cnt_wlc_t)); + } + wstats->discard.nwid = 0; + wstats->discard.code = dtoh32(cnt->rxundec); + wstats->discard.fragment = dtoh32(cnt->rxfragerr); + wstats->discard.retries = dtoh32(cnt->txfail); + wstats->discard.misc = dtoh32(cnt->rxrunt) + dtoh32(cnt->rxgiant); + wstats->miss.beacon = 0; + WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n", + dtoh32(cnt->txframe), dtoh32(cnt->txbyte))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", + dtoh32(cnt->rxundec))); + WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", + dtoh32(cnt->txfail))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", + dtoh32(cnt->rxfragerr))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", + dtoh32(cnt->rxrunt))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", + dtoh32(cnt->rxgiant))); + break; + } + case WL_CNT_XTLV_CNTV_LE10_UCODE: + case WL_CNT_XTLV_LT40_UCODE_V1: + case WL_CNT_XTLV_GE40_UCODE_V1: + { + /* Offsets of rxfrmtoolong and rxbadplcp are the same in + * wl_cnt_v_le10_mcst_t, wl_cnt_lt40mcst_v1_t, and wl_cnt_ge40mcst_v1_t. + * So we can just cast to wl_cnt_v_le10_mcst_t here. + */ + wl_cnt_v_le10_mcst_t *cnt = (wl_cnt_v_le10_mcst_t *)data; + if (len != WL_CNT_MCST_STRUCT_SZ) { + printf("counter structure length mismatch! 
%d != %d\n", + len, WL_CNT_MCST_STRUCT_SZ); + } + WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", + dtoh32(cnt->rxfrmtoolong))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", + dtoh32(cnt->rxbadplcp))); + BCM_REFERENCE(cnt); + break; + } + default: + WL_ERROR(("%s %d: Unsupported type %d\n", __FUNCTION__, __LINE__, type)); + break; + } + return res; +} +#endif + int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats) { int res = 0; - wl_cnt_t cnt; int phy_noise; int rssi; scb_val_t scb_val; +#if WIRELESS_EXT > 11 + char *cntbuf = NULL; + wl_cnt_info_t *cntinfo; + uint16 ver; + uint32 corerev = 0; +#endif /* WIRELESS_EXT > 11 */ phy_noise = 0; if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise)))) @@ -3572,6 +3543,7 @@ int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstat goto done; rssi = dtoh32(scb_val.val); + rssi = MIN(rssi, RSSI_MAXVAL); WL_TRACE(("wl_iw_get_wireless_stats rssi=%d ****** \n", rssi)); if (rssi <= WL_IW_RSSI_NO_SIGNAL) wstats->qual.qual = 0; @@ -3596,46 +3568,76 @@ int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstat #endif /* WIRELESS_EXT > 18 */ #if WIRELESS_EXT > 11 - WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n *****", (int)sizeof(wl_cnt_t))); + WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n *****", WL_CNTBUF_MAX_SIZE)); - memset(&cnt, 0, sizeof(wl_cnt_t)); - res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t)); + if (WL_CNTBUF_MAX_SIZE > MAX_WLIW_IOCTL_LEN) + { + WL_ERROR(("wl_iw_get_wireless_stats buffer too short %d < %d\n", + WL_CNTBUF_MAX_SIZE, MAX_WLIW_IOCTL_LEN)); + res = BCME_BUFTOOSHORT; + goto done; + } + + cntbuf = kmalloc(WL_CNTBUF_MAX_SIZE, GFP_KERNEL); + if (!cntbuf) { + res = BCME_NOMEM; + goto done; + } + + memset(cntbuf, 0, WL_CNTBUF_MAX_SIZE); + res = dev_wlc_bufvar_get(dev, "counters", cntbuf, WL_CNTBUF_MAX_SIZE); if (res) { 
WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d ****** \n", res)); goto done; } - cnt.version = dtoh16(cnt.version); - if (cnt.version != WL_CNT_T_VERSION) { + cntinfo = (wl_cnt_info_t *)cntbuf; + cntinfo->version = dtoh16(cntinfo->version); + cntinfo->datalen = dtoh16(cntinfo->datalen); + ver = cntinfo->version; + if (ver > WL_CNT_T_VERSION) { WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n", - WL_CNT_T_VERSION, cnt.version)); + WL_CNT_T_VERSION, ver)); + res = BCME_VERSION; goto done; } - wstats->discard.nwid = 0; - wstats->discard.code = dtoh32(cnt.rxundec); - wstats->discard.fragment = dtoh32(cnt.rxfragerr); - wstats->discard.retries = dtoh32(cnt.txfail); - wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant); - wstats->miss.beacon = 0; + if (ver == WL_CNT_VERSION_11) { + wlc_rev_info_t revinfo; + memset(&revinfo, 0, sizeof(revinfo)); + res = dev_wlc_ioctl(dev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo)); + if (res) { + WL_ERROR(("%s: WLC_GET_REVINFO failed %d\n", __FUNCTION__, res)); + goto done; + } + corerev = dtoh32(revinfo.corerev); + } - WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n", - dtoh32(cnt.txframe), dtoh32(cnt.txbyte))); - WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong))); - WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp))); - WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec))); - WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr))); - WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail))); - WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt))); - WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant))); +#ifdef WL_NAN + res = wl_cntbuf_to_xtlv_format(NULL, cntinfo, WL_CNTBUF_MAX_SIZE, corerev); + if (res) { + WL_ERROR(("%s: wl_cntbuf_to_xtlv_format failed %d\n", 
__FUNCTION__, res)); + goto done; + } + if ((res = bcm_unpack_xtlv_buf(wstats, cntinfo->data, cntinfo->datalen, + BCM_XTLV_OPTION_ALIGN32, wl_iw_get_wireless_stats_cbfn))) { + goto done; + } +#endif #endif /* WIRELESS_EXT > 11 */ done: +#if WIRELESS_EXT > 11 + if (cntbuf) { + kfree(cntbuf); + } +#endif /* WIRELESS_EXT > 11 */ return res; } +#ifndef WL_ESCAN static void wl_iw_timerfunc(ulong data) { @@ -3849,10 +3851,12 @@ _iscan_sysioc_thread(void *data) printf("%s: was terminated\n", __FUNCTION__); complete_and_exit(&iscan->sysioc_exited, 0); } +#endif /* WL_ESCAN */ int wl_iw_attach(struct net_device *dev, void * dhdp) { +#ifndef WL_ESCAN iscan_info_t *iscan = NULL; printf("%s: Enter\n", __FUNCTION__); @@ -3873,7 +3877,6 @@ wl_iw_attach(struct net_device *dev, void * dhdp) iscan->dev = dev; iscan->iscan_state = ISCAN_STATE_IDLE; - /* Set up the timer */ iscan->timer_ms = 2000; init_timer(&iscan->timer); @@ -3890,11 +3893,13 @@ wl_iw_attach(struct net_device *dev, void * dhdp) #endif if (iscan->sysioc_pid < 0) return -ENOMEM; +#endif /* WL_ESCAN */ return 0; } void wl_iw_detach(void) { +#ifndef WL_ESCAN iscan_buf_t *buf; iscan_info_t *iscan = g_iscan; if (!iscan) @@ -3911,6 +3916,7 @@ void wl_iw_detach(void) } kfree(iscan); g_iscan = NULL; +#endif } #endif /* USE_IW */ diff --git a/drivers/net/wireless/bcmdhd/wl_iw.h b/drivers/amlogic/wifi/bcmdhd/wl_iw.h similarity index 77% rename from drivers/net/wireless/bcmdhd/wl_iw.h rename to drivers/amlogic/wifi/bcmdhd/wl_iw.h index dfadb0c3951d6..dc80b2af0f05f 100644 --- a/drivers/net/wireless/bcmdhd/wl_iw.h +++ b/drivers/amlogic/wifi/bcmdhd/wl_iw.h @@ -1,9 +1,30 @@ /* * Linux Wireless Extensions support * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the 
"GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * - * $Id: wl_iw.h 488316 2014-06-30 15:22:21Z $ + * + * <> + * + * $Id: wl_iw.h 514727 2014-11-12 03:02:48Z $ */ #ifndef _wl_iw_h_ @@ -37,7 +58,7 @@ #define TXPOWER_SET_CMD "TXPOWER" #define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] -#define MACSTR "%02X:%02X:%02X:%02X:%02X:%02X" +#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" /* Structure to keep global parameters */ typedef struct wl_iw_extra_params { @@ -109,6 +130,10 @@ extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data); extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats); int wl_iw_attach(struct net_device *dev, void * dhdp); int wl_iw_send_priv_event(struct net_device *dev, char *flag); +#ifdef WL_ESCAN +int wl_iw_handle_scanresults_ies(char **event_p, char *end, + struct iw_request_info *info, wl_bss_info_t *bi); +#endif void wl_iw_detach(void); diff --git a/drivers/net/wireless/bcmdhd/wl_linux_mon.c b/drivers/amlogic/wifi/bcmdhd/wl_linux_mon.c similarity index 89% rename from drivers/net/wireless/bcmdhd/wl_linux_mon.c rename to drivers/amlogic/wifi/bcmdhd/wl_linux_mon.c index 
70b3b93f88eb0..65e624092e80f 100644 --- a/drivers/net/wireless/bcmdhd/wl_linux_mon.c +++ b/drivers/amlogic/wifi/bcmdhd/wl_linux_mon.c @@ -1,9 +1,30 @@ /* * Broadcom Dongle Host Driver (DHD), Linux monitor network interface * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: wl_linux_mon.c 467328 2014-04-03 01:23:40Z $ + * + * <> + * + * $Id: wl_linux_mon.c 514727 2014-11-12 03:02:48Z $ */ #include diff --git a/drivers/amlogic/wifi/bcmdhd/wl_roam.c b/drivers/amlogic/wifi/bcmdhd/wl_roam.c new file mode 100644 index 0000000000000..bddc755f82049 --- /dev/null +++ b/drivers/amlogic/wifi/bcmdhd/wl_roam.c @@ -0,0 +1,28 @@ +/* + * Linux roam cache + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: wl_roam.c 599089 2015-11-12 10:41:33Z $ + */ diff --git a/drivers/net/wireless/bcmdhd/wldev_common.c b/drivers/amlogic/wifi/bcmdhd/wldev_common.c similarity index 70% rename from drivers/net/wireless/bcmdhd/wldev_common.c rename to drivers/amlogic/wifi/bcmdhd/wldev_common.c index 12780c951170e..c09e4512c1265 100644 --- a/drivers/net/wireless/bcmdhd/wldev_common.c +++ b/drivers/amlogic/wifi/bcmdhd/wldev_common.c @@ -1,9 +1,30 @@ /* * Common function shared by Linux WEXT, cfg80211 and p2p drivers * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: wldev_common.c 504503 2014-09-24 11:28:56Z $ + * + * <> + * + * $Id: wldev_common.c 585478 2015-09-10 13:33:58Z $ */ #include @@ -24,7 +45,13 @@ #define WLDEV_ERROR(args) \ do { \ - printk(KERN_ERR "WLDEV-ERROR) %s : ", __func__); \ + printk(KERN_ERR "WLDEV-ERROR) "); \ + printk args; \ + } while (0) + +#define WLDEV_INFO(args) \ + do { \ + printk(KERN_INFO "WLDEV-INFO) "); \ printk args; \ } while (0) @@ -53,7 +80,7 @@ s32 wldev_ioctl( * wl_iw, wl_cfg80211 and wl_cfgp2p */ static s32 wldev_mkiovar( - s8 *iovar_name, s8 *param, s32 paramlen, + const s8 *iovar_name, s8 *param, s32 paramlen, s8 *iovar_buf, u32 buflen) { s32 iolen = 0; @@ -142,8 +169,8 @@ s32 wldev_mkiovar_bsscfg( u32 iolen; if (bssidx == 0) { - return wldev_mkiovar((s8*)iovar_name, (s8 *)param, paramlen, - (s8 *) iovar_buf, buflen); + return wldev_mkiovar(iovar_name, param, paramlen, + iovar_buf, buflen); } prefixlen = (u32) strlen(prefix); /* lengh of bsscfg prefix */ @@ -265,20 +292,17 @@ int wldev_get_link_speed( } int wldev_get_rssi( - struct net_device *dev, int *prssi) + struct net_device *dev, scb_val_t *scb_val) { - scb_val_t scb_val; int error; - if (!prssi) + if (!scb_val) return -ENOMEM; - bzero(&scb_val, sizeof(scb_val_t)); - error = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t), 0); + error = wldev_ioctl(dev, WLC_GET_RSSI, scb_val, sizeof(scb_val_t), 0); if (unlikely(error)) return error; - *prssi = dtoh32(scb_val.val); return error; } @@ -317,9 +341,76 @@ int wldev_set_band( } return error; } +int wldev_get_datarate(struct net_device *dev, int *datarate) +{ + int error = 0; + + error = wldev_ioctl(dev, WLC_GET_RATE, datarate, sizeof(int), false); + if (error) { + return -1; + } else { + *datarate = dtoh32(*datarate); + } + + return error; +} + +#ifdef WL_CFG80211 +extern chanspec_t +wl_chspec_driver_to_host(chanspec_t chanspec); +#define WL_EXTRA_BUF_MAX 2048 +int wldev_get_mode( + struct net_device *dev, uint8 *cap) +{ + int error = 0; + int chanspec = 0; + 
uint16 band = 0; + uint16 bandwidth = 0; + wl_bss_info_t *bss = NULL; + char* buf = kmalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); + if (!buf) + return -1; + *(u32*) buf = htod32(WL_EXTRA_BUF_MAX); + error = wldev_ioctl(dev, WLC_GET_BSS_INFO, (void*)buf, WL_EXTRA_BUF_MAX, false); + if (error) { + WLDEV_ERROR(("%s:failed:%d\n", __FUNCTION__, error)); + return -1; + } + bss = (struct wl_bss_info *)(buf + 4); + chanspec = wl_chspec_driver_to_host(bss->chanspec); + + band = chanspec & WL_CHANSPEC_BAND_MASK; + bandwidth = chanspec & WL_CHANSPEC_BW_MASK; + + if (band == WL_CHANSPEC_BAND_2G) { + if (bss->n_cap) + strcpy(cap, "n"); + else + strcpy(cap, "bg"); + } else if (band == WL_CHANSPEC_BAND_5G) { + if (bandwidth == WL_CHANSPEC_BW_80) + strcpy(cap, "ac"); + else if ((bandwidth == WL_CHANSPEC_BW_40) || (bandwidth == WL_CHANSPEC_BW_20)) { + if ((bss->nbss_cap & 0xf00) && (bss->n_cap)) + strcpy(cap, "n|ac"); + else if (bss->n_cap) + strcpy(cap, "n"); + else if (bss->vht_cap) + strcpy(cap, "ac"); + else + strcpy(cap, "a"); + } else { + WLDEV_ERROR(("%s:Mode get failed\n", __FUNCTION__)); + return -1; + } + + } + return error; +} +#endif int wldev_set_country( - struct net_device *dev, char *country_code, bool notify, bool user_enforced) + struct net_device *dev, char *country_code, bool notify, bool user_enforced, int revinfo) { int error = -1; wl_country_t cspec = {{0}, 0, {0}}; @@ -337,7 +428,8 @@ int wldev_set_country( } if ((error < 0) || - (strncmp(country_code, cspec.country_abbrev, WLC_CNTRY_BUF_SZ) != 0)) { + dhd_force_country_change(dev) || + (strncmp(country_code, cspec.ccode, WLC_CNTRY_BUF_SZ) != 0)) { if (user_enforced) { bzero(&scbval, sizeof(scb_val_t)); @@ -349,12 +441,12 @@ int wldev_set_country( } } - cspec.rev = -1; + cspec.rev = revinfo; memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ); memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ); - error = dhd_conf_get_country_from_config(dhd_get_pub(dev), &cspec); - if (error) - 
dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec); + error = dhd_conf_get_country_from_config(dhd_get_pub(dev), &cspec); + if (error) + dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec); error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec), smbuf, sizeof(smbuf), NULL); if (error < 0) { diff --git a/drivers/net/wireless/bcmdhd/wldev_common.h b/drivers/amlogic/wifi/bcmdhd/wldev_common.h similarity index 68% rename from drivers/net/wireless/bcmdhd/wldev_common.h rename to drivers/amlogic/wifi/bcmdhd/wldev_common.h index f8193010b70b9..4cf421cfa19e4 100644 --- a/drivers/net/wireless/bcmdhd/wldev_common.h +++ b/drivers/amlogic/wifi/bcmdhd/wldev_common.h @@ -1,9 +1,30 @@ /* * Common function shared by Linux WEXT, cfg80211 and p2p drivers * - * $Copyright Open Broadcom Corporation$ + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: wldev_common.h 504503 2014-09-24 11:28:56Z $ + * + * <> + * + * $Id: wldev_common.h 556083 2015-05-12 14:03:00Z $ */ #ifndef __WLDEV_COMMON_H__ #define __WLDEV_COMMON_H__ @@ -73,9 +94,10 @@ extern int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, extern void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code, wl_country_t *cspec); extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify); +extern bool dhd_force_country_change(struct net_device *dev); extern void dhd_bus_band_set(struct net_device *dev, uint band); extern int wldev_set_country(struct net_device *dev, char *country_code, bool notify, - bool user_enforced); + bool user_enforced, int revinfo); extern int net_os_wake_lock(struct net_device *dev); extern int net_os_wake_unlock(struct net_device *dev); extern int net_os_wake_lock_timeout(struct net_device *dev); @@ -83,24 +105,19 @@ extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val); extern int net_os_set_dtim_skip(struct net_device *dev, int val); extern int net_os_set_suspend_disable(struct net_device *dev, int val); extern int net_os_set_suspend(struct net_device *dev, int val, int force); -extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, +extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left); /* Get the link speed from dongle, speed is in kpbs */ int wldev_get_link_speed(struct net_device *dev, int *plink_speed); -int wldev_get_rssi(struct net_device *dev, int *prssi); +int wldev_get_rssi(struct net_device *dev, scb_val_t *prssi); int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid); int wldev_get_band(struct net_device *dev, uint *pband); - +int wldev_get_mode(struct net_device *dev, uint8 *pband); +int wldev_get_datarate(struct net_device *dev, int *datarate); int wldev_set_band(struct net_device *dev, uint band); -#if defined(CUSTOM_PLATFORM_NV_TEGRA) -int 
wldev_miracast_tuning(struct net_device *dev, char *command, int total_len); -int wldev_get_assoc_resp_ie(struct net_device *dev, char *command, int total_len); -int wldev_get_rx_rate_stats(struct net_device *dev, char *command, int total_len); -#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */ - #endif /* __WLDEV_COMMON_H__ */ diff --git a/drivers/amlogic/wifi/wifi_dt.c b/drivers/amlogic/wifi/wifi_dt.c index 140341ff949f4..6e111b0793b30 100644 --- a/drivers/amlogic/wifi/wifi_dt.c +++ b/drivers/amlogic/wifi/wifi_dt.c @@ -36,34 +36,12 @@ #include #include #include -#include -#include -#include #define OWNER_NAME "sdio_wifi" int wifi_power_gpio = 0; int wifi_power_gpio2 = 0; - -/* -*there are two pwm channel outputs using one gpio -*for gxtvbb and the follows soc -*/ -struct pwm_config_gxtvbb { - unsigned int pwm_channel1; - unsigned int pwm_channel2; - unsigned int pwm_config1[3]; - unsigned int pwm_config2[3]; - /*pwm_config is used to storage peroid , duty and times*/ -}; - -struct pwm_config_gxbb { - unsigned int pwm_channel; - unsigned int pwm_config[2]; - /*pwm_config is used to storage peroid and duty*/ -}; - struct wifi_plat_info { int interrupt_pin; int irq_num; @@ -71,7 +49,6 @@ struct wifi_plat_info { int power_on_pin; int power_on_pin_level; - int power_on_pin_OD; int power_on_pin2; int clock_32k_pin; @@ -81,8 +58,6 @@ struct wifi_plat_info { int plat_info_valid; struct pinctrl *p; struct device *dev; - struct pwm_config_gxtvbb gxtv_conf; - struct pwm_config_gxbb gxb_conf; }; #define WIFI_POWER_MODULE_NAME "wifi_power" @@ -94,7 +69,6 @@ struct wifi_plat_info { #define USB_POWER_DOWN _IO('m', 2) #define SDIO_POWER_UP _IO('m', 3) #define SDIO_POWER_DOWN _IO('m', 4) -#define SDIO_GET_DEV_TYPE _IO('m', 5) static struct wifi_plat_info wifi_info; static dev_t wifi_power_devno; static struct cdev *wifi_power_cdev; @@ -150,29 +124,12 @@ static struct wifi_plat_info *wifi_get_driver_data static int set_power(int value) { - if (!wifi_info.power_on_pin_OD) { - if 
(wifi_info.power_on_pin_level) - return gpio_direction_output(wifi_info.power_on_pin, - !value); - else - return gpio_direction_output(wifi_info.power_on_pin, - value); - } else { - if (wifi_info.power_on_pin_level) { - if (value) - gpio_direction_input(wifi_info.power_on_pin); - else - gpio_direction_output(wifi_info.power_on_pin, - 0); - } else { - if (value) - gpio_direction_output(wifi_info.power_on_pin, - 0); - else - gpio_direction_input(wifi_info.power_on_pin); - } - } - return 0; + if (wifi_info.power_on_pin_level) + return gpio_direction_output(wifi_info.power_on_pin, + !value); + else + return gpio_direction_output(wifi_info.power_on_pin, + value); } static int set_power2(int value) @@ -254,10 +211,10 @@ static int wifi_power_release(struct inode *inode, struct file *file) { return 0; } + static long wifi_power_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - char dev_type[10] = {'\0'}; switch (cmd) { case USB_POWER_UP: @@ -280,14 +237,6 @@ static long wifi_power_ioctl(struct file *filp, case SDIO_POWER_DOWN: extern_wifi_set_enable(0); break; - case SDIO_GET_DEV_TYPE: - memcpy(dev_type, get_wifi_inf(), strlen(get_wifi_inf())); - WIFI_INFO("wifi interface dev type: %s, length = %d\n", - dev_type, (int)strlen(dev_type)); - if (copy_to_user((char __user *)arg, - dev_type, strlen(dev_type))) - return -ENOTTY; - break; default: WIFI_INFO("usb wifi_power_ioctl: default !!!\n"); return -EINVAL; @@ -296,7 +245,6 @@ static long wifi_power_ioctl(struct file *filp, } static const struct file_operations wifi_power_fops = { - .unlocked_ioctl = wifi_power_ioctl, .compat_ioctl = wifi_power_ioctl, .open = wifi_power_open, .release = wifi_power_release, @@ -406,194 +354,6 @@ static void wifi_teardown_dt(void) } - -/* -* fot gxb soc -*/ -int pwm_single_channel_conf_dt(struct wifi_plat_info *plat) -{ - phandle pwm_phandle; - int val; - int ret; - int count = 2; - struct device_node *np_wifi_pwm_conf = plat->dev->of_node; - - ret = 
of_property_read_u32(np_wifi_pwm_conf, "pwm_config", &val); - if (ret) { - pr_err("not match wifi_pwm_config node\n"); - return -1; - } else { - pwm_phandle = val; - np_wifi_pwm_conf = of_find_node_by_phandle(pwm_phandle); - if (!np_wifi_pwm_conf) { - pr_err("can't find wifi_pwm_config node\n"); - return -1; - } - } - - ret = of_property_read_u32(np_wifi_pwm_conf, "pwm_channel", - &(plat->gxb_conf.pwm_channel)); - if (ret) { - pr_err("not config pwm channel num\n"); - return -1; - } - - ret = of_property_read_u32_array(np_wifi_pwm_conf, "pwm_channel_conf", - (plat->gxb_conf.pwm_config), count); - if (ret) { - pr_err("not config pwm channel parameters\n"); - return -1; - } - - WIFI_INFO("pwm phandle val=%x,pwm-channel=%d\n", - val, plat->gxb_conf.pwm_channel); - WIFI_INFO("pwm_config[0] = %d,pwm_config[1] = %d\n", - plat->gxb_conf.pwm_config[0], plat->gxb_conf.pwm_config[1]); - WIFI_INFO("wifi pwm dt ok\n"); - - return 0; -} - -/* -*configuration for single pwm -*/ -int pwm_single_channel_conf(struct wifi_plat_info *plat) -{ - struct pwm_device *pwm_ch = NULL; - struct aml_pwm_chip *aml_chip = NULL; - struct pwm_config_gxbb pg = plat->gxb_conf; - unsigned int pwm_num = pg.pwm_channel; - unsigned int pwm_period = pg.pwm_config[0]; - unsigned int pwm_duty = pg.pwm_config[1]; - - pwm_ch = pwm_request(pwm_num, NULL); - if (IS_ERR(pwm_ch)) { - WIFI_INFO("request pwm %d failed\n", - plat->gxb_conf.pwm_channel); - } - aml_chip = to_aml_pwm_chip(pwm_ch->chip); - pwm_set_period(pwm_ch, pwm_period); - pwm_config(pwm_ch, pwm_duty, pwm_period); - pwm_enable(pwm_ch); - WIFI_INFO("wifi pwm conf ok\n"); - - return 0; -} - -/* -* for gxtvbb,gxl,gxm,txl soc -*/ -int pwm_double_channel_conf_dt(struct wifi_plat_info *plat) -{ - phandle pwm_phandle; - int val; - int ret; - int count = 3; - int i; - struct device_node *np_wifi_pwm_conf = plat->dev->of_node; - - ret = of_property_read_u32(np_wifi_pwm_conf, "pwm_config", &val); - if (ret) { - pr_err("not match wifi_pwm_config node\n"); - 
return -1; - } else { - pwm_phandle = val; - np_wifi_pwm_conf = of_find_node_by_phandle(pwm_phandle); - if (!np_wifi_pwm_conf) { - pr_err("can't find wifi_pwm_config node\n"); - return -1; - } - } - - ret = of_property_read_u32(np_wifi_pwm_conf, "pwm_channel1", - &(plat->gxtv_conf.pwm_channel1)); - if (ret) { - pr_err("not config pwm channel 1 num\n"); - return -1; - } - ret = of_property_read_u32(np_wifi_pwm_conf, "pwm_channel2", - &(plat->gxtv_conf.pwm_channel2)); - if (ret) { - pr_err("not config pwm channel 2 num\n"); - return -1; - } - ret = of_property_read_u32_array(np_wifi_pwm_conf, "pwm_channel1_conf", - (plat->gxtv_conf.pwm_config1), count); - if (ret) { - pr_err("not config pwm channel 1 parameters\n"); - return -1; - } - ret = of_property_read_u32_array(np_wifi_pwm_conf, "pwm_channel2_conf", - (plat->gxtv_conf.pwm_config2), count); - if (ret) { - pr_err("not config pwm channel 2 parameters\n"); - return -1; - } - - WIFI_INFO("pwm phandle val=%x;pwm-channel1=%d;pwm-channel2=%d\n", - val, plat->gxtv_conf.pwm_channel1, - plat->gxtv_conf.pwm_channel2); - for (i = 0; i < count; i++) { - WIFI_INFO("pwm_config1[%d] = %d\n", - i, plat->gxtv_conf.pwm_config1[i]); - WIFI_INFO("pwm_config2[%d] = %d\n", - i, plat->gxtv_conf.pwm_config2[i]); - } - WIFI_INFO("wifi pwm dt ok\n"); - - return 0; -} - -/* -*configuration for double pwm -*/ -int pwm_double_channel_conf(struct wifi_plat_info *plat) -{ - struct pwm_device *pwm_ch1 = NULL; - struct pwm_device *pwm_ch2 = NULL; - struct aml_pwm_chip *aml_chip1 = NULL; - struct aml_pwm_chip *aml_chip2 = NULL; - struct pwm_config_gxtvbb pg = plat->gxtv_conf; - unsigned int pwm_ch1_num = pg.pwm_channel1; - unsigned int pwm_ch2_num = pg.pwm_channel2; - unsigned int pwm_ch1_period = pg.pwm_config1[0]; - unsigned int pwm_ch1_duty = pg.pwm_config1[1]; - unsigned int pwm_ch1_times = pg.pwm_config1[2]; - unsigned int pwm_ch2_period = pg.pwm_config2[0]; - unsigned int pwm_ch2_duty = pg.pwm_config2[1]; - unsigned int pwm_ch2_times = 
pg.pwm_config2[2]; - - - pwm_ch1 = pwm_request(pwm_ch1_num, NULL); - if (IS_ERR(pwm_ch1)) { - WIFI_INFO("request pwm %d failed\n", - plat->gxtv_conf.pwm_channel1); - } - pwm_ch2 = pwm_request(pwm_ch2_num, NULL); - if (IS_ERR(pwm_ch2)) { - WIFI_INFO("request pwm %d failed\n", - plat->gxtv_conf.pwm_channel2); - } - - aml_chip1 = to_aml_pwm_chip(pwm_ch1->chip); - aml_chip2 = to_aml_pwm_chip(pwm_ch2->chip); - - pwm_set_period(pwm_ch1, pwm_ch1_period); - pwm_set_period(pwm_ch2, pwm_ch2_period); - - pwm_config(pwm_ch1, pwm_ch1_duty, pwm_ch1_period); - pwm_config(pwm_ch2, pwm_ch2_duty, pwm_ch2_period); - - pwm_set_times(aml_chip1, pwm_ch1_num, pwm_ch1_times); - pwm_set_times(aml_chip2, pwm_ch2_num, pwm_ch2_times); - - pwm_enable(pwm_ch1); - pwm_enable(pwm_ch2); - WIFI_INFO("wifi pwm conf ok\n"); - - return 0; -} - static int wifi_dev_probe(struct platform_device *pdev) { int ret; @@ -658,7 +418,6 @@ static int wifi_dev_probe(struct platform_device *pdev) if (ret) { WIFI_INFO("no power_on_pin"); plat->power_on_pin = 0; - plat->power_on_pin_OD = 0; } else { wifi_power_gpio = 1; desc = of_get_named_gpiod_flags(pdev->dev.of_node, @@ -670,11 +429,6 @@ static int wifi_dev_probe(struct platform_device *pdev) ret = of_property_read_u32(pdev->dev.of_node, "power_on_pin_level", &plat->power_on_pin_level); - ret = of_property_read_u32(pdev->dev.of_node, - "power_on_pin_OD", &plat->power_on_pin_OD); - if (ret) - plat->power_on_pin_OD = 0; - pr_info("wifi: power_on_pin_OD = %d;\n", plat->power_on_pin_OD); ret = of_property_read_string(pdev->dev.of_node, "power_on_pin2", &value); if (ret) { @@ -701,12 +455,35 @@ static int wifi_dev_probe(struct platform_device *pdev) if (of_get_property(pdev->dev.of_node, "pinctrl-names", NULL)) { unsigned int pwm_misc; + unsigned int pwm_time_count; if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) { - pwm_double_channel_conf_dt(plat); - pwm_double_channel_conf(plat); + WIFI_INFO("set pwm as 32k output"); + aml_write_cbus(0x21b0, 0x16d016e); + 
aml_write_cbus(0x21b5, 0x16d016d); + + pwm_time_count = aml_read_cbus(0x21b4); + pwm_time_count &= ~(0xffff << 16); + pwm_time_count |= ((3 << 16) | (2 << 24)); + aml_write_cbus(0x21b4, pwm_time_count); + + pwm_misc = aml_read_cbus(0x21b2); + pwm_misc &= ~((0x7f << 8) | (3 << 4) | + (1 << 2) | (1 << 0)); + pwm_misc |= ((1 << 25) | (1 << 15) | + (0 << 8) | (0 << 4)); + aml_write_cbus(0x21b2, (pwm_misc | (1 << 0))); + } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXBB) { - pwm_single_channel_conf_dt(plat); - pwm_single_channel_conf(plat); + /* pwm_e */ + WIFI_INFO("set pwm as 32k output"); + aml_write_cbus(0x21b0, 0x7f107f2); + pwm_misc = aml_read_cbus(0x21b2); + pwm_misc &= ~((0x7f << 8) | (3 << 4) | + (1 << 2) | (1 << 0)); + pwm_misc |= ((1 << 15) | (4 << 8) | (3 << 4)); + aml_write_cbus(0x21b2, pwm_misc); + aml_write_cbus(0x21b2, (pwm_misc | (1 << 0))); + } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_M8B) { /* pwm_e */ WIFI_INFO("set pwm as 32k output"); diff --git a/drivers/amlogic/wifi/wifi_dummy.c b/drivers/amlogic/wifi/wifi_dummy.c new file mode 100644 index 0000000000000..5fdc450d800c4 --- /dev/null +++ b/drivers/amlogic/wifi/wifi_dummy.c @@ -0,0 +1,30 @@ +#include +#include +#include +#include + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("kszaq"); +MODULE_DESCRIPTION("Amlogic WiFi power on and SDIO rescan module"); + +extern void extern_wifi_set_enable(int); +extern void sdio_reinit(void); + +static int __init wifi_dummy_init(void) +{ + printk(KERN_INFO "Triggered SDIO WiFi power on and bus rescan.\n"); + extern_wifi_set_enable(0); + msleep(300); + extern_wifi_set_enable(1); + msleep(300); + sdio_reinit(); + return 0; +} + +static void __exit wifi_dummy_cleanup(void) +{ + printk(KERN_INFO "Cleaning up module.\n"); +} + +module_init(wifi_dummy_init); +module_exit(wifi_dummy_cleanup); diff --git a/drivers/atm/.gitignore b/drivers/atm/.gitignore deleted file mode 100644 index fc0ae5eb05d81..0000000000000 --- a/drivers/atm/.gitignore +++ /dev/null @@ -1,5 +0,0 
@@ -# Ignore generated files -fore200e_mkfirm -fore200e_pca_fw.c -pca200e.bin -pca200e_ecd.bin2 diff --git a/drivers/eisa/.gitignore b/drivers/eisa/.gitignore deleted file mode 100644 index 4b335c0aedb0a..0000000000000 --- a/drivers/eisa/.gitignore +++ /dev/null @@ -1 +0,0 @@ -devlist.h diff --git a/drivers/gpu/arm/Kconfig b/drivers/gpu/arm/Kconfig new file mode 100644 index 0000000000000..efe78169ac0c2 --- /dev/null +++ b/drivers/gpu/arm/Kconfig @@ -0,0 +1,2 @@ +source "drivers/gpu/arm/mali/Kconfig" +source "drivers/gpu/arm/ump/Kconfig" diff --git a/drivers/gpu/arm/Makefile b/drivers/gpu/arm/Makefile index 27bd5ac96104a..ff8fa7c2503de 100644 --- a/drivers/gpu/arm/Makefile +++ b/drivers/gpu/arm/Makefile @@ -1,2 +1,3 @@ -obj-$(CONFIG_UMP) += ump/ -obj-$(CONFIG_MALI400) += mali/ +obj-$(CONFIG_MALI400) += mali/ +obj-$(CONFIG_UMP) += ump/ +obj-$(CONFIG_UMP) += umplock/ \ No newline at end of file diff --git a/drivers/gpu/arm/bindings/arm/mali-utgard.txt b/drivers/gpu/arm/bindings/arm/mali-utgard.txt deleted file mode 100644 index c80d56275ed82..0000000000000 --- a/drivers/gpu/arm/bindings/arm/mali-utgard.txt +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (C) 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained from Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ -* ARM Mali-300/400/450 GPU - -Required properties: -- compatible: - At least one of these: "arm,mali-300", "arm,mali-400", "arm,mali-450" - Always: "arm,mali-utgard" - Mali-450 can also include "arm,mali-400" as it is compatible. - - "arm,mali-400", "arm,mali-utgard" for any Mali-400 GPU. 
- - "arm,mali-450", "arm,mali-400", "arm,mali-utgard" for any Mali-450 GPU. -- reg: - Physical base address and length of the GPU's registers. -- interrupts: - - List of all Mali interrupts. - - This list must match the number of and the order of entries in - interrupt-names. -- interrupt-names: - - IRQPP - Name for PP interrupts. - - IRQPPMMU - Name for interrupts from the PP MMU. - - IRQPP - Name for the PP broadcast interrupt (Mali-450 only). - - IRQGP - Name for the GP interrupt. - - IRQGPMMU - Name for the interrupt from the GP MMU. - - IRQPMU - Name for the PMU interrupt (If pmu is implemented in HW, it must be contained). - -Optional properties: -- pmu_domain_config: - - If the Mali internal PMU is present and the PMU IRQ is specified in - interrupt/interrupt-names ("IRQPMU").This contains the mapping of - Mali HW units to the PMU power domain. - -Mali Dynamic power domain configuration in sequence from 0-11, like: - . -- pmu-switch-delay: - - Only needed if the power gates are connected to the PMU in a high fanout - network. This value is the number of Mali clock cycles it takes to - enable the power gates and turn on the power mesh. This value will - have no effect if a daisy chain implementation is used. - -Platform related properties: -- clocks: Phandle to clock for Mali utgard device. -- clock-names: the corresponding names of clock in clocks property. -- regulator: Phandle to regulator which is power supplier of mali device. - -Example for a Mali400_MP1_PMU device: - -/ { - ... 
- - gpu@12300000 { - compatible = "arm,mali-400", "arm,mali-utgard"; - reg = <0x12300000 0x30000>; - interrupts = <0 55 4>, <0 56 4>, <0 57 4>, <0 58 4>, <0 59 4>; - interrupt-names = "IRQGP", "IRQGPMMU", "IRQPP0", "IRQPPMMU0", "IRQPMU"; - - pmu_domain_config = <0x1 0x4 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x2 0x0 0x0>; - pmu_switch_delay = <0xff>; - clocks = , ; - clock-names = "mali_parent", "mali"; - vdd_g3d-supply = ; - }; -} diff --git a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/Makefile b/drivers/gpu/arm/egl/x11/drm_module/mali_drm/Makefile deleted file mode 100644 index bf0ec73375845..0000000000000 --- a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/Makefile +++ /dev/null @@ -1,77 +0,0 @@ -# -# Copyright (C) 2010, 2012-2013, 2015 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained from Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-# - -MALI_DRM_FILE_PREFIX = - -# For each arch check: CROSS_COMPILE , KDIR , CFLAGS += -DARCH - -ARCH ?= arm -## @note Should allow overriding of building MALI DRM for non-debug: -EXTRA_CFLAGS += -DDEBUG - -DRM_SYMVERS_FILE = ../drm/Module.symvers -KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTMOD)/$(DRM_SYMVERS_FILE) - - -# linux build system integration - -ifneq ($(KERNELRELEASE),) -# Inside the kernel build system - -EXTRA_CFLAGS += -I$(KBUILD_EXTMOD) -I$(KBUILD_EXTMOD)/include -I$(KBUILD_EXTMOD)/../drm/include/ - -SRC += $(MALI_DRM_FILE_PREFIX)mali/mali_drv.c \ - $(MALI_DRM_FILE_PREFIX)mali/mali_mm.c - -# Selecting files to compile by parsing the config file - -MODULE:=mali_drm.ko - -obj-m := $(MODULE:.ko=.o) -$(MODULE:.ko=-y) := $(SRC:.c=.o) - -else -# Outside the kernel build system - -# Get any user defined KDIR- or maybe even a hardcoded KDIR --include KDIR_CONFIGURATION - -# Define host system directory -KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build - -ifeq ($(ARCH), arm) - # when compiling for ARM we're cross compiling - export CROSS_COMPILE ?= arm-none-linux-gnueabi- - # default to Virtex5 - CONFIG ?= pb-virtex5 -else - # Compiling for the host - CONFIG ?= $(shell uname -m) -endif - -# default cpu to select -CPU ?= ct11mp - -# look up KDIR based om CPU selection -KDIR ?= $(KDIR-$(CPU)) -ifeq ($(KDIR),) -$(error No KDIR found for platform $(CPU)) -endif - -all: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules - -kernelrelease: - $(MAKE) -C $(KDIR) kernelrelease - -clean: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean - -endif diff --git a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/include/Kbuild b/drivers/gpu/arm/egl/x11/drm_module/mali_drm/include/Kbuild deleted file mode 100755 index c921ab1667a43..0000000000000 --- a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/include/Kbuild +++ /dev/null @@ -1,11 +0,0 @@ -unifdef-y += drm.h drm_sarea.h -unifdef-y += i810_drm.h -unifdef-y += i830_drm.h -unifdef-y += i915_drm.h -unifdef-y += mga_drm.h 
-unifdef-y += r128_drm.h -unifdef-y += radeon_drm.h -unifdef-y += sis_drm.h -unifdef-y += savage_drm.h -unifdef-y += via_drm.h -unifdef-y += mali_drm.h diff --git a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/include/mali_drm.h b/drivers/gpu/arm/egl/x11/drm_module/mali_drm/include/mali_drm.h deleted file mode 100644 index f59bdc80fb060..0000000000000 --- a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/include/mali_drm.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained from Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __MALI_DRM_H__ -#define __MALI_DRM_H__ - -/* Mali specific ioctls */ -#define NOT_USED_0_3 -#define DRM_MALI_FB_ALLOC 0x04 -#define DRM_MALI_FB_FREE 0x05 -#define NOT_USED_6_12 -#define DRM_MALI_MEM_INIT 0x13 -#define DRM_MALI_MEM_ALLOC 0x14 -#define DRM_MALI_MEM_FREE 0x15 -#define DRM_MALI_FB_INIT 0x16 - -#define DRM_IOCTL_MALI_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_MALI_FB_ALLOC, drm_mali_mem_t) -#define DRM_IOCTL_MALI_FB_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_MALI_FB_FREE, drm_mali_mem_t) -#define DRM_IOCTL_MALI_MEM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MALI_MEM_INIT, drm_mali_mem_t) -#define DRM_IOCTL_MALI_MEM_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_MALI_MEM_ALLOC, drm_mali_mem_t) -#define DRM_IOCTL_MALI_MEM_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_MALI_MEM_FREE, drm_mali_mem_t) -#define DRM_IOCTL_MALI_FB_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MALI_FB_INIT, drm_mali_fb_t) - -typedef struct -{ - int context; - unsigned int offset; - unsigned int size; - unsigned long free; -} drm_mali_mem_t; - -typedef struct 
-{ - unsigned int offset, size; -} drm_mali_fb_t; - -#endif /* __MALI_DRM_H__ */ diff --git a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/mali/Makefile b/drivers/gpu/arm/egl/x11/drm_module/mali_drm/mali/Makefile deleted file mode 100644 index f861f8cb0a248..0000000000000 --- a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/mali/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -# -# Copyright (C) 2010, 2012-2013, 2015 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained from Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -# -# Makefile for the drm device driver. This driver provides support for the -# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. - -ccflags-y = -Iinclude/drm -mali_drm-y := mali_drv.o mali_mm.o - -obj-$(CONFIG_DRM_MALI) += mali_drm.o - - diff --git a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/mali/mali_drv.c b/drivers/gpu/arm/egl/x11/drm_module/mali_drm/mali/mali_drv.c deleted file mode 100644 index cef4551dcbb82..0000000000000 --- a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/mali/mali_drv.c +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright (C) 2010, 2012-2013, 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained from Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ - -#include -#include -#include "drmP.h" -#include "mali_drm.h" -#include "mali_drv.h" - -static struct platform_device *pdev; - -static int mali_platform_drm_probe(struct platform_device *pdev) -{ - return 0; -} - -static int mali_platform_drm_remove(struct platform_device *pdev) -{ - return 0; -} - -static int mali_platform_drm_suspend(struct platform_device *dev, pm_message_t state) -{ - return 0; -} - -static int mali_platform_drm_resume(struct platform_device *dev) -{ - return 0; -} - - -static char mali_drm_device_name[] = "mali_drm"; -static struct platform_driver platform_drm_driver = -{ - .probe = mali_platform_drm_probe, - .remove = mali_platform_drm_remove, - .suspend = mali_platform_drm_suspend, - .resume = mali_platform_drm_resume, - .driver = { - .name = mali_drm_device_name, - .owner = THIS_MODULE, - }, -}; - -#if 0 -static const struct drm_device_id dock_device_ids[] = -{ - {"MALIDRM", 0}, - {"", 0}, -}; -#endif - -static int mali_driver_load(struct drm_device *dev, unsigned long chipset) -{ - int ret; - unsigned long base, size; - drm_mali_private_t *dev_priv; - printk(KERN_ERR "DRM: mali_driver_load start\n"); - - dev_priv = drm_calloc(1, sizeof(drm_mali_private_t), DRM_MEM_DRIVER); - - if (dev_priv == NULL) - { - return -ENOMEM; - } - - dev->dev_private = (void *)dev_priv; - - if (NULL == dev->platformdev) - { - dev->platformdev = platform_device_register_simple(mali_drm_device_name, 0, NULL, 0); - pdev = dev->platformdev; - } - -#if 0 - base = drm_get_resource_start(dev, 1); - size = drm_get_resource_len(dev, 1); -#endif - ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); - - if (ret) - { - drm_free(dev_priv, sizeof(dev_priv), DRM_MEM_DRIVER); - } - - //if ( ret ) kfree( dev_priv ); - - printk(KERN_ERR "DRM: mali_driver_load done\n"); - - return ret; -} - -static int mali_driver_unload(struct drm_device *dev) -{ - drm_mali_private_t *dev_priv = dev->dev_private; - printk(KERN_ERR "DRM: mali_driver_unload start\n"); - - 
drm_sman_takedown(&dev_priv->sman); - drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); - //kfree( dev_priv ); - printk(KERN_ERR "DRM: mali_driver_unload done\n"); - - return 0; -} - -static struct drm_driver driver = -{ - .driver_features = DRIVER_USE_PLATFORM_DEVICE, - .load = mali_driver_load, - .unload = mali_driver_unload, - .context_dtor = NULL, - .dma_quiescent = mali_idle, - .reclaim_buffers = NULL, - .reclaim_buffers_idlelocked = mali_reclaim_buffers_locked, - .lastclose = mali_lastclose, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, - .ioctls = mali_ioctls, - .fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) - .ioctl = drm_ioctl, -#else - .unlocked_ioctl = drm_ioctl, -#endif - .mmap = drm_mmap, - .poll = drm_poll, - .fasync = drm_fasync, - }, - .name = DRIVER_NAME, - .desc = DRIVER_DESC, - .date = DRIVER_DATE, - .major = DRIVER_MAJOR, - .minor = DRIVER_MINOR, - .patchlevel = DRIVER_PATCHLEVEL, -}; - -static int __init mali_init(void) -{ - driver.num_ioctls = mali_max_ioctl; - return drm_init(&driver); -} - -static void __exit mali_exit(void) -{ - platform_device_unregister(pdev); - drm_exit(&driver); -} - -module_init(mali_init); -module_exit(mali_exit); - -MODULE_INFO(vermagic, VERMAGIC_STRING); -MODULE_AUTHOR("ARM Ltd."); -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/mali/mali_drv.h b/drivers/gpu/arm/egl/x11/drm_module/mali_drm/mali/mali_drv.h deleted file mode 100644 index df86ab8f05695..0000000000000 --- a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/mali/mali_drv.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (C) 2010, 2012-2013, 2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained from Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef _MALI_DRV_H_ -#define _MALI_DRV_H_ - -#define DRIVER_AUTHOR "ARM" -#define DRIVER_NAME "mali_drm" -#define DRIVER_DESC "DRM module for Mali-200, Mali-400" -#define DRIVER_DATE "20100520" -#define DRIVER_MAJOR 0 -#define DRIVER_MINOR 1 -#define DRIVER_PATCHLEVEL 0 - -#include "drm_sman.h" - -typedef struct drm_mali_private -{ - drm_local_map_t *mmio; - unsigned int idle_fault; - struct drm_sman sman; - int vram_initialized; - unsigned long vram_offset; -} drm_mali_private_t; - -extern int mali_idle(struct drm_device *dev); -extern void mali_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv); -extern void mali_lastclose(struct drm_device *dev); -extern struct drm_ioctl_desc mali_ioctls[]; -extern int mali_max_ioctl; - -#endif /* _MALI_DRV_H_ */ diff --git a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/mali/mali_mm.c b/drivers/gpu/arm/egl/x11/drm_module/mali_drm/mali/mali_mm.c deleted file mode 100644 index fe289fc792f00..0000000000000 --- a/drivers/gpu/arm/egl/x11/drm_module/mali_drm/mali/mali_mm.c +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Copyright (C) 2010, 2012-2013, 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained from Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include "drmP.h" -#include "mali_drm.h" -#include "mali_drv.h" - -#define VIDEO_TYPE 0 -#define MEM_TYPE 1 - -#define MALI_MM_ALIGN_SHIFT 4 -#define MALI_MM_ALIGN_MASK ( (1 << MALI_MM_ALIGN_SHIFT) - 1) - - -static void *mali_sman_mm_allocate(void *private, unsigned long size, unsigned alignment) -{ - printk(KERN_ERR "DRM: %s\n", __func__); - return NULL; -} - -static void mali_sman_mm_free(void *private, void *ref) -{ - printk(KERN_ERR "DRM: %s\n", __func__); -} - -static void mali_sman_mm_destroy(void *private) -{ - printk(KERN_ERR "DRM: %s\n", __func__); -} - -static unsigned long mali_sman_mm_offset(void *private, void *ref) -{ - printk(KERN_ERR "DRM: %s\n", __func__); - return ~((unsigned long)ref); -} - -static int mali_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - drm_mali_private_t *dev_priv = dev->dev_private; - drm_mali_fb_t *fb = data; - int ret; - printk(KERN_ERR "DRM: %s\n", __func__); - - mutex_lock(&dev->struct_mutex); - { - struct drm_sman_mm sman_mm; - sman_mm.private = (void *)0xFFFFFFFF; - sman_mm.allocate = mali_sman_mm_allocate; - sman_mm.free = mali_sman_mm_free; - sman_mm.destroy = mali_sman_mm_destroy; - sman_mm.offset = mali_sman_mm_offset; - ret = drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm); - } - - if (ret) - { - DRM_ERROR("VRAM memory manager initialisation error\n"); - mutex_unlock(&dev->struct_mutex); - return ret; - } - - dev_priv->vram_initialized = 1; - dev_priv->vram_offset = fb->offset; - - mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size); - - return 0; -} - -static int mali_drm_alloc(struct drm_device *dev, struct drm_file *file_priv, void *data, int pool) -{ - drm_mali_private_t *dev_priv = dev->dev_private; - drm_mali_mem_t *mem = data; - int 
retval = 0; - struct drm_memblock_item *item; - printk(KERN_ERR "DRM: %s\n", __func__); - - mutex_lock(&dev->struct_mutex); - - if (0 == dev_priv->vram_initialized) - { - DRM_ERROR("Attempt to allocate from uninitialized memory manager.\n"); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - - mem->size = (mem->size + MALI_MM_ALIGN_MASK) >> MALI_MM_ALIGN_SHIFT; - item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0, - (unsigned long)file_priv); - - mutex_unlock(&dev->struct_mutex); - - if (item) - { - mem->offset = dev_priv->vram_offset + (item->mm->offset(item->mm, item->mm_info) << MALI_MM_ALIGN_SHIFT); - mem->free = item->user_hash.key; - mem->size = mem->size << MALI_MM_ALIGN_SHIFT; - } - else - { - mem->offset = 0; - mem->size = 0; - mem->free = 0; - retval = -ENOMEM; - } - - DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size, mem->offset); - - return retval; -} - -static int mali_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - drm_mali_private_t *dev_priv = dev->dev_private; - drm_mali_mem_t *mem = data; - int ret; - printk(KERN_ERR "DRM: %s\n", __func__); - - mutex_lock(&dev->struct_mutex); - ret = drm_sman_free_key(&dev_priv->sman, mem->free); - mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("free = 0x%lx\n", mem->free); - - return ret; -} - -static int mali_fb_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - printk(KERN_ERR "DRM: %s\n", __func__); - return mali_drm_alloc(dev, file_priv, data, VIDEO_TYPE); -} - -static int mali_ioctl_mem_init(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - drm_mali_private_t *dev_priv = dev->dev_private; - drm_mali_mem_t *mem = data; - int ret; - dev_priv = dev->dev_private; - printk(KERN_ERR "DRM: %s\n", __func__); - - mutex_lock(&dev->struct_mutex); - ret = drm_sman_set_range(&dev_priv->sman, MEM_TYPE, 0, mem->size >> MALI_MM_ALIGN_SHIFT); - - if (ret) - { - DRM_ERROR("MEM memory manager initialisation error\n"); - 
mutex_unlock(&dev->struct_mutex); - return ret; - } - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -static int mali_ioctl_mem_alloc(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - - printk(KERN_ERR "DRM: %s\n", __func__); - return mali_drm_alloc(dev, file_priv, data, MEM_TYPE); -} - -static drm_local_map_t *mem_reg_init(struct drm_device *dev) -{ - struct drm_map_list *entry; - drm_local_map_t *map; - printk(KERN_ERR "DRM: %s\n", __func__); - - list_for_each_entry(entry, &dev->maplist, head) - { - map = entry->map; - - if (!map) - { - continue; - } - - if (map->type == _DRM_REGISTERS) - { - return map; - } - } - return NULL; -} - -int mali_idle(struct drm_device *dev) -{ - drm_mali_private_t *dev_priv = dev->dev_private; - uint32_t idle_reg; - unsigned long end; - int i; - printk(KERN_ERR "DRM: %s\n", __func__); - - if (dev_priv->idle_fault) - { - return 0; - } - - return 0; -} - - -void mali_lastclose(struct drm_device *dev) -{ - drm_mali_private_t *dev_priv = dev->dev_private; - printk(KERN_ERR "DRM: %s\n", __func__); - - if (!dev_priv) - { - return; - } - - mutex_lock(&dev->struct_mutex); - drm_sman_cleanup(&dev_priv->sman); - dev_priv->vram_initialized = 0; - dev_priv->mmio = NULL; - mutex_unlock(&dev->struct_mutex); -} - -void mali_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv) -{ - drm_mali_private_t *dev_priv = dev->dev_private; - printk(KERN_ERR "DRM: %s\n", __func__); - - mutex_lock(&dev->struct_mutex); - - if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) - { - mutex_unlock(&dev->struct_mutex); - return; - } - - if (dev->driver->dma_quiescent) - { - dev->driver->dma_quiescent(dev); - } - - drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv); - mutex_unlock(&dev->struct_mutex); - return; -} - -struct drm_ioctl_desc mali_ioctls[] = -{ - DRM_IOCTL_DEF(DRM_MALI_FB_ALLOC, mali_fb_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_MALI_FB_FREE, mali_drm_free, DRM_AUTH), - 
DRM_IOCTL_DEF(DRM_MALI_MEM_INIT, mali_ioctl_mem_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_MALI_MEM_ALLOC, mali_ioctl_mem_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_MALI_MEM_FREE, mali_drm_free, DRM_AUTH), - DRM_IOCTL_DEF(DRM_MALI_FB_INIT, mali_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), -}; - -int mali_max_ioctl = DRM_ARRAY_SIZE(mali_ioctls); diff --git a/drivers/gpu/arm/egl/x11/drm_module/readme b/drivers/gpu/arm/egl/x11/drm_module/readme deleted file mode 100755 index 578d7254ab2a8..0000000000000 --- a/drivers/gpu/arm/egl/x11/drm_module/readme +++ /dev/null @@ -1,8 +0,0 @@ -How to build: -KDIR=/work/kernel-2.6.35.7 make - -How to install: -insmod drm/drm.ko -insmod mali_drm/mali_drm.ko -insmod trunk/src/devicedrv/mali/mali.ko - diff --git a/drivers/gpu/arm/mali/Kbuild b/drivers/gpu/arm/mali/Kbuild index 7e6da2713fe2c..ea4228adf1511 100755 --- a/drivers/gpu/arm/mali/Kbuild +++ b/drivers/gpu/arm/mali/Kbuild @@ -9,86 +9,24 @@ # # This file is called by the Linux build system. -############## Kasin Added, for platform. ################ - -ifndef CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH - ccflags-y += -DCONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y -endif -#ifeq ($(CONFIG_ARCH_MESON),y) -ccflags-y += -DCONFIG_MALI450=y -#ifeq ($(CONFIG_MALI450),m) -#ccflags-y += -DCONFIG_MALI450=y -#endif -#ifeq ($(CONFIG_MALI450),y) -#ccflags-y += -DCONFIG_MALI450=y -#endif -#ccflags-y += -DCONFIG_MALI_DT=y -ccflags-y += -DMESON_CPU_TYPE=0x80 -ccflags-y += -DMESON_CPU_TYPE_MESON6=0x60 -ccflags-y += -DMESON_CPU_TYPE_MESON6TVD=0x75 -ccflags-y += -DMESON_CPU_TYPE_MESON8=0x80 -ccflags-y += -DMESON_CPU_TYPE_MESON8B=0x8B -#endif - -##################### end Kasin Added. 
################### # set up defaults if not defined by the user TIMESTAMP ?= default -ifeq ($(CONFIG_UMP), m) - USING_UMP ?= 1 -else - USING_UMP ?= 0 -endif - -ifneq ($(KBUILD_SRC),) - ifneq ($(wildcard $(KBUILD_SRC)/$(src)),) - TOP_KBUILD_SRC := $(KBUILD_SRC)/ - endif -endif - OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 16 - -#USING_GPU_UTILIZATION ?= 0 -#PROFILING_SKIP_PP_JOBS ?= 0 -#PROFILING_SKIP_PP_AND_GP_JOBS ?= 0 -ifeq ($(CONFIG_MALI_DVFS),y) - ccflags-y += -DCONFIG_MALI_DVFS - USING_GPU_UTILIZATION=0 - USING_DVFS=1 -else - USING_GPU_UTILIZATION=0 - USING_DVFS=0 -endif +USING_GPU_UTILIZATION ?= 1 PROFILING_SKIP_PP_JOBS ?= 0 PROFILING_SKIP_PP_AND_GP_JOBS ?= 0 -############## Kasin Added, for platform. ################ - -ifeq ($(CONFIG_MALI400_DEBUG),y) - BUILD ?= debug -else - BUILD ?= release - #ldflags-y += --strip-debug - -endif -##################### end Kasin Added. ################### - MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP ?= 0 MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0 MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0 MALI_UPPER_HALF_SCHEDULING ?= 1 - -############## Kasin Added, useless now. ################ -# Get path to driver source from Linux build system -DRIVER_DIR=$(src) -##################### end Kasin Added. 
################### - MALI_ENABLE_CPU_CYCLES ?= 0 # For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases: # The ARM proprietary product will only include the license/proprietary directory # The GPL product will only include the license/gpl directory -ifeq ($(wildcard $(TOP_KBUILD_SRC)$(DRIVER_DIR)/linux/license/gpl/*),) - ccflags-y += -I$(TOP_KBUILD_SRC)$(DRIVER_DIR)/linux/license/proprietary +ifeq ($(wildcard $(src)/linux/license/gpl/*),) + ccflags-y += -I$(src)/linux/license/proprietary ifeq ($(CONFIG_MALI400_PROFILING),y) $(error Profiling is incompatible with non-GPL license) endif @@ -100,7 +38,7 @@ ifeq ($(wildcard $(TOP_KBUILD_SRC)$(DRIVER_DIR)/linux/license/gpl/*),) endif $(error Linux Device integration is incompatible with non-GPL license) else - ccflags-y += -I$(TOP_KBUILD_SRC)$(DRIVER_DIR)/linux/license/gpl + ccflags-y += -I$(src)/linux/license/gpl endif ifeq ($(USING_GPU_UTILIZATION), 1) @@ -109,6 +47,23 @@ ifeq ($(USING_GPU_UTILIZATION), 1) endif endif +ccflags-y += -DMESON_CPU_TYPE=0x80 +ccflags-y += -DMESON_CPU_TYPE_MESON6=0x60 +ccflags-y += -DMESON_CPU_TYPE_MESON6TVD=0x75 +ccflags-y += -DMESON_CPU_TYPE_MESON8=0x80 +ccflags-y += -DMESON_CPU_TYPE_MESON8B=0x8B + +ifeq ($(MALI_PLATFORM_FILES),) +ifeq ($(CONFIG_ARCH_EXYNOS4),y) +EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1 +export MALI_PLATFORM=exynos4 +export MALI_PLATFORM_FILES_BUILDIN = $(notdir $(wildcard $(src)/platform/$(MALI_PLATFORM)/*.c)) +export MALI_PLATFORM_FILES_ADD_PREFIX = $(addprefix platform/$(MALI_PLATFORM)/,$(MALI_PLATFORM_FILES_BUILDIN)) +endif +endif + +EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1 + mali-y += \ linux/mali_osk_atomics.o \ linux/mali_osk_irq.o \ @@ -122,16 +77,21 @@ mali-y += \ linux/mali_osk_mali.o \ linux/mali_osk_notification.o \ linux/mali_osk_time.o \ - linux/mali_osk_timers.o + linux/mali_osk_timers.o \ + linux/mali_osk_bitmap.o mali-y += linux/mali_memory.o linux/mali_memory_os_alloc.o mali-y += 
linux/mali_memory_external.o mali-y += linux/mali_memory_block_alloc.o +mali-y += linux/mali_memory_swap_alloc.o mali-y += \ linux/mali_memory_manager.o \ linux/mali_memory_virtual.o \ - linux/mali_memory_util.o + linux/mali_memory_util.o \ + linux/mali_memory_cow.o \ + linux/mali_memory_defer_bind.o + mali-y += \ linux/mali_ukk_mem.o \ linux/mali_ukk_gp.o \ @@ -140,6 +100,10 @@ mali-y += \ linux/mali_ukk_soft_job.o \ linux/mali_ukk_timeline.o +mali-$(CONFIG_MALI_DEVFREQ) += \ + linux/mali_devfreq.o \ + common/mali_pm_metrics.o + # Source files which always are included in a build mali-y += \ common/mali_kernel_core.o \ @@ -177,64 +141,28 @@ mali-y += \ linux/mali_osk_pm.o \ linux/mali_pmu_power_up_down.o \ __malidrv_build_info.o - -############## Kasin Added, for platform. ################ -ifeq (true,false) -mali-y += \ - platform/meson_main.o \ - platform/mali_pm_device.o \ - platform/mali_clock.o \ - platform/mpgpu.o -else + +# amlogic mali-y += \ platform/mali_pm_device.o \ platform/meson_bu/meson_main2.o \ platform/meson_bu/mali_clock.o \ platform/meson_bu/mpgpu.o \ - platform/meson_bu/platform_gx.o -endif -ifeq ($(CONFIG_MALI_DVFS),y) - mali-y += platform/meson_bu/mali_dvfs.o -else - mali-y += platform/meson_bu/scaling.o -endif + platform/meson_bu/platform_gx.o \ + platform/meson_bu/scaling.o -ifeq ($(TARGET_PLATFORM),meson_m400) -MALI_PLATFORM_FILES:= \ - platform/meson_m400/mali_fix.o \ - platform/meson_m400/mali_platform.o \ - platform/meson_m400/platform_mx.o +ifneq ($(wildcard $(src)/linux/mali_slp_global_lock.c),) + mali-y += linux/mali_slp_global_lock.o endif -ifeq ($(TARGET_PLATFORM),meson_m450) -ccflags-y += -DCONFIG_MALI450=y -mali-y += \ - platform/meson_m450/scaling.o - -mali-$(CONFIG_ARCH_MESON) += \ - platform/meson_m450/platform_m8.o - -mali-$(CONFIG_ARCH_MESON8) += \ - platform/meson_m450/platform_m8.o - -mali-$(CONFIG_ARCH_MESON6TVD) += \ - platform/meson_m450/platform_m6tvd.o - -mali-$(CONFIG_ARCH_MESON8B) += \ - 
platform/meson_m450/platform_m8b.o - -mali-$(CONFIG_ARCH_MESONG9TV) += \ - platform/meson_m450/platform_m8.o - -mali-$(CONFIG_ARCH_MESONG9BB) += \ - platform/meson_m450/platform_m8b.o -endif -##################### end Kasin Added. ################### - ifneq ($(MALI_PLATFORM_FILES),) mali-y += $(MALI_PLATFORM_FILES:.c=.o) endif +ifneq ($(MALI_PLATFORM_FILES_ADD_PREFIX),) + mali-y += $(MALI_PLATFORM_FILES_ADD_PREFIX:.c=.o) +endif + mali-$(CONFIG_MALI400_PROFILING) += linux/mali_ukk_profiling.o mali-$(CONFIG_MALI400_PROFILING) += linux/mali_osk_profiling.o @@ -242,6 +170,7 @@ mali-$(CONFIG_MALI400_INTERNAL_PROFILING) += linux/mali_profiling_internal.o tim ccflags-$(CONFIG_MALI400_INTERNAL_PROFILING) += -I$(src)/timestamp-$(TIMESTAMP) mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_dma_buf.o +mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_secure.o mali-$(CONFIG_SYNC) += linux/mali_sync.o ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android @@ -262,22 +191,26 @@ ccflags-y += -DMALI_STATE_TRACKING=1 ccflags-y += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB) ccflags-y += -DUSING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION) ccflags-y += -DMALI_ENABLE_CPU_CYCLES=$(MALI_ENABLE_CPU_CYCLES) -ccflags-y += -DMALI_FAKE_PLATFORM_DEVICE ifeq ($(MALI_UPPER_HALF_SCHEDULING),1) ccflags-y += -DMALI_UPPER_HALF_SCHEDULING endif -ccflags-$(CONFIG_MALI400_UMP) += -I$(src)/../ump/include/ump +#build-in include path is different +ifeq ($(MALI_PLATFORM_FILES),) +ccflags-$(CONFIG_MALI400_UMP) += -I$(src)/../ump/include/ +else +ccflags-$(CONFIG_MALI400_UMP) += -I$(src)/../../ump/include/ump +endif ccflags-$(CONFIG_MALI400_DEBUG) += -DDEBUG # Use our defines when compiling -ccflags-y += -I$(src) -I$(src)/include -I$(src)/common -I$(src)/linux -I$(src)/platform +ccflags-y += -I$(src) -I$(src)/include -I$(src)/common -I$(src)/linux -I$(src)/platform -Wno-date-time # Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is 
available -MALI_RELEASE_NAME=$(shell cat $(TOP_KBUILD_SRC)$(DRIVER_DIR)/.version 2> /dev/null) +MALI_RELEASE_NAME=$(shell cat $(src)/.version 2> /dev/null) -SVN_INFO = (cd $(TOP_KBUILD_SRC)$(DRIVER_DIR); svn info 2>/dev/null) +SVN_INFO = (cd $(src); svn info 2>/dev/null) ifneq ($(shell $(SVN_INFO) 2>/dev/null),) # SVN detected @@ -288,13 +221,13 @@ CHANGED_REVISION := $(shell $(SVN_INFO) | grep '^Last Changed Rev: ' | cut -d: - REPO_URL := $(shell $(SVN_INFO) | grep '^URL: ' | cut -d: -f2- | cut -b2-) else # SVN -GIT_REV := $(shell cd $(TOP_KBUILD_SRC)$(DRIVER_DIR); git describe --always 2>/dev/null) +GIT_REV := $(shell cd $(src); git describe --always 2>/dev/null) ifneq ($(GIT_REV),) # Git detected DRIVER_REV := $(MALI_RELEASE_NAME)-$(GIT_REV) -CHANGE_DATE := $(shell cd $(TOP_KBUILD_SRC)$(DRIVER_DIR); git log -1 --format="%ci") +CHANGE_DATE := $(shell cd $(src); git log -1 --format="%ci") CHANGED_REVISION := $(GIT_REV) -REPO_URL := $(shell cd $(TOP_KBUILD_SRC)$(DRIVER_DIR); git describe --all --always 2>/dev/null) +REPO_URL := $(shell cd $(src); git describe --all --always 2>/dev/null) else # Git # No Git or SVN detected @@ -307,7 +240,7 @@ endif ccflags-y += -DSVN_REV_STRING=\"$(DRIVER_REV)\" VERSION_STRINGS := -VERSION_STRINGS += API_VERSION=$(shell cd $(TOP_KBUILD_SRC)$(DRIVER_DIR); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 ) +VERSION_STRINGS += API_VERSION=$(shell cd $(src); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 ) VERSION_STRINGS += REPO_URL=$(REPO_URL) VERSION_STRINGS += REVISION=$(DRIVER_REV) VERSION_STRINGS += CHANGED_REVISION=$(CHANGED_REVISION) @@ -330,5 +263,5 @@ VERSION_STRINGS += USING_DVFS=$(CONFIG_MALI_DVFS) VERSION_STRINGS += MALI_UPPER_HALF_SCHEDULING=$(MALI_UPPER_HALF_SCHEDULING) # Create file with Mali driver configuration -$(TOP_KBUILD_SRC)$(DRIVER_DIR)/__malidrv_build_info.c: - @echo 'const char 
*__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(TOP_KBUILD_SRC)$(DRIVER_DIR)/__malidrv_build_info.c +$(src)/__malidrv_build_info.c: + @echo 'const char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(src)/__malidrv_build_info.c diff --git a/drivers/gpu/arm/mali/Kconfig b/drivers/gpu/arm/mali/Kconfig index 09687d5be8eeb..7b4e9fb2cf90c 100755 --- a/drivers/gpu/arm/mali/Kconfig +++ b/drivers/gpu/arm/mali/Kconfig @@ -1,8 +1,6 @@ -menu "Mali GPU OpenGL device driver" config MALI400 tristate "Mali-300/400/450 support" depends on ARM || ARM64 - default m select DMA_SHARED_BUFFER ---help--- This enables support for the ARM Mali-300, Mali-400, and Mali-450 @@ -11,31 +9,29 @@ config MALI400 To compile this driver as a module, choose M here: the module will be called mali. +config MALI450 + bool "Enable Mali-450 support" + depends on MALI400 + ---help--- + This enables support for Mali-450 specific features. + +config MALI470 + bool "Enable Mali-470 support" + depends on MALI400 + ---help--- + This enables support for Mali-470 specific features. + config MALI400_DEBUG bool "Enable debug in Mali driver" depends on MALI400 - default n ---help--- This enabled extra debug checks and messages in the Mali driver. -config MALI400_PROFILING_EXTRA_SUPPORT - bool "Select other items in kernel to support PROFILING." - depends on MALI400_PROFILING - select PROFILING - select FTRACE - select PERF_EVENTS - select ENABLE_DEFAULT_TRACERS - select DEBUG_MUTEXES - select HIGH_RES_TIMERS - select HW_PERF_EVENTS - select CPU_FREQ - select MALI400_DEBUG - config MALI400_PROFILING bool "Enable Mali profiling" depends on MALI400 select TRACEPOINTS - default n + default y ---help--- This enables gator profiling of Mali GPU events. 
@@ -54,8 +50,8 @@ config MALI400_UMP config MALI_DVFS bool "Enable Mali dynamically frequency change" - depends on MALI400 - default n + depends on MALI400 && !MALI_DEVFREQ + default y ---help--- This enables support for dynamic change frequency of Mali with the goal of lowering power consumption. @@ -80,14 +76,6 @@ config MALI_SHARED_INTERRUPTS works when the GPU is not using shared interrupts, but might have a slight performance impact. -if ARCH_MESON6 -config MESON6_GPU_EXTRA - bool "M6 fix" - depends on MALI400 - default y - select MALI_SHARED_INTERRUPTS -endif - config MALI_PMU_PARALLEL_POWER_UP bool "Power up Mali PMU domains in parallel" depends on MALI400 @@ -108,6 +96,17 @@ config MALI_DT device tree is enabled in kernel and corresponding hardware description is implemented properly in device DTS file. +config MALI_DEVFREQ + bool "Using devfreq to tuning frequency" + depends on MALI400 && PM_DEVFREQ + default n + ---help--- + Support devfreq for Mali. + + Using the devfreq framework and, by default, the simpleondemand + governor, the frequency of Mali will be dynamically selected from the + available OPPs. + config MALI_QUIET bool "Make Mali driver very quiet" depends on MALI400 && !MALI400_DEBUG @@ -116,4 +115,3 @@ config MALI_QUIET This forces the Mali driver to never print any messages. If unsure, say N. -endmenu diff --git a/drivers/gpu/arm/mali/Makefile b/drivers/gpu/arm/mali/Makefile index a0bef307dbfbc..7f3f8aa940c3f 100755 --- a/drivers/gpu/arm/mali/Makefile +++ b/drivers/gpu/arm/mali/Makefile @@ -1,5 +1,5 @@ # -# Copyright (C) 2010-2015 ARM Limited. All rights reserved. +# Copyright (C) 2010-2016 ARM Limited. All rights reserved. # # This program is free software and is provided to you under the terms of the GNU General Public License version 2 # as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -16,6 +16,8 @@ MALI_HEATMAPS_ENABLED ?= 0 MALI_DMA_BUF_MAP_ON_ATTACH ?= 1 MALI_PMU_PARALLEL_POWER_UP ?= 0 USING_DT ?= 0 +MALI_MEM_SWAP_TRACKING ?= 0 +USING_DEVFREQ ?= 0 # The Makefile sets up "arch" based on the CONFIG, creates the version info # string and the __malidrv_build_info.c file, and then call the Linux build @@ -102,9 +104,11 @@ endif # Set up build config export CONFIG_MALI400=m export CONFIG_MALI450=y +export CONFIG_MALI470=y export EXTRA_DEFINES += -DCONFIG_MALI400=1 export EXTRA_DEFINES += -DCONFIG_MALI450=1 +export EXTRA_DEFINES += -DCONFIG_MALI470=1 ifneq ($(MALI_PLATFORM),) export EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1 @@ -139,7 +143,6 @@ export EXTRA_DEFINES += -DCONFIG_MALI_SHARED_INTERRUPTS endif ifeq ($(USING_DVFS),1) - xxxyyyy export CONFIG_MALI_DVFS=y export EXTRA_DEFINES += -DCONFIG_MALI_DVFS endif @@ -156,6 +159,15 @@ export EXTRA_DEFINES += -DCONFIG_MALI_DT endif endif +ifeq ($(USING_DEVFREQ), 1) +ifdef CONFIG_PM_DEVFREQ +export CONFIG_MALI_DEVFREQ=y +export EXTRA_DEFINES += -DCONFIG_MALI_DEVFREQ=1 +else +$(warning "You want to support DEVFREQ but kernel didn't support DEVFREQ.") +endif +endif + ifneq ($(BUILD),release) # Debug export CONFIG_MALI400_DEBUG=y @@ -171,6 +183,10 @@ ifeq ($(MALI_SKIP_JOBS),1) EXTRA_DEFINES += -DPROFILING_SKIP_PP_JOBS=1 -DPROFILING_SKIP_GP_JOBS=1 endif +ifeq ($(MALI_MEM_SWAP_TRACKING),1) +EXTRA_DEFINES += -DMALI_MEM_SWAP_TRACKING=1 +endif + all: $(UMP_SYMVERS_FILE) $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules @rm $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o diff --git a/drivers/gpu/arm/mali/Module.symvers b/drivers/gpu/arm/mali/Module.symvers deleted file mode 100644 index b8aed7ff8bfbe..0000000000000 --- a/drivers/gpu/arm/mali/Module.symvers +++ /dev/null @@ -1,5 +0,0 @@ -0x00000000 mali_set_user_setting /home/user/LibreELEC.tv/build.LibreELEC-S905.aarch64-8.0.2/gpu-aml-9b0fbbc/mali/mali EXPORT_SYMBOL -0x00000000 mali_perf_set_num_pp_cores 
/home/user/LibreELEC.tv/build.LibreELEC-S905.aarch64-8.0.2/gpu-aml-9b0fbbc/mali/mali EXPORT_SYMBOL -0x00000000 mali_dev_resume /home/user/LibreELEC.tv/build.LibreELEC-S905.aarch64-8.0.2/gpu-aml-9b0fbbc/mali/mali EXPORT_SYMBOL -0x00000000 mali_dev_pause /home/user/LibreELEC.tv/build.LibreELEC-S905.aarch64-8.0.2/gpu-aml-9b0fbbc/mali/mali EXPORT_SYMBOL -0x00000000 mali_get_user_setting /home/user/LibreELEC.tv/build.LibreELEC-S905.aarch64-8.0.2/gpu-aml-9b0fbbc/mali/mali EXPORT_SYMBOL diff --git a/drivers/gpu/arm/mali/__malidrv_build_info.c b/drivers/gpu/arm/mali/__malidrv_build_info.c index fb54f1bb9cdb6..9b728409f0369 100644 --- a/drivers/gpu/arm/mali/__malidrv_build_info.c +++ b/drivers/gpu/arm/mali/__malidrv_build_info.c @@ -1 +1 @@ -const char *__malidrv_build_info(void) { return "malidrv: API_VERSION=800 REPO_URL= REVISION= CHANGED_REVISION= CHANGE_DATE= BUILD_DATE=Ð’Ñ‚. апр. 18 10:44:53 MSK 2017 BUILD=release TARGET_PLATFORM= MALI_PLATFORM= KDIR= OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=16 USING_UMP= USING_PROFILING= USING_INTERNAL_PROFILING= USING_GPU_UTILIZATION=0 USING_DVFS= MALI_UPPER_HALF_SCHEDULING=1";} +const char *__malidrv_build_info(void) { return "malidrv: API_VERSION=800 REPO_URL=heads/odroidc2-3.14.y-android-3-gd5a12c1 REVISION=-v3.14.23-2647-gd5a12c1 CHANGED_REVISION=v3.14.23-2647-gd5a12c1 CHANGE_DATE=2016-01-21 14:20:54 +0900 BUILD_DATE=2016. 01. 21. (목) 14:55:02 KST BUILD=release TARGET_PLATFORM= MALI_PLATFORM= KDIR= OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=16 USING_UMP= USING_PROFILING= USING_INTERNAL_PROFILING= USING_GPU_UTILIZATION=1 USING_DVFS= MALI_UPPER_HALF_SCHEDULING=1";} diff --git a/drivers/gpu/arm/mali/common/mali_broadcast.c b/drivers/gpu/arm/mali/common/mali_broadcast.c index 136db61ace4a4..4c4b2bc9ee964 100755 --- a/drivers/gpu/arm/mali/common/mali_broadcast.c +++ b/drivers/gpu/arm/mali/common/mali_broadcast.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2014, 2016 ARM Limited. 
All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_broadcast.h b/drivers/gpu/arm/mali/common/mali_broadcast.h index efce44142ee9f..e12e8a2b59029 100755 --- a/drivers/gpu/arm/mali/common/mali_broadcast.h +++ b/drivers/gpu/arm/mali/common/mali_broadcast.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_control_timer.c b/drivers/gpu/arm/mali/common/mali_control_timer.c index d0dd95ac1b39d..fc6ceb43fbbb2 100755 --- a/drivers/gpu/arm/mali/common/mali_control_timer.c +++ b/drivers/gpu/arm/mali/common/mali_control_timer.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2012, 2014-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2012, 2014-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_control_timer.h b/drivers/gpu/arm/mali/common/mali_control_timer.h index 4f919ecfd70a5..1265390348a4a 100755 --- a/drivers/gpu/arm/mali/common/mali_control_timer.h +++ b/drivers/gpu/arm/mali/common/mali_control_timer.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2012, 2014-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2012, 2014, 2016 ARM Limited. 
All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_dlbu.c b/drivers/gpu/arm/mali/common/mali_dlbu.c index efe1ab3ab5cc3..4f2a121fc0e6f 100755 --- a/drivers/gpu/arm/mali/common/mali_dlbu.c +++ b/drivers/gpu/arm/mali/common/mali_dlbu.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_dlbu.h b/drivers/gpu/arm/mali/common/mali_dlbu.h index 6b068884bd499..c031f1150b83a 100755 --- a/drivers/gpu/arm/mali/common/mali_dlbu.h +++ b/drivers/gpu/arm/mali/common/mali_dlbu.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_dvfs_policy.c b/drivers/gpu/arm/mali/common/mali_dvfs_policy.c index 12ba069ec13bd..1094f9df9dbe0 100755 --- a/drivers/gpu/arm/mali/common/mali_dvfs_policy.c +++ b/drivers/gpu/arm/mali/common/mali_dvfs_policy.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2012, 2014-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2012, 2014, 2016 ARM Limited. All rights reserved. 
* * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_dvfs_policy.h b/drivers/gpu/arm/mali/common/mali_dvfs_policy.h index 55e4b354c1a94..17704269b7aae 100755 --- a/drivers/gpu/arm/mali/common/mali_dvfs_policy.h +++ b/drivers/gpu/arm/mali/common/mali_dvfs_policy.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2012, 2014-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2012, 2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_executor.c b/drivers/gpu/arm/mali/common/mali_executor.c index 28bbc1d41c8f9..305690c402aaf 100755 --- a/drivers/gpu/arm/mali/common/mali_executor.c +++ b/drivers/gpu/arm/mali/common/mali_executor.c @@ -21,6 +21,7 @@ #include "mali_timeline.h" #include "mali_osk_profiling.h" #include "mali_session.h" +#include "mali_osk_mali.h" /* * If dma_buf with map on demand is used, we defer job deletion and job queue @@ -118,8 +119,6 @@ static _mali_osk_wait_queue_t *executor_notify_core_change_wait_queue = NULL; /* * ---------- Forward declaration of static functions ---------- */ -static void mali_executor_lock(void); -static void mali_executor_unlock(void); static mali_bool mali_executor_is_suspended(void *data); static mali_bool mali_executor_is_working(void); static void mali_executor_disable_empty_virtual(void); @@ -612,6 +611,10 @@ _mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group, struct mali_gp_job *job; mali_bool success; + if (MALI_TRUE == time_out) { + mali_group_dump_status(group); + } + success = 
(int_result != MALI_INTERRUPT_RESULT_ERROR) ? MALI_TRUE : MALI_FALSE; @@ -688,9 +691,6 @@ _mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, } #else MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result); - if (!mali_group_has_timed_out(group)) { - MALI_DEBUG_ASSERT(!mali_group_pp_is_active(group)); - } #endif /* We should now have a real interrupt to handle */ @@ -712,6 +712,10 @@ _mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, struct mali_pp_job *job = NULL; mali_bool success; + if (MALI_TRUE == time_out) { + mali_group_dump_status(group); + } + success = (int_result == MALI_INTERRUPT_RESULT_SUCCESS) ? MALI_TRUE : MALI_FALSE; @@ -797,6 +801,7 @@ _mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group, group->mmu->hw_core.description)); MALI_DEBUG_PRINT(3, ("Executor: MMU rawstat = 0x%08X, MMU status = 0x%08X\n", mali_mmu_get_rawstat(group->mmu), status)); + mali_mmu_pagedir_diag(mali_session_get_page_directory(group->session), fault_address); #endif mali_executor_complete_group(group, MALI_FALSE, &gp_job, &pp_job); @@ -1336,13 +1341,13 @@ _mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s * ---------- Implementation of static functions ---------- */ -static void mali_executor_lock(void) +void mali_executor_lock(void) { _mali_osk_spinlock_irq_lock(mali_executor_lock_obj); MALI_DEBUG_PRINT(5, ("Executor: lock taken\n")); } -static void mali_executor_unlock(void) +void mali_executor_unlock(void) { MALI_DEBUG_PRINT(5, ("Executor: Releasing lock\n")); _mali_osk_spinlock_irq_unlock(mali_executor_lock_obj); @@ -1442,23 +1447,23 @@ static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group) static mali_bool mali_executor_has_virtual_group(void) { -#if defined(CONFIG_MALI450) +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) return (NULL != virtual_group) ? 
MALI_TRUE : MALI_FALSE; #else return MALI_FALSE; -#endif /* defined(CONFIG_MALI450) */ +#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */ } static mali_bool mali_executor_virtual_group_is_usable(void) { -#if defined(CONFIG_MALI450) +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); return ((EXEC_STATE_INACTIVE == virtual_group_state || - EXEC_STATE_IDLE == virtual_group_state)&&(virtual_group->state != MALI_GROUP_STATE_ACTIVATION_PENDING)) ? + EXEC_STATE_IDLE == virtual_group_state) && (virtual_group->state != MALI_GROUP_STATE_ACTIVATION_PENDING)) ? MALI_TRUE : MALI_FALSE; #else return MALI_FALSE; -#endif /* defined(CONFIG_MALI450) */ +#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */ } static mali_bool mali_executor_tackle_gp_bound(void) @@ -1479,6 +1484,75 @@ static mali_bool mali_executor_tackle_gp_bound(void) return MALI_FALSE; } +static mali_bool mali_executor_schedule_is_early_out(mali_bool *gpu_secure_mode_is_needed) +{ + struct mali_pp_job *next_pp_job_to_start = NULL; + struct mali_group *group; + struct mali_group *tmp_group; + struct mali_pp_job *physical_pp_job_working = NULL; + struct mali_pp_job *virtual_pp_job_working = NULL; + mali_bool gpu_working_in_protected_mode = MALI_FALSE; + mali_bool gpu_working_in_non_protected_mode = MALI_FALSE; + + MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + + *gpu_secure_mode_is_needed = MALI_FALSE; + + /* Check if the gpu secure mode is supported, exit if not.*/ + if (MALI_FALSE == _mali_osk_gpu_secure_mode_is_supported()) { + return MALI_FALSE; + } + + /* Check if need to set gpu secure mode for the next pp job, + * get the next pp job that will be scheduled if exist. 
+ */ + next_pp_job_to_start = mali_scheduler_job_pp_next(); + + /* Check current pp physical/virtual running job is protected job or not if exist.*/ + _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working, + struct mali_group, executor_list) { + physical_pp_job_working = group->pp_running_job; + break; + } + + if (EXEC_STATE_WORKING == virtual_group_state) { + virtual_pp_job_working = virtual_group->pp_running_job; + } + + if (NULL != physical_pp_job_working) { + if (MALI_TRUE == mali_pp_job_is_protected_job(physical_pp_job_working)) { + gpu_working_in_protected_mode = MALI_TRUE; + } else { + gpu_working_in_non_protected_mode = MALI_TRUE; + } + } else if (NULL != virtual_pp_job_working) { + if (MALI_TRUE == mali_pp_job_is_protected_job(virtual_pp_job_working)) { + gpu_working_in_protected_mode = MALI_TRUE; + } else { + gpu_working_in_non_protected_mode = MALI_TRUE; + } + } else if (EXEC_STATE_WORKING == gp_group_state) { + gpu_working_in_non_protected_mode = MALI_TRUE; + } + + /* If the next pp job is the protected pp job.*/ + if ((NULL != next_pp_job_to_start) && MALI_TRUE == mali_pp_job_is_protected_job(next_pp_job_to_start)) { + /* if gp is working or any non-protected pp job is working now, unable to schedule protected pp job. */ + if (MALI_TRUE == gpu_working_in_non_protected_mode) + return MALI_TRUE; + + *gpu_secure_mode_is_needed = MALI_TRUE; + return MALI_FALSE; + + } + + if (MALI_TRUE == gpu_working_in_protected_mode) { + /* Unable to schedule non-protected pp job/gp job if exist protected pp running jobs*/ + return MALI_TRUE; + } + + return MALI_FALSE; +} /* * This is where jobs are actually started. 
*/ @@ -1489,6 +1563,7 @@ static void mali_executor_schedule(void) u32 num_physical_to_process = 0; mali_bool trigger_pm_update = MALI_FALSE; mali_bool deactivate_idle_group = MALI_TRUE; + mali_bool gpu_secure_mode_is_needed = MALI_FALSE; /* Physical groups + jobs to start in this function */ struct mali_group *groups_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS]; @@ -1512,9 +1587,16 @@ static void mali_executor_schedule(void) /* Lock needed in order to safely handle the job queues */ mali_scheduler_lock(); - /* 1. Activate gp firstly if have gp job queued. */ - if (EXEC_STATE_INACTIVE == gp_group_state && - 0 < mali_scheduler_job_gp_count()) { + /* 1. Check the schedule if need to early out. */ + if (MALI_TRUE == mali_executor_schedule_is_early_out(&gpu_secure_mode_is_needed)) { + mali_scheduler_unlock(); + return; + } + + /* 2. Activate gp firstly if have gp job queued. */ + if ((EXEC_STATE_INACTIVE == gp_group_state) + && (0 < mali_scheduler_job_gp_count()) + && (gpu_secure_mode_is_needed == MALI_FALSE)) { enum mali_group_state state = mali_group_activate(gp_group); @@ -1526,9 +1608,9 @@ static void mali_executor_schedule(void) } } - /* 2. Prepare as many physical groups as needed/possible */ + /* 3. Prepare as many physical groups as needed/possible */ - num_physical_needed = mali_scheduler_job_physical_head_count(); + num_physical_needed = mali_scheduler_job_physical_head_count(gpu_secure_mode_is_needed); /* On mali-450 platform, we don't need to enter in this block frequently. */ if (0 < num_physical_needed) { @@ -1545,7 +1627,7 @@ static void mali_executor_schedule(void) if (0 < num_physical_needed) { - /* 2.1. Activate groups which are inactive */ + /* 3.1. Activate groups which are inactive */ struct mali_group *group; struct mali_group *temp; @@ -1577,7 +1659,7 @@ static void mali_executor_schedule(void) if (mali_executor_virtual_group_is_usable()) { /* - * 2.2. And finally, steal and activate groups + * 3.2. 
And finally, steal and activate groups * from virtual group if we need even more */ while (0 < num_physical_needed) { @@ -1620,7 +1702,7 @@ static void mali_executor_schedule(void) } } - /* 2.3. Assign physical jobs to groups */ + /* 3.3. Assign physical jobs to groups */ if (0 < num_physical_to_process) { struct mali_group *group; @@ -1635,24 +1717,30 @@ static void mali_executor_schedule(void) MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS); MALI_DEBUG_ASSERT(0 < - mali_scheduler_job_physical_head_count()); + mali_scheduler_job_physical_head_count(gpu_secure_mode_is_needed)); - if (mali_executor_hint_is_enabled( - MALI_EXECUTOR_HINT_GP_BOUND)) { - if (MALI_TRUE == mali_executor_tackle_gp_bound()) { - /* - * We're gp bound, - * don't start this right now. - */ - deactivate_idle_group = MALI_FALSE; - num_physical_to_process = 0; - break; - } + /* If the next pp job is non-protected, check if gp bound now. */ + if ((MALI_FALSE == gpu_secure_mode_is_needed) + && (mali_executor_hint_is_enabled(MALI_EXECUTOR_HINT_GP_BOUND)) + && (MALI_TRUE == mali_executor_tackle_gp_bound())) { + /* + * We're gp bound, + * don't start this right now. + */ + deactivate_idle_group = MALI_FALSE; + num_physical_to_process = 0; + break; } job = mali_scheduler_job_pp_physical_get( &sub_job); + if (MALI_FALSE == gpu_secure_mode_is_needed) { + MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_protected_job(job)); + } else { + MALI_DEBUG_ASSERT(MALI_TRUE == mali_pp_job_is_protected_job(job)); + } + MALI_DEBUG_ASSERT_POINTER(job); MALI_DEBUG_ASSERT(sub_job <= MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS); @@ -1679,8 +1767,7 @@ static void mali_executor_schedule(void) } } - - /* 3. Deactivate idle pp group , must put deactive here before active vitual group + /* 4. 
Deactivate idle pp group , must put deactive here before active vitual group * for cover case first only has physical job in normal queue but group inactive, * so delay the job start go to active group, when group activated, * call scheduler again, but now if we get high queue virtual job, @@ -1692,34 +1779,41 @@ static void mali_executor_schedule(void) trigger_pm_update = MALI_TRUE; } - /* 4. Activate virtual group, if needed */ - + /* 5. Activate virtual group, if needed */ if (EXEC_STATE_INACTIVE == virtual_group_state && - 0 < mali_scheduler_job_next_is_virtual()) { - enum mali_group_state state = - mali_group_activate(virtual_group); - if (MALI_GROUP_STATE_ACTIVE == state) { - /* Set virtual group state to idle */ - virtual_group_state = EXEC_STATE_IDLE; - } else { - trigger_pm_update = MALI_TRUE; + MALI_TRUE == mali_scheduler_job_next_is_virtual()) { + struct mali_pp_job *virtual_job = mali_scheduler_job_pp_virtual_peek(); + if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == mali_pp_job_is_protected_job(virtual_job)) + || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == mali_pp_job_is_protected_job(virtual_job))) { + enum mali_group_state state = + mali_group_activate(virtual_group); + if (MALI_GROUP_STATE_ACTIVE == state) { + /* Set virtual group state to idle */ + virtual_group_state = EXEC_STATE_IDLE; + } else { + trigger_pm_update = MALI_TRUE; + } } } - /* 5. To power up group asap, we trigger pm update here. */ + /* 6. To power up group asap, we trigger pm update here. */ if (MALI_TRUE == trigger_pm_update) { trigger_pm_update = MALI_FALSE; mali_pm_update_async(); } - /* 6. Assign jobs to idle virtual group (or deactivate if no job) */ + /* 7. 
Assign jobs to idle virtual group (or deactivate if no job) */ if (EXEC_STATE_IDLE == virtual_group_state) { - if (0 < mali_scheduler_job_next_is_virtual()) { - virtual_job_to_start = - mali_scheduler_job_pp_virtual_get(); - virtual_group_state = EXEC_STATE_WORKING; + if (MALI_TRUE == mali_scheduler_job_next_is_virtual()) { + struct mali_pp_job *virtual_job = mali_scheduler_job_pp_virtual_peek(); + if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == mali_pp_job_is_protected_job(virtual_job)) + || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == mali_pp_job_is_protected_job(virtual_job))) { + virtual_job_to_start = + mali_scheduler_job_pp_virtual_get(); + virtual_group_state = EXEC_STATE_WORKING; + } } else if (!mali_timeline_has_virtual_pp_job()) { virtual_group_state = EXEC_STATE_INACTIVE; @@ -1729,9 +1823,9 @@ static void mali_executor_schedule(void) } } - /* 7. Assign job to idle GP group (or deactivate if no job) */ + /* 8. Assign job to idle GP group (or deactivate if no job) */ - if (EXEC_STATE_IDLE == gp_group_state) { + if (EXEC_STATE_IDLE == gp_group_state && MALI_FALSE == gpu_secure_mode_is_needed) { if (0 < mali_scheduler_job_gp_count()) { gp_job_to_start = mali_scheduler_job_gp_get(); gp_group_state = EXEC_STATE_WORKING; @@ -1743,12 +1837,11 @@ static void mali_executor_schedule(void) } } - /* 8. We no longer need the schedule/queue lock */ + /* 9. We no longer need the schedule/queue lock */ mali_scheduler_unlock(); - /* 9. start jobs */ - + /* 10. start jobs */ if (NULL != virtual_job_to_start) { MALI_DEBUG_ASSERT(!mali_group_pp_is_active(virtual_group)); mali_group_start_pp_job(virtual_group, @@ -1770,7 +1863,7 @@ static void mali_executor_schedule(void) mali_group_start_gp_job(gp_group, gp_job_to_start); } - /* 10. Trigger any pending PM updates */ + /* 11. 
Trigger any pending PM updates */ if (MALI_TRUE == trigger_pm_update) { mali_pm_update_async(); } @@ -2136,7 +2229,7 @@ static void mali_executor_notify_core_change(u32 num_cores) { mali_bool done = MALI_FALSE; - if (mali_is_mali450()) { + if (mali_is_mali450() || mali_is_mali470()) { return; } @@ -2238,7 +2331,7 @@ static void mali_executor_wq_notify_core_change(void *arg) { MALI_IGNORE(arg); - if (mali_is_mali450()) { + if (mali_is_mali450() || mali_is_mali470()) { return; } @@ -2511,3 +2604,85 @@ static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_gr return trigger_pm_update; } + +void mali_executor_running_status_print(void) +{ + struct mali_group *group = NULL; + struct mali_group *temp = NULL; + + MALI_PRINT(("GP running job: %p\n", gp_group->gp_running_job)); + if ((gp_group->gp_core) && (gp_group->is_working)) { + mali_group_dump_status(gp_group); + } + MALI_PRINT(("Physical PP groups in WORKING state (count = %u):\n", group_list_working_count)); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) { + MALI_PRINT(("PP running job: %p, subjob %d \n", group->pp_running_job, group->pp_running_sub_job)); + mali_group_dump_status(group); + } + MALI_PRINT(("Physical PP groups in INACTIVE state (count = %u):\n", group_list_inactive_count)); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) { + MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off")); + MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description)); + } + MALI_PRINT(("Physical PP groups in IDLE state (count = %u):\n", group_list_idle_count)); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) { + MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? 
"On" : "Off")); + MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description)); + } + MALI_PRINT(("Physical PP groups in DISABLED state (count = %u):\n", group_list_disabled_count)); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) { + MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off")); + MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description)); + } + + if (mali_executor_has_virtual_group()) { + MALI_PRINT(("Virtual group running job: %p\n", virtual_group->pp_running_job)); + MALI_PRINT(("Virtual group status: %d\n", virtual_group_state)); + MALI_PRINT(("Virtual group->status: %d\n", virtual_group->state)); + MALI_PRINT(("\tSW power: %s\n", virtual_group->power_is_on ? "On" : "Off")); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &virtual_group->group_list, + struct mali_group, group_list) { + int i = 0; + MALI_PRINT(("\tchild group(%s) running job: %p\n", group->pp_core->hw_core.description, group->pp_running_job)); + MALI_PRINT(("\tchild group(%s)->status: %d\n", group->pp_core->hw_core.description, group->state)); + MALI_PRINT(("\tchild group(%s) SW power: %s\n", group->pp_core->hw_core.description, group->power_is_on ? "On" : "Off")); + if (group->pm_domain) { + MALI_PRINT(("\tPower domain: id %u\n", mali_pm_domain_get_id(group->pm_domain))); + MALI_PRINT(("\tMask:0x%04x \n", mali_pm_domain_get_mask(group->pm_domain))); + MALI_PRINT(("\tUse-count:%u \n", mali_pm_domain_get_use_count(group->pm_domain))); + MALI_PRINT(("\tCurrent power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_current_mask()) ? "On" : "Off")); + MALI_PRINT(("\tWanted power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_wanted_mask()) ? 
"On" : "Off")); + } + + for (i = 0; i < 2; i++) { + if (NULL != group->l2_cache_core[i]) { + struct mali_pm_domain *domain; + domain = mali_l2_cache_get_pm_domain(group->l2_cache_core[i]); + MALI_PRINT(("\t L2(index %d) group SW power: %s\n", i, group->l2_cache_core[i]->power_is_on ? "On" : "Off")); + if (domain) { + MALI_PRINT(("\tL2 Power domain: id %u\n", mali_pm_domain_get_id(domain))); + MALI_PRINT(("\tL2 Mask:0x%04x \n", mali_pm_domain_get_mask(domain))); + MALI_PRINT(("\tL2 Use-count:%u \n", mali_pm_domain_get_use_count(domain))); + MALI_PRINT(("\tL2 Current power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_current_mask()) ? "On" : "Off")); + MALI_PRINT(("\tL2 Wanted power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_wanted_mask()) ? "On" : "Off")); + } + } + } + } + if (EXEC_STATE_WORKING == virtual_group_state) { + mali_group_dump_status(virtual_group); + } + } +} + +void mali_executor_status_dump(void) +{ + mali_executor_lock(); + mali_scheduler_lock(); + + /* print schedule queue status */ + mali_scheduler_gp_pp_job_queue_print(); + + mali_scheduler_unlock(); + mali_executor_unlock(); +} diff --git a/drivers/gpu/arm/mali/common/mali_executor.h b/drivers/gpu/arm/mali/common/mali_executor.h index 448540594b54b..927a384583223 100755 --- a/drivers/gpu/arm/mali/common/mali_executor.h +++ b/drivers/gpu/arm/mali/common/mali_executor.h @@ -58,7 +58,6 @@ void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferr _mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group, mali_bool in_upper_half); _mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, mali_bool in_upper_half); _mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group, mali_bool in_upper_half); - void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups); void mali_executor_group_power_down(struct mali_group *groups[], u32 num_groups); @@ -96,4 +95,8 @@ MALI_STATIC_INLINE 
mali_bool mali_executor_hint_is_enabled(mali_executor_hint hi return mali_executor_hints[hint]; } +void mali_executor_running_status_print(void); +void mali_executor_status_dump(void); +void mali_executor_lock(void); +void mali_executor_unlock(void); #endif /* __MALI_EXECUTOR_H__ */ diff --git a/drivers/gpu/arm/mali/common/mali_gp.c b/drivers/gpu/arm/mali/common/mali_gp.c index 3893cb0a25c48..a918552162a80 100755 --- a/drivers/gpu/arm/mali/common/mali_gp.c +++ b/drivers/gpu/arm/mali/common/mali_gp.c @@ -12,6 +12,7 @@ #include "mali_hw_core.h" #include "mali_group.h" #include "mali_osk.h" +#include "mali_osk_mali.h" #include "regs/mali_gp_regs.h" #include "mali_kernel_common.h" #include "mali_kernel_core.h" @@ -19,8 +20,6 @@ #include "mali_osk_profiling.h" #endif -#include - static struct mali_gp_core *mali_global_gp_core = NULL; /* Interrupt handlers */ @@ -111,8 +110,6 @@ _mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core) if (MALI_REG_POLL_COUNT_SLOW == i) { MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description)); - if (mali_gp_reset_fail < 65533) - mali_gp_reset_fail++; return _MALI_OSK_ERR_FAULT; } return _MALI_OSK_ERR_OK; @@ -120,7 +117,7 @@ _mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core) void mali_gp_hard_reset(struct mali_gp_core *core) { - const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW; + const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_LIMIT; const u32 reset_invalid_value = 0xC0FFE000; const u32 reset_check_value = 0xC01A0000; const u32 reset_default_value = 0; @@ -180,8 +177,6 @@ _mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core) if (i == MALI_REG_POLL_COUNT_FAST) { MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, rawstat: 0x%08x\n", core->hw_core.description, rawstat)); - if (mali_gp_reset_fail < 65533) - mali_gp_reset_fail++; return _MALI_OSK_ERR_FAULT; } @@ -205,6 +200,11 @@ void mali_gp_job_start(struct 
mali_gp_core *core, struct mali_gp_job *job) u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job); u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job); + /* Disable gpu secure mode. */ + if (MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) { + _mali_osk_gpu_secure_mode_disable(); + } + MALI_DEBUG_ASSERT_POINTER(core); if (mali_gp_job_has_vs_job(job)) { @@ -302,7 +302,7 @@ static void mali_gp_irq_probe_trigger(void *data) struct mali_gp_core *core = (struct mali_gp_core *)data; mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); - mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_CMD_FORCE_HANG); + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR); _mali_osk_mem_barrier(); } @@ -312,8 +312,8 @@ static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data) u32 irq_readout; irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT); - if (MALIGP2_REG_VAL_IRQ_FORCE_HANG & irq_readout) { - mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_FORCE_HANG); + if (MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR & irq_readout) { + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR); _mali_osk_mem_barrier(); return _MALI_OSK_ERR_OK; } @@ -346,6 +346,7 @@ void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_ #if defined(CONFIG_MALI400_PROFILING) _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C0, val0); + _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C0, val0); #endif } @@ -356,6 +357,7 @@ void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_ #if defined(CONFIG_MALI400_PROFILING) _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C1, val1); + _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C1, val1); #endif } 
} diff --git a/drivers/gpu/arm/mali/common/mali_gp.h b/drivers/gpu/arm/mali/common/mali_gp.h index 8d5f69c23229c..ecbe70e940c33 100755 --- a/drivers/gpu/arm/mali/common/mali_gp.h +++ b/drivers/gpu/arm/mali/common/mali_gp.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_gp_job.c b/drivers/gpu/arm/mali/common/mali_gp_job.c index d6ef312406e0f..8dd19cb2df09c 100755 --- a/drivers/gpu/arm/mali/common/mali_gp_job.c +++ b/drivers/gpu/arm/mali/common/mali_gp_job.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -12,35 +12,87 @@ #include "mali_osk.h" #include "mali_osk_list.h" #include "mali_uk_types.h" +#include "mali_memory_virtual.h" +#include "mali_memory_defer_bind.h" static u32 gp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */ static u32 gp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */ +static void _mali_gp_del_varying_allocations(struct mali_gp_job *job); + + +static int _mali_gp_add_varying_allocations(struct mali_session_data *session, + struct mali_gp_job *job, + u32 *alloc, + u32 num) +{ + int i = 0; + struct mali_gp_allocation_node *alloc_node; + mali_mem_allocation *mali_alloc = NULL; + struct mali_vma_node *mali_vma_node = NULL; + + for (i = 0 ; i < num ; i++) { + MALI_DEBUG_ASSERT(alloc[i]); + alloc_node = _mali_osk_calloc(1, sizeof(struct mali_gp_allocation_node)); + if (alloc_node) { + INIT_LIST_HEAD(&alloc_node->node); + /* find mali allocation structure by vaddress*/ + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, alloc[i], 0); + + if (likely(mali_vma_node)) { + mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); + MALI_DEBUG_ASSERT(alloc[i] == mali_vma_node->vm_node.start); + } else { + MALI_DEBUG_PRINT(1, ("ERROE!_mali_gp_add_varying_allocations,can't find allocation %d by address =0x%x, num=%d\n", i, alloc[i], num)); + _mali_osk_free(alloc_node); + goto fail; + } + alloc_node->alloc = mali_alloc; + /* add to gp job varying alloc list*/ + list_move(&alloc_node->node, &job->varying_alloc); + } else + goto fail; + } + + return 0; +fail: + MALI_DEBUG_PRINT(1, ("ERROE!_mali_gp_add_varying_allocations,failed to alloc memory!\n")); + _mali_gp_del_varying_allocations(job); + return -1; +} + + +static void _mali_gp_del_varying_allocations(struct mali_gp_job *job) +{ + struct mali_gp_allocation_node *alloc_node, *tmp_node; + + list_for_each_entry_safe(alloc_node, tmp_node, 
&job->varying_alloc, node) { + list_del(&alloc_node->node); + kfree(alloc_node); + } + INIT_LIST_HEAD(&job->varying_alloc); +} struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker) { struct mali_gp_job *job; u32 perf_counter_flag; + u32 __user *memory_list = NULL; + struct mali_gp_allocation_node *alloc_node, *tmp_node; - job = _mali_osk_malloc(sizeof(struct mali_gp_job)); + job = _mali_osk_calloc(1, sizeof(struct mali_gp_job)); if (NULL != job) { job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s)); if (NULL == job->finished_notification) { - _mali_osk_free(job); - return NULL; + goto fail3; } job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s)); if (NULL == job->oom_notification) { - _mali_osk_notification_delete(job->finished_notification); - _mali_osk_free(job); - return NULL; + goto fail2; } if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) { - _mali_osk_notification_delete(job->finished_notification); - _mali_osk_notification_delete(job->oom_notification); - _mali_osk_free(job); - return NULL; + goto fail1; } perf_counter_flag = mali_gp_job_get_perf_counter_flag(job); @@ -62,6 +114,69 @@ struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_ job->pid = _mali_osk_get_pid(); job->tid = _mali_osk_get_tid(); + + INIT_LIST_HEAD(&job->varying_alloc); + INIT_LIST_HEAD(&job->vary_todo); + job->dmem = NULL; + + if (job->uargs.deferred_mem_num > session->allocation_mgr.mali_allocation_num) { + MALI_PRINT_ERROR(("Mali GP job: The number of varying buffer to defer bind is invalid !\n")); + goto fail1; + } + + /* add varying allocation list*/ + if (job->uargs.deferred_mem_num > 0) { + /* copy varying list from user space*/ + job->varying_list = _mali_osk_calloc(1, 
sizeof(u32) * job->uargs.deferred_mem_num); + if (!job->varying_list) { + MALI_PRINT_ERROR(("Mali GP job: allocate varying_list failed varying_alloc_num = %d !\n", job->uargs.deferred_mem_num)); + goto fail1; + } + + memory_list = (u32 __user *)(uintptr_t)uargs->deferred_mem_list; + + if (0 != _mali_osk_copy_from_user(job->varying_list, memory_list, sizeof(u32) * job->uargs.deferred_mem_num)) { + MALI_PRINT_ERROR(("Mali GP job: Failed to copy varying list from user space!\n")); + goto fail; + } + + if (unlikely(_mali_gp_add_varying_allocations(session, job, job->varying_list, + job->uargs.deferred_mem_num))) { + MALI_PRINT_ERROR(("Mali GP job: _mali_gp_add_varying_allocations failed!\n")); + goto fail; + } + + /* do preparetion for each allocation */ + list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) { + if (unlikely(_MALI_OSK_ERR_OK != mali_mem_defer_bind_allocation_prepare(alloc_node->alloc, &job->vary_todo, &job->required_varying_memsize))) { + MALI_PRINT_ERROR(("Mali GP job: mali_mem_defer_bind_allocation_prepare failed!\n")); + goto fail; + } + } + + _mali_gp_del_varying_allocations(job); + + /* bind varying here, to avoid memory latency issue. */ + { + struct mali_defer_mem_block dmem_block; + + INIT_LIST_HEAD(&dmem_block.free_pages); + atomic_set(&dmem_block.num_free_pages, 0); + + if (mali_mem_prepare_mem_for_job(job, &dmem_block)) { + MALI_PRINT_ERROR(("Mali GP job: mali_mem_prepare_mem_for_job failed!\n")); + goto fail; + } + if (_MALI_OSK_ERR_OK != mali_mem_defer_bind(job, &dmem_block)) { + MALI_PRINT_ERROR(("gp job create, mali_mem_defer_bind failed! 
GP %x fail!", job)); + goto fail; + } + } + + if (uargs->varying_memsize > MALI_UK_BIG_VARYING_SIZE) { + job->big_job = 1; + } + } job->pp_tracker = pp_tracker; if (NULL != job->pp_tracker) { /* Take a reference on PP job's tracker that will be released when the GP @@ -73,16 +188,46 @@ struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_ mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence)); return job; + } else { + MALI_PRINT_ERROR(("Mali GP job: _mali_osk_calloc failed!\n")); + return NULL; } + +fail: + _mali_osk_free(job->varying_list); + /* Handle allocate fail here, free all varying node */ + { + struct mali_backend_bind_list *bkn, *bkn_tmp; + list_for_each_entry_safe(bkn, bkn_tmp , &job->vary_todo, node) { + list_del(&bkn->node); + _mali_osk_free(bkn); + } + } +fail1: + _mali_osk_notification_delete(job->oom_notification); +fail2: + _mali_osk_notification_delete(job->finished_notification); +fail3: + _mali_osk_free(job); return NULL; } void mali_gp_job_delete(struct mali_gp_job *job) { + struct mali_backend_bind_list *bkn, *bkn_tmp; MALI_DEBUG_ASSERT_POINTER(job); MALI_DEBUG_ASSERT(NULL == job->pp_tracker); MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list)); + _mali_osk_free(job->varying_list); + + /* Handle allocate fail here, free all varying node */ + list_for_each_entry_safe(bkn, bkn_tmp , &job->vary_todo, node) { + list_del(&bkn->node); + _mali_osk_free(bkn); + } + + mali_mem_defer_dmem_free(job); /* de-allocate the pre-allocated oom notifications */ if (NULL != job->oom_notification) { diff --git a/drivers/gpu/arm/mali/common/mali_gp_job.h b/drivers/gpu/arm/mali/common/mali_gp_job.h index e4b0858efedcb..fbf8ee8fd6052 100755 --- a/drivers/gpu/arm/mali/common/mali_gp_job.h +++ b/drivers/gpu/arm/mali/common/mali_gp_job.h @@ -21,6 +21,7 @@ #include "mali_executor.h" #include "mali_timeline.h" +struct mali_defer_mem; /** * This structure represents a GP job * @@ -67,6 +68,21 @@ struct mali_gp_job { u32 
heap_current_addr; /**< Holds the current HEAP address when the job has completed */ u32 perf_counter_value0; /**< Value of performance counter 0 (to be returned to user space) */ u32 perf_counter_value1; /**< Value of performance counter 1 (to be returned to user space) */ + struct mali_defer_mem *dmem; /** < used for defer bind to store dmem info */ + struct list_head varying_alloc; /**< hold the list of varying allocations */ + u32 bind_flag; /** < flag for deferbind*/ + u32 *varying_list; /**< varying memory list need to to defer bind*/ + struct list_head vary_todo; /**< list of backend list need to do defer bind*/ + u32 required_varying_memsize; /** < size of varying memory to reallocate*/ + u32 big_job; /** < if the gp job have large varying output and may take long time*/ +}; + +#define MALI_DEFER_BIND_MEMORY_PREPARED (0x1 << 0) +#define MALI_DEFER_BIND_MEMORY_BINDED (0x1 << 2) + +struct mali_gp_allocation_node { + struct list_head node; + mali_mem_allocation *alloc; }; struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker); diff --git a/drivers/gpu/arm/mali/common/mali_group.c b/drivers/gpu/arm/mali/common/mali_group.c index 0cbd8610fe291..310811ef81bcc 100755 --- a/drivers/gpu/arm/mali/common/mali_group.c +++ b/drivers/gpu/arm/mali/common/mali_group.c @@ -1,18 +1,12 @@ /* * Copyright (C) 2011-2015 ARM Limited. All rights reserved. - * + * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * + * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ - -#include -#include -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 29)) -#include -#endif #include "mali_kernel_common.h" #include "mali_group.h" #include "mali_osk.h" @@ -27,7 +21,6 @@ #include "mali_pm_domain.h" #include "mali_pm.h" #include "mali_executor.h" -#include #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) #include @@ -51,7 +44,6 @@ static void mali_group_bottom_half_mmu(void *data); static void mali_group_bottom_half_gp(void *data); static void mali_group_bottom_half_pp(void *data); static void mali_group_timeout(void *data); - static void mali_group_reset_pp(struct mali_group *group); static void mali_group_reset_mmu(struct mali_group *group); @@ -59,598 +51,664 @@ static void mali_group_activate_page_directory(struct mali_group *group, struct static void mali_group_recovery_reset(struct mali_group *group); struct mali_group *mali_group_create(struct mali_l2_cache_core *core, - struct mali_dlbu_core *dlbu, - struct mali_bcast_unit *bcast, - u32 domain_index) + struct mali_dlbu_core *dlbu, + struct mali_bcast_unit *bcast, + u32 domain_index) { - struct mali_group *group = NULL; - - if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) { - MALI_PRINT_ERROR(("Mali group: Too many group objects created\n")); - return NULL; - } - - group = _mali_osk_calloc(1, sizeof(struct mali_group)); - if (NULL != group) { - group->timeout_timer = _mali_osk_timer_init(); - if (NULL != group->timeout_timer) { - _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group); - - group->l2_cache_core[0] = core; - _mali_osk_list_init(&group->group_list); - _mali_osk_list_init(&group->executor_list); - _mali_osk_list_init(&group->pm_domain_list); - group->bcast_core = bcast; - group->dlbu_core = dlbu; - - /* register this object as a part of the correct power domain */ - if ((NULL != core) || (NULL != dlbu) || (NULL != bcast)) - group->pm_domain = mali_pm_register_group(domain_index, group); - - 
mali_global_groups[mali_global_num_groups] = group; - mali_global_num_groups++; - - return group; - } - _mali_osk_free(group); - } - - return NULL; + struct mali_group *group = NULL; + + if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) { + MALI_PRINT_ERROR(("Mali group: Too many group objects created\n")); + return NULL; + } + + group = _mali_osk_calloc(1, sizeof(struct mali_group)); + if (NULL != group) { + group->timeout_timer = _mali_osk_timer_init(); + if (NULL != group->timeout_timer) { + _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group); + + group->l2_cache_core[0] = core; + _mali_osk_list_init(&group->group_list); + _mali_osk_list_init(&group->executor_list); + _mali_osk_list_init(&group->pm_domain_list); + group->bcast_core = bcast; + group->dlbu_core = dlbu; + + /* register this object as a part of the correct power domain */ + if ((NULL != core) || (NULL != dlbu) || (NULL != bcast)) + group->pm_domain = mali_pm_register_group(domain_index, group); + + mali_global_groups[mali_global_num_groups] = group; + mali_global_num_groups++; + + return group; + } + _mali_osk_free(group); + } + + return NULL; } void mali_group_delete(struct mali_group *group) { - u32 i; - - MALI_DEBUG_PRINT(4, ("Deleting group %s\n", - mali_group_core_description(group))); - - MALI_DEBUG_ASSERT(NULL == group->parent_group); - MALI_DEBUG_ASSERT((MALI_GROUP_STATE_INACTIVE == group->state) || ((MALI_GROUP_STATE_ACTIVATION_PENDING == group->state))); - - /* Delete the resources that this group owns */ - if (NULL != group->gp_core) { - mali_gp_delete(group->gp_core); - } - - if (NULL != group->pp_core) { - mali_pp_delete(group->pp_core); - } - - if (NULL != group->mmu) { - mali_mmu_delete(group->mmu); - } - - if (mali_group_is_virtual(group)) { - /* Remove all groups from virtual group */ - struct mali_group *child; - struct mali_group *temp; - - _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { - 
child->parent_group = NULL; - mali_group_delete(child); - } - - mali_dlbu_delete(group->dlbu_core); - - if (NULL != group->bcast_core) { - mali_bcast_unit_delete(group->bcast_core); - } - } - - for (i = 0; i < mali_global_num_groups; i++) { - if (mali_global_groups[i] == group) { - mali_global_groups[i] = NULL; - mali_global_num_groups--; - - if (i != mali_global_num_groups) { - /* We removed a group from the middle of the array -- move the last - * group to the current position to close the gap */ - mali_global_groups[i] = mali_global_groups[mali_global_num_groups]; - mali_global_groups[mali_global_num_groups] = NULL; - } - - break; - } - } - - if (NULL != group->timeout_timer) { - _mali_osk_timer_del(group->timeout_timer); - _mali_osk_timer_term(group->timeout_timer); - } - - if (NULL != group->bottom_half_work_mmu) { - _mali_osk_wq_delete_work(group->bottom_half_work_mmu); - } - - if (NULL != group->bottom_half_work_gp) { - _mali_osk_wq_delete_work(group->bottom_half_work_gp); - } - - if (NULL != group->bottom_half_work_pp) { - _mali_osk_wq_delete_work(group->bottom_half_work_pp); - } - - _mali_osk_free(group); + u32 i; + + MALI_DEBUG_PRINT(4, ("Deleting group %s\n", + mali_group_core_description(group))); + + MALI_DEBUG_ASSERT(NULL == group->parent_group); + MALI_DEBUG_ASSERT((MALI_GROUP_STATE_INACTIVE == group->state) || ((MALI_GROUP_STATE_ACTIVATION_PENDING == group->state))); + + /* Delete the resources that this group owns */ + if (NULL != group->gp_core) { + mali_gp_delete(group->gp_core); + } + + if (NULL != group->pp_core) { + mali_pp_delete(group->pp_core); + } + + if (NULL != group->mmu) { + mali_mmu_delete(group->mmu); + } + + if (mali_group_is_virtual(group)) { + /* Remove all groups from virtual group */ + struct mali_group *child; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + child->parent_group = NULL; + mali_group_delete(child); + } + + 
mali_dlbu_delete(group->dlbu_core); + + if (NULL != group->bcast_core) { + mali_bcast_unit_delete(group->bcast_core); + } + } + + for (i = 0; i < mali_global_num_groups; i++) { + if (mali_global_groups[i] == group) { + mali_global_groups[i] = NULL; + mali_global_num_groups--; + + if (i != mali_global_num_groups) { + /* We removed a group from the middle of the array -- move the last + * group to the current position to close the gap */ + mali_global_groups[i] = mali_global_groups[mali_global_num_groups]; + mali_global_groups[mali_global_num_groups] = NULL; + } + + break; + } + } + + if (NULL != group->timeout_timer) { + _mali_osk_timer_del(group->timeout_timer); + _mali_osk_timer_term(group->timeout_timer); + } + + if (NULL != group->bottom_half_work_mmu) { + _mali_osk_wq_delete_work(group->bottom_half_work_mmu); + } + + if (NULL != group->bottom_half_work_gp) { + _mali_osk_wq_delete_work(group->bottom_half_work_gp); + } + + if (NULL != group->bottom_half_work_pp) { + _mali_osk_wq_delete_work(group->bottom_half_work_pp); + } + + _mali_osk_free(group); } _mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core) { - /* This group object now owns the MMU core object */ - group->mmu = mmu_core; - group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group); - if (NULL == group->bottom_half_work_mmu) { - return _MALI_OSK_ERR_FAULT; - } - return _MALI_OSK_ERR_OK; + /* This group object now owns the MMU core object */ + group->mmu = mmu_core; + group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group); + if (NULL == group->bottom_half_work_mmu) { + return _MALI_OSK_ERR_FAULT; + } + return _MALI_OSK_ERR_OK; } void mali_group_remove_mmu_core(struct mali_group *group) { - /* This group object no longer owns the MMU core object */ - group->mmu = NULL; - if (NULL != group->bottom_half_work_mmu) { - _mali_osk_wq_delete_work(group->bottom_half_work_mmu); - } + /* This group 
object no longer owns the MMU core object */ + group->mmu = NULL; + if (NULL != group->bottom_half_work_mmu) { + _mali_osk_wq_delete_work(group->bottom_half_work_mmu); + } } _mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core) { - /* This group object now owns the GP core object */ - group->gp_core = gp_core; - group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group); - if (NULL == group->bottom_half_work_gp) { - return _MALI_OSK_ERR_FAULT; - } - return _MALI_OSK_ERR_OK; + /* This group object now owns the GP core object */ + group->gp_core = gp_core; + group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group); + if (NULL == group->bottom_half_work_gp) { + return _MALI_OSK_ERR_FAULT; + } + + return _MALI_OSK_ERR_OK; } void mali_group_remove_gp_core(struct mali_group *group) { - /* This group object no longer owns the GP core object */ - group->gp_core = NULL; - if (NULL != group->bottom_half_work_gp) { - _mali_osk_wq_delete_work(group->bottom_half_work_gp); - } + /* This group object no longer owns the GP core object */ + group->gp_core = NULL; + if (NULL != group->bottom_half_work_gp) { + _mali_osk_wq_delete_work(group->bottom_half_work_gp); + } } _mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core) { - /* This group object now owns the PP core object */ - group->pp_core = pp_core; - group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group); - if (NULL == group->bottom_half_work_pp) { - return _MALI_OSK_ERR_FAULT; - } - return _MALI_OSK_ERR_OK; + /* This group object now owns the PP core object */ + group->pp_core = pp_core; + group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group); + if (NULL == group->bottom_half_work_pp) { + return _MALI_OSK_ERR_FAULT; + } + return _MALI_OSK_ERR_OK; } void mali_group_remove_pp_core(struct mali_group *group) { - /* 
This group object no longer owns the PP core object */ - group->pp_core = NULL; - if (NULL != group->bottom_half_work_pp) { - _mali_osk_wq_delete_work(group->bottom_half_work_pp); - } + /* This group object no longer owns the PP core object */ + group->pp_core = NULL; + if (NULL != group->bottom_half_work_pp) { + _mali_osk_wq_delete_work(group->bottom_half_work_pp); + } } enum mali_group_state mali_group_activate(struct mali_group *group) { - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - - MALI_DEBUG_PRINT(4, ("Group: Activating group %s\n", - mali_group_core_description(group))); - - if (MALI_GROUP_STATE_INACTIVE == group->state) { - /* Group is inactive, get PM refs in order to power up */ - - /* - * We'll take a maximum of 2 power domain references pr group, - * one for the group itself, and one for it's L2 cache. - */ - struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS]; - struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS]; - u32 num_domains = 0; - mali_bool all_groups_on; - - /* Deal with child groups first */ - if (mali_group_is_virtual(group)) { - /* - * The virtual group might have 0, 1 or 2 L2s in - * its l2_cache_core array, but we ignore these and - * let the child groups take the needed L2 cache ref - * on behalf of the virtual group. - * In other words; The L2 refs are taken in pair with - * the physical group which the L2 is attached to. - */ - struct mali_group *child; - struct mali_group *temp; - - /* - * Child group is inactive, get PM - * refs in order to power up. - */ - _MALI_OSK_LIST_FOREACHENTRY(child, temp, - &group->group_list, - struct mali_group, group_list) { - MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE - == child->state); - - child->state = MALI_GROUP_STATE_ACTIVATION_PENDING; - - MALI_DEBUG_ASSERT_POINTER( - child->pm_domain); - domains[num_domains] = child->pm_domain; - groups[num_domains] = child; - num_domains++; - - /* - * Take L2 domain ref for child group. 
- */ - MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS - > num_domains); - domains[num_domains] = mali_l2_cache_get_pm_domain( - child->l2_cache_core[0]); - groups[num_domains] = NULL; - MALI_DEBUG_ASSERT(NULL == - child->l2_cache_core[1]); - num_domains++; - } - } else { - /* Take L2 domain ref for physical groups. */ - MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS > - num_domains); - - domains[num_domains] = mali_l2_cache_get_pm_domain( - group->l2_cache_core[0]); - groups[num_domains] = NULL; - MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]); - num_domains++; - } - - /* Do the group itself last (it's dependencies first) */ - - group->state = MALI_GROUP_STATE_ACTIVATION_PENDING; - - MALI_DEBUG_ASSERT_POINTER(group->pm_domain); - domains[num_domains] = group->pm_domain; - groups[num_domains] = group; - num_domains++; - - all_groups_on = mali_pm_get_domain_refs(domains, groups, - num_domains); - - /* - * Complete activation for group, include - * virtual group or physical group. - */ - if (MALI_TRUE == all_groups_on) { - - mali_group_set_active(group); - } - } else if (MALI_GROUP_STATE_ACTIVE == group->state) { - /* Already active */ - MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on); - } else { - /* - * Activation already pending, group->power_is_on could - * be both true or false. We need to wait for power up - * notification anyway. - */ - MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING - == group->state); - } - - MALI_DEBUG_PRINT(4, ("Group: group %s activation result: %s\n", - mali_group_core_description(group), - MALI_GROUP_STATE_ACTIVE == group->state ? 
- "ACTIVE" : "PENDING")); - - return group->state; + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + MALI_DEBUG_PRINT(4, ("Group: Activating group %s\n", + mali_group_core_description(group))); + + if (MALI_GROUP_STATE_INACTIVE == group->state) { + /* Group is inactive, get PM refs in order to power up */ + + /* + * We'll take a maximum of 2 power domain references pr group, + * one for the group itself, and one for it's L2 cache. + */ + struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS]; + struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS]; + u32 num_domains = 0; + mali_bool all_groups_on; + + /* Deal with child groups first */ + if (mali_group_is_virtual(group)) { + /* + * The virtual group might have 0, 1 or 2 L2s in + * its l2_cache_core array, but we ignore these and + * let the child groups take the needed L2 cache ref + * on behalf of the virtual group. + * In other words; The L2 refs are taken in pair with + * the physical group which the L2 is attached to. + */ + struct mali_group *child; + struct mali_group *temp; + + /* + * Child group is inactive, get PM + * refs in order to power up. + */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, + &group->group_list, + struct mali_group, group_list) { + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE + == child->state); + + child->state = MALI_GROUP_STATE_ACTIVATION_PENDING; + + MALI_DEBUG_ASSERT_POINTER( + child->pm_domain); + domains[num_domains] = child->pm_domain; + groups[num_domains] = child; + num_domains++; + + /* + * Take L2 domain ref for child group. + */ + MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS + > num_domains); + domains[num_domains] = mali_l2_cache_get_pm_domain( + child->l2_cache_core[0]); + groups[num_domains] = NULL; + MALI_DEBUG_ASSERT(NULL == + child->l2_cache_core[1]); + num_domains++; + } + } else { + /* Take L2 domain ref for physical groups. 
*/ + MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS > + num_domains); + + domains[num_domains] = mali_l2_cache_get_pm_domain( + group->l2_cache_core[0]); + groups[num_domains] = NULL; + MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]); + num_domains++; + } + + /* Do the group itself last (it's dependencies first) */ + + group->state = MALI_GROUP_STATE_ACTIVATION_PENDING; + + MALI_DEBUG_ASSERT_POINTER(group->pm_domain); + domains[num_domains] = group->pm_domain; + groups[num_domains] = group; + num_domains++; + + all_groups_on = mali_pm_get_domain_refs(domains, groups, + num_domains); + + /* + * Complete activation for group, include + * virtual group or physical group. + */ + if (MALI_TRUE == all_groups_on) { + + mali_group_set_active(group); + } + } else if (MALI_GROUP_STATE_ACTIVE == group->state) { + /* Already active */ + MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on); + } else { + /* + * Activation already pending, group->power_is_on could + * be both true or false. We need to wait for power up + * notification anyway. + */ + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING + == group->state); + } + + MALI_DEBUG_PRINT(4, ("Group: group %s activation result: %s\n", + mali_group_core_description(group), + MALI_GROUP_STATE_ACTIVE == group->state ? 
+ "ACTIVE" : "PENDING")); + + return group->state; } mali_bool mali_group_set_active(struct mali_group *group) { - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING == group->state); - MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on); + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING == group->state); + MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on); - MALI_DEBUG_PRINT(4, ("Group: Activation completed for %s\n", - mali_group_core_description(group))); + MALI_DEBUG_PRINT(4, ("Group: Activation completed for %s\n", + mali_group_core_description(group))); - if (mali_group_is_virtual(group)) { - struct mali_group *child; - struct mali_group *temp; + if (mali_group_is_virtual(group)) { + struct mali_group *child; + struct mali_group *temp; - _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, - struct mali_group, group_list) { - if (MALI_TRUE != child->power_is_on) { - return MALI_FALSE; - } + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, + struct mali_group, group_list) { + if (MALI_TRUE != child->power_is_on) { + return MALI_FALSE; + } - child->state = MALI_GROUP_STATE_ACTIVE; - } + child->state = MALI_GROUP_STATE_ACTIVE; + } - mali_group_reset(group); - } + mali_group_reset(group); + } - /* Go to ACTIVE state */ - group->state = MALI_GROUP_STATE_ACTIVE; + /* Go to ACTIVE state */ + group->state = MALI_GROUP_STATE_ACTIVE; - return MALI_TRUE; + return MALI_TRUE; } mali_bool mali_group_deactivate(struct mali_group *group) { - struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS]; - u32 num_domains = 0; - mali_bool power_down = MALI_FALSE; - - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE != group->state); - - MALI_DEBUG_PRINT(3, ("Group: Deactivating group %s\n", - 
mali_group_core_description(group))); - - group->state = MALI_GROUP_STATE_INACTIVE; - - MALI_DEBUG_ASSERT_POINTER(group->pm_domain); - domains[num_domains] = group->pm_domain; - num_domains++; - - if (mali_group_is_virtual(group)) { - /* Release refs for all child groups */ - struct mali_group *child; - struct mali_group *temp; - - _MALI_OSK_LIST_FOREACHENTRY(child, temp, - &group->group_list, - struct mali_group, group_list) { - child->state = MALI_GROUP_STATE_INACTIVE; - - MALI_DEBUG_ASSERT_POINTER(child->pm_domain); - domains[num_domains] = child->pm_domain; - num_domains++; - - /* Release L2 cache domain for child groups */ - MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS > - num_domains); - domains[num_domains] = mali_l2_cache_get_pm_domain( - child->l2_cache_core[0]); - MALI_DEBUG_ASSERT(NULL == child->l2_cache_core[1]); - num_domains++; - } - - /* - * Must do mali_group_power_down() steps right here for - * virtual group, because virtual group itself is likely to - * stay powered on, however child groups are now very likely - * to be powered off (and thus lose their state). - */ - - mali_group_clear_session(group); - /* - * Disable the broadcast unit (clear it's mask). - * This is needed in case the GPU isn't actually - * powered down at this point and groups are - * removed from an inactive virtual group. - * If not, then the broadcast unit will intercept - * their interrupts! 
- */ - mali_bcast_disable(group->bcast_core); - } else { - /* Release L2 cache domain for physical groups */ - MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS > - num_domains); - domains[num_domains] = mali_l2_cache_get_pm_domain( - group->l2_cache_core[0]); - MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]); - num_domains++; - } - - power_down = mali_pm_put_domain_refs(domains, num_domains); - - return power_down; + struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS]; + u32 num_domains = 0; + mali_bool power_down = MALI_FALSE; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE != group->state); + + MALI_DEBUG_PRINT(3, ("Group: Deactivating group %s\n", + mali_group_core_description(group))); + + group->state = MALI_GROUP_STATE_INACTIVE; + + MALI_DEBUG_ASSERT_POINTER(group->pm_domain); + domains[num_domains] = group->pm_domain; + num_domains++; + + if (mali_group_is_virtual(group)) { + /* Release refs for all child groups */ + struct mali_group *child; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(child, temp, + &group->group_list, + struct mali_group, group_list) { + child->state = MALI_GROUP_STATE_INACTIVE; + + MALI_DEBUG_ASSERT_POINTER(child->pm_domain); + domains[num_domains] = child->pm_domain; + num_domains++; + + /* Release L2 cache domain for child groups */ + MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS > + num_domains); + domains[num_domains] = mali_l2_cache_get_pm_domain( + child->l2_cache_core[0]); + MALI_DEBUG_ASSERT(NULL == child->l2_cache_core[1]); + num_domains++; + } + + /* + * Must do mali_group_power_down() steps right here for + * virtual group, because virtual group itself is likely to + * stay powered on, however child groups are now very likely + * to be powered off (and thus lose their state). + */ + + mali_group_clear_session(group); + /* + * Disable the broadcast unit (clear it's mask). 
+ * This is needed in case the GPU isn't actually + * powered down at this point and groups are + * removed from an inactive virtual group. + * If not, then the broadcast unit will intercept + * their interrupts! + */ + mali_bcast_disable(group->bcast_core); + } else { + /* Release L2 cache domain for physical groups */ + MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS > + num_domains); + domains[num_domains] = mali_l2_cache_get_pm_domain( + group->l2_cache_core[0]); + MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]); + num_domains++; + } + + power_down = mali_pm_put_domain_refs(domains, num_domains); + + return power_down; } void mali_group_power_up(struct mali_group *group) { - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - - MALI_DEBUG_PRINT(3, ("Group: Power up for %s\n", - mali_group_core_description(group))); - - group->power_is_on = MALI_TRUE; - - if (MALI_FALSE == mali_group_is_virtual(group) - && MALI_FALSE == mali_group_is_in_virtual(group)) { - mali_group_reset(group); - } - - /* - * When we just acquire only one physical group form virt group, - * we should remove the bcast&dlbu mask from virt group and - * reset bcast and dlbu core, although part of pp cores in virt - * group maybe not be powered on. - */ - if (MALI_TRUE == mali_group_is_virtual(group)) { - mali_bcast_reset(group->bcast_core); - mali_dlbu_update_mask(group->dlbu_core); - } + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + MALI_DEBUG_PRINT(3, ("Group: Power up for %s\n", + mali_group_core_description(group))); + + group->power_is_on = MALI_TRUE; + + if (MALI_FALSE == mali_group_is_virtual(group) + && MALI_FALSE == mali_group_is_in_virtual(group)) { + mali_group_reset(group); + } + + /* + * When we just acquire only one physical group form virt group, + * we should remove the bcast&dlbu mask from virt group and + * reset bcast and dlbu core, although part of pp cores in virt + * group maybe not be powered on. 
+ */ + if (MALI_TRUE == mali_group_is_virtual(group)) { + mali_bcast_reset(group->bcast_core); + mali_dlbu_update_mask(group->dlbu_core); + } } void mali_group_power_down(struct mali_group *group) { - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on); - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - - MALI_DEBUG_PRINT(3, ("Group: Power down for %s\n", - mali_group_core_description(group))); - - group->power_is_on = MALI_FALSE; - - if (mali_group_is_virtual(group)) { - /* - * What we do for physical jobs in this function should - * already have been done in mali_group_deactivate() - * for virtual group. - */ - MALI_DEBUG_ASSERT(NULL == group->session); - } else { - mali_group_clear_session(group); - } + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + MALI_DEBUG_PRINT(3, ("Group: Power down for %s\n", + mali_group_core_description(group))); + + group->power_is_on = MALI_FALSE; + + if (mali_group_is_virtual(group)) { + /* + * What we do for physical jobs in this function should + * already have been done in mali_group_deactivate() + * for virtual group. 
+ */ + MALI_DEBUG_ASSERT(NULL == group->session); + } else { + mali_group_clear_session(group); + } } MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup) - { - u32 i; - struct mali_group *group; - struct mali_group *temp; - - MALI_DEBUG_PRINT(4, ("Virtual group %s (%p)\n", - mali_group_core_description(vgroup), - vgroup)); - MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0])); - MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1])); - - i = 0; - _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) { - MALI_DEBUG_PRINT(4, ("[%d] %s (%p), l2_cache_core[0] = %p\n", - i, mali_group_core_description(group), - group, group->l2_cache_core[0])); - i++; - } - }) +{ + u32 i; + struct mali_group *group; + struct mali_group *temp; + + MALI_DEBUG_PRINT(4, ("Virtual group %s (%p)\n", + mali_group_core_description(vgroup), + vgroup)); + MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0])); + MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1])); + + i = 0; + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) { + MALI_DEBUG_PRINT(4, ("[%d] %s (%p), l2_cache_core[0] = %p\n", + i, mali_group_core_description(group), + group, group->l2_cache_core[0])); + i++; + } +}) + +static void mali_group_dump_core_status(struct mali_group *group) +{ + u32 i; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT(NULL != group->gp_core || (NULL != group->pp_core && !mali_group_is_virtual(group))); + + if (NULL != group->gp_core) { + MALI_PRINT(("Dump Group %s\n", group->gp_core->hw_core.description)); + + for (i = 0; i < 0xA8; i += 0x10) { + MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, 
mali_hw_core_register_read(&group->gp_core->hw_core, i), + mali_hw_core_register_read(&group->gp_core->hw_core, i + 4), + mali_hw_core_register_read(&group->gp_core->hw_core, i + 8), + mali_hw_core_register_read(&group->gp_core->hw_core, i + 12))); + } + + + } else { + MALI_PRINT(("Dump Group %s\n", group->pp_core->hw_core.description)); + + for (i = 0; i < 0x5c; i += 0x10) { + MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i), + mali_hw_core_register_read(&group->pp_core->hw_core, i + 4), + mali_hw_core_register_read(&group->pp_core->hw_core, i + 8), + mali_hw_core_register_read(&group->pp_core->hw_core, i + 12))); + } + + /* Ignore some minor registers */ + for (i = 0x1000; i < 0x1068; i += 0x10) { + MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i), + mali_hw_core_register_read(&group->pp_core->hw_core, i + 4), + mali_hw_core_register_read(&group->pp_core->hw_core, i + 8), + mali_hw_core_register_read(&group->pp_core->hw_core, i + 12))); + } + } + + MALI_PRINT(("Dump Group MMU\n")); + for (i = 0; i < 0x24; i += 0x10) { + MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->mmu->hw_core, i), + mali_hw_core_register_read(&group->mmu->hw_core, i + 4), + mali_hw_core_register_read(&group->mmu->hw_core, i + 8), + mali_hw_core_register_read(&group->mmu->hw_core, i + 12))); + } +} + + +/** + * @Dump group status + */ +void mali_group_dump_status(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + + if (mali_group_is_virtual(group)) { + struct mali_group *group_c; + struct mali_group *temp; + _MALI_OSK_LIST_FOREACHENTRY(group_c, temp, &group->group_list, struct mali_group, group_list) { + mali_group_dump_core_status(group_c); + } + } else { + mali_group_dump_core_status(group); + } +} /** * @brief Add child group to virtual group parent */ void mali_group_add_group(struct mali_group *parent, 
struct mali_group *child) { - mali_bool found; - u32 i; - - MALI_DEBUG_PRINT(3, ("Adding group %s to virtual group %s\n", - mali_group_core_description(child), - mali_group_core_description(parent))); - - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - MALI_DEBUG_ASSERT(mali_group_is_virtual(parent)); - MALI_DEBUG_ASSERT(!mali_group_is_virtual(child)); - MALI_DEBUG_ASSERT(NULL == child->parent_group); - - _mali_osk_list_addtail(&child->group_list, &parent->group_list); - - child->parent_group = parent; - - MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]); - - MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1])); - MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1])); - - /* Keep track of the L2 cache cores of child groups */ - found = MALI_FALSE; - for (i = 0; i < 2; i++) { - if (parent->l2_cache_core[i] == child->l2_cache_core[0]) { - MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0); - parent->l2_cache_core_ref_count[i]++; - found = MALI_TRUE; - } - } - - if (!found) { - /* First time we see this L2 cache, add it to our list */ - i = (NULL == parent->l2_cache_core[0]) ? 0 : 1; - - MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i])); - - MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]); - - parent->l2_cache_core[i] = child->l2_cache_core[0]; - parent->l2_cache_core_ref_count[i]++; - } - - /* Update Broadcast Unit and DLBU */ - mali_bcast_add_group(parent->bcast_core, child); - mali_dlbu_add_group(parent->dlbu_core, child); - - if (MALI_TRUE == parent->power_is_on) { - mali_bcast_reset(parent->bcast_core); - mali_dlbu_update_mask(parent->dlbu_core); - } - - if (MALI_TRUE == child->power_is_on) { - if (NULL == parent->session) { - if (NULL != child->session) { - /* - * Parent has no session, so clear - * child session as well. 
- */ - mali_mmu_activate_empty_page_directory(child->mmu); - } - } else { - if (parent->session == child->session) { - /* We already have same session as parent, - * so a simple zap should be enough. - */ - mali_mmu_zap_tlb(child->mmu); - } else { - /* - * Parent has a different session, so we must - * switch to that sessions page table - */ - mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session)); - } - - /* It is the parent which keeps the session from now on */ - child->session = NULL; - } - } else { - /* should have been cleared when child was powered down */ - MALI_DEBUG_ASSERT(NULL == child->session); - } - - /* Start job on child when parent is active */ - if (NULL != parent->pp_running_job) { - struct mali_pp_job *job = parent->pp_running_job; - - MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n", - child, mali_pp_job_get_id(job), parent)); - - /* Only allowed to add active child to an active parent */ - MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == parent->state); - MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == child->state); - - mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE); - - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | - MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | - MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, - mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0); - - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | - MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | - MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL, - mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0); + mali_bool found; + u32 i; + + MALI_DEBUG_PRINT(3, ("Adding group %s to virtual group %s\n", + mali_group_core_description(child), + mali_group_core_description(parent))); + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + 
MALI_DEBUG_ASSERT(mali_group_is_virtual(parent)); + MALI_DEBUG_ASSERT(!mali_group_is_virtual(child)); + MALI_DEBUG_ASSERT(NULL == child->parent_group); + + _mali_osk_list_addtail(&child->group_list, &parent->group_list); + + child->parent_group = parent; + + MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]); + + MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1])); + MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1])); + + /* Keep track of the L2 cache cores of child groups */ + found = MALI_FALSE; + for (i = 0; i < 2; i++) { + if (parent->l2_cache_core[i] == child->l2_cache_core[0]) { + MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0); + parent->l2_cache_core_ref_count[i]++; + found = MALI_TRUE; + } + } + + if (!found) { + /* First time we see this L2 cache, add it to our list */ + i = (NULL == parent->l2_cache_core[0]) ? 0 : 1; + + MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i])); + + MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]); + + parent->l2_cache_core[i] = child->l2_cache_core[0]; + parent->l2_cache_core_ref_count[i]++; + } + + /* Update Broadcast Unit and DLBU */ + mali_bcast_add_group(parent->bcast_core, child); + mali_dlbu_add_group(parent->dlbu_core, child); + + if (MALI_TRUE == parent->power_is_on) { + mali_bcast_reset(parent->bcast_core); + mali_dlbu_update_mask(parent->dlbu_core); + } + + if (MALI_TRUE == child->power_is_on) { + if (NULL == parent->session) { + if (NULL != child->session) { + /* + * Parent has no session, so clear + * child session as well. + */ + mali_mmu_activate_empty_page_directory(child->mmu); + } + } else { + if (parent->session == child->session) { + /* We already have same session as parent, + * so a simple zap should be enough. 
+ */ + mali_mmu_zap_tlb(child->mmu); + } else { + /* + * Parent has a different session, so we must + * switch to that sessions page table + */ + mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session)); + } + + /* It is the parent which keeps the session from now on */ + child->session = NULL; + } + } else { + /* should have been cleared when child was powered down */ + MALI_DEBUG_ASSERT(NULL == child->session); + } + + /* Start job on child when parent is active */ + if (NULL != parent->pp_running_job) { + struct mali_pp_job *job = parent->pp_running_job; + + MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n", + child, mali_pp_job_get_id(job), parent)); + + /* Only allowed to add active child to an active parent */ + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == parent->state); + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == child->state); + + mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | + MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | + MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL, + mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0); #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) - trace_gpu_sched_switch( - mali_pp_core_description(group->pp_core), - sched_clock(), mali_pp_job_get_tid(job), - 0, mali_pp_job_get_id(job)); + trace_gpu_sched_switch( + mali_pp_core_description(group->pp_core), + sched_clock(), mali_pp_job_get_tid(job), + 0, mali_pp_job_get_id(job)); #endif #if defined(CONFIG_MALI400_PROFILING) - trace_mali_core_active(mali_pp_job_get_pid(job), 1 
/* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core), - mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job)); + trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core), + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job)); #endif - } + } - MALI_DEBUG_CODE(mali_group_print_virtual(parent);) + MALI_DEBUG_CODE(mali_group_print_virtual(parent);) } /** @@ -658,155 +716,155 @@ void mali_group_add_group(struct mali_group *parent, struct mali_group *child) */ void mali_group_remove_group(struct mali_group *parent, struct mali_group *child) { - u32 i; + u32 i; - MALI_DEBUG_PRINT(3, ("Removing group %s from virtual group %s\n", - mali_group_core_description(child), - mali_group_core_description(parent))); + MALI_DEBUG_PRINT(3, ("Removing group %s from virtual group %s\n", + mali_group_core_description(child), + mali_group_core_description(parent))); - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - MALI_DEBUG_ASSERT(mali_group_is_virtual(parent)); - MALI_DEBUG_ASSERT(!mali_group_is_virtual(child)); - MALI_DEBUG_ASSERT(parent == child->parent_group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_group_is_virtual(parent)); + MALI_DEBUG_ASSERT(!mali_group_is_virtual(child)); + MALI_DEBUG_ASSERT(parent == child->parent_group); - /* Update Broadcast Unit and DLBU */ - mali_bcast_remove_group(parent->bcast_core, child); - mali_dlbu_remove_group(parent->dlbu_core, child); + /* Update Broadcast Unit and DLBU */ + mali_bcast_remove_group(parent->bcast_core, child); + mali_dlbu_remove_group(parent->dlbu_core, child); - if (MALI_TRUE == parent->power_is_on) { - mali_bcast_reset(parent->bcast_core); - mali_dlbu_update_mask(parent->dlbu_core); - } + if (MALI_TRUE == parent->power_is_on) { + mali_bcast_reset(parent->bcast_core); + mali_dlbu_update_mask(parent->dlbu_core); + } - child->session = parent->session; - child->parent_group = NULL; + child->session = 
parent->session; + child->parent_group = NULL; - _mali_osk_list_delinit(&child->group_list); - if (_mali_osk_list_empty(&parent->group_list)) { - parent->session = NULL; - } + _mali_osk_list_delinit(&child->group_list); + if (_mali_osk_list_empty(&parent->group_list)) { + parent->session = NULL; + } - /* Keep track of the L2 cache cores of child groups */ - i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1; + /* Keep track of the L2 cache cores of child groups */ + i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1; - MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]); + MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]); - parent->l2_cache_core_ref_count[i]--; - if (parent->l2_cache_core_ref_count[i] == 0) { - parent->l2_cache_core[i] = NULL; - } + parent->l2_cache_core_ref_count[i]--; + if (parent->l2_cache_core_ref_count[i] == 0) { + parent->l2_cache_core[i] = NULL; + } - MALI_DEBUG_CODE(mali_group_print_virtual(parent)); + MALI_DEBUG_CODE(mali_group_print_virtual(parent)); } struct mali_group *mali_group_acquire_group(struct mali_group *parent) { - struct mali_group *child = NULL; + struct mali_group *child = NULL; - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - MALI_DEBUG_ASSERT(mali_group_is_virtual(parent)); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_group_is_virtual(parent)); - if (!_mali_osk_list_empty(&parent->group_list)) { - child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list); - mali_group_remove_group(parent, child); - } + if (!_mali_osk_list_empty(&parent->group_list)) { + child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list); + mali_group_remove_group(parent, child); + } - if (NULL != child) { - if (MALI_GROUP_STATE_ACTIVE != parent->state - && MALI_TRUE == child->power_is_on) { - mali_group_reset(child); - } - } + if (NULL != child) { + if (MALI_GROUP_STATE_ACTIVE != parent->state + && MALI_TRUE 
== child->power_is_on) { + mali_group_reset(child); + } + } - return child; + return child; } void mali_group_reset(struct mali_group *group) { - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - MALI_DEBUG_ASSERT(NULL == group->gp_running_job); - MALI_DEBUG_ASSERT(NULL == group->pp_running_job); - MALI_DEBUG_ASSERT(NULL == group->session); - - MALI_DEBUG_PRINT(3, ("Group: reset of %s\n", - mali_group_core_description(group))); - - if (NULL != group->dlbu_core) { - mali_dlbu_reset(group->dlbu_core); - } - - if (NULL != group->bcast_core) { - mali_bcast_reset(group->bcast_core); - } - - MALI_DEBUG_ASSERT(NULL != group->mmu); - mali_group_reset_mmu(group); - - if (NULL != group->gp_core) { - MALI_DEBUG_ASSERT(NULL == group->pp_core); - mali_gp_reset(group->gp_core); - } else { - MALI_DEBUG_ASSERT(NULL != group->pp_core); - mali_group_reset_pp(group); - } + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(NULL == group->gp_running_job); + MALI_DEBUG_ASSERT(NULL == group->pp_running_job); + MALI_DEBUG_ASSERT(NULL == group->session); + + MALI_DEBUG_PRINT(3, ("Group: reset of %s\n", + mali_group_core_description(group))); + + if (NULL != group->dlbu_core) { + mali_dlbu_reset(group->dlbu_core); + } + + if (NULL != group->bcast_core) { + mali_bcast_reset(group->bcast_core); + } + + MALI_DEBUG_ASSERT(NULL != group->mmu); + mali_group_reset_mmu(group); + + if (NULL != group->gp_core) { + MALI_DEBUG_ASSERT(NULL == group->pp_core); + mali_gp_reset(group->gp_core); + } else { + MALI_DEBUG_ASSERT(NULL != group->pp_core); + mali_group_reset_pp(group); + } } void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job) { - struct mali_session_data *session; + struct mali_session_data *session; - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - MALI_DEBUG_PRINT(3, ("Group: Starting GP job 0x%08X on group %s\n", - job, - mali_group_core_description(group))); + MALI_DEBUG_PRINT(3, ("Group: Starting GP job 0x%08X on group %s\n", 
+ job, + mali_group_core_description(group))); - session = mali_gp_job_get_session(job); + session = mali_gp_job_get_session(job); - MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]); - mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job)); + MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]); + mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job)); - mali_group_activate_page_directory(group, session); + mali_group_activate_page_directory(group, session); - mali_gp_job_start(group->gp_core, job); + mali_gp_job_start(group->gp_core, job); - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | - MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | - MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, - mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0); - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | - MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), - mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | + MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, + mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), + mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0); #if defined(CONFIG_MALI400_PROFILING) - trace_mali_core_active(mali_gp_job_get_pid(job), 1 /* active */, 1 /* GP */, 0 /* core */, - mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job)); + trace_mali_core_active(mali_gp_job_get_pid(job), 1 /* active */, 1 /* GP */, 0 /* core */, + mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job)); #endif #if defined(CONFIG_MALI400_PROFILING) - if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && - (MALI_HW_CORE_NO_COUNTER != 
mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { - mali_group_report_l2_cache_counters_per_core(group, 0); - } + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, 0); + } #endif /* #if defined(CONFIG_MALI400_PROFILING) */ #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) - trace_gpu_sched_switch(mali_gp_core_description(group->gp_core), - sched_clock(), mali_gp_job_get_tid(job), - 0, mali_gp_job_get_id(job)); + trace_gpu_sched_switch(mali_gp_core_description(group->gp_core), + sched_clock(), mali_gp_job_get_tid(job), + 0, mali_gp_job_get_id(job)); #endif - group->gp_running_job = job; - group->is_working = MALI_TRUE; + group->gp_running_job = job; + group->is_working = MALI_TRUE; - /* Setup SW timer and record start time */ - group->start_time = _mali_osk_time_tickcount(); - _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime)); + /* Setup SW timer and record start time */ + group->start_time = _mali_osk_time_tickcount(); + _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime)); - MALI_DEBUG_PRINT(4, ("Group: Started GP job 0x%08X on group %s at %u\n", - job, - mali_group_core_description(group), - group->start_time)); + MALI_DEBUG_PRINT(4, ("Group: Started GP job 0x%08X on group %s at %u\n", + job, + mali_group_core_description(group), + group->start_time)); } /* Used to set all the registers except frame renderer list address and fragment shader stack address @@ -814,925 +872,954 @@ void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job) */ void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job) { - struct mali_session_data *session; + struct mali_session_data *session; - 
MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - MALI_DEBUG_PRINT(3, ("Group: Starting PP job 0x%08X part %u/%u on group %s\n", - job, sub_job + 1, - mali_pp_job_get_sub_job_count(job), - mali_group_core_description(group))); + MALI_DEBUG_PRINT(3, ("Group: Starting PP job 0x%08X part %u/%u on group %s\n", + job, sub_job + 1, + mali_pp_job_get_sub_job_count(job), + mali_group_core_description(group))); - session = mali_pp_job_get_session(job); + session = mali_pp_job_get_session(job); - if (NULL != group->l2_cache_core[0]) { - mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job)); - } + if (NULL != group->l2_cache_core[0]) { + mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job)); + } - if (NULL != group->l2_cache_core[1]) { - mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job)); - } + if (NULL != group->l2_cache_core[1]) { + mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job)); + } - mali_group_activate_page_directory(group, session); + mali_group_activate_page_directory(group, session); - if (mali_group_is_virtual(group)) { - struct mali_group *child; - struct mali_group *temp; - u32 core_num = 0; + if (mali_group_is_virtual(group)) { + struct mali_group *child; + struct mali_group *temp; + u32 core_num = 0; - MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job)); + MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job)); - /* Configure DLBU for the job */ - mali_dlbu_config_job(group->dlbu_core, job); + /* Configure DLBU for the job */ + mali_dlbu_config_job(group->dlbu_core, job); - /* Write stack address for each child group */ - _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { - mali_pp_write_addr_stack(child->pp_core, job); - core_num++; - } + /* Write stack address for each child group */ + _MALI_OSK_LIST_FOREACHENTRY(child, 
temp, &group->group_list, struct mali_group, group_list) { + mali_pp_write_addr_stack(child->pp_core, job); + core_num++; + } - mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE); - } else { - mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE); - } + mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE); + } else { + mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE); + } - /* if the group is virtual, loop through physical groups which belong to this group - * and call profiling events for its cores as virtual */ - if (MALI_TRUE == mali_group_is_virtual(group)) { - struct mali_group *child; - struct mali_group *temp; + /* if the group is virtual, loop through physical groups which belong to this group + * and call profiling events for its cores as virtual */ + if (MALI_TRUE == mali_group_is_virtual(group)) { + struct mali_group *child; + struct mali_group *temp; - _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | - MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | - MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, - mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0); + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | + MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0); - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | - MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | - MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL, - mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + 
MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | + MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL, + mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0); #if defined(CONFIG_MALI400_PROFILING) - trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core), - mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job)); + trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core), + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job)); #endif - } + } #if defined(CONFIG_MALI400_PROFILING) - if (0 != group->l2_cache_core_ref_count[0]) { - if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && - (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { - mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); - } - } - if (0 != group->l2_cache_core_ref_count[1]) { - if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) && - (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) { - mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1])); - } - } + if (0 != group->l2_cache_core_ref_count[0]) { + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); + } + } + if (0 != group->l2_cache_core_ref_count[1]) { + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) { + 
mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1])); + } + } #endif /* #if defined(CONFIG_MALI400_PROFILING) */ - } else { /* group is physical - call profiling events for physical cores */ - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | - MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) | - MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, - mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0); + } else { /* group is physical - call profiling events for physical cores */ + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) | + MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0); - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | - MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) | - MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL, - mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) | + MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL, + mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0); #if defined(CONFIG_MALI400_PROFILING) - trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core), - mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job)); + trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core), + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job)); #endif #if defined(CONFIG_MALI400_PROFILING) - if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && - (MALI_HW_CORE_NO_COUNTER != 
mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { - mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); - } + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); + } #endif /* #if defined(CONFIG_MALI400_PROFILING) */ - } + } #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) - trace_gpu_sched_switch(mali_pp_core_description(group->pp_core), - sched_clock(), mali_pp_job_get_tid(job), - 0, mali_pp_job_get_id(job)); + trace_gpu_sched_switch(mali_pp_core_description(group->pp_core), + sched_clock(), mali_pp_job_get_tid(job), + 0, mali_pp_job_get_id(job)); #endif - group->pp_running_job = job; - group->pp_running_sub_job = sub_job; - group->is_working = MALI_TRUE; + group->pp_running_job = job; + group->pp_running_sub_job = sub_job; + group->is_working = MALI_TRUE; - /* Setup SW timer and record start time */ - group->start_time = _mali_osk_time_tickcount(); - _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime)); + /* Setup SW timer and record start time */ + group->start_time = _mali_osk_time_tickcount(); + _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime)); - MALI_DEBUG_PRINT(4, ("Group: Started PP job 0x%08X part %u/%u on group %s at %u\n", - job, sub_job + 1, - mali_pp_job_get_sub_job_count(job), - mali_group_core_description(group), - group->start_time)); + MALI_DEBUG_PRINT(4, ("Group: Started PP job 0x%08X part %u/%u on group %s at %u\n", + job, sub_job + 1, + mali_pp_job_get_sub_job_count(job), + mali_group_core_description(group), + group->start_time)); } void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr) { - 
MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]); - mali_l2_cache_invalidate(group->l2_cache_core[0]); + MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]); + mali_l2_cache_invalidate(group->l2_cache_core[0]); - mali_mmu_zap_tlb_without_stall(group->mmu); + mali_mmu_zap_tlb_without_stall(group->mmu); - mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr); + mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr); - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME | - MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), - 0, 0, 0, 0, 0); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME | + MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), + 0, 0, 0, 0, 0); #if defined(CONFIG_MALI400_PROFILING) - trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */, 0 /* core */, - mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job)); + trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */, 0 /* core */, + mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job)); #endif } static void mali_group_reset_mmu(struct mali_group *group) { - struct mali_group *child; - struct mali_group *temp; - _mali_osk_errcode_t err; - - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - - if (!mali_group_is_virtual(group)) { - /* This is a physical group or an idle virtual group -- simply wait for - * the reset to complete. */ - err = mali_mmu_reset(group->mmu); - MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err); - } else { /* virtual group */ - /* Loop through all members of this virtual group and wait - * until they are done resetting. 
- */ - _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { - err = mali_mmu_reset(child->mmu); - MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err); - } - } + struct mali_group *child; + struct mali_group *temp; + _mali_osk_errcode_t err; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + if (!mali_group_is_virtual(group)) { + /* This is a physical group or an idle virtual group -- simply wait for + * the reset to complete. */ + err = mali_mmu_reset(group->mmu); + MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err); + } else { /* virtual group */ + /* Loop through all members of this virtual group and wait + * until they are done resetting. + */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + err = mali_mmu_reset(child->mmu); + MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err); + } + } } static void mali_group_reset_pp(struct mali_group *group) { - struct mali_group *child; - struct mali_group *temp; - - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - - mali_pp_reset_async(group->pp_core); - - if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) { - /* This is a physical group or an idle virtual group -- simply wait for - * the reset to complete. */ - mali_pp_reset_wait(group->pp_core); - } else { - /* Loop through all members of this virtual group and wait until they - * are done resetting. - */ - _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { - mali_pp_reset_wait(child->pp_core); - } - } + struct mali_group *child; + struct mali_group *temp; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + mali_pp_reset_async(group->pp_core); + + if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) { + /* This is a physical group or an idle virtual group -- simply wait for + * the reset to complete. */ + mali_pp_reset_wait(group->pp_core); + } else { + /* Loop through all members of this virtual group and wait until they + * are done resetting. 
+ */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + mali_pp_reset_wait(child->pp_core); + } + } } struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job) { - struct mali_pp_job *pp_job_to_return; + struct mali_pp_job *pp_job_to_return; - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT_POINTER(group->pp_core); - MALI_DEBUG_ASSERT_POINTER(group->pp_running_job); - MALI_DEBUG_ASSERT_POINTER(sub_job); - MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + MALI_DEBUG_ASSERT_POINTER(group->pp_running_job); + MALI_DEBUG_ASSERT_POINTER(sub_job); + MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working); - /* Stop/clear the timeout timer. */ - _mali_osk_timer_del_async(group->timeout_timer); + /* Stop/clear the timeout timer. */ + _mali_osk_timer_del_async(group->timeout_timer); - if (NULL != group->pp_running_job) { + if (NULL != group->pp_running_job) { - /* Deal with HW counters and profiling */ + /* Deal with HW counters and profiling */ - if (MALI_TRUE == mali_group_is_virtual(group)) { - struct mali_group *child; - struct mali_group *temp; + if (MALI_TRUE == mali_group_is_virtual(group)) { + struct mali_group *child; + struct mali_group *temp; - /* update performance counters from each physical pp core within this virtual group */ - _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { - mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core)); - } + /* update performance counters from each physical pp core within this virtual group */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + mali_pp_update_performance_counters(group->pp_core, child->pp_core, 
group->pp_running_job, mali_pp_core_get_id(child->pp_core)); + } #if defined(CONFIG_MALI400_PROFILING) - /* send profiling data per physical core */ - _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | - MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | - MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL, - mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)), - mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)), - mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8), - 0, 0); - - trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job), - 0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core), - mali_pp_job_get_frame_builder_id(group->pp_running_job), - mali_pp_job_get_flush_id(group->pp_running_job)); - } - if (0 != group->l2_cache_core_ref_count[0]) { - if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && - (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { - mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); - } - } - if (0 != group->l2_cache_core_ref_count[1]) { - if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) && - (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) { - mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1])); - } - } + /* send profiling data per physical core */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + 
MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | + MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL, + mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)), + mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)), + mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8), + 0, 0); + + trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job), + 0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core), + mali_pp_job_get_frame_builder_id(group->pp_running_job), + mali_pp_job_get_flush_id(group->pp_running_job)); + } + if (0 != group->l2_cache_core_ref_count[0]) { + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); + } + } + if (0 != group->l2_cache_core_ref_count[1]) { + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1])); + } + } #endif - } else { - /* update performance counters for a physical group's pp core */ - mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job); + } else { + /* update performance counters for a physical group's pp core */ + mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job); #if defined(CONFIG_MALI400_PROFILING) - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | - 
MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) | - MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL, - mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job), - mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job), - mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8), - 0, 0); - - trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job), - 0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core), - mali_pp_job_get_frame_builder_id(group->pp_running_job), - mali_pp_job_get_flush_id(group->pp_running_job)); - - if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && - (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { - mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); - } + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) | + MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL, + mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job), + mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job), + mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8), + 0, 0); + + trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job), + 0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core), + mali_pp_job_get_frame_builder_id(group->pp_running_job), + mali_pp_job_get_flush_id(group->pp_running_job)); + + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != 
mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); + } #endif - } + } #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) - trace_gpu_sched_switch( - mali_gp_core_description(group->gp_core), - sched_clock(), 0, 0, 0); + trace_gpu_sched_switch( + mali_gp_core_description(group->gp_core), + sched_clock(), 0, 0, 0); #endif - } + } - if (success) { - /* Only do soft reset for successful jobs, a full recovery - * reset will be done for failed jobs. */ - mali_pp_reset_async(group->pp_core); - } + if (success) { + /* Only do soft reset for successful jobs, a full recovery + * reset will be done for failed jobs. */ + mali_pp_reset_async(group->pp_core); + } - pp_job_to_return = group->pp_running_job; - group->pp_running_job = NULL; - group->is_working = MALI_FALSE; - *sub_job = group->pp_running_sub_job; + pp_job_to_return = group->pp_running_job; + group->pp_running_job = NULL; + group->is_working = MALI_FALSE; + *sub_job = group->pp_running_sub_job; - if (!success) { - MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n")); - mali_group_recovery_reset(group); - } else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) { - MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n")); - mali_group_recovery_reset(group); - } + if (!success) { + MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n")); + mali_group_recovery_reset(group); + } else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) { + MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n")); + mali_group_recovery_reset(group); + } - return pp_job_to_return; + return pp_job_to_return; } struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success) { - struct mali_gp_job *gp_job_to_return; + struct mali_gp_job *gp_job_to_return; - 
MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT_POINTER(group->gp_core); - MALI_DEBUG_ASSERT_POINTER(group->gp_running_job); - MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_POINTER(group->gp_running_job); + MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working); - /* Stop/clear the timeout timer. */ - _mali_osk_timer_del_async(group->timeout_timer); + /* Stop/clear the timeout timer. */ + _mali_osk_timer_del_async(group->timeout_timer); - if (NULL != group->gp_running_job) { - mali_gp_update_performance_counters(group->gp_core, group->gp_running_job); + if (NULL != group->gp_running_job) { + mali_gp_update_performance_counters(group->gp_core, group->gp_running_job); #if defined(CONFIG_MALI400_PROFILING) - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), - mali_gp_job_get_perf_counter_value0(group->gp_running_job), - mali_gp_job_get_perf_counter_value1(group->gp_running_job), - mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8), - 0, 0); - - if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && - (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) - mali_group_report_l2_cache_counters_per_core(group, 0); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), + mali_gp_job_get_perf_counter_value0(group->gp_running_job), + mali_gp_job_get_perf_counter_value1(group->gp_running_job), + mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8), + 0, 0); + + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + 
(MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) + mali_group_report_l2_cache_counters_per_core(group, 0); #endif #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) - trace_gpu_sched_switch( - mali_pp_core_description(group->pp_core), - sched_clock(), 0, 0, 0); + trace_gpu_sched_switch( + mali_pp_core_description(group->pp_core), + sched_clock(), 0, 0, 0); #endif #if defined(CONFIG_MALI400_PROFILING) - trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */, 0 /* core */, - mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job)); + trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */, 0 /* core */, + mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job)); #endif - mali_gp_job_set_current_heap_addr(group->gp_running_job, - mali_gp_read_plbu_alloc_start_addr(group->gp_core)); - } - - if (success) { - /* Only do soft reset for successful jobs, a full recovery - * reset will be done for failed jobs. */ - mali_gp_reset_async(group->gp_core); - } - - gp_job_to_return = group->gp_running_job; - group->gp_running_job = NULL; - group->is_working = MALI_FALSE; - - if (!success) { - MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n")); - mali_group_recovery_reset(group); - } else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) { - MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n")); - mali_group_recovery_reset(group); - } - - return gp_job_to_return; + mali_gp_job_set_current_heap_addr(group->gp_running_job, + mali_gp_read_plbu_alloc_start_addr(group->gp_core)); + } + + if (success) { + /* Only do soft reset for successful jobs, a full recovery + * reset will be done for failed jobs. 
*/ + mali_gp_reset_async(group->gp_core); + } + + gp_job_to_return = group->gp_running_job; + group->gp_running_job = NULL; + group->is_working = MALI_FALSE; + + if (!success) { + MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n")); + mali_group_recovery_reset(group); + } else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) { + MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n")); + mali_group_recovery_reset(group); + } + + return gp_job_to_return; } struct mali_group *mali_group_get_glob_group(u32 index) { - if (mali_global_num_groups > index) { - return mali_global_groups[index]; - } + if (mali_global_num_groups > index) { + return mali_global_groups[index]; + } - return NULL; + return NULL; } u32 mali_group_get_glob_num_groups(void) { - return mali_global_num_groups; + return mali_global_num_groups; } static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session) { - MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n", - mali_session_get_page_directory(session), session, - mali_group_core_description(group))); - - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - - if (group->session != session) { - /* Different session than last time, so we need to do some work */ - MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group %s\n", - session, group->session, - mali_group_core_description(group))); - mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session)); - group->session = session; - } else { - /* Same session as last time, so no work required */ - MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group %s\n", - session->page_directory, - mali_group_core_description(group))); - mali_mmu_zap_tlb_without_stall(group->mmu); - } + MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n", + 
mali_session_get_page_directory(session), session, + mali_group_core_description(group))); + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + if (group->session != session) { + /* Different session than last time, so we need to do some work */ + MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group %s\n", + session, group->session, + mali_group_core_description(group))); + mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session)); + group->session = session; + } else { + /* Same session as last time, so no work required */ + MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group %s\n", + session->page_directory, + mali_group_core_description(group))); + mali_mmu_zap_tlb_without_stall(group->mmu); + } } static void mali_group_recovery_reset(struct mali_group *group) { - _mali_osk_errcode_t err; - - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - - /* Stop cores, bus stop */ - if (NULL != group->pp_core) { - mali_pp_stop_bus(group->pp_core); - } else { - mali_gp_stop_bus(group->gp_core); - } - - /* Flush MMU and clear page fault (if any) */ - mali_mmu_activate_fault_flush_page_directory(group->mmu); - mali_mmu_page_fault_done(group->mmu); - - /* Wait for cores to stop bus, then do a hard reset on them */ - if (NULL != group->pp_core) { - if (mali_group_is_virtual(group)) { - struct mali_group *child, *temp; - - /* Disable the broadcast unit while we do reset directly on the member cores. 
*/ - mali_bcast_disable(group->bcast_core); - - _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { - mali_pp_stop_bus_wait(child->pp_core); - mali_pp_hard_reset(child->pp_core); - } - - mali_bcast_enable(group->bcast_core); - } else { - mali_pp_stop_bus_wait(group->pp_core); - mali_pp_hard_reset(group->pp_core); - } - } else { - mali_gp_stop_bus_wait(group->gp_core); - mali_gp_hard_reset(group->gp_core); - } - - /* Reset MMU */ - err = mali_mmu_reset(group->mmu); - MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err); - MALI_IGNORE(err); - - group->session = NULL; + _mali_osk_errcode_t err; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + /* Stop cores, bus stop */ + if (NULL != group->pp_core) { + mali_pp_stop_bus(group->pp_core); + } else { + mali_gp_stop_bus(group->gp_core); + } + + /* Flush MMU and clear page fault (if any) */ + mali_mmu_activate_fault_flush_page_directory(group->mmu); + mali_mmu_page_fault_done(group->mmu); + + /* Wait for cores to stop bus, then do a hard reset on them */ + if (NULL != group->pp_core) { + if (mali_group_is_virtual(group)) { + struct mali_group *child, *temp; + + /* Disable the broadcast unit while we do reset directly on the member cores. 
*/ + mali_bcast_disable(group->bcast_core); + + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + mali_pp_stop_bus_wait(child->pp_core); + mali_pp_hard_reset(child->pp_core); + } + + mali_bcast_enable(group->bcast_core); + } else { + mali_pp_stop_bus_wait(group->pp_core); + mali_pp_hard_reset(group->pp_core); + } + } else { + mali_gp_stop_bus_wait(group->gp_core); + mali_gp_hard_reset(group->gp_core); + } + + /* Reset MMU */ + err = mali_mmu_reset(group->mmu); + MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err); + MALI_IGNORE(err); + + group->session = NULL; } #if MALI_STATE_TRACKING u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size) { - int n = 0; - int i; - struct mali_group *child; - struct mali_group *temp; - - if (mali_group_is_virtual(group)) { - n += _mali_osk_snprintf(buf + n, size - n, - "Virtual PP Group: %p\n", group); - } else if (mali_group_is_in_virtual(group)) { - n += _mali_osk_snprintf(buf + n, size - n, - "Child PP Group: %p\n", group); - } else if (NULL != group->pp_core) { - n += _mali_osk_snprintf(buf + n, size - n, - "Physical PP Group: %p\n", group); - } else { - MALI_DEBUG_ASSERT_POINTER(group->gp_core); - n += _mali_osk_snprintf(buf + n, size - n, - "GP Group: %p\n", group); - } - - switch (group->state) { - case MALI_GROUP_STATE_INACTIVE: - n += _mali_osk_snprintf(buf + n, size - n, - "\tstate: INACTIVE\n"); - break; - case MALI_GROUP_STATE_ACTIVATION_PENDING: - n += _mali_osk_snprintf(buf + n, size - n, - "\tstate: ACTIVATION_PENDING\n"); - break; - case MALI_GROUP_STATE_ACTIVE: - n += _mali_osk_snprintf(buf + n, size - n, - "\tstate: MALI_GROUP_STATE_ACTIVE\n"); - break; - default: - n += _mali_osk_snprintf(buf + n, size - n, - "\tstate: UNKNOWN (%d)\n", group->state); - MALI_DEBUG_ASSERT(0); - break; - } - - n += _mali_osk_snprintf(buf + n, size - n, - "\tSW power: %s\n", - group->power_is_on ? 
"On" : "Off"); - - n += mali_pm_dump_state_domain(group->pm_domain, buf + n, size - n); - - for (i = 0; i < 2; i++) { - if (NULL != group->l2_cache_core[i]) { - struct mali_pm_domain *domain; - domain = mali_l2_cache_get_pm_domain( - group->l2_cache_core[i]); - n += mali_pm_dump_state_domain(domain, - buf + n, size - n); - } - } - - if (group->gp_core) { - n += mali_gp_dump_state(group->gp_core, buf + n, size - n); - n += _mali_osk_snprintf(buf + n, size - n, - "\tGP running job: %p\n", group->gp_running_job); - } - - if (group->pp_core) { - n += mali_pp_dump_state(group->pp_core, buf + n, size - n); - n += _mali_osk_snprintf(buf + n, size - n, - "\tPP running job: %p, subjob %d \n", - group->pp_running_job, - group->pp_running_sub_job); - } - - _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, - struct mali_group, group_list) { - n += mali_group_dump_state(child, buf + n, size - n); - } - - return n; + int n = 0; + int i; + struct mali_group *child; + struct mali_group *temp; + + if (mali_group_is_virtual(group)) { + n += _mali_osk_snprintf(buf + n, size - n, + "Virtual PP Group: %p\n", group); + } else if (mali_group_is_in_virtual(group)) { + n += _mali_osk_snprintf(buf + n, size - n, + "Child PP Group: %p\n", group); + } else if (NULL != group->pp_core) { + n += _mali_osk_snprintf(buf + n, size - n, + "Physical PP Group: %p\n", group); + } else { + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + n += _mali_osk_snprintf(buf + n, size - n, + "GP Group: %p\n", group); + } + + switch (group->state) { + case MALI_GROUP_STATE_INACTIVE: + n += _mali_osk_snprintf(buf + n, size - n, + "\tstate: INACTIVE\n"); + break; + case MALI_GROUP_STATE_ACTIVATION_PENDING: + n += _mali_osk_snprintf(buf + n, size - n, + "\tstate: ACTIVATION_PENDING\n"); + break; + case MALI_GROUP_STATE_ACTIVE: + n += _mali_osk_snprintf(buf + n, size - n, + "\tstate: MALI_GROUP_STATE_ACTIVE\n"); + break; + default: + n += _mali_osk_snprintf(buf + n, size - n, + "\tstate: UNKNOWN (%d)\n", 
group->state); + MALI_DEBUG_ASSERT(0); + break; + } + + n += _mali_osk_snprintf(buf + n, size - n, + "\tSW power: %s\n", + group->power_is_on ? "On" : "Off"); + + n += mali_pm_dump_state_domain(group->pm_domain, buf + n, size - n); + + for (i = 0; i < 2; i++) { + if (NULL != group->l2_cache_core[i]) { + struct mali_pm_domain *domain; + domain = mali_l2_cache_get_pm_domain( + group->l2_cache_core[i]); + n += mali_pm_dump_state_domain(domain, + buf + n, size - n); + } + } + + if (group->gp_core) { + n += mali_gp_dump_state(group->gp_core, buf + n, size - n); + n += _mali_osk_snprintf(buf + n, size - n, + "\tGP running job: %p\n", group->gp_running_job); + } + + if (group->pp_core) { + n += mali_pp_dump_state(group->pp_core, buf + n, size - n); + n += _mali_osk_snprintf(buf + n, size - n, + "\tPP running job: %p, subjob %d \n", + group->pp_running_job, + group->pp_running_sub_job); + } + + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, + struct mali_group, group_list) { + n += mali_group_dump_state(child, buf + n, size - n); + } + + return n; } #endif -/* Kasin added. 
*/ -#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6 -#include -#define INT_MALI_PP2_MMU ( 6+32) -struct _mali_osk_irq_t_struct; -u32 get_irqnum(struct _mali_osk_irq_t_struct* irq); -#endif - _mali_osk_errcode_t mali_group_upper_half_mmu(void *data) { - struct mali_group *group = (struct mali_group *)data; -#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6 - struct mali_mmu_core *mmu = group->mmu; + struct mali_group *group = (struct mali_group *)data; + _mali_osk_errcode_t ret; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } +#endif + if (NULL != group->gp_core) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), + mali_mmu_get_rawstat(group->mmu), 0); + } else { + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( + mali_pp_core_get_id(group->pp_core)), + mali_mmu_get_rawstat(group->mmu), 0); + } +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); #endif - _mali_osk_errcode_t ret; - - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT_POINTER(group->mmu); - -#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6 - if (MALI_FALSE == group->power_is_on) - MALI_SUCCESS; - if (get_irqnum(mmu->irq) == INT_MALI_PP2_MMU) - { - if (group == NULL || group->pp_core == NULL) - MALI_SUCCESS; - if 
(group->pp_core->core_id == 0) { - if (malifix_get_mmu_int_process_state(0) == MMU_INT_HIT) - malifix_set_mmu_int_process_state(0, MMU_INT_TOP); - else - MALI_SUCCESS; - } - else if (group->pp_core->core_id == 1) { - if (malifix_get_mmu_int_process_state(1) == MMU_INT_HIT) - malifix_set_mmu_int_process_state(1, MMU_INT_TOP); - else - MALI_SUCCESS; - } else - MALI_SUCCESS; - } #endif - if (NULL != group->gp_core) { - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, - 0, 0, /* No pid and tid for interrupt handler */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), - mali_mmu_get_rawstat(group->mmu), 0); - } else { - MALI_DEBUG_ASSERT_POINTER(group->pp_core); - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, - 0, 0, /* No pid and tid for interrupt handler */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( - mali_pp_core_get_id(group->pp_core)), - mali_mmu_get_rawstat(group->mmu), 0); - } - - ret = mali_executor_interrupt_mmu(group, MALI_TRUE); - - if (NULL != group->gp_core) { - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, - 0, 0, /* No pid and tid for interrupt handler */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), - mali_mmu_get_rawstat(group->mmu), 0); - } else { - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, - 0, 0, /* No pid and tid for interrupt handler */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( - mali_pp_core_get_id(group->pp_core)), - mali_mmu_get_rawstat(group->mmu), 0); - } - - return ret; -} - -static void mali_group_bottom_half_mmu(void *data) -{ - struct mali_group *group = (struct mali_group 
*)data; -#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6 - struct mali_mmu_core *mmu = group->mmu; + ret = mali_executor_interrupt_mmu(group, MALI_TRUE); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) { + /* group complete and on job shedule on it, it already power off */ + if (NULL != group->gp_core) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), + 0xFFFFFFFF, 0); + } else { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( + mali_pp_core_get_id(group->pp_core)), + 0xFFFFFFFF, 0); + } + + mali_executor_unlock(); + return ret; + } #endif - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT_POINTER(group->mmu); - - if (NULL != group->gp_core) { - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, - 0, _mali_osk_get_tid(), /* pid and tid */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), - mali_mmu_get_rawstat(group->mmu), 0); - } else { - MALI_DEBUG_ASSERT_POINTER(group->pp_core); - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, - 0, _mali_osk_get_tid(), /* pid and tid */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( - mali_pp_core_get_id(group->pp_core)), - mali_mmu_get_rawstat(group->mmu), 0); - } - - mali_executor_interrupt_mmu(group, MALI_FALSE); - - if 
(NULL != group->gp_core) { - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, - 0, _mali_osk_get_tid(), /* pid and tid */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), - mali_mmu_get_rawstat(group->mmu), 0); - } else { - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, - 0, _mali_osk_get_tid(), /* pid and tid */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( - mali_pp_core_get_id(group->pp_core)), - mali_mmu_get_rawstat(group->mmu), 0); - } -#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6 - if (get_irqnum(mmu->irq) == INT_MALI_PP2_MMU) - { - if (group->pp_core->core_id == 0) { - if (malifix_get_mmu_int_process_state(0) == MMU_INT_TOP) - malifix_set_mmu_int_process_state(0, MMU_INT_NONE); - } - else if (group->pp_core->core_id == 1) { - if (malifix_get_mmu_int_process_state(1) == MMU_INT_TOP) - malifix_set_mmu_int_process_state(1, MMU_INT_NONE); - } - } + if (NULL != group->gp_core) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), + mali_mmu_get_rawstat(group->mmu), 0); + } else { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( + mali_pp_core_get_id(group->pp_core)), + mali_mmu_get_rawstat(group->mmu), 0); + } +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif #endif -} -_mali_osk_errcode_t mali_group_upper_half_gp(void *data) -{ - struct mali_group *group = (struct mali_group *)data; - 
_mali_osk_errcode_t ret; - - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT_POINTER(group->gp_core); - MALI_DEBUG_ASSERT_POINTER(group->mmu); - - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, - 0, 0, /* No pid and tid for interrupt handler */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), - mali_gp_get_rawstat(group->gp_core), 0); - - MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n", - mali_gp_get_rawstat(group->gp_core), - mali_group_core_description(group))); - - ret = mali_executor_interrupt_gp(group, MALI_TRUE); - - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, - 0, 0, /* No pid and tid for interrupt handler */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), - mali_gp_get_rawstat(group->gp_core), 0); - - return ret; + return ret; } -static void mali_group_bottom_half_gp(void *data) +static void mali_group_bottom_half_mmu(void *data) { - struct mali_group *group = (struct mali_group *)data; - - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT_POINTER(group->gp_core); - MALI_DEBUG_ASSERT_POINTER(group->mmu); - - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, - 0, _mali_osk_get_tid(), /* pid and tid */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), - mali_gp_get_rawstat(group->gp_core), 0); - - mali_executor_interrupt_gp(group, MALI_FALSE); - - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, - 0, _mali_osk_get_tid(), /* pid and tid */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), - mali_gp_get_rawstat(group->gp_core), 0); + struct mali_group *group = (struct mali_group *)data; + + 
MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + + if (NULL != group->gp_core) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), + mali_mmu_get_rawstat(group->mmu), 0); + } else { + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( + mali_pp_core_get_id(group->pp_core)), + mali_mmu_get_rawstat(group->mmu), 0); + } + + mali_executor_interrupt_mmu(group, MALI_FALSE); + + if (NULL != group->gp_core) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), + mali_mmu_get_rawstat(group->mmu), 0); + } else { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( + mali_pp_core_get_id(group->pp_core)), + mali_mmu_get_rawstat(group->mmu), 0); + } } -#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6 -int PP0_int_cnt = 0; -int mali_PP0_int_cnt(void) +_mali_osk_errcode_t mali_group_upper_half_gp(void *data) { - return PP0_int_cnt; + struct mali_group *group = (struct mali_group *)data; + _mali_osk_errcode_t ret; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if 
defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } +#endif + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), + mali_gp_get_rawstat(group->gp_core), 0); + + MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n", + mali_gp_get_rawstat(group->gp_core), + mali_group_core_description(group))); +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif +#endif + ret = mali_executor_interrupt_gp(group, MALI_TRUE); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) { + /* group complete and on job shedule on it, it already power off */ + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), + 0xFFFFFFFF, 0); + mali_executor_unlock(); + return ret; + } +#endif + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), + mali_gp_get_rawstat(group->gp_core), 0); +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif +#endif + return ret; } -EXPORT_SYMBOL(mali_PP0_int_cnt); -int PP1_int_cnt = 0; -int mali_PP1_int_cnt(void) +static void mali_group_bottom_half_gp(void *data) { - return PP1_int_cnt; + struct mali_group *group = 
(struct mali_group *)data; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), + mali_gp_get_rawstat(group->gp_core), 0); + + mali_executor_interrupt_gp(group, MALI_FALSE); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), + mali_gp_get_rawstat(group->gp_core), 0); } -EXPORT_SYMBOL(mali_PP1_int_cnt); -#endif _mali_osk_errcode_t mali_group_upper_half_pp(void *data) { - struct mali_group *group = (struct mali_group *)data; -#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6 - struct mali_pp_core *core = group->pp_core; -#endif - _mali_osk_errcode_t ret; - - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT_POINTER(group->pp_core); - MALI_DEBUG_ASSERT_POINTER(group->mmu); - - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, - 0, 0, /* No pid and tid for interrupt handler */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( - mali_pp_core_get_id(group->pp_core)), - mali_pp_get_rawstat(group->pp_core), 0); - - MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n", - mali_pp_get_rawstat(group->pp_core), - mali_group_core_description(group))); - - ret = mali_executor_interrupt_pp(group, MALI_TRUE); - -#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6 - if (core->core_id == 0) - PP0_int_cnt++; - else if (core->core_id == 1) - PP1_int_cnt++; + struct mali_group *group = (struct mali_group *)data; + _mali_osk_errcode_t ret; + + 
MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } #endif - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, - 0, 0, /* No pid and tid for interrupt handler */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( - mali_pp_core_get_id(group->pp_core)), - mali_pp_get_rawstat(group->pp_core), 0); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( + mali_pp_core_get_id(group->pp_core)), + mali_pp_get_rawstat(group->pp_core), 0); + + MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n", + mali_pp_get_rawstat(group->pp_core), + mali_group_core_description(group))); +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif +#endif - return ret; + ret = mali_executor_interrupt_pp(group, MALI_TRUE); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) { + /* group complete and on job shedule on it, it already power off */ + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( + mali_pp_core_get_id(group->pp_core)), + 0xFFFFFFFF, 0); + 
mali_executor_unlock(); + return ret; + } +#endif + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( + mali_pp_core_get_id(group->pp_core)), + mali_pp_get_rawstat(group->pp_core), 0); +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif +#endif + return ret; } static void mali_group_bottom_half_pp(void *data) { - struct mali_group *group = (struct mali_group *)data; - - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT_POINTER(group->pp_core); - MALI_DEBUG_ASSERT_POINTER(group->mmu); - - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, - 0, _mali_osk_get_tid(), /* pid and tid */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( - mali_pp_core_get_id(group->pp_core)), - mali_pp_get_rawstat(group->pp_core), 0); - - mali_executor_interrupt_pp(group, MALI_FALSE); - - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | - MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | - MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, - 0, _mali_osk_get_tid(), /* pid and tid */ - MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( - mali_pp_core_get_id(group->pp_core)), - mali_pp_get_rawstat(group->pp_core), 0); + struct mali_group *group = (struct mali_group *)data; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( + mali_pp_core_get_id(group->pp_core)), + mali_pp_get_rawstat(group->pp_core), 0); + + mali_executor_interrupt_pp(group, 
MALI_FALSE); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( + mali_pp_core_get_id(group->pp_core)), + mali_pp_get_rawstat(group->pp_core), 0); } static void mali_group_timeout(void *data) { - struct mali_group *group = (struct mali_group *)data; - MALI_DEBUG_ASSERT_POINTER(group); - - MALI_DEBUG_PRINT(2, ("Group: timeout handler for %s at %u\n", - mali_group_core_description(group), - _mali_osk_time_tickcount())); - - if (mali_core_timeout < 65533) - mali_core_timeout++; - if (NULL != group->gp_core) { - mali_group_schedule_bottom_half_gp(group); - } else { - MALI_DEBUG_ASSERT_POINTER(group->pp_core); - mali_group_schedule_bottom_half_pp(group); - } + struct mali_group *group = (struct mali_group *)data; + MALI_DEBUG_ASSERT_POINTER(group); + + MALI_DEBUG_PRINT(2, ("Group: timeout handler for %s at %u\n", + mali_group_core_description(group), + _mali_osk_time_tickcount())); + + if (NULL != group->gp_core) { + mali_group_schedule_bottom_half_gp(group); + } else { + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + mali_group_schedule_bottom_half_pp(group); + } } mali_bool mali_group_zap_session(struct mali_group *group, - struct mali_session_data *session) + struct mali_session_data *session) { - MALI_DEBUG_ASSERT_POINTER(group); - MALI_DEBUG_ASSERT_POINTER(session); - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - - if (group->session != session) { - /* not running from this session */ - return MALI_TRUE; /* success */ - } - - if (group->is_working) { - /* The Zap also does the stall and disable_stall */ - mali_bool zap_success = mali_mmu_zap_tlb(group->mmu); - return zap_success; - } else { - /* Just remove the session instead of zapping */ - mali_group_clear_session(group); - return MALI_TRUE; /* success */ - } + MALI_DEBUG_ASSERT_POINTER(group); + 
MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + if (group->session != session) { + /* not running from this session */ + return MALI_TRUE; /* success */ + } + + if (group->is_working) { + /* The Zap also does the stall and disable_stall */ + mali_bool zap_success = mali_mmu_zap_tlb(group->mmu); + return zap_success; + } else { + /* Just remove the session instead of zapping */ + mali_group_clear_session(group); + return MALI_TRUE; /* success */ + } } #if defined(CONFIG_MALI400_PROFILING) static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num) { - u32 source0 = 0; - u32 value0 = 0; - u32 source1 = 0; - u32 value1 = 0; - u32 profiling_channel = 0; - - MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); - - switch (core_num) { - case 0: - profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | - MALI_PROFILING_EVENT_CHANNEL_GPU | - MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS; - break; - case 1: - profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | - MALI_PROFILING_EVENT_CHANNEL_GPU | - MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS; - break; - case 2: - profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | - MALI_PROFILING_EVENT_CHANNEL_GPU | - MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS; - break; - default: - profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | - MALI_PROFILING_EVENT_CHANNEL_GPU | - MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS; - break; - } - - if (0 == core_num) { - mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1); - } - if (1 == core_num) { - if (1 == mali_l2_cache_get_id(group->l2_cache_core[0])) { - mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1); - } else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1])) { - mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1); - } - } - if (2 == core_num) { - 
if (2 == mali_l2_cache_get_id(group->l2_cache_core[0])) { - mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1); - } else if (2 == mali_l2_cache_get_id(group->l2_cache_core[1])) { - mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1); - } - } - - _mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0); + u32 source0 = 0; + u32 value0 = 0; + u32 source1 = 0; + u32 value1 = 0; + u32 profiling_channel = 0; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + switch (core_num) { + case 0: + profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS; + break; + case 1: + profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS; + break; + case 2: + profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS; + break; + default: + profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS; + break; + } + + if (0 == core_num) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1); + } + if (1 == core_num) { + if (1 == mali_l2_cache_get_id(group->l2_cache_core[0])) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1); + } else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1])) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1); + } + } + if (2 == core_num) { + if (2 == mali_l2_cache_get_id(group->l2_cache_core[0])) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1); + } else if (2 == 
mali_l2_cache_get_id(group->l2_cache_core[1])) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1); + } + } + + _mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0); } #endif /* #if defined(CONFIG_MALI400_PROFILING) */ diff --git a/drivers/gpu/arm/mali/common/mali_group.h b/drivers/gpu/arm/mali/common/mali_group.h index 9f9fd6bd4f681..f2c0313810482 100755 --- a/drivers/gpu/arm/mali/common/mali_group.h +++ b/drivers/gpu/arm/mali/common/mali_group.h @@ -96,6 +96,8 @@ struct mali_group *mali_group_create(struct mali_l2_cache_core *core, struct mali_bcast_unit *bcast, u32 domain_index); +void mali_group_dump_status(struct mali_group *group); + void mali_group_delete(struct mali_group *group); _mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, @@ -126,7 +128,7 @@ MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group) { MALI_DEBUG_ASSERT_POINTER(group); -#if defined(CONFIG_MALI450) +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) return (NULL != group->dlbu_core); #else return MALI_FALSE; @@ -140,7 +142,7 @@ MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group) MALI_DEBUG_ASSERT_POINTER(group); MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); -#if defined(CONFIG_MALI450) +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) return (NULL != group->parent_group) ? 
MALI_TRUE : MALI_FALSE; #else return MALI_FALSE; @@ -409,6 +411,7 @@ MALI_STATIC_INLINE void mali_group_schedule_bottom_half_gp(struct mali_group *gr _mali_osk_wq_schedule_work(group->bottom_half_work_gp); } + MALI_STATIC_INLINE void mali_group_schedule_bottom_half_pp(struct mali_group *group) { MALI_DEBUG_ASSERT_POINTER(group); diff --git a/drivers/gpu/arm/mali/common/mali_hw_core.c b/drivers/gpu/arm/mali/common/mali_hw_core.c index c90cf38d8516c..dd10cfe89aeb8 100755 --- a/drivers/gpu/arm/mali/common/mali_hw_core.c +++ b/drivers/gpu/arm/mali/common/mali_hw_core.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_hw_core.h b/drivers/gpu/arm/mali/common/mali_hw_core.h index 9fbac2321e9ce..e4357253998be 100755 --- a/drivers/gpu/arm/mali/common/mali_hw_core.h +++ b/drivers/gpu/arm/mali/common/mali_hw_core.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -26,7 +26,7 @@ struct mali_hw_core { const char *description; /**< Name of unit (as specified in device configuration) */ }; -#define MALI_REG_POLL_COUNT_FAST 1000 +#define MALI_REG_POLL_COUNT_FAST 1000000 #define MALI_REG_POLL_COUNT_SLOW 1000000 /* diff --git a/drivers/gpu/arm/mali/common/mali_kernel_common.h b/drivers/gpu/arm/mali/common/mali_kernel_common.h index 990cf3ac78e02..9bae26579866c 100755 --- a/drivers/gpu/arm/mali/common/mali_kernel_common.h +++ b/drivers/gpu/arm/mali/common/mali_kernel_common.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_kernel_core.c b/drivers/gpu/arm/mali/common/mali_kernel_core.c index 4cc2ea227051e..6a62c9298f355 100755 --- a/drivers/gpu/arm/mali/common/mali_kernel_core.c +++ b/drivers/gpu/arm/mali/common/mali_kernel_core.c @@ -41,6 +41,7 @@ #endif #include "mali_control_timer.h" #include "mali_dvfs_policy.h" +#include #define MALI_SHARED_MEMORY_DEFAULT_SIZE 0xffffffff @@ -74,6 +75,7 @@ static u32 global_gpu_major_version = 0; static u32 global_gpu_minor_version = 0; mali_bool mali_gpu_class_is_mali450 = MALI_FALSE; +mali_bool mali_gpu_class_is_mali470 = MALI_FALSE; static _mali_osk_errcode_t mali_set_global_gpu_base_address(void) { @@ -154,6 +156,10 @@ static _mali_osk_errcode_t mali_parse_product_info(void) global_product_id = _MALI_PRODUCT_ID_MALI450; MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version)); break; + case MALI470_PP_PRODUCT_ID: + global_product_id = _MALI_PRODUCT_ID_MALI470; + MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-470 MP r%up%u\n", 
global_gpu_major_version, global_gpu_minor_version)); + break; default: MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version)); return _MALI_OSK_ERR_FAULT; @@ -276,6 +282,20 @@ static _mali_osk_errcode_t mali_parse_config_l2_cache(void) return _MALI_OSK_ERR_FAULT; } } + } else if (mali_is_mali470()) { + _mali_osk_resource_t l2c1_resource; + + /* Make cluster for L2C1 */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI470_OFFSET_L2_CACHE1, &l2c1_resource)) { + MALI_DEBUG_PRINT(3, ("Creating Mali-470 L2 cache 1\n")); + l2_cache = mali_create_l2_cache_core(&l2c1_resource, MALI_DOMAIN_INDEX_L21); + if (NULL == l2_cache) { + return _MALI_OSK_ERR_FAULT; + } + } else { + MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for L2C1\n")); + return _MALI_OSK_ERR_FAULT; + } } return _MALI_OSK_ERR_OK; @@ -443,7 +463,7 @@ static _mali_osk_errcode_t mali_parse_config_groups(void) _mali_osk_errcode_t resource_dlbu_found; _mali_osk_errcode_t resource_bcast_found; - if (!(mali_is_mali400() || mali_is_mali450())) { + if (!(mali_is_mali400() || mali_is_mali450() || mali_is_mali470())) { /* No known HW core */ return _MALI_OSK_ERR_FAULT; } @@ -486,7 +506,7 @@ static _mali_osk_errcode_t mali_parse_config_groups(void) resource_pp_mmu_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7_MMU, &(resource_pp_mmu[7])); - if (mali_is_mali450()) { + if (mali_is_mali450() || mali_is_mali470()) { resource_bcast_found = _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast); resource_dlbu_found = _mali_osk_resource_find(MALI_OFFSET_DLBU, &resource_dlbu); resource_pp_mmu_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST_MMU, &resource_pp_mmu_bcast); @@ -496,7 +516,7 @@ static _mali_osk_errcode_t mali_parse_config_groups(void) _MALI_OSK_ERR_OK != resource_dlbu_found || _MALI_OSK_ERR_OK != resource_pp_mmu_bcast_found || _MALI_OSK_ERR_OK != resource_pp_bcast_found) { - /* Missing mandatory core(s) for Mali-450 */ + /* 
Missing mandatory core(s) for Mali-450 or Mali-470 */ MALI_DEBUG_PRINT(2, ("Missing mandatory resources, Mali-450 needs DLBU, Broadcast unit, virtual PP core and virtual MMU\n")); return _MALI_OSK_ERR_FAULT; } @@ -555,7 +575,7 @@ static _mali_osk_errcode_t mali_parse_config_groups(void) } } - if (mali_is_mali450()) { + if (mali_is_mali450() || mali_is_mali470()) { _mali_osk_errcode_t err = mali_create_virtual_group(&resource_pp_mmu_bcast, &resource_pp_bcast, &resource_dlbu, &resource_bcast); if (_MALI_OSK_ERR_OK != err) { return err; @@ -684,14 +704,16 @@ static _mali_osk_errcode_t mali_parse_config_memory(void) static void mali_detect_gpu_class(void) { - if (_mali_osk_l2_resource_count() > 1) { + if (_mali_osk_identify_gpu_resource() == 0x450) mali_gpu_class_is_mali450 = MALI_TRUE; - } + + if (_mali_osk_identify_gpu_resource() == 0x470) + mali_gpu_class_is_mali470 = MALI_TRUE; } static _mali_osk_errcode_t mali_init_hw_reset(void) { -#if defined(CONFIG_MALI450) +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) _mali_osk_resource_t resource_bcast; /* Ensure broadcast unit is in a good state before we start creating @@ -707,7 +729,7 @@ static _mali_osk_errcode_t mali_init_hw_reset(void) } mali_bcast_unit_delete(bcast_core); } -#endif /* CONFIG_MALI450 */ +#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */ return _MALI_OSK_ERR_OK; } @@ -734,6 +756,9 @@ _mali_osk_errcode_t mali_initialize_subsystems(void) return err; } + /*Try to init gpu secure mode */ + _mali_osk_gpu_secure_mode_init(); + #if defined(CONFIG_MALI400_PROFILING) err = _mali_osk_profiling_init(mali_boot_profiling ? 
MALI_TRUE : MALI_FALSE); if (_MALI_OSK_ERR_OK != err) { @@ -825,7 +850,7 @@ _mali_osk_errcode_t mali_initialize_subsystems(void) return err; } - if (mali_is_mali450()) { + if (mali_is_mali450() || mali_is_mali470()) { err = mali_dlbu_initialize(); if (_MALI_OSK_ERR_OK != err) { mali_pm_init_end(); @@ -903,7 +928,7 @@ void mali_terminate_subsystems(void) mali_delete_l2_cache_cores(); mali_mmu_terminate(); - if (mali_is_mali450()) { + if (mali_is_mali450() || mali_is_mali470()) { mali_dlbu_terminate(); } @@ -917,6 +942,8 @@ void mali_terminate_subsystems(void) _mali_osk_profiling_term(); #endif + _mali_osk_gpu_secure_mode_deinit(); + mali_memory_terminate(); mali_session_terminate(); @@ -974,7 +1001,7 @@ _mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s * args->version = _MALI_UK_API_VERSION; /* report our version */ /* success regardless of being compatible or not */ - return _MALI_OSK_ERR_OK;; + return _MALI_OSK_ERR_OK; } _mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args) @@ -995,7 +1022,7 @@ _mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notificati if (NULL == queue) { MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. 
Asking userspace to stop querying\n")); args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS; - return _MALI_OSK_ERR_OK;; + return _MALI_OSK_ERR_OK; } /* receive a notification, might sleep */ @@ -1011,7 +1038,7 @@ _mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notificati /* finished with the notification */ _mali_osk_notification_delete(notification); - return _MALI_OSK_ERR_OK;; /* all ok */ + return _MALI_OSK_ERR_OK; /* all ok */ } _mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args) @@ -1030,7 +1057,7 @@ _mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *ar /* if the queue does not exist we're currently shutting down */ if (NULL == queue) { MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n")); - return _MALI_OSK_ERR_OK;; + return _MALI_OSK_ERR_OK; } notification = _mali_osk_notification_create(args->type, 0); @@ -1041,9 +1068,28 @@ _mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *ar _mali_osk_notification_queue_send(queue, notification); - return _MALI_OSK_ERR_OK;; /* all ok */ + return _MALI_OSK_ERR_OK; /* all ok */ +} + +_mali_osk_errcode_t _mali_ukk_pending_submit(_mali_uk_pending_submit_s *args) +{ + wait_queue_head_t *queue; + + /* check input */ + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + queue = mali_session_get_wait_queue(); + + /* check pending big job number, might sleep if larger than MAX allowed number */ + if (wait_event_interruptible(*queue, MALI_MAX_PENDING_BIG_JOB > mali_scheduler_job_gp_big_job_count())) { + return _MALI_OSK_ERR_RESTARTSYSCALL; + } + + return _MALI_OSK_ERR_OK; /* all ok */ } + _mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args) { struct mali_session_data *session; @@ -1058,7 +1104,7 @@ _mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priori 
MALI_DEBUG_PRINT(2, ("Session 0x%08X with pid %d was granted higher priority.\n", session, _mali_osk_get_pid())); } - return _MALI_OSK_ERR_OK;; + return _MALI_OSK_ERR_OK; } _mali_osk_errcode_t _mali_ukk_open(void **context) @@ -1075,22 +1121,17 @@ _mali_osk_errcode_t _mali_ukk_open(void **context) /* create a response queue for this session */ session->ioctl_queue = _mali_osk_notification_queue_init(); if (NULL == session->ioctl_queue) { - _mali_osk_free(session); - MALI_ERROR(_MALI_OSK_ERR_NOMEM); + goto err; } session->page_directory = mali_mmu_pagedir_alloc(); if (NULL == session->page_directory) { - _mali_osk_notification_queue_term(session->ioctl_queue); - _mali_osk_free(session); - MALI_ERROR(_MALI_OSK_ERR_NOMEM); + goto err_mmu; } if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE)) { MALI_PRINT_ERROR(("Failed to map DLBU page into session\n")); - _mali_osk_notification_queue_term(session->ioctl_queue); - _mali_osk_free(session); - MALI_ERROR(_MALI_OSK_ERR_NOMEM); + goto err_mmu; } if (0 != mali_dlbu_phys_addr) { @@ -1099,31 +1140,19 @@ _mali_osk_errcode_t _mali_ukk_open(void **context) } if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session)) { - mali_mmu_pagedir_free(session->page_directory); - _mali_osk_notification_queue_term(session->ioctl_queue); - _mali_osk_free(session); - MALI_ERROR(_MALI_OSK_ERR_NOMEM); + goto err_session; } /* Create soft system. */ session->soft_job_system = mali_soft_job_system_create(session); if (NULL == session->soft_job_system) { - mali_memory_session_end(session); - mali_mmu_pagedir_free(session->page_directory); - _mali_osk_notification_queue_term(session->ioctl_queue); - _mali_osk_free(session); - MALI_ERROR(_MALI_OSK_ERR_NOMEM); + goto err_soft; } /* Create timeline system. 
*/ session->timeline_system = mali_timeline_system_create(session); if (NULL == session->timeline_system) { - mali_soft_job_system_destroy(session->soft_job_system); - mali_memory_session_end(session); - mali_mmu_pagedir_free(session->page_directory); - _mali_osk_notification_queue_term(session->ioctl_queue); - _mali_osk_free(session); - MALI_ERROR(_MALI_OSK_ERR_NOMEM); + goto err_time_line; } #if defined(CONFIG_MALI_DVFS) @@ -1142,15 +1171,31 @@ _mali_osk_errcode_t _mali_ukk_open(void **context) session->pid = _mali_osk_get_pid(); session->comm = _mali_osk_get_comm(); - session->max_mali_mem_allocated = 0; - _mali_osk_memset(session->mali_mem_array, 0, sizeof(size_t) * MALI_MEM_TYPE_MAX); + session->max_mali_mem_allocated_size = 0; + for (i = 0; i < MALI_MEM_TYPE_MAX; i ++) { + atomic_set(&session->mali_mem_array[i], 0); + } + atomic_set(&session->mali_mem_allocated_pages, 0); *context = (void *)session; /* Add session to the list of all sessions. */ mali_session_add(session); MALI_DEBUG_PRINT(3, ("Session started\n")); - return _MALI_OSK_ERR_OK;; + return _MALI_OSK_ERR_OK; + +err_time_line: + mali_soft_job_system_destroy(session->soft_job_system); +err_soft: + mali_memory_session_end(session); +err_session: + mali_mmu_pagedir_free(session->page_directory); +err_mmu: + _mali_osk_notification_queue_term(session->ioctl_queue); +err: + _mali_osk_free(session); + MALI_ERROR(_MALI_OSK_ERR_NOMEM); + } #if defined(DEBUG) @@ -1234,7 +1279,12 @@ _mali_osk_errcode_t _mali_ukk_close(void **context) _mali_osk_atomic_term(&session->number_of_window_jobs); #endif +#if defined(CONFIG_MALI400_PROFILING) + _mali_osk_profiling_stop_sampling(session->pid); +#endif + /* Free session data structures */ + mali_mmu_pagedir_unmap(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE); mali_mmu_pagedir_free(session->page_directory); _mali_osk_notification_queue_term(session->ioctl_queue); _mali_osk_free(session); diff --git a/drivers/gpu/arm/mali/common/mali_kernel_core.h 
b/drivers/gpu/arm/mali/common/mali_kernel_core.h old mode 100644 new mode 100755 index 57d3cd2413862..cf6af3263f0b3 --- a/drivers/gpu/arm/mali/common/mali_kernel_core.h +++ b/drivers/gpu/arm/mali/common/mali_kernel_core.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -19,9 +19,11 @@ typedef enum { _MALI_PRODUCT_ID_MALI300, _MALI_PRODUCT_ID_MALI400, _MALI_PRODUCT_ID_MALI450, + _MALI_PRODUCT_ID_MALI470, } _mali_product_id_t; extern mali_bool mali_gpu_class_is_mali450; +extern mali_bool mali_gpu_class_is_mali470; _mali_osk_errcode_t mali_initialize_subsystems(void); @@ -35,22 +37,21 @@ u32 mali_kernel_core_get_gpu_minor_version(void); u32 _mali_kernel_core_dump_state(char *buf, u32 size); +MALI_STATIC_INLINE mali_bool mali_is_mali470(void) +{ + return mali_gpu_class_is_mali470; +} + MALI_STATIC_INLINE mali_bool mali_is_mali450(void) { -#if defined(CONFIG_MALI450) return mali_gpu_class_is_mali450; -#else - return MALI_FALSE; -#endif } MALI_STATIC_INLINE mali_bool mali_is_mali400(void) { -#if !defined(CONFIG_MALI450) + if (mali_gpu_class_is_mali450 || mali_gpu_class_is_mali470) + return MALI_FALSE; + return MALI_TRUE; -#else - return !mali_gpu_class_is_mali450; -#endif } - #endif /* __MALI_KERNEL_CORE_H__ */ diff --git a/drivers/gpu/arm/mali/common/mali_kernel_utilization.c b/drivers/gpu/arm/mali/common/mali_kernel_utilization.c index 63b9417422496..ca7ebea4763b9 100755 --- a/drivers/gpu/arm/mali/common/mali_kernel_utilization.c +++ b/drivers/gpu/arm/mali/common/mali_kernel_utilization.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. 
All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_kernel_utilization.h b/drivers/gpu/arm/mali/common/mali_kernel_utilization.h index 3c20b1983762a..5206225ca2f11 100755 --- a/drivers/gpu/arm/mali/common/mali_kernel_utilization.h +++ b/drivers/gpu/arm/mali/common/mali_kernel_utilization.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_kernel_vsync.c b/drivers/gpu/arm/mali/common/mali_kernel_vsync.c index 2eed4c88cf561..3b2b108c747a8 100755 --- a/drivers/gpu/arm/mali/common/mali_kernel_vsync.c +++ b/drivers/gpu/arm/mali/common/mali_kernel_vsync.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_l2_cache.c b/drivers/gpu/arm/mali/common/mali_l2_cache.c index ff53d6c93e8db..4fbb51c870d7e 100755 --- a/drivers/gpu/arm/mali/common/mali_l2_cache.c +++ b/drivers/gpu/arm/mali/common/mali_l2_cache.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. 
* * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -203,7 +203,8 @@ void mali_l2_cache_power_up(struct mali_l2_cache_core *cache) mali_l2_cache_reset(cache); - MALI_DEBUG_ASSERT(MALI_FALSE == cache->power_is_on); + if ((1 << MALI_DOMAIN_INDEX_DUMMY) != cache->pm_domain->pmu_mask) + MALI_DEBUG_ASSERT(MALI_FALSE == cache->power_is_on); cache->power_is_on = MALI_TRUE; mali_l2_cache_unlock(cache); @@ -466,24 +467,18 @@ void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages) static void mali_l2_cache_reset(struct mali_l2_cache_core *cache) { - MALI_DEBUG_ASSERT_POINTER(cache); - MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock); - - /* Kasin Added, skip off power domain. */ - if (cache && cache->pm_domain && cache->pm_domain->power_is_on == MALI_FALSE) { - printk("===========%s, %d skip off power domain?\n", __FUNCTION__, __LINE__); - } - + MALI_DEBUG_ASSERT_POINTER(cache); + MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock); - /* Invalidate cache (just to keep it in a known state at startup) */ - mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, - MALI400_L2_CACHE_COMMAND_CLEAR_ALL); + /* Invalidate cache (just to keep it in a known state at startup) */ + mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, + MALI400_L2_CACHE_COMMAND_CLEAR_ALL); - /* Enable cache */ - mali_hw_core_register_write(&cache->hw_core, - MALI400_L2_CACHE_REGISTER_ENABLE, - (u32)MALI400_L2_CACHE_ENABLE_ACCESS | - (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE); + /* Enable cache */ + mali_hw_core_register_write(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_ENABLE, + (u32)MALI400_L2_CACHE_ENABLE_ACCESS | + (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE); if (MALI400_L2_MAX_READS_NOT_SET != mali_l2_max_reads) { mali_hw_core_register_write(&cache->hw_core, diff --git 
a/drivers/gpu/arm/mali/common/mali_l2_cache.h b/drivers/gpu/arm/mali/common/mali_l2_cache.h index 6dc8ec22d6dea..de92ad63e9aa7 100755 --- a/drivers/gpu/arm/mali/common/mali_l2_cache.h +++ b/drivers/gpu/arm/mali/common/mali_l2_cache.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_mem_validation.c b/drivers/gpu/arm/mali/common/mali_mem_validation.c old mode 100644 new mode 100755 index d9d3f5e215020..432061486bdb3 --- a/drivers/gpu/arm/mali/common/mali_mem_validation.c +++ b/drivers/gpu/arm/mali/common/mali_mem_validation.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -46,24 +46,5 @@ _mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size) _mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size) { -#if 0 - if (phys_addr < (phys_addr + size)) { /* Don't allow overflow (or zero size) */ - if ((0 == (phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) && - (0 == (size & (~_MALI_OSK_CPU_PAGE_MASK)))) { - if ((phys_addr >= mali_mem_validator.phys_base) && - ((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) && - (phys_addr <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) && - ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1)))) { - MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1))); - return _MALI_OSK_ERR_OK; - } - } - } - - MALI_PRINT_ERROR(("MALI PHYSICAL RANGE VALIDATION ERROR: The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_addr, size)); - - return _MALI_OSK_ERR_FAULT; -#else return _MALI_OSK_ERR_OK; -#endif } diff --git a/drivers/gpu/arm/mali/common/mali_mem_validation.h b/drivers/gpu/arm/mali/common/mali_mem_validation.h old mode 100644 new mode 100755 index 267720625d87e..f7e6cb1009b3d --- a/drivers/gpu/arm/mali/common/mali_mem_validation.h +++ b/drivers/gpu/arm/mali/common/mali_mem_validation.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/mali/common/mali_mmu.c b/drivers/gpu/arm/mali/common/mali_mmu.c index fac1f7cb7a52f..fba534058b799 100755 --- a/drivers/gpu/arm/mali/common/mali_mmu.c +++ b/drivers/gpu/arm/mali/common/mali_mmu.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -395,7 +395,6 @@ static void mali_mmu_probe_trigger(void *data) } /* Is called when the irq probe wants the mmu to acknowledge an interrupt from the hw */ -extern int mali_page_fault; static _mali_osk_errcode_t mali_mmu_probe_ack(void *data) { struct mali_mmu_core *mmu = (struct mali_mmu_core *)data; @@ -409,7 +408,6 @@ static _mali_osk_errcode_t mali_mmu_probe_ack(void *data) mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT); } else { MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n")); - mali_page_fault++; } if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR) { @@ -417,7 +415,6 @@ static _mali_osk_errcode_t mali_mmu_probe_ack(void *data) mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR); } else { MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n")); - mali_page_fault++; } if ((int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) == diff --git a/drivers/gpu/arm/mali/common/mali_mmu.h b/drivers/gpu/arm/mali/common/mali_mmu.h index 101c968bd45d3..398b2096c5d84 100755 --- a/drivers/gpu/arm/mali/common/mali_mmu.h +++ b/drivers/gpu/arm/mali/common/mali_mmu.h @@ -91,6 +91,7 @@ MALI_STATIC_INLINE enum mali_interrupt_result mali_mmu_get_interrupt_result(stru if (0 == rawstat_used) { return 
MALI_INTERRUPT_RESULT_NONE; } + return MALI_INTERRUPT_RESULT_ERROR; } diff --git a/drivers/gpu/arm/mali/common/mali_mmu_page_directory.c b/drivers/gpu/arm/mali/common/mali_mmu_page_directory.c index a4f4e585a6fa9..74d8cc85bc8ef 100755 --- a/drivers/gpu/arm/mali/common/mali_mmu_page_directory.c +++ b/drivers/gpu/arm/mali/common/mali_mmu_page_directory.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -120,8 +120,8 @@ _mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u3 _mali_osk_errcode_t err; mali_io_address pde_mapping; mali_dma_addr pde_phys; - int i; - + int i, page_count; + u32 start_address; if (last_pde < first_pde) return _MALI_OSK_ERR_INVALID_ARGS; @@ -144,9 +144,20 @@ _mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u3 pde_phys | MALI_MMU_FLAGS_PRESENT); MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]); - pagedir->page_entries_usage_count[i] = 1; + } + + if (first_pde == last_pde) { + pagedir->page_entries_usage_count[i] += size / MALI_MMU_PAGE_SIZE; + } else if (i == first_pde) { + start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE; + page_count = (start_address + MALI_MMU_VIRTUAL_PAGE_SIZE - mali_address) / MALI_MMU_PAGE_SIZE; + pagedir->page_entries_usage_count[i] += page_count; + } else if (i == last_pde) { + start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE; + page_count = (mali_address + size - start_address) / MALI_MMU_PAGE_SIZE; + pagedir->page_entries_usage_count[i] += page_count; } else { - pagedir->page_entries_usage_count[i]++; + pagedir->page_entries_usage_count[i] = 1024; } } _mali_osk_write_mem_barrier(); @@ -198,7 +209,7 @@ _mali_osk_errcode_t 
mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset; } - pagedir->page_entries_usage_count[i]--; + pagedir->page_entries_usage_count[i] -= size_in_pde / MALI_MMU_PAGE_SIZE; /* If entire page table is unused, free it */ if (0 == pagedir->page_entries_usage_count[i]) { diff --git a/drivers/gpu/arm/mali/common/mali_mmu_page_directory.h b/drivers/gpu/arm/mali/common/mali_mmu_page_directory.h index 561fb60b9803e..4fc1058d73f6c 100755 --- a/drivers/gpu/arm/mali/common/mali_mmu_page_directory.h +++ b/drivers/gpu/arm/mali/common/mali_mmu_page_directory.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_osk.h b/drivers/gpu/arm/mali/common/mali_osk.h index c48cfc01087f3..5c20fc32993f0 100755 --- a/drivers/gpu/arm/mali/common/mali_osk.h +++ b/drivers/gpu/arm/mali/common/mali_osk.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -16,6 +16,7 @@ #ifndef __MALI_OSK_H__ #define __MALI_OSK_H__ +#include #include "mali_osk_types.h" #include "mali_osk_specific.h" /* include any per-os specifics */ #include "mali_osk_locks.h" @@ -50,6 +51,8 @@ extern "C" { #define MALI_DEBUG_ASSERT_LOCK_HELD(l) do {} while(0) #endif +#define _mali_osk_ctxprintf seq_printf + /** @} */ /* end group _mali_osk_lock */ /** @addtogroup _mali_osk_miscellaneous @@ -1237,17 +1240,6 @@ void _mali_osk_dbgmsg(const char *fmt, ...); */ u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...); -/** @brief Print fmt into print_ctx. - * - * The interpretation of \a fmt is the same as the \c format parameter in - * _mali_osu_vsnprintf(). - * - * @param print_ctx a pointer to the result file buffer - * @param fmt a _mali_osu_vsnprintf() style format string - * @param ... a variable-number of parameters suitable for \a fmt - */ -void _mali_osk_ctxprintf(_mali_osk_print_ctx *print_ctx, const char *fmt, ...); - /** @brief Abnormal process abort. * * Terminates the caller-process if this function is called. @@ -1323,6 +1315,58 @@ void _mali_osk_pm_dev_barrier(void); /** @} */ /* end group _mali_osk_miscellaneous */ +/** @defgroup _mali_osk_bitmap OSK Bitmap + * @{ */ + +/** @brief Allocate a unique number from the bitmap object. + * + * @param bitmap Initialized bitmap object. + * @return An unique existence in the bitmap object. + */ +u32 _mali_osk_bitmap_alloc(struct _mali_osk_bitmap *bitmap); + +/** @brief Free a interger to the bitmap object. + * + * @param bitmap Initialized bitmap object. + * @param obj An number allocated from bitmap object. + */ +void _mali_osk_bitmap_free(struct _mali_osk_bitmap *bitmap, u32 obj); + +/** @brief Allocate continuous number from the bitmap object. + * + * @param bitmap Initialized bitmap object. + * @return start number of the continuous number block. 
+ */ +u32 _mali_osk_bitmap_alloc_range(struct _mali_osk_bitmap *bitmap, int cnt); + +/** @brief Free a block of continuous number block to the bitmap object. + * + * @param bitmap Initialized bitmap object. + * @param obj Start number. + * @param cnt The size of the continuous number block. + */ +void _mali_osk_bitmap_free_range(struct _mali_osk_bitmap *bitmap, u32 obj, int cnt); + +/** @brief Available count could be used to allocate in the given bitmap object. + * + */ +u32 _mali_osk_bitmap_avail(struct _mali_osk_bitmap *bitmap); + +/** @brief Initialize an bitmap object.. + * + * @param bitmap An poiter of uninitialized bitmap object. + * @param num Size of thei bitmap object and decide the memory size allocated. + * @param reserve start number used to allocate. + */ +int _mali_osk_bitmap_init(struct _mali_osk_bitmap *bitmap, u32 num, u32 reserve); + +/** @brief Free the given bitmap object. + * + * @param bitmap Initialized bitmap object. + */ +void _mali_osk_bitmap_term(struct _mali_osk_bitmap *bitmap); +/** @} */ /* end group _mali_osk_bitmap */ + /** @} */ /* end group osuapi */ /** @} */ /* end group uddapi */ diff --git a/drivers/gpu/arm/mali/common/mali_osk_bitops.h b/drivers/gpu/arm/mali/common/mali_osk_bitops.h index c1709f94c8837..61e0a9110b3ff 100755 --- a/drivers/gpu/arm/mali/common/mali_osk_bitops.h +++ b/drivers/gpu/arm/mali/common/mali_osk_bitops.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/mali/common/mali_osk_list.h b/drivers/gpu/arm/mali/common/mali_osk_list.h index 22d22446d7a0b..a41f5ab3e77ba 100755 --- a/drivers/gpu/arm/mali/common/mali_osk_list.h +++ b/drivers/gpu/arm/mali/common/mali_osk_list.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_osk_mali.h b/drivers/gpu/arm/mali/common/mali_osk_mali.h index 9e4f11dfd7e6b..0d6906ad204b2 100755 --- a/drivers/gpu/arm/mali/common/mali_osk_mali.h +++ b/drivers/gpu/arm/mali/common/mali_osk_mali.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -23,6 +23,28 @@ extern "C" { #endif +#ifdef CONFIG_MALI_DEVFREQ +struct mali_device { + struct device *dev; +#ifdef CONFIG_HAVE_CLK + struct clk *clock; +#endif +#ifdef CONFIG_REGULATOR + struct regulator *regulator; +#endif +#ifdef CONFIG_PM_DEVFREQ + struct devfreq_dev_profile devfreq_profile; + struct devfreq *devfreq; + unsigned long current_freq; + unsigned long current_voltage; +#ifdef CONFIG_DEVFREQ_THERMAL + struct thermal_cooling_device *devfreq_cooling; +#endif +#endif + struct mali_pm_metrics_data mali_metrics; +}; +#endif + /** @addtogroup _mali_osk_miscellaneous * @{ */ @@ -53,11 +75,15 @@ _mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res) */ uintptr_t _mali_osk_resource_base_address(void); -/** @brief Find the number of L2 cache cores. +/** @brief Find the specific GPU resource. + * + * @return value + * 0x400 if Mali 400 specific GPU resource identified + * 0x450 if Mali 450 specific GPU resource identified + * 0x470 if Mali 470 specific GPU resource identified * - * @return return the number of l2 cache cores we find in device resources. */ -u32 _mali_osk_l2_resource_count(void); +u32 _mali_osk_identify_gpu_resource(void); /** @brief Retrieve the Mali GPU specific data * @@ -84,6 +110,38 @@ u32 _mali_osk_get_pmu_switch_delay(void); */ mali_bool _mali_osk_shared_interrupts(void); +/** @brief Initialize the gpu secure mode. + * The gpu secure mode will initially be in a disabled state. + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t _mali_osk_gpu_secure_mode_init(void); + +/** @brief Deinitialize the gpu secure mode. + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t _mali_osk_gpu_secure_mode_deinit(void); + +/** @brief Enable the gpu secure mode. + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t _mali_osk_gpu_secure_mode_enable(void); + +/** @brief Disable the gpu secure mode. 
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t _mali_osk_gpu_secure_mode_disable(void); + +/** @brief Check if the gpu secure mode has been enabled. + * @return MALI_TRUE if enabled, otherwise MALI_FALSE. + */ +mali_bool _mali_osk_gpu_secure_mode_is_enabled(void); + +/** @brief Check if the gpu secure mode is supported. + * @return MALI_TRUE if supported, otherwise MALI_FALSE. + */ +mali_bool _mali_osk_gpu_secure_mode_is_supported(void); + + /** @} */ /* end group _mali_osk_miscellaneous */ #ifdef __cplusplus diff --git a/drivers/gpu/arm/mali/common/mali_osk_profiling.h b/drivers/gpu/arm/mali/common/mali_osk_profiling.h index e85c28b11c200..eca6ad493c33b 100755 --- a/drivers/gpu/arm/mali/common/mali_osk_profiling.h +++ b/drivers/gpu/arm/mali/common/mali_osk_profiling.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -35,6 +35,11 @@ _mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start); */ void _mali_osk_profiling_term(void); +/** + * Stop the profile sampling operation. + */ +void _mali_osk_profiling_stop_sampling(u32 pid); + /** * Start recording profiling data * @@ -55,10 +60,8 @@ _mali_osk_errcode_t _mali_osk_profiling_start(u32 *limit); * @param data2 Third data parameter, depending on event_id specified. * @param data3 Fourth data parameter, depending on event_id specified. * @param data4 Fifth data parameter, depending on event_id specified. - * @return _MALI_OSK_ERR_OK on success, otherwise failure. 
*/ -/* Call Linux tracepoint directly */ -#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4) trace_mali_timeline_event((event_id), (data0), (data1), (data2), (data3), (data4)) +void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4); /** * Report a hardware counter event. @@ -77,6 +80,8 @@ _mali_osk_errcode_t _mali_osk_profiling_start(u32 *limit); */ void _mali_osk_profiling_report_sw_counters(u32 *counters); +void _mali_osk_profiling_record_global_counters(int counter_id, u32 value); + /** * Stop recording profiling data * diff --git a/drivers/gpu/arm/mali/common/mali_osk_types.h b/drivers/gpu/arm/mali/common/mali_osk_types.h index 5204d3038257b..6e9a1336bf0be 100755 --- a/drivers/gpu/arm/mali/common/mali_osk_types.h +++ b/drivers/gpu/arm/mali/common/mali_osk_types.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -64,6 +64,9 @@ typedef unsigned long mali_bool; #define MALI_HW_CORE_NO_COUNTER ((u32)-1) + +#define MALI_S32_MAX 0x7fffffff + /** * @brief OSK Error codes * @@ -449,6 +452,18 @@ typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t; */ typedef struct seq_file _mali_osk_print_ctx; +#define _MALI_OSK_BITMAP_INVALIDATE_INDEX -1 + +typedef struct _mali_osk_bitmap { + u32 reserve; + u32 last; + u32 max; + u32 avail; + _mali_osk_spinlock_t *lock; + unsigned long *table; +} _mali_osk_bitmap_t; + + #ifdef __cplusplus } #endif diff --git a/drivers/gpu/arm/mali/common/mali_pm.c b/drivers/gpu/arm/mali/common/mali_pm.c index 85cdde01572b5..1ef03a69a084a 100755 --- a/drivers/gpu/arm/mali/common/mali_pm.c +++ b/drivers/gpu/arm/mali/common/mali_pm.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -974,6 +974,8 @@ static void mali_pm_set_default_pm_domain_config(void) domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 2; } else if (mali_is_mali450()) { domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 1; + } else if (mali_is_mali470()) { + domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 0; } } @@ -983,6 +985,8 @@ static void mali_pm_set_default_pm_domain_config(void) domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 3; } else if (mali_is_mali450()) { domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 2; + } else if (mali_is_mali470()) { + domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 1; } } @@ -992,6 +996,8 @@ static void mali_pm_set_default_pm_domain_config(void) domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 4; } else if (mali_is_mali450()) { domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 2; + } else if (mali_is_mali470()) { + domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 1; } } @@ -1001,6 +1007,8 @@ static void mali_pm_set_default_pm_domain_config(void) domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 5; } else if (mali_is_mali450()) { domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 2; + } else if (mali_is_mali470()) { + domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 1; } } @@ -1046,6 +1054,11 @@ static void mali_pm_set_default_pm_domain_config(void) MALI450_OFFSET_L2_CACHE2, NULL)) { domain_config[MALI_DOMAIN_INDEX_L22] = 0x01 << 3; } + } else if (mali_is_mali470()) { + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI470_OFFSET_L2_CACHE1, NULL)) { + domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 0; + } } } @@ -1337,3 +1350,13 @@ void mali_pm_get_best_power_cost_mask(int num_requested, int *dst) _mali_osk_memcpy(dst, mali_pm_domain_power_cost_result[num_requested], MALI_MAX_NUMBER_OF_DOMAINS * sizeof(int)); } + +u32 mali_pm_get_current_mask(void) +{ + return pd_mask_current; +} + +u32 mali_pm_get_wanted_mask(void) +{ + return pd_mask_wanted; +} diff --git a/drivers/gpu/arm/mali/common/mali_pm.h b/drivers/gpu/arm/mali/common/mali_pm.h index 
ca4db2436b51b..3c11977f00fa0 100755 --- a/drivers/gpu/arm/mali/common/mali_pm.h +++ b/drivers/gpu/arm/mali/common/mali_pm.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -86,4 +86,6 @@ void mali_pm_get_best_power_cost_mask(int num_requested, int *dst); const char *mali_pm_mask_to_string(u32 mask); #endif +u32 mali_pm_get_current_mask(void); +u32 mali_pm_get_wanted_mask(void); #endif /* __MALI_PM_H__ */ diff --git a/drivers/gpu/arm/mali/common/mali_pm_domain.c b/drivers/gpu/arm/mali/common/mali_pm_domain.c index dbf985e6d37b0..9db848865a80c 100755 --- a/drivers/gpu/arm/mali/common/mali_pm_domain.c +++ b/drivers/gpu/arm/mali/common/mali_pm_domain.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_pm_domain.h b/drivers/gpu/arm/mali/common/mali_pm_domain.h index aceb3449359af..2ac8c0d3068c3 100755 --- a/drivers/gpu/arm/mali/common/mali_pm_domain.h +++ b/drivers/gpu/arm/mali/common/mali_pm_domain.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. 
* * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_pm_metrics.c b/drivers/gpu/arm/mali/common/mali_pm_metrics.c new file mode 100755 index 0000000000000..981ec815d0368 --- /dev/null +++ b/drivers/gpu/arm/mali/common/mali_pm_metrics.c @@ -0,0 +1,255 @@ +/* + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ +#include "mali_pm_metrics.h" +#include "mali_osk_locks.h" +#include "mali_osk_mali.h" +#include + +#define MALI_PM_TIME_SHIFT 0 +#define MALI_UTILIZATION_MAX_PERIOD 80000000/* ns = 100ms */ + +_mali_osk_errcode_t mali_pm_metrics_init(struct mali_device *mdev) +{ + int i = 0; + + MALI_DEBUG_ASSERT(mdev != NULL); + + mdev->mali_metrics.time_period_start = ktime_get(); + mdev->mali_metrics.time_period_start_gp = mdev->mali_metrics.time_period_start; + mdev->mali_metrics.time_period_start_pp = mdev->mali_metrics.time_period_start; + + mdev->mali_metrics.time_busy = 0; + mdev->mali_metrics.time_idle = 0; + mdev->mali_metrics.prev_busy = 0; + mdev->mali_metrics.prev_idle = 0; + mdev->mali_metrics.num_running_gp_cores = 0; + mdev->mali_metrics.num_running_pp_cores = 0; + mdev->mali_metrics.time_busy_gp = 0; + mdev->mali_metrics.time_idle_gp = 0; + + for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; i++) { + mdev->mali_metrics.time_busy_pp[i] = 0; + mdev->mali_metrics.time_idle_pp[i] = 0; + } + mdev->mali_metrics.gpu_active = MALI_FALSE; + + mdev->mali_metrics.lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_UNORDERED, _MALI_OSK_LOCK_ORDER_FIRST); + if (NULL == mdev->mali_metrics.lock) { + return _MALI_OSK_ERR_NOMEM; + } + + return _MALI_OSK_ERR_OK; +} + +void mali_pm_metrics_term(struct mali_device *mdev) +{ + _mali_osk_spinlock_irq_term(mdev->mali_metrics.lock); +} + +/*caller needs to hold mdev->mali_metrics.lock before calling this function*/ +void mali_pm_record_job_status(struct mali_device *mdev) +{ + ktime_t now; + ktime_t diff; + u64 ns_time; + + MALI_DEBUG_ASSERT(mdev != NULL); + + now = ktime_get(); + diff = ktime_sub(now, mdev->mali_metrics.time_period_start); + + ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_busy += ns_time; + mdev->mali_metrics.time_period_start = now; +} + +void mali_pm_record_gpu_idle(mali_bool is_gp) +{ + ktime_t now; + ktime_t diff; + u64 ns_time; + struct mali_device *mdev = 
dev_get_drvdata(&mali_platform_device->dev); + + MALI_DEBUG_ASSERT(mdev != NULL); + + _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock); + now = ktime_get(); + + if (MALI_TRUE == is_gp) { + --mdev->mali_metrics.num_running_gp_cores; + if (0 == mdev->mali_metrics.num_running_gp_cores) { + diff = ktime_sub(now, mdev->mali_metrics.time_period_start_gp); + ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_busy_gp += ns_time; + mdev->mali_metrics.time_period_start_gp = now; + + if (0 == mdev->mali_metrics.num_running_pp_cores) { + MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE); + diff = ktime_sub(now, mdev->mali_metrics.time_period_start); + ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_busy += ns_time; + mdev->mali_metrics.time_period_start = now; + mdev->mali_metrics.gpu_active = MALI_FALSE; + } + } + } else { + --mdev->mali_metrics.num_running_pp_cores; + if (0 == mdev->mali_metrics.num_running_pp_cores) { + diff = ktime_sub(now, mdev->mali_metrics.time_period_start_pp); + ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_busy_pp[0] += ns_time; + mdev->mali_metrics.time_period_start_pp = now; + + if (0 == mdev->mali_metrics.num_running_gp_cores) { + MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE); + diff = ktime_sub(now, mdev->mali_metrics.time_period_start); + ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_busy += ns_time; + mdev->mali_metrics.time_period_start = now; + mdev->mali_metrics.gpu_active = MALI_FALSE; + } + } + } + + _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock); +} + +void mali_pm_record_gpu_active(mali_bool is_gp) +{ + ktime_t now; + ktime_t diff; + struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev); + + MALI_DEBUG_ASSERT(mdev != NULL); + + _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock); + now = ktime_get(); + + if (MALI_TRUE == is_gp) { + 
mdev->mali_metrics.num_running_gp_cores++; + if (1 == mdev->mali_metrics.num_running_gp_cores) { + diff = ktime_sub(now, mdev->mali_metrics.time_period_start_gp); + mdev->mali_metrics.time_idle_gp += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_period_start_gp = now; + if (0 == mdev->mali_metrics.num_running_pp_cores) { + MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_FALSE); + diff = ktime_sub(now, mdev->mali_metrics.time_period_start); + mdev->mali_metrics.time_idle += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_period_start = now; + mdev->mali_metrics.gpu_active = MALI_TRUE; + } + } else { + MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE); + } + } else { + mdev->mali_metrics.num_running_pp_cores++; + if (1 == mdev->mali_metrics.num_running_pp_cores) { + diff = ktime_sub(now, mdev->mali_metrics.time_period_start_pp); + mdev->mali_metrics.time_idle_pp[0] += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_period_start_pp = now; + if (0 == mdev->mali_metrics.num_running_gp_cores) { + MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_FALSE); + diff = ktime_sub(now, mdev->mali_metrics.time_period_start); + mdev->mali_metrics.time_idle += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_period_start = now; + mdev->mali_metrics.gpu_active = MALI_TRUE; + } + } else { + MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE); + } + } + + _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock); +} + + +/*caller needs to hold mdev->mali_metrics.lock before calling this function*/ +static void mali_pm_get_dvfs_utilisation_calc(struct mali_device *mdev, ktime_t now) +{ + ktime_t diff; + + MALI_DEBUG_ASSERT(mdev != NULL); + + diff = ktime_sub(now, mdev->mali_metrics.time_period_start); + + if (mdev->mali_metrics.gpu_active) { + mdev->mali_metrics.time_busy += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + } else { + 
mdev->mali_metrics.time_idle += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + } +} + +/* Caller needs to hold mdev->mali_metrics.lock before calling this function. */ +static void mali_pm_reset_dvfs_utilisation_unlocked(struct mali_device *mdev, ktime_t now) +{ + /* Store previous value */ + mdev->mali_metrics.prev_idle = mdev->mali_metrics.time_idle; + mdev->mali_metrics.prev_busy = mdev->mali_metrics.time_busy; + + /* Reset current values */ + mdev->mali_metrics.time_period_start = now; + mdev->mali_metrics.time_period_start_gp = now; + mdev->mali_metrics.time_period_start_pp = now; + mdev->mali_metrics.time_idle = 0; + mdev->mali_metrics.time_busy = 0; + + mdev->mali_metrics.time_busy_gp = 0; + mdev->mali_metrics.time_idle_gp = 0; + mdev->mali_metrics.time_busy_pp[0] = 0; + mdev->mali_metrics.time_idle_pp[0] = 0; +} + +void mali_pm_reset_dvfs_utilisation(struct mali_device *mdev) +{ + _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock); + mali_pm_reset_dvfs_utilisation_unlocked(mdev, ktime_get()); + _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock); +} + +void mali_pm_get_dvfs_utilisation(struct mali_device *mdev, + unsigned long *total_out, unsigned long *busy_out) +{ + ktime_t now = ktime_get(); + u64 busy = 0; + u64 total = 0; + + _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock); + + mali_pm_get_dvfs_utilisation_calc(mdev, now); + + busy = mdev->mali_metrics.time_busy; + total = busy + mdev->mali_metrics.time_idle; + + /* Reset stats if older than MALI_UTILIZATION_MAX_PERIOD (default + * 100ms) */ + if (total >= MALI_UTILIZATION_MAX_PERIOD) { + mali_pm_reset_dvfs_utilisation_unlocked(mdev, now); + } else if (total < (MALI_UTILIZATION_MAX_PERIOD / 2)) { + total += mdev->mali_metrics.prev_idle + + mdev->mali_metrics.prev_busy; + busy += mdev->mali_metrics.prev_busy; + } + + *total_out = (unsigned long)total; + *busy_out = (unsigned long)busy; + _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock); +} + +void mali_pm_metrics_spin_lock(void) +{ + 
struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev); + _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock); +} + +void mali_pm_metrics_spin_unlock(void) +{ + struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev); + _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock); +} diff --git a/drivers/gpu/arm/mali/common/mali_pm_metrics.h b/drivers/gpu/arm/mali/common/mali_pm_metrics.h new file mode 100755 index 0000000000000..256f4484a8a48 --- /dev/null +++ b/drivers/gpu/arm/mali/common/mali_pm_metrics.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_PM_METRICS_H__ +#define __MALI_PM_METRICS_H__ + +#ifdef CONFIG_MALI_DEVFREQ +#include "mali_osk_locks.h" +#include "mali_group.h" + +struct mali_device; + +/** + * Metrics data collected for use by the power management framework. + */ +struct mali_pm_metrics_data { + ktime_t time_period_start; + u64 time_busy; + u64 time_idle; + u64 prev_busy; + u64 prev_idle; + u32 num_running_gp_cores; + u32 num_running_pp_cores; + ktime_t time_period_start_gp; + u64 time_busy_gp; + u64 time_idle_gp; + ktime_t time_period_start_pp; + u64 time_busy_pp[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS]; + u64 time_idle_pp[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS]; + mali_bool gpu_active; + _mali_osk_spinlock_irq_t *lock; +}; + +/** + * Initialize/start the Mali GPU pm_metrics metrics reporting. + * + * @return _MALI_OSK_ERR_OK on success, otherwise failure. 
+ */ +_mali_osk_errcode_t mali_pm_metrics_init(struct mali_device *mdev); + +/** + * Terminate the Mali GPU pm_metrics metrics reporting + */ +void mali_pm_metrics_term(struct mali_device *mdev); + +/** + * Should be called when a job is about to execute a GPU job + */ +void mali_pm_record_gpu_active(mali_bool is_gp); + +/** + * Should be called when a job is finished + */ +void mali_pm_record_gpu_idle(mali_bool is_gp); + +void mali_pm_reset_dvfs_utilisation(struct mali_device *mdev); + +void mali_pm_get_dvfs_utilisation(struct mali_device *mdev, unsigned long *total_out, unsigned long *busy_out); + +void mali_pm_metrics_spin_lock(void); + +void mali_pm_metrics_spin_unlock(void); +#else +void mali_pm_record_gpu_idle(mali_bool is_gp) {} +void mali_pm_record_gpu_active(mali_bool is_gp) {} +#endif +#endif /* __MALI_PM_METRICS_H__ */ diff --git a/drivers/gpu/arm/mali/common/mali_pmu.c b/drivers/gpu/arm/mali/common/mali_pmu.c index f667b65ff6533..bf0d413515994 100755 --- a/drivers/gpu/arm/mali/common/mali_pmu.c +++ b/drivers/gpu/arm/mali/common/mali_pmu.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -143,6 +143,8 @@ _mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask) */ MALI_DEBUG_ASSERT(0 == (stat & mask)); + mask &= ~(0x1 << MALI_DOMAIN_INDEX_DUMMY); + if (0 == mask || 0 == ((~stat) & mask)) return _MALI_OSK_ERR_OK; mali_hw_core_register_write(&pmu->hw_core, @@ -153,7 +155,7 @@ _mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask) * powered off by our power down command, because the HW will simply * not generate an interrupt in this case. 
*/ - if (mali_is_mali450() || pmu->registered_cores_mask != (mask | stat)) { + if (mali_is_mali450() || mali_is_mali470() || pmu->registered_cores_mask != (mask | stat)) { err = mali_pmu_wait_for_command_finish(pmu); if (_MALI_OSK_ERR_OK != err) { return err; @@ -195,6 +197,8 @@ _mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask) stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS); stat &= pmu->registered_cores_mask; + + mask &= ~(0x1 << MALI_DOMAIN_INDEX_DUMMY); if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK; /* diff --git a/drivers/gpu/arm/mali/common/mali_pmu.h b/drivers/gpu/arm/mali/common/mali_pmu.h index 5ca78795f5350..36ff58ea2d9f2 100755 --- a/drivers/gpu/arm/mali/common/mali_pmu.h +++ b/drivers/gpu/arm/mali/common/mali_pmu.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/mali/common/mali_pp.c b/drivers/gpu/arm/mali/common/mali_pp.c index b1fcdeab57185..fb890a7d3ee9f 100755 --- a/drivers/gpu/arm/mali/common/mali_pp.c +++ b/drivers/gpu/arm/mali/common/mali_pp.c @@ -15,12 +15,12 @@ #include "regs/mali_200_regs.h" #include "mali_kernel_common.h" #include "mali_kernel_core.h" +#include "mali_osk_mali.h" + #if defined(CONFIG_MALI400_PROFILING) #include "mali_osk_profiling.h" #endif -#include - /* Number of frame registers on Mali-200 */ #define MALI_PP_MALI200_NUM_FRAME_REGISTERS ((0x04C/4)+1) /* Number of frame registers on Mali-300 and later */ @@ -45,7 +45,7 @@ struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct return NULL; } - core = _mali_osk_malloc(sizeof(struct mali_pp_core)); + core = _mali_osk_calloc(1, sizeof(struct mali_pp_core)); if (NULL != core) { core->core_id = mali_global_num_pp_cores; core->bcast_id = bcast_id; @@ -149,8 +149,6 @@ _mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core) if (MALI_REG_POLL_COUNT_FAST == i) { MALI_PRINT_ERROR(("Mali PP: Failed to stop bus on %s. 
Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS))); - if (mali_gp_reset_fail < 65533) - mali_gp_reset_fail++; return _MALI_OSK_ERR_FAULT; } return _MALI_OSK_ERR_OK; @@ -203,20 +201,19 @@ static const u32 mali_wb_registers_reset_values[_MALI_PP_MAX_WB_REGISTERS] = { /* Performance Counter 0 Enable Register reset value */ static const u32 mali_perf_cnt_enable_reset_value = 0; -extern int pp_hardware_reset; _mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core) { /* Bus must be stopped before calling this function */ + const u32 reset_wait_target_register = MALI200_REG_ADDR_MGMT_PERF_CNT_0_LIMIT; const u32 reset_invalid_value = 0xC0FFE000; const u32 reset_check_value = 0xC01A0000; int i; MALI_DEBUG_ASSERT_POINTER(core); MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description)); - pp_hardware_reset ++; /* Set register to a bogus value. The register will be used to detect when reset is complete */ - mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value); + mali_hw_core_register_write_relaxed(&core->hw_core, reset_wait_target_register, reset_invalid_value); mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE); /* Force core to reset */ @@ -224,8 +221,8 @@ _mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core) /* Wait for reset to be complete */ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) { - mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value); - if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW)) { + mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value); + if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) { break; } } @@ -234,7 +231,7 @@ 
_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core) MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n")); } - mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000); /* set it back to the default */ + mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, 0x00000000); /* set it back to the default */ /* Re-enable interrupts */ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL); mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); @@ -301,6 +298,13 @@ void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 s MALI_DEBUG_ASSERT_POINTER(core); + /* Change gpu secure mode if needed. */ + if (MALI_TRUE == mali_pp_job_is_protected_job(job) && MALI_FALSE == _mali_osk_gpu_secure_mode_is_enabled()) { + _mali_osk_gpu_secure_mode_enable(); + } else if (MALI_FALSE == mali_pp_job_is_protected_job(job) && MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) { + _mali_osk_gpu_secure_mode_disable(); + } + /* Write frame registers */ /* @@ -417,7 +421,7 @@ static void mali_pp_irq_probe_trigger(void *data) { struct mali_pp_core *core = (struct mali_pp_core *)data; mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); - mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_FORCE_HANG); + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_BUS_ERROR); _mali_osk_mem_barrier(); } @@ -427,8 +431,8 @@ static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data) u32 irq_readout; irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS); - if (MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout) { - mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, 
MALI200_REG_VAL_IRQ_FORCE_HANG); + if (MALI200_REG_VAL_IRQ_BUS_ERROR & irq_readout) { + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_BUS_ERROR); _mali_osk_mem_barrier(); return _MALI_OSK_ERR_OK; } @@ -479,6 +483,7 @@ void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mal #if defined(CONFIG_MALI400_PROFILING) _mali_osk_profiling_report_hw_counter(counter_index, val0); + _mali_osk_profiling_record_global_counters(counter_index, val0); #endif } @@ -488,6 +493,7 @@ void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mal #if defined(CONFIG_MALI400_PROFILING) _mali_osk_profiling_report_hw_counter(counter_index + 1, val1); + _mali_osk_profiling_record_global_counters(counter_index + 1, val1); #endif } } diff --git a/drivers/gpu/arm/mali/common/mali_pp.h b/drivers/gpu/arm/mali/common/mali_pp.h index 45712a30e831c..76f5bd0bedf63 100755 --- a/drivers/gpu/arm/mali/common/mali_pp.h +++ b/drivers/gpu/arm/mali/common/mali_pp.h @@ -93,6 +93,7 @@ MALI_STATIC_INLINE enum mali_interrupt_result mali_pp_get_interrupt_result(struc } else if (MALI200_REG_VAL_IRQ_END_OF_FRAME == rawstat_used) { return MALI_INTERRUPT_RESULT_SUCCESS; } + return MALI_INTERRUPT_RESULT_ERROR; } diff --git a/drivers/gpu/arm/mali/common/mali_pp_job.c b/drivers/gpu/arm/mali/common/mali_pp_job.c index 86ca93581bedd..27349c9ca8d3d 100755 --- a/drivers/gpu/arm/mali/common/mali_pp_job.c +++ b/drivers/gpu/arm/mali/common/mali_pp_job.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -18,6 +18,8 @@ #if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) #include "linux/mali_memory_dma_buf.h" #endif +#include "mali_memory_swap_alloc.h" +#include "mali_scheduler.h" static u32 pp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */ static u32 pp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */ @@ -43,7 +45,10 @@ struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, job = _mali_osk_calloc(1, sizeof(struct mali_pp_job)); if (NULL != job) { - u32 num_memory_cookies = 0; + + _mali_osk_list_init(&job->list); + _mali_osk_list_init(&job->session_fb_lookup_list); + if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) { goto fail; } @@ -78,7 +83,6 @@ struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, } } - _mali_osk_list_init(&job->list); job->session = session; job->id = id; @@ -88,17 +92,20 @@ struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_osk_atomic_init(&job->sub_jobs_completed, 0); _mali_osk_atomic_init(&job->sub_job_errors, 0); - num_memory_cookies = job->uargs.num_memory_cookies; - if (num_memory_cookies != 0) { + job->swap_status = MALI_NO_SWAP_IN; + job->user_notification = MALI_FALSE; + job->num_pp_cores_in_virtual = 0; + + if (job->uargs.num_memory_cookies > session->allocation_mgr.mali_allocation_num) { + MALI_PRINT_ERROR(("Mali PP job: The number of memory cookies is invalid !\n")); + goto fail; + } + + if (job->uargs.num_memory_cookies > 0) { u32 size; u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies; - if (num_memory_cookies > session->allocation_mgr.mali_allocation_nr) { - MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n")); - goto fail; - } - - size = sizeof(*memory_cookies) * num_memory_cookies; + size = 
sizeof(*memory_cookies) * (job->uargs.num_memory_cookies); job->memory_cookies = _mali_osk_malloc(size); if (NULL == job->memory_cookies) { @@ -120,6 +127,8 @@ struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job); mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence)); + mali_mem_swap_in_pages(job); + return job; } @@ -137,14 +146,27 @@ void mali_pp_job_delete(struct mali_pp_job *job) MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list)); MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list)); - if (NULL != job->finished_notification) { - _mali_osk_notification_delete(job->finished_notification); - } - if (NULL != job->memory_cookies) { +#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) + /* Unmap buffers attached to job */ + mali_dma_buf_unmap_job(job); +#endif + if (MALI_NO_SWAP_IN != job->swap_status) { + mali_mem_swap_out_pages(job); + } + _mali_osk_free(job->memory_cookies); } + if (job->user_notification) { + mali_scheduler_return_pp_job_to_user(job, + job->num_pp_cores_in_virtual); + } + + if (NULL != job->finished_notification) { + _mali_osk_notification_delete(job->finished_notification); + } + _mali_osk_atomic_term(&job->sub_jobs_completed); _mali_osk_atomic_term(&job->sub_job_errors); diff --git a/drivers/gpu/arm/mali/common/mali_pp_job.h b/drivers/gpu/arm/mali/common/mali_pp_job.h index bdd61c51169ab..d033fa3314151 100755 --- a/drivers/gpu/arm/mali/common/mali_pp_job.h +++ b/drivers/gpu/arm/mali/common/mali_pp_job.h @@ -26,6 +26,12 @@ #include "linux/mali_memory_dma_buf.h" #endif +typedef enum pp_job_status { + MALI_NO_SWAP_IN, + MALI_SWAP_IN_FAIL, + MALI_SWAP_IN_SUCC, +} pp_job_status; + /** * This structure represents a PP job, including all sub jobs. 
* @@ -55,6 +61,10 @@ struct mali_pp_job { u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */ u32 sub_jobs_num; /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */ + pp_job_status swap_status; /**< Used to track each PP job swap status, if fail, we need to drop them in scheduler part */ + mali_bool user_notification; /**< When we deferred delete PP job, we need to judge if we need to send job finish notification to user space */ + u32 num_pp_cores_in_virtual; /**< How many PP cores we have when job finished */ + /* * These members are used by both scheduler and executor. * They are "protected" by atomic operations. @@ -169,7 +179,7 @@ MALI_STATIC_INLINE u32 *mali_pp_job_get_dlbu_registers(struct mali_pp_job *job) MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual(struct mali_pp_job *job) { -#if defined(CONFIG_MALI450) +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) MALI_DEBUG_ASSERT_POINTER(job); return (0 == job->uargs.num_cores) ? MALI_TRUE : MALI_FALSE; #else @@ -485,6 +495,13 @@ MALI_STATIC_INLINE mali_bool mali_pp_job_is_window_surface( ? MALI_TRUE : MALI_FALSE; } +MALI_STATIC_INLINE mali_bool mali_pp_job_is_protected_job(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (job->uargs.flags & _MALI_PP_JOB_FLAG_PROTECTED) + ? MALI_TRUE : MALI_FALSE; +} + MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job) { MALI_DEBUG_ASSERT_POINTER(job); diff --git a/drivers/gpu/arm/mali/common/mali_scheduler.c b/drivers/gpu/arm/mali/common/mali_scheduler.c index b07f78a016627..a4136ce1130e9 100755 --- a/drivers/gpu/arm/mali/common/mali_scheduler.c +++ b/drivers/gpu/arm/mali/common/mali_scheduler.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. 
* * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -18,6 +18,9 @@ #include "mali_pp_job.h" #include "mali_executor.h" #include "mali_group.h" +#include +#include +#include "mali_pm_metrics.h" #if defined(CONFIG_DMA_SHARED_BUFFER) #include "mali_memory_dma_buf.h" @@ -32,16 +35,16 @@ */ /* - * If dma_buf with map on demand is used, we defer job deletion and job queue + * If dma_buf with map on demand is used, we defer job queue * if in atomic context, since both might sleep. */ #if defined(CONFIG_DMA_SHARED_BUFFER) #if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) -#define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE 1 #define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE 1 #endif #endif + /* * ---------- global variables (exported due to inline functions) ---------- */ @@ -61,11 +64,9 @@ _mali_osk_atomic_t mali_job_cache_order_autonumber; * ---------- static variables ---------- */ -#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) -static _mali_osk_wq_work_t *scheduler_wq_pp_job_delete = NULL; -static _mali_osk_spinlock_irq_t *scheduler_pp_job_delete_lock = NULL; +_mali_osk_wq_work_t *scheduler_wq_pp_job_delete = NULL; +_mali_osk_spinlock_irq_t *scheduler_pp_job_delete_lock = NULL; static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_deletion_queue); -#endif #if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) static _mali_osk_wq_work_t *scheduler_wq_pp_job_queue = NULL; @@ -87,13 +88,9 @@ static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job); static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job, mali_bool success); -static void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job, - u32 num_cores_in_virtual); -#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) static void mali_scheduler_deferred_pp_job_delete(struct 
mali_pp_job *job); -static void mali_scheduler_do_pp_job_delete(void *arg); -#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */ +void mali_scheduler_do_pp_job_delete(void *arg); #if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job); @@ -112,10 +109,12 @@ _mali_osk_errcode_t mali_scheduler_initialize(void) _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.normal_pri); _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.high_pri); job_queue_gp.depth = 0; + job_queue_gp.big_job_num = 0; _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.normal_pri); _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.high_pri); job_queue_pp.depth = 0; + job_queue_pp.big_job_num = 0; mali_scheduler_lock_obj = _mali_osk_spinlock_irq_init( _MALI_OSK_LOCKFLAG_ORDERED, @@ -124,7 +123,6 @@ _mali_osk_errcode_t mali_scheduler_initialize(void) mali_scheduler_terminate(); } -#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) scheduler_wq_pp_job_delete = _mali_osk_wq_create_work( mali_scheduler_do_pp_job_delete, NULL); if (NULL == scheduler_wq_pp_job_delete) { @@ -139,7 +137,6 @@ _mali_osk_errcode_t mali_scheduler_initialize(void) mali_scheduler_terminate(); return _MALI_OSK_ERR_FAULT; } -#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */ #if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) scheduler_wq_pp_job_queue = _mali_osk_wq_create_work( @@ -175,7 +172,6 @@ void mali_scheduler_terminate(void) } #endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */ -#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) if (NULL != scheduler_pp_job_delete_lock) { _mali_osk_spinlock_irq_term(scheduler_pp_job_delete_lock); scheduler_pp_job_delete_lock = NULL; @@ -185,7 +181,6 @@ void mali_scheduler_terminate(void) _mali_osk_wq_delete_work(scheduler_wq_pp_job_delete); scheduler_wq_pp_job_delete = NULL; } -#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */ if (NULL != mali_scheduler_lock_obj) { 
_mali_osk_spinlock_irq_term(mali_scheduler_lock_obj); @@ -196,7 +191,7 @@ void mali_scheduler_terminate(void) _mali_osk_atomic_term(&mali_job_id_autonumber); } -u32 mali_scheduler_job_physical_head_count(void) +u32 mali_scheduler_job_physical_head_count(mali_bool gpu_mode_is_secure) { /* * Count how many physical sub jobs are present from the head of queue @@ -221,16 +216,23 @@ u32 mali_scheduler_job_physical_head_count(void) * Remember; virtual jobs can't be queued and started * at the same time, so this must be a physical job */ - count += mali_pp_job_unstarted_sub_job_count(job); - if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) { - return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; + if ((MALI_FALSE == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job)) + || (MALI_TRUE == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job))) { + + count += mali_pp_job_unstarted_sub_job_count(job); + if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) { + return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; + } } } } _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri, struct mali_pp_job, list) { - if (MALI_FALSE == mali_pp_job_is_virtual(job)) { + if ((MALI_FALSE == mali_pp_job_is_virtual(job)) + && ((MALI_FALSE == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job)) + || (MALI_TRUE == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job)))) { + count += mali_pp_job_unstarted_sub_job_count(job); if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) { return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; @@ -243,24 +245,57 @@ u32 mali_scheduler_job_physical_head_count(void) _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri, struct mali_pp_job, list) { - if (MALI_FALSE == mali_pp_job_is_virtual(job)) { - /* any partially started is already counted */ - if (MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) { - count += mali_pp_job_unstarted_sub_job_count(job); - if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS 
<= - count) { - return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; - } + if ((MALI_FALSE == mali_pp_job_is_virtual(job)) + && (MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) + && ((MALI_FALSE == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job)) + || (MALI_TRUE == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job)))) { + + count += mali_pp_job_unstarted_sub_job_count(job); + if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) { + return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; } } else { /* Came across a virtual job, so stop counting */ return count; } } - return count; } +struct mali_pp_job *mali_scheduler_job_pp_next(void) +{ + struct mali_pp_job *job; + struct mali_pp_job *temp; + + MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + + /* Check for partially started normal pri jobs */ + if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) { + MALI_DEBUG_ASSERT(0 < job_queue_pp.depth); + + job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next, + struct mali_pp_job, list); + + MALI_DEBUG_ASSERT_POINTER(job); + + if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) { + return job; + } + } + + _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri, + struct mali_pp_job, list) { + return job; + } + + _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri, + struct mali_pp_job, list) { + return job; + } + + return NULL; +} + mali_bool mali_scheduler_job_next_is_virtual(void) { struct mali_pp_job *job; @@ -282,6 +317,7 @@ struct mali_gp_job *mali_scheduler_job_gp_get(void) MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); MALI_DEBUG_ASSERT(0 < job_queue_gp.depth); + MALI_DEBUG_ASSERT(job_queue_gp.big_job_num <= job_queue_gp.depth); if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) { queue = &job_queue_gp.high_pri; @@ -296,7 +332,14 @@ struct mali_gp_job *mali_scheduler_job_gp_get(void) mali_gp_job_list_remove(job); job_queue_gp.depth--; - + if (job->big_job) { + job_queue_gp.big_job_num --; + if 
(job_queue_gp.big_job_num < MALI_MAX_PENDING_BIG_JOB) { + /* wake up process */ + wait_queue_head_t *queue = mali_session_get_wait_queue(); + wake_up(queue); + } + } return job; } @@ -531,6 +574,7 @@ void mali_scheduler_complete_gp_job(struct mali_gp_job *job, if (mali_utilization_enabled()) { mali_utilization_gp_end(); } + mali_pm_record_gpu_idle(MALI_TRUE); } mali_gp_job_delete(job); @@ -541,10 +585,8 @@ void mali_scheduler_complete_pp_job(struct mali_pp_job *job, mali_bool user_notification, mali_bool dequeued) { - if (user_notification) { - mali_scheduler_return_pp_job_to_user(job, - num_cores_in_virtual); - } + job->user_notification = user_notification; + job->num_pp_cores_in_virtual = num_cores_in_virtual; if (dequeued) { #if defined(CONFIG_MALI_DVFS) @@ -560,18 +602,11 @@ void mali_scheduler_complete_pp_job(struct mali_pp_job *job, if (mali_utilization_enabled()) { mali_utilization_pp_end(); } + mali_pm_record_gpu_idle(MALI_FALSE); } -#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) - /* - * The deletion of the job object (releasing sync refs etc) - * must be done in a different context - */ + /* With ZRAM feature enabled, all pp jobs will be force to use deferred delete. */ mali_scheduler_deferred_pp_job_delete(job); -#else - /* no use cases need this in this configuration */ - mali_pp_job_delete(job); -#endif } void mali_scheduler_abort_session(struct mali_session_data *session) @@ -597,6 +632,7 @@ void mali_scheduler_abort_session(struct mali_session_data *session) if (mali_gp_job_get_session(gp_job) == session) { mali_gp_job_list_move(gp_job, &removed_jobs_gp); job_queue_gp.depth--; + job_queue_gp.big_job_num -= gp_job->big_job ? 1 : 0; } } @@ -606,6 +642,7 @@ void mali_scheduler_abort_session(struct mali_session_data *session) if (mali_gp_job_get_session(gp_job) == session) { mali_gp_job_list_move(gp_job, &removed_jobs_gp); job_queue_gp.depth--; + job_queue_gp.big_job_num -= gp_job->big_job ? 
1 : 0; } } @@ -624,7 +661,7 @@ void mali_scheduler_abort_session(struct mali_session_data *session) if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) { if (mali_pp_job_is_complete(pp_job)) { mali_pp_job_list_move(pp_job, - &removed_jobs_pp); + &removed_jobs_pp); } else { mali_pp_job_list_remove(pp_job); } @@ -647,7 +684,7 @@ void mali_scheduler_abort_session(struct mali_session_data *session) if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) { if (mali_pp_job_is_complete(pp_job)) { mali_pp_job_list_move(pp_job, - &removed_jobs_pp); + &removed_jobs_pp); } else { mali_pp_job_list_remove(pp_job); } @@ -969,6 +1006,7 @@ static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job) } job_queue_gp.depth += 1; + job_queue_gp.big_job_num += (job->big_job) ? 1 : 0; /* Add job to queue (mali_gp_job_queue_add find correct place). */ mali_gp_job_list_add(job, queue); @@ -991,6 +1029,8 @@ static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job) mali_utilization_gp_start(); } + mali_pm_record_gpu_active(MALI_TRUE); + /* Add profiling events for job enqueued */ _mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE | @@ -1028,6 +1068,10 @@ static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job) MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n", mali_pp_job_get_id(job), job)); return MALI_FALSE; /* job not queued */ + } else if (unlikely(MALI_SWAP_IN_FAIL == job->swap_status)) { + MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while swap in failed.\n", + mali_pp_job_get_id(job), job)); + return MALI_FALSE; } mali_pp_job_set_cache_order(job, mali_scheduler_get_new_cache_order()); @@ -1062,8 +1106,9 @@ static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job) mali_utilization_pp_start(); } - /* Add profiling events for job enqueued */ + mali_pm_record_gpu_active(MALI_FALSE); + /* Add profiling events for job enqueued */ 
_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | @@ -1106,6 +1151,8 @@ static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job, jobres = notification->result_buffer; MALI_DEBUG_ASSERT_POINTER(jobres); + jobres->pending_big_job_num = mali_scheduler_job_gp_big_job_count(); + jobres->user_job_ptr = mali_gp_job_get_user_id(job); if (MALI_TRUE == success) { jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS; @@ -1119,7 +1166,7 @@ static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job, mali_session_send_notification(session, notification); } -static void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job, +void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job, u32 num_cores_in_virtual) { u32 i; @@ -1170,8 +1217,6 @@ static void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job, mali_session_send_notification(session, notification); } -#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) - static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job) { MALI_DEBUG_ASSERT_POINTER(job); @@ -1183,7 +1228,7 @@ static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job) _mali_osk_wq_schedule_work(scheduler_wq_pp_job_delete); } -static void mali_scheduler_do_pp_job_delete(void *arg) +void mali_scheduler_do_pp_job_delete(void *arg) { _MALI_OSK_LIST_HEAD_STATIC_INIT(list); struct mali_pp_job *job; @@ -1202,14 +1247,12 @@ static void mali_scheduler_do_pp_job_delete(void *arg) _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list) { - _mali_osk_list_delinit(&job->list); + mali_pp_job_delete(job); /* delete the job object itself */ } } -#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */ - #if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job) @@ -1292,3 +1335,65 @@ static void mali_scheduler_do_pp_job_queue(void *arg) } #endif /* 
defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */ + +void mali_scheduler_gp_pp_job_queue_print(void) +{ + struct mali_gp_job *gp_job = NULL; + struct mali_gp_job *tmp_gp_job = NULL; + struct mali_pp_job *pp_job = NULL; + struct mali_pp_job *tmp_pp_job = NULL; + + MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj); + + /* dump job queup status */ + if ((0 == job_queue_gp.depth) && (0 == job_queue_pp.depth)) { + MALI_PRINT(("No GP&PP job in the job queue.\n")); + return; + } + + MALI_PRINT(("Total (%d) GP job in the job queue.\n", job_queue_gp.depth)); + if (job_queue_gp.depth > 0) { + if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) { + _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.high_pri, + struct mali_gp_job, list) { + MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job high_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid)); + } + } + + if (!_mali_osk_list_empty(&job_queue_gp.normal_pri)) { + _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.normal_pri, + struct mali_gp_job, list) { + MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job normal_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid)); + } + } + } + + MALI_PRINT(("Total (%d) PP job in the job queue.\n", job_queue_pp.depth)); + if (job_queue_pp.depth > 0) { + if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) { + _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.high_pri, + struct mali_pp_job, list) { + if (mali_pp_job_is_virtual(pp_job)) { + MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid)); + } else { + MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid)); + } + } + } + + if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) { + _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, 
&job_queue_pp.normal_pri, + struct mali_pp_job, list) { + if (mali_pp_job_is_virtual(pp_job)) { + MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid)); + } else { + MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid)); + } + } + } + } + + /* dump group running job status */ + mali_executor_running_status_print(); +} diff --git a/drivers/gpu/arm/mali/common/mali_scheduler.h b/drivers/gpu/arm/mali/common/mali_scheduler.h index 47632b51f085a..0ab91776654ff 100755 --- a/drivers/gpu/arm/mali/common/mali_scheduler.h +++ b/drivers/gpu/arm/mali/common/mali_scheduler.h @@ -20,6 +20,7 @@ struct mali_scheduler_job_queue { _MALI_OSK_LIST_HEAD(normal_pri); /* Queued jobs with normal priority */ _MALI_OSK_LIST_HEAD(high_pri); /* Queued jobs with high priority */ u32 depth; /* Depth of combined queues. */ + u32 big_job_num; }; extern _mali_osk_spinlock_irq_t *mali_scheduler_lock_obj; @@ -54,10 +55,15 @@ MALI_STATIC_INLINE u32 mali_scheduler_job_gp_count(void) { return job_queue_gp.depth; } +MALI_STATIC_INLINE u32 mali_scheduler_job_gp_big_job_count(void) +{ + return job_queue_gp.big_job_num; +} -u32 mali_scheduler_job_physical_head_count(void); +u32 mali_scheduler_job_physical_head_count(mali_bool gpu_mode_is_secure); mali_bool mali_scheduler_job_next_is_virtual(void); +struct mali_pp_job *mali_scheduler_job_pp_next(void); struct mali_gp_job *mali_scheduler_job_gp_get(void); struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void); @@ -113,8 +119,13 @@ void mali_scheduler_complete_pp_job(struct mali_pp_job *job, void mali_scheduler_abort_session(struct mali_session_data *session); +void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job, + u32 num_cores_in_virtual); + #if MALI_STATE_TRACKING u32 mali_scheduler_dump_state(char *buf, u32 size); #endif +void 
mali_scheduler_gp_pp_job_queue_print(void); + #endif /* __MALI_SCHEDULER_H__ */ diff --git a/drivers/gpu/arm/mali/common/mali_scheduler_types.h b/drivers/gpu/arm/mali/common/mali_scheduler_types.h index f862961d146e7..0819c590b4d2f 100755 --- a/drivers/gpu/arm/mali/common/mali_scheduler_types.h +++ b/drivers/gpu/arm/mali/common/mali_scheduler_types.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_session.c b/drivers/gpu/arm/mali/common/mali_session.c index 6506b1d228611..1b409619a6398 100755 --- a/drivers/gpu/arm/mali/common/mali_session.c +++ b/drivers/gpu/arm/mali/common/mali_session.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -8,20 +8,25 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ -#include #include "mali_osk.h" #include "mali_osk_list.h" #include "mali_session.h" #include "mali_ukk.h" +#ifdef MALI_MEM_SWAP_TRACKING +#include "mali_memory_swap_alloc.h" +#endif _MALI_OSK_LIST_HEAD(mali_sessions); static u32 mali_session_count = 0; _mali_osk_spinlock_irq_t *mali_sessions_lock = NULL; +wait_queue_head_t pending_queue; _mali_osk_errcode_t mali_session_initialize(void) { _MALI_OSK_INIT_LIST_HEAD(&mali_sessions); + /* init wait queue for big varying job */ + init_waitqueue_head(&pending_queue); mali_sessions_lock = _mali_osk_spinlock_irq_init( _MALI_OSK_LOCKFLAG_ORDERED, @@ -62,6 +67,11 @@ u32 mali_session_get_count(void) return mali_session_count; } +wait_queue_head_t *mali_session_get_wait_queue(void) +{ + return &pending_queue; +} + /* * Get the max completed window jobs from all active session, * which will be used in window render frame per sec calculate @@ -88,29 +98,47 @@ u32 mali_session_max_window_num(void) return max_window_num; } #endif + void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx) { struct mali_session_data *session, *tmp; - char task_comm[TASK_COMM_LEN]; - struct task_struct *ttask; u32 mali_mem_usage; u32 total_mali_mem_size; +#ifdef MALI_MEM_SWAP_TRACKING + u32 swap_pool_size; + u32 swap_unlock_size; +#endif MALI_DEBUG_ASSERT_POINTER(print_ctx); mali_session_lock(); - MALI_SESSION_FOREACH(session, tmp, link) { - ttask = pid_task(find_vpid(session->pid), PIDTYPE_PID); - //get_task_comm(task_comm, ttask); - strncpy(task_comm, ttask->comm, sizeof(ttask->comm)); - _mali_osk_ctxprintf(print_ctx, " %-25s %-10u %-25s %-10u %-15u %-15u %-10u %-10u\n", - session->comm, session->pid, task_comm, - session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK], session->max_mali_mem_allocated, - session->mali_mem_array[MALI_MEM_EXTERNAL], session->mali_mem_array[MALI_MEM_UMP], - session->mali_mem_array[MALI_MEM_DMA_BUF]); +#ifdef MALI_MEM_SWAP_TRACKING + _mali_osk_ctxprintf(print_ctx, " %-25s %-10u %-10u 
%-15u %-15u %-10u %-10u %-10u\n", + session->comm, session->pid, + (atomic_read(&session->mali_mem_allocated_pages)) * _MALI_OSK_MALI_PAGE_SIZE, + (unsigned int)session->max_mali_mem_allocated_size, + (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_EXTERNAL])) * _MALI_OSK_MALI_PAGE_SIZE), + (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_UMP])) * _MALI_OSK_MALI_PAGE_SIZE), + (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_DMA_BUF])) * _MALI_OSK_MALI_PAGE_SIZE), + (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_SWAP])) * _MALI_OSK_MALI_PAGE_SIZE) + ); +#else + _mali_osk_ctxprintf(print_ctx, " %-25s %-10u %-10u %-15u %-15u %-10u %-10u \n", + session->comm, session->pid, + (unsigned int)((atomic_read(&session->mali_mem_allocated_pages)) * _MALI_OSK_MALI_PAGE_SIZE), + (unsigned int)session->max_mali_mem_allocated_size, + (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_EXTERNAL])) * _MALI_OSK_MALI_PAGE_SIZE), + (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_UMP])) * _MALI_OSK_MALI_PAGE_SIZE), + (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_DMA_BUF])) * _MALI_OSK_MALI_PAGE_SIZE) + ); +#endif } mali_session_unlock(); mali_mem_usage = _mali_ukk_report_memory_usage(); total_mali_mem_size = _mali_ukk_report_total_memory_size(); _mali_osk_ctxprintf(print_ctx, "Mali mem usage: %u\nMali mem limit: %u\n", mali_mem_usage, total_mali_mem_size); +#ifdef MALI_MEM_SWAP_TRACKING + mali_mem_swap_tracking(&swap_pool_size, &swap_unlock_size); + _mali_osk_ctxprintf(print_ctx, "Mali swap mem pool : %u\nMali swap mem unlock: %u\n", swap_pool_size, swap_unlock_size); +#endif } diff --git a/drivers/gpu/arm/mali/common/mali_session.h b/drivers/gpu/arm/mali/common/mali_session.h index 9fad5cfead707..66d95c30073fd 100755 --- a/drivers/gpu/arm/mali/common/mali_session.h +++ b/drivers/gpu/arm/mali/common/mali_session.h @@ -23,11 +23,14 @@ struct mali_soft_system; /* Number of frame builder job 
lists per session. */ #define MALI_PP_JOB_FB_LOOKUP_LIST_SIZE 16 #define MALI_PP_JOB_FB_LOOKUP_LIST_MASK (MALI_PP_JOB_FB_LOOKUP_LIST_SIZE - 1) +/*Max pending big job allowed in kernel*/ +#define MALI_MAX_PENDING_BIG_JOB (2) struct mali_session_data { _mali_osk_notification_queue_t *ioctl_queue; _mali_osk_mutex_t *memory_lock; /**< Lock protecting the vm manipulation */ + _mali_osk_mutex_t *cow_lock; /** < Lock protecting the cow memory free manipulation */ #if 0 _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */ #endif @@ -49,8 +52,9 @@ struct mali_session_data { mali_bool use_high_priority_job_queue; /**< If MALI_TRUE, jobs added from this session will use the high priority job queues. */ u32 pid; char *comm; - size_t mali_mem_array[MALI_MEM_TYPE_MAX]; /**< The array to record all mali mem types' usage for this session. */ - size_t max_mali_mem_allocated; /**< The past max mali memory usage for this session. */ + atomic_t mali_mem_array[MALI_MEM_TYPE_MAX]; /**< The array to record mem types' usage for this session. */ + atomic_t mali_mem_allocated_pages; /** The current allocated mali memory pages, which include mali os memory and mali dedicated memory.*/ + size_t max_mali_mem_allocated_size; /**< The past max mali memory allocated size, which include mali os memory and mali dedicated memory. 
*/ /* Added for new memroy system */ struct mali_allocation_manager allocation_mgr; }; @@ -76,6 +80,7 @@ MALI_STATIC_INLINE void mali_session_unlock(void) void mali_session_add(struct mali_session_data *session); void mali_session_remove(struct mali_session_data *session); u32 mali_session_get_count(void); +wait_queue_head_t *mali_session_get_wait_queue(void); #define MALI_SESSION_FOREACH(session, tmp, link) \ _MALI_OSK_LIST_FOREACHENTRY(session, tmp, &mali_sessions, struct mali_session_data, link) diff --git a/drivers/gpu/arm/mali/common/mali_soft_job.c b/drivers/gpu/arm/mali/common/mali_soft_job.c index 36ac982e1df08..c76d1fef1ea7b 100755 --- a/drivers/gpu/arm/mali/common/mali_soft_job.c +++ b/drivers/gpu/arm/mali/common/mali_soft_job.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_soft_job.h b/drivers/gpu/arm/mali/common/mali_soft_job.h index f35394e603846..4dd05897b2e0c 100755 --- a/drivers/gpu/arm/mali/common/mali_soft_job.h +++ b/drivers/gpu/arm/mali/common/mali_soft_job.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/mali/common/mali_spinlock_reentrant.c b/drivers/gpu/arm/mali/common/mali_spinlock_reentrant.c index 178abaf43ba1e..28e2e17112774 100755 --- a/drivers/gpu/arm/mali/common/mali_spinlock_reentrant.c +++ b/drivers/gpu/arm/mali/common/mali_spinlock_reentrant.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_spinlock_reentrant.h b/drivers/gpu/arm/mali/common/mali_spinlock_reentrant.h index 6a62df850b2f5..793875a0e7ed1 100755 --- a/drivers/gpu/arm/mali/common/mali_spinlock_reentrant.h +++ b/drivers/gpu/arm/mali/common/mali_spinlock_reentrant.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_timeline.c b/drivers/gpu/arm/mali/common/mali_timeline.c index f18aaa633845a..88bc5a975e01e 100755 --- a/drivers/gpu/arm/mali/common/mali_timeline.c +++ b/drivers/gpu/arm/mali/common/mali_timeline.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -237,18 +237,20 @@ static void mali_timeline_destroy(struct mali_timeline *timeline) MALI_DEBUG_ASSERT(NULL != timeline->system); MALI_DEBUG_ASSERT(MALI_TIMELINE_MAX > timeline->id); + if (NULL != timeline->delayed_work) { + _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work); + _mali_osk_wq_delayed_delete_work_nonflush(timeline->delayed_work); + } + #if defined(CONFIG_SYNC) if (NULL != timeline->sync_tl) { sync_timeline_destroy(timeline->sync_tl); } #endif /* defined(CONFIG_SYNC) */ - if (NULL != timeline->delayed_work) { - _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work); - _mali_osk_wq_delayed_delete_work_nonflush(timeline->delayed_work); - } - +#ifndef CONFIG_SYNC _mali_osk_free(timeline); +#endif } } @@ -306,11 +308,19 @@ static struct mali_timeline *mali_timeline_create(struct mali_timeline_system *s return NULL; } + timeline->destroyed = MALI_FALSE; + timeline->sync_tl = mali_sync_timeline_create(timeline, timeline_name); if (NULL == timeline->sync_tl) { mali_timeline_destroy(timeline); return NULL; } + + timeline->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM); + if (NULL == timeline->spinlock) { + mali_timeline_destroy(timeline); + return NULL; + } } #endif /* defined(CONFIG_SYNC) */ @@ -609,8 +619,8 @@ void mali_timeline_system_release_waiter_list(struct mali_timeline_system *syste struct mali_timeline_waiter *tail, struct mali_timeline_waiter *head) { - struct mali_timeline_waiter *waiter = NULL; - struct mali_timeline_waiter *next = NULL; + struct mali_timeline_waiter *waiter = NULL; + struct mali_timeline_waiter *next = NULL; MALI_DEBUG_ASSERT_POINTER(system); MALI_DEBUG_ASSERT_POINTER(head); @@ -620,13 +630,13 @@ void mali_timeline_system_release_waiter_list(struct mali_timeline_system *syste head->tracker_next = system->waiter_empty_list; system->waiter_empty_list = tail; - waiter = system->waiter_empty_list; - while (NULL != waiter) { - next = waiter->tracker_next; - _mali_osk_free(waiter); - 
waiter = next; - } - system->waiter_empty_list = NULL; + waiter = system->waiter_empty_list; + while (NULL != waiter) { + next = waiter->tracker_next; + _mali_osk_free(waiter); + waiter = next; + } + system->waiter_empty_list = NULL; } static mali_scheduler_mask mali_timeline_tracker_activate(struct mali_timeline_tracker *tracker) @@ -913,6 +923,9 @@ void mali_timeline_system_destroy(struct mali_timeline_system *system) { u32 i; struct mali_timeline_waiter *waiter, *next; +#if defined(CONFIG_SYNC) + u32 tid = _mali_osk_get_tid(); +#endif MALI_DEBUG_ASSERT_POINTER(system); MALI_DEBUG_ASSERT_POINTER(system->session); @@ -920,6 +933,7 @@ void mali_timeline_system_destroy(struct mali_timeline_system *system) MALI_DEBUG_PRINT(4, ("Mali Timeline: destroying timeline system\n")); if (NULL != system) { + /* There should be no waiters left on this queue. */ if (NULL != system->wait_queue) { _mali_osk_wait_queue_term(system->wait_queue); @@ -938,6 +952,14 @@ void mali_timeline_system_destroy(struct mali_timeline_system *system) if (NULL != system->signaled_sync_tl) { sync_timeline_destroy(system->signaled_sync_tl); } + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + if ((NULL != system->timelines[i]) && (NULL != system->timelines[i]->spinlock)) { + mali_spinlock_reentrant_wait(system->timelines[i]->spinlock, tid); + system->timelines[i]->destroyed = MALI_TRUE; + mali_spinlock_reentrant_signal(system->timelines[i]->spinlock, tid); + } + } #endif /* defined(CONFIG_SYNC) */ for (i = 0; i < MALI_TIMELINE_MAX; ++i) { @@ -945,6 +967,7 @@ void mali_timeline_system_destroy(struct mali_timeline_system *system) mali_timeline_destroy(system->timelines[i]); } } + if (NULL != system->spinlock) { mali_spinlock_reentrant_term(system->spinlock); } @@ -1448,11 +1471,11 @@ void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _m is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? 
"WaitGP" : " ", tracker->fence.points[0], is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1], is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2], - tracker->fence.sync_fd, tracker->sync_fence, tracker->job); + tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job)); } else { _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n", tracker_type, tracker->point, state_char, - tracker->fence.sync_fd, tracker->sync_fence, tracker->job); + tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job)); } #else if (0 != tracker->trigger_ref_count) { @@ -1461,11 +1484,11 @@ void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _m is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0], is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1], is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? 
"WaitSOFT" : " ", tracker->fence.points[2], - tracker->job); + (unsigned int)(uintptr_t)(tracker->job)); } else { _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c job:(0x%08X)\n", tracker_type, tracker->point, state_char, - tracker->job); + (unsigned int)(uintptr_t)(tracker->job)); } #endif } @@ -1483,6 +1506,62 @@ void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_os } } +#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) +void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker) +{ + const char *tracker_state = "IWAF"; + char state_char = 'I'; + char tracker_type[32] = {0}; + + MALI_DEBUG_ASSERT_POINTER(tracker); + + state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)); + _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type)); + +#if defined(CONFIG_SYNC) + if (0 != tracker->trigger_ref_count) { + MALI_PRINT(("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)] job:(0x%08X)\n", + tracker_type, tracker->point, state_char, tracker->trigger_ref_count, + is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0], + is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1], + is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2], + tracker->fence.sync_fd, tracker->sync_fence, tracker->job)); + } else { + MALI_PRINT(("TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n", + tracker_type, tracker->point, state_char, + tracker->fence.sync_fd, tracker->sync_fence, tracker->job)); + } +#else + if (0 != tracker->trigger_ref_count) { + MALI_PRINT(("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n", + tracker_type, tracker->point, state_char, tracker->trigger_ref_count, + is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? 
"WaitGP" : " ", tracker->fence.points[0], + is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1], + is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2], + tracker->job)); + } else { + MALI_PRINT(("TL: %s %u %c job:(0x%08X)\n", + tracker_type, tracker->point, state_char, + tracker->job)); + } +#endif +} + +void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline) +{ + struct mali_timeline_tracker *tracker = NULL; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + tracker = timeline->tracker_tail; + while (NULL != tracker) { + mali_timeline_debug_direct_print_tracker(tracker); + tracker = tracker->timeline_next; + } +} + +#endif + void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx) { int i; diff --git a/drivers/gpu/arm/mali/common/mali_timeline.h b/drivers/gpu/arm/mali/common/mali_timeline.h index 1ad308f2e03a3..268676a6f2fab 100755 --- a/drivers/gpu/arm/mali/common/mali_timeline.h +++ b/drivers/gpu/arm/mali/common/mali_timeline.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -18,6 +18,7 @@ #include "mali_spinlock_reentrant.h" #include "mali_sync.h" #include "mali_scheduler_types.h" +#include /** * Soft job timeout. @@ -140,6 +141,8 @@ struct mali_timeline { #if defined(CONFIG_SYNC) struct sync_timeline *sync_tl; /**< Sync timeline that corresponds to this timeline. 
*/ + mali_bool destroyed; + struct mali_spinlock_reentrant *spinlock; /**< Spin lock protecting the timeline system */ #endif /* defined(CONFIG_SYNC) */ /* The following fields are used to time out soft job trackers. */ @@ -515,6 +518,11 @@ void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _m */ void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx); +#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) +void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker); +void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline); +#endif + /** * Print debug information about timeline system. * diff --git a/drivers/gpu/arm/mali/common/mali_timeline_fence_wait.c b/drivers/gpu/arm/mali/common/mali_timeline_fence_wait.c index 3c58928dd3a2d..1f5d42172c9f8 100755 --- a/drivers/gpu/arm/mali/common/mali_timeline_fence_wait.c +++ b/drivers/gpu/arm/mali/common/mali_timeline_fence_wait.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_timeline_fence_wait.h b/drivers/gpu/arm/mali/common/mali_timeline_fence_wait.h index f5440ab6fc6d8..348652b4446a2 100755 --- a/drivers/gpu/arm/mali/common/mali_timeline_fence_wait.h +++ b/drivers/gpu/arm/mali/common/mali_timeline_fence_wait.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2013, 2016 ARM Limited. All rights reserved. 
* * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_timeline_sync_fence.c b/drivers/gpu/arm/mali/common/mali_timeline_sync_fence.c index 73843f07c9905..a83252b7f8460 100755 --- a/drivers/gpu/arm/mali/common/mali_timeline_sync_fence.c +++ b/drivers/gpu/arm/mali/common/mali_timeline_sync_fence.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_timeline_sync_fence.h b/drivers/gpu/arm/mali/common/mali_timeline_sync_fence.h index 29a3822457e9b..6662b25b17bef 100755 --- a/drivers/gpu/arm/mali/common/mali_timeline_sync_fence.h +++ b/drivers/gpu/arm/mali/common/mali_timeline_sync_fence.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_ukk.h b/drivers/gpu/arm/mali/common/mali_ukk.h index fdfce57b1c049..79829b9d891e5 100755 --- a/drivers/gpu/arm/mali/common/mali_ukk.h +++ b/drivers/gpu/arm/mali/common/mali_ukk.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. 
* * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -271,6 +271,11 @@ _mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args */ _mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args); +/** @brief Make process sleep if the pending big job in kernel >= MALI_MAX_PENDING_BIG_JOB + * + */ +_mali_osk_errcode_t _mali_ukk_pending_submit(_mali_uk_pending_submit_s *args); + /** @} */ /* end group _mali_uk_core */ @@ -484,12 +489,17 @@ _mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s */ _mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args); -/** @brief Return the total memory usage +/** @brief Get profiling stream fd. * - * @param args see _mali_uk_profiling_memory_usage_get_s in "mali_utgard_uk_types.h" - * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + * @param args see _mali_uk_profiling_stream_fd_get_s in "mali_utgard_uk_types.h" + */ +_mali_osk_errcode_t _mali_ukk_profiling_stream_fd_get(_mali_uk_profiling_stream_fd_get_s *args); + +/** @brief Profiling control set. 
+ * + * @param args see _mali_uk_profiling_control_set_s in "mali_utgard_uk_types.h" */ -_mali_osk_errcode_t _mali_ukk_profiling_memory_usage_get(_mali_uk_profiling_memory_usage_get_s *args); +_mali_osk_errcode_t _mali_ukk_profiling_control_set(_mali_uk_profiling_control_set_s *args); /** @} */ /* end group _mali_uk_profiling */ #endif diff --git a/drivers/gpu/arm/mali/common/mali_user_settings_db.c b/drivers/gpu/arm/mali/common/mali_user_settings_db.c index 54e1580fad1a9..a1a613fde3a96 100755 --- a/drivers/gpu/arm/mali/common/mali_user_settings_db.c +++ b/drivers/gpu/arm/mali/common/mali_user_settings_db.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/common/mali_user_settings_db.h b/drivers/gpu/arm/mali/common/mali_user_settings_db.h index 0732c3e56e2ad..6828dc7b30d59 100755 --- a/drivers/gpu/arm/mali/common/mali_user_settings_db.h +++ b/drivers/gpu/arm/mali/common/mali_user_settings_db.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2012-2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/mali/include/linux/mali/mali_utgard.h b/drivers/gpu/arm/mali/include/linux/mali/mali_utgard.h index a3f7ac4cf016a..a2416a5724a71 100755 --- a/drivers/gpu/arm/mali/include/linux/mali/mali_utgard.h +++ b/drivers/gpu/arm/mali/include/linux/mali/mali_utgard.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -17,6 +17,13 @@ #define __MALI_UTGARD_H__ #include "mali_osk_types.h" +#ifdef CONFIG_MALI_DEVFREQ +#include +#include "mali_pm_metrics.h" +#ifdef CONFIG_DEVFREQ_THERMAL +#include +#endif +#endif #define MALI_GPU_NAME_UTGARD "mali-utgard" @@ -50,6 +57,7 @@ #define MALI450_OFFSET_L2_CACHE0 MALI_OFFSET_L2_RESOURCE1 #define MALI450_OFFSET_L2_CACHE1 MALI_OFFSET_L2_RESOURCE0 #define MALI450_OFFSET_L2_CACHE2 MALI_OFFSET_L2_RESOURCE2 +#define MALI470_OFFSET_L2_CACHE1 MALI_OFFSET_L2_RESOURCE0 #define MALI_OFFSET_BCAST 0x13000 #define MALI_OFFSET_DLBU 0x14000 @@ -207,6 +215,65 @@ MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + /* Mali - 470 */ +#define MALI_GPU_RESOURCES_MALI470_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + 
MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \ + MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \ + MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) + +#define MALI_GPU_RESOURCES_MALI470_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCES_MALI470_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + +#define MALI_GPU_RESOURCES_MALI470_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \ + MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \ + MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \ + MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) + +#define MALI_GPU_RESOURCES_MALI470_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCES_MALI470_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + +#define MALI_GPU_RESOURCES_MALI470_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) 
\ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \ + MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \ + MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \ + MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) + +#define MALI_GPU_RESOURCES_MALI470_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCES_MALI470_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + +#define MALI_GPU_RESOURCES_MALI470_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \ + MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \ + MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \ + MALI_GPU_RESOURCE_PP_BCAST(base_addr + 
MALI_OFFSET_PP_BCAST, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) + +#define MALI_GPU_RESOURCES_MALI470_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCES_MALI470_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + #define MALI_GPU_RESOURCE_L2(addr) \ { \ .name = "Mali_L2", \ @@ -420,6 +487,18 @@ void (*get_clock_info)(struct mali_gpu_clock **data); /* Function that get the current clock info, needed when CONFIG_MALI_DVFS enabled */ int (*get_freq)(void); + /* Function that init the mali gpu secure mode */ + int (*secure_mode_init)(void); + /* Function that deinit the mali gpu secure mode */ + void (*secure_mode_deinit)(void); + /* Function that enable the mali gpu secure mode */ + int (*secure_mode_enable)(void); + /* Function that disable the mali gpu secure mode */ + int (*secure_mode_disable)(void); + /* ipa related interface customer need register */ +#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL) + struct devfreq_cooling_power *gpu_cooling_ops; +#endif }; /** diff --git a/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_ioctl.h b/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_ioctl.h index 6bb675f4b22b2..dfe152bae8651 100755 --- a/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_ioctl.h +++ b/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_ioctl.h @@ -1,11 +1,21 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
* * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + * Class Path Exception + * Linking this library statically or dynamically with other modules is making a combined work based on this library. + * Thus, the terms and conditions of the GNU General Public License cover the whole combination. + * As a special exception, the copyright holders of this library give you permission to link this library with independent modules + * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting + * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions + * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify + * this library, you may extend this exception to your version of the library, but you are not obligated to do so. + * If you do not wish to do so, delete this exception statement from your version. 
*/ #ifndef __MALI_UTGARD_IOCTL_H__ @@ -50,11 +60,15 @@ extern "C" { #define MALI_IOC_TIMELINE_CREATE_SYNC_FENCE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, _mali_uk_timeline_create_sync_fence_s) #define MALI_IOC_SOFT_JOB_START _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_START, _mali_uk_soft_job_start_s) #define MALI_IOC_SOFT_JOB_SIGNAL _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_SIGNAL, _mali_uk_soft_job_signal_s) +#define MALI_IOC_PENDING_SUBMIT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_PENDING_SUBMIT, _mali_uk_pending_submit_s) #define MALI_IOC_MEM_ALLOC _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ALLOC_MEM, _mali_uk_alloc_mem_s) #define MALI_IOC_MEM_FREE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_MEM, _mali_uk_free_mem_s) #define MALI_IOC_MEM_BIND _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_BIND_MEM, _mali_uk_bind_mem_s) #define MALI_IOC_MEM_UNBIND _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_UNBIND_MEM, _mali_uk_unbind_mem_s) +#define MALI_IOC_MEM_COW _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_COW_MEM, _mali_uk_cow_mem_s) +#define MALI_IOC_MEM_COW_MODIFY_RANGE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_COW_MODIFY_RANGE, _mali_uk_cow_modify_range_s) +#define MALI_IOC_MEM_RESIZE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_RESIZE_MEM, _mali_uk_mem_resize_s) #define MALI_IOC_MEM_DMA_BUF_GET_SIZE _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s) #define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s) #define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s) @@ -74,6 +88,8 @@ extern "C" { #define MALI_IOC_PROFILING_ADD_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s) #define MALI_IOC_PROFILING_REPORT_SW_COUNTERS _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s) #define 
MALI_IOC_PROFILING_MEMORY_USAGE_GET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_MEMORY_USAGE_GET, _mali_uk_profiling_memory_usage_get_s) +#define MALI_IOC_PROFILING_STREAM_FD_GET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STREAM_FD_GET, _mali_uk_profiling_stream_fd_get_s) +#define MALI_IOC_PROILING_CONTROL_SET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CONTROL_SET, _mali_uk_profiling_control_set_s) #define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s) diff --git a/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_profiling_events.h b/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_profiling_events.h index 6eff72194103c..eea6dac92dd0e 100755 --- a/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_profiling_events.h +++ b/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_profiling_events.h @@ -1,13 +1,24 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + * Class Path Exception + * Linking this library statically or dynamically with other modules is making a combined work based on this library. + * Thus, the terms and conditions of the GNU General Public License cover the whole combination. 
+ * As a special exception, the copyright holders of this library give you permission to link this library with independent modules + * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting + * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions + * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify + * this library, you may extend this exception to your version of the library, but you are not obligated to do so. + * If you do not wish to do so, delete this exception statement from your version. */ + #ifndef _MALI_UTGARD_PROFILING_EVENTS_H_ #define _MALI_UTGARD_PROFILING_EVENTS_H_ @@ -131,6 +142,7 @@ typedef enum { MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_JOBS_WAIT = 46, /* USED */ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOFRAMES_WAIT = 47, /* USED */ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOJOBS_WAIT = 48, /* USED */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_SUBMIT_LIMITER_WAIT = 49, /* USED */ } cinstr_profiling_event_reason_suspend_resume_sw_t; /** diff --git a/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_profiling_gator_api.h b/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_profiling_gator_api.h index 76f03fcc81926..c82d8b1557b6e 100755 --- a/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_profiling_gator_api.h +++ b/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_profiling_gator_api.h @@ -1,11 +1,21 @@ /* - * Copyright (C) 2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2013, 2015-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
* * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + * Class Path Exception + * Linking this library statically or dynamically with other modules is making a combined work based on this library. + * Thus, the terms and conditions of the GNU General Public License cover the whole combination. + * As a special exception, the copyright holders of this library give you permission to link this library with independent modules + * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting + * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions + * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify + * this library, you may extend this exception to your version of the library, but you are not obligated to do so. + * If you do not wish to do so, delete this exception statement from your version. */ #ifndef __MALI_UTGARD_PROFILING_GATOR_API_H__ @@ -21,6 +31,39 @@ extern "C" { #define MAX_NUM_FP_CORES 8 #define MAX_NUM_VP_CORES 1 +#define _MALI_SPCIAL_COUNTER_DESCRIPTIONS \ + { \ + "Filmstrip_cnt0", \ + "Frequency", \ + "Voltage", \ + "vertex", \ + "fragment", \ + "Total_alloc_pages", \ + }; + +#define _MALI_MEM_COUTNER_DESCRIPTIONS \ + { \ + "untyped_memory", \ + "vertex_index_buffer", \ + "texture_buffer", \ + "varying_buffer", \ + "render_target", \ + "pbuffer_buffer", \ + "plbu_heap", \ + "pointer_array_buffer", \ + "slave_tilelist", \ + "untyped_gp_cmdlist", \ + "polygon_cmdlist", \ + "texture_descriptor", \ + "render_state_word", \ + "shader", \ + "stream_buffer", \ + "fragment_stack", \ + "uniform", \ + "untyped_frame_pool", \ + "untyped_surface", \ + }; + /** The list of events supported by the Mali DDK. 
*/ typedef enum { /* Vertex processor activity */ @@ -116,8 +159,41 @@ typedef enum { COUNTER_GLES_STRIP_LINES_COUNT, COUNTER_GLES_LOOP_LINES_COUNT, + /* Special counter */ + /* Framebuffer capture pseudo-counter */ COUNTER_FILMSTRIP, + COUNTER_FREQUENCY, + COUNTER_VOLTAGE, + COUNTER_VP_ACTIVITY, + COUNTER_FP_ACTIVITY, + COUNTER_TOTAL_ALLOC_PAGES, + + /* Memory usage counter */ + COUNTER_MEM_UNTYPED, + COUNTER_MEM_VB_IB, + COUNTER_MEM_TEXTURE, + COUNTER_MEM_VARYING, + COUNTER_MEM_RT, + COUNTER_MEM_PBUFFER, + /* memory usages for gp command */ + COUNTER_MEM_PLBU_HEAP, + COUNTER_MEM_POINTER_ARRAY, + COUNTER_MEM_SLAVE_TILELIST, + COUNTER_MEM_UNTYPE_GP_CMDLIST, + /* memory usages for polygon list command */ + COUNTER_MEM_POLYGON_CMDLIST, + /* memory usages for pp command */ + COUNTER_MEM_TD, + COUNTER_MEM_RSW, + /* other memory usages */ + COUNTER_MEM_SHADER, + COUNTER_MEM_STREAMS, + COUNTER_MEM_FRAGMENT_STACK, + COUNTER_MEM_UNIFORM, + /* Special mem usage, which is used for mem pool allocation */ + COUNTER_MEM_UNTYPE_MEM_POOL, + COUNTER_MEM_UNTYPE_SURFACE, NUMBER_OF_EVENTS } _mali_osk_counter_id; @@ -132,7 +208,34 @@ typedef enum { #define LAST_SW_COUNTER COUNTER_GLES_LOOP_LINES_COUNT #define FIRST_SPECIAL_COUNTER COUNTER_FILMSTRIP -#define LAST_SPECIAL_COUNTER COUNTER_FILMSTRIP +#define LAST_SPECIAL_COUNTER COUNTER_TOTAL_ALLOC_PAGES + +#define FIRST_MEM_COUNTER COUNTER_MEM_UNTYPED +#define LAST_MEM_COUNTER COUNTER_MEM_UNTYPE_SURFACE + +#define MALI_PROFILING_MEM_COUNTERS_NUM (LAST_MEM_COUNTER - FIRST_MEM_COUNTER + 1) +#define MALI_PROFILING_SPECIAL_COUNTERS_NUM (LAST_SPECIAL_COUNTER - FIRST_SPECIAL_COUNTER + 1) +#define MALI_PROFILING_SW_COUNTERS_NUM (LAST_SW_COUNTER - FIRST_SW_COUNTER + 1) + +/** + * Define the stream header type for porfiling stream. + */ +#define STREAM_HEADER_FRAMEBUFFER 0x05 /* The stream packet header type for framebuffer dumping. */ +#define STREAM_HEADER_COUNTER_VALUE 0x09 /* The stream packet header type for hw/sw/memory counter sampling. 
*/ +#define STREAM_HEADER_CORE_ACTIVITY 0x0a /* The stream packet header type for activity counter sampling. */ +#define STREAM_HEADER_SIZE 5 + +/** + * Define the packet header type of profiling control packet. + */ +#define PACKET_HEADER_ERROR 0x80 /* The response packet header type if error. */ +#define PACKET_HEADER_ACK 0x81 /* The response packet header type if OK. */ +#define PACKET_HEADER_COUNTERS_REQUEST 0x82 /* The control packet header type to request counter information from ddk. */ +#define PACKET_HEADER_COUNTERS_ACK 0x83 /* The response packet header type to send out counter information. */ +#define PACKET_HEADER_COUNTERS_ENABLE 0x84 /* The control packet header type to enable counters. */ +#define PACKET_HEADER_START_CAPTURE_VALUE 0x85 /* The control packet header type to start capture values. */ + +#define PACKET_HEADER_SIZE 5 /** * Structure to pass performance counter data of a Mali core @@ -170,6 +273,19 @@ typedef struct _mali_profiling_mali_version { u32 num_of_vp_cores; } _mali_profiling_mali_version; +/** + * Structure to define the mali profiling counter struct. + */ +typedef struct mali_profiling_counter { + char counter_name[40]; + u32 counter_id; + u32 counter_event; + u32 prev_counter_value; + u32 current_counter_value; + u32 key; + int enabled; +} mali_profiling_counter; + /* * List of possible actions to be controlled by Streamline. * The following numbers are used by gator to control the frame buffer dumping and s/w counter reporting. 
@@ -179,6 +295,8 @@ typedef struct _mali_profiling_mali_version { #define FBDUMP_CONTROL_RATE (2) #define SW_COUNTER_ENABLE (3) #define FBDUMP_CONTROL_RESIZE_FACTOR (4) +#define MEM_COUNTER_ENABLE (5) +#define ANNOTATE_PROFILING_ENABLE (6) void _mali_profiling_control(u32 action, u32 value); diff --git a/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_uk_types.h b/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_uk_types.h index 61b77ff0fcab9..ef60764120352 100755 --- a/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_uk_types.h +++ b/drivers/gpu/arm/mali/include/linux/mali/mali_utgard_uk_types.h @@ -1,11 +1,21 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + * Class Path Exception + * Linking this library statically or dynamically with other modules is making a combined work based on this library. + * Thus, the terms and conditions of the GNU General Public License cover the whole combination. + * As a special exception, the copyright holders of this library give you permission to link this library with independent modules + * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting + * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions + * of the license of that module. An independent module is a module which is not derived from or based on this library. 
If you modify + * this library, you may extend this exception to your version of the library, but you are not obligated to do so. + * If you do not wish to do so, delete this exception statement from your version. */ /** @@ -26,6 +36,8 @@ extern "C" { #define MALI_UK_TIMELINE_SOFT 2 #define MALI_UK_TIMELINE_MAX 3 +#define MALI_UK_BIG_VARYING_SIZE (1024*1024*2) + typedef struct { u32 points[MALI_UK_TIMELINE_MAX]; s32 sync_fd; @@ -81,13 +93,17 @@ typedef enum { _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, /**< _mali_ukk_timeline_create_sync_fence() */ _MALI_UK_SOFT_JOB_START, /**< _mali_ukk_soft_job_start() */ _MALI_UK_SOFT_JOB_SIGNAL, /**< _mali_ukk_soft_job_signal() */ + _MALI_UK_PENDING_SUBMIT, /**< _mali_ukk_pending_submit() */ /** Memory functions */ - _MALI_UK_ALLOC_MEM = 0, /**< _mali_ukk_init_mem() */ - _MALI_UK_FREE_MEM, /**< _mali_ukk_term_mem() */ - _MALI_UK_BIND_MEM, /**< _mali_ukk_mem_mmap() */ - _MALI_UK_UNBIND_MEM, /**< _mali_ukk_mem_munmap() */ + _MALI_UK_ALLOC_MEM = 0, /**< _mali_ukk_alloc_mem() */ + _MALI_UK_FREE_MEM, /**< _mali_ukk_free_mem() */ + _MALI_UK_BIND_MEM, /**< _mali_ukk_mem_bind() */ + _MALI_UK_UNBIND_MEM, /**< _mali_ukk_mem_unbind() */ + _MALI_UK_COW_MEM, /**< _mali_ukk_mem_cow() */ + _MALI_UK_COW_MODIFY_RANGE, /**< _mali_ukk_mem_cow_modify_range() */ + _MALI_UK_RESIZE_MEM, /**<._mali_ukk_mem_resize() */ _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */ _MALI_UK_DUMP_MMU_PAGE_TABLE, /**< _mali_ukk_mem_dump_mmu_page_table() */ _MALI_UK_DMA_BUF_GET_SIZE, /**< _mali_ukk_dma_buf_get_size() */ @@ -119,6 +135,8 @@ typedef enum { _MALI_UK_PROFILING_ADD_EVENT = 0, /**< __mali_uku_profiling_add_event() */ _MALI_UK_PROFILING_REPORT_SW_COUNTERS,/**< __mali_uku_profiling_report_sw_counters() */ _MALI_UK_PROFILING_MEMORY_USAGE_GET, /**< __mali_uku_profiling_memory_usage_get() */ + _MALI_UK_PROFILING_STREAM_FD_GET, /** < __mali_uku_profiling_stream_fd_get() */ + _MALI_UK_PROFILING_CONTROL_SET, /** < 
__mali_uku_profiling_control_set() */ /** VSYNC reporting fuctions */ _MALI_UK_VSYNC_EVENT_REPORT = 0, /**< _mali_ukk_vsync_event_report() */ @@ -261,6 +279,9 @@ typedef struct { u32 flush_id; /**< [in] flush id within the originating frame builder */ _mali_uk_fence_t fence; /**< [in] fence this job must wait on */ u64 timeline_point_ptr; /**< [in,out] pointer to u32: location where point on gp timeline for this job will be written */ + u32 varying_memsize; /** < [in] size of varying memory to use deffer bind*/ + u32 deferred_mem_num; + u64 deferred_mem_list; /** < [in] memory hanlde list of varying buffer to use deffer bind */ } _mali_uk_gp_start_job_s; #define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */ @@ -275,6 +296,7 @@ typedef struct { u32 heap_current_addr; /**< [out] value of the GP PLB PL heap start address register */ u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */ u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */ + u32 pending_big_job_num; } _mali_uk_gp_job_finished_s; typedef struct { @@ -299,6 +321,7 @@ typedef struct { /** Flag for _mali_uk_pp_start_job_s */ #define _MALI_PP_JOB_FLAG_NO_NOTIFICATION (1<<0) #define _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE (1<<1) +#define _MALI_PP_JOB_FLAG_PROTECTED (1<<2) /** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job * @{ */ @@ -437,6 +460,18 @@ typedef struct { /** @} */ /* end group _mali_uk_soft_job */ +typedef struct { + u32 counter_id; + u32 key; + int enable; +} _mali_uk_annotate_profiling_mem_counter_s; + +typedef struct { + u32 sampling_rate; + int enable; +} _mali_uk_annotate_profiling_enable_s; + + /** @addtogroup _mali_uk_core U/K Core * @{ */ @@ -471,6 +506,9 @@ typedef enum { _MALI_NOTIFICATION_GP_FINISHED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10, _MALI_NOTIFICATION_GP_STALLED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20, + /** Profiling notifications */ + 
_MALI_NOTIFICATION_ANNOTATE_PROFILING_MEM_COUNTER = (_MALI_UK_PROFILING_SUBSYSTEM << 16) | 0x10, + _MALI_NOTIFICATION_ANNOTATE_PROFILING_ENABLE = (_MALI_UK_PROFILING_SUBSYSTEM << 16) | 0x20, } _mali_uk_notification_type; /** to assist in splitting up 32-bit notification value in subsystem and id value */ @@ -575,6 +613,8 @@ typedef struct { _mali_uk_pp_job_finished_s pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */ _mali_uk_settings_changed_s setting_changed;/**< [out] Notification data for _MALI_NOTIFICAATION_SETTINGS_CHANGED notification type */ _mali_uk_soft_job_activated_s soft_job_activated; /**< [out] Notification data for _MALI_NOTIFICATION_SOFT_ACTIVATED notification type */ + _mali_uk_annotate_profiling_mem_counter_s profiling_mem_counter; + _mali_uk_annotate_profiling_enable_s profiling_enable; } data; } _mali_uk_wait_for_notification_s; @@ -618,7 +658,7 @@ typedef struct { * The 16bit integer is stored twice in a 32bit integer * For example, for version 1 the value would be 0x00010001 */ -#define _MALI_API_VERSION 800 +#define _MALI_API_VERSION 900 #define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION) /** @@ -698,6 +738,11 @@ typedef struct { u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ } _mali_uk_request_high_priority_s; +/** @brief Arguments for _mali_ukk_pending_submit() */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ +} _mali_uk_pending_submit_s; + /** @} */ /* end group _mali_uk_core */ @@ -706,14 +751,19 @@ typedef struct { #define _MALI_MEMORY_ALLOCATE_RESIZEABLE (1<<4) /* BUFFER can trim dow/grow*/ #define _MALI_MEMORY_ALLOCATE_NO_BIND_GPU (1<<5) /*Not map to GPU when allocate, must call bind later*/ +#define _MALI_MEMORY_ALLOCATE_SWAPPABLE (1<<6) /* Allocate swappale memory. 
*/ +#define _MALI_MEMORY_ALLOCATE_DEFER_BIND (1<<7) /*Not map to GPU when allocate, must call bind later*/ +#define _MALI_MEMORY_ALLOCATE_SECURE (1<<8) /* Allocate secure memory. */ + typedef struct { u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ - u32 gpu_vaddr; /**< [in] GPU virtual address */ + u32 gpu_vaddr; /**< [in] GPU virtual address */ u32 vsize; /**< [in] vitrual size of the allocation */ u32 psize; /**< [in] physical size of the allocation */ u32 flags; - u64 backend_handle; /**< [out] backend handle */ + u64 backend_handle; /**< [out] backend handle */ + s32 secure_shared_fd; /** < [in] the mem handle for secure mem */ struct { /* buffer types*/ /* CPU read/write info*/ @@ -722,8 +772,9 @@ typedef struct { typedef struct { - u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ u32 gpu_vaddr; /**< [in] use as handle to free allocation */ + u32 free_pages_nr; /** < [out] record the number of free pages */ } _mali_uk_free_mem_s; @@ -747,16 +798,16 @@ typedef struct { typedef struct { - u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ - u32 vaddr; /**< [in] mali address to map the physical memory to */ - u32 size; /**< [in] size */ - u32 flags; /**< [in] see_MALI_MEMORY_BIND_BACKEND_* */ - u32 padding; /** padding for 32/64 struct alignment */ + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 vaddr; /**< [in] mali address to map the physical memory to */ + u32 size; /**< [in] size */ + u32 flags; /**< [in] see_MALI_MEMORY_BIND_BACKEND_* */ + u32 padding; /** padding for 32/64 struct alignment */ union { struct { - u32 secure_id; /**< [in] secure id */ - u32 rights; /**< [in] rights necessary for accessing memory */ - u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */ + u32 secure_id; /**< [in] secure id */ + u32 rights; /**< [in] rights necessary for accessing memory */ + u32 flags; /**< [in] 
flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */ } bind_ump; struct { u32 mem_fd; /**< [in] Memory descriptor */ @@ -775,20 +826,32 @@ typedef struct { } _mali_uk_bind_mem_s; typedef struct { - u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ - u32 flags; /**< [in] see_MALI_MEMORY_BIND_BACKEND_* */ - u32 vaddr; /**< [in] identifier for mapped memory object in kernel space */ + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 flags; /**< [in] see_MALI_MEMORY_BIND_BACKEND_* */ + u32 vaddr; /**< [in] identifier for mapped memory object in kernel space */ } _mali_uk_unbind_mem_s; typedef struct { - u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ - u32 target_handle; /**< [in] handle of allocation need to do COW */ - u32 range_start; /**< [in] re allocate range start offset, offset from the start of allocation */ - u32 size; /**< [in] re allocate size*/ - u32 vaddr; /**< [in] mali address for the new allocaiton */ - u32 backend_handle; /**< [out] backend handle */ + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 target_handle; /**< [in] handle of allocation need to do COW */ + u32 target_offset; /**< [in] offset in target allocation to do COW(for support COW a memory allocated from memory_bank, PAGE_SIZE align)*/ + u32 target_size; /**< [in] size of target allocation to do COW (for support memory bank, PAGE_SIZE align)(in byte) */ + u32 range_start; /**< [in] re allocate range start offset, offset from the start of allocation (PAGE_SIZE align)*/ + u32 range_size; /**< [in] re allocate size (PAGE_SIZE align)*/ + u32 vaddr; /**< [in] mali address for the new allocaiton */ + u32 backend_handle; /**< [out] backend handle */ + u32 flags; } _mali_uk_cow_mem_s; +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 range_start; /**< [in] re allocate range start offset, offset from the start of allocation */ + u32 size; /**< [in] re allocate size*/ + u32 
vaddr; /**< [in] mali address for the new allocaiton */ + s32 change_pages_nr; /**< [out] record the page number change for cow operation */ +} _mali_uk_cow_modify_range_s; + + typedef struct { u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ u32 mem_fd; /**< [in] Memory descriptor */ @@ -797,22 +860,13 @@ typedef struct { /** Flag for _mali_uk_map_external_mem_s, _mali_uk_attach_ump_mem_s and _mali_uk_attach_dma_buf_s */ #define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0) -#if 0 -typedef struct { - u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ - u32 phys_addr; /**< [in] physical address */ - u32 size; /**< [in] size */ - u32 mali_address; /**< [in] mali address to map the physical memory to */ - u32 rights; /**< [in] rights necessary for accessing memory */ - u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */ - u32 cookie; /**< [out] identifier for mapped memory object in kernel space */ -} _mali_uk_map_external_mem_s; + typedef struct { - u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ - u32 cookie; /**< [out] identifier for mapped memory object in kernel space */ -} _mali_uk_unmap_external_mem_s; -#endif + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u64 vaddr; /* the buffer to do resize*/ + u32 psize; /* wanted physical size of this memory */ +} _mali_uk_mem_resize_s; /** * @brief Arguments for _mali_uk[uk]_mem_write_safe() @@ -908,6 +962,8 @@ typedef struct { typedef struct { u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ u32 memory_usage; /**< [out] total memory usage */ + u32 vaddr; /**< [in] mali address for the cow allocaiton */ + s32 change_pages_nr; /**< [out] record the page number change for cow operation */ } _mali_uk_profiling_memory_usage_get_s; @@ -945,6 +1001,7 @@ typedef struct { void *mapping; /**< [out] Returns user-space virtual address for the mapping */ u32 size; /**< [in] Size of the requested mapping */ u32 phys_addr; /**< 
[in] Physical address - could be offset, depending on caller+callee convention */ + mali_bool writeable; } _mali_uk_mem_mmap_s; /** @brief Arguments to _mali_ukk_mem_munmap() @@ -1030,6 +1087,19 @@ typedef struct { /** @} */ /* end group uddapi */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + s32 stream_fd; /**< [in] The profiling kernel base stream fd handle */ +} _mali_uk_profiling_stream_fd_get_s; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u64 control_packet_data; /**< [in] the control packet data for control settings */ + u32 control_packet_size; /**< [in] The control packet size */ + u64 response_packet_data; /** < [out] The response packet data */ + u32 response_packet_size; /** < [in,out] The response packet data */ +} _mali_uk_profiling_control_set_s; + #ifdef __cplusplus } #endif diff --git a/drivers/gpu/arm/mali/linux/license/gpl/mali_kernel_license.h b/drivers/gpu/arm/mali/linux/license/gpl/mali_kernel_license.h index c88c992e859b8..264555dda2cf2 100755 --- a/drivers/gpu/arm/mali/linux/license/gpl/mali_kernel_license.h +++ b/drivers/gpu/arm/mali/linux/license/gpl/mali_kernel_license.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/mali/linux/license/proprietary/mali_kernel_license.h b/drivers/gpu/arm/mali/linux/license/proprietary/mali_kernel_license.h new file mode 100644 index 0000000000000..aba8254e90c77 --- /dev/null +++ b/drivers/gpu/arm/mali/linux/license/proprietary/mali_kernel_license.h @@ -0,0 +1,30 @@ +/* + * This confidential and proprietary software may be used only as + * authorised by a licensing agreement from ARM Limited + * (C) COPYRIGHT 2010, 2013, 2016 ARM Limited + * ALL RIGHTS RESERVED + * The entire notice above must be reproduced on all authorised + * copies and copies may only be made to the extent permitted + * by a licensing agreement from ARM Limited. + */ + +/** + * @file mali_kernel_license.h + * Defines for the macro MODULE_LICENSE. + */ + +#ifndef __MALI_KERNEL_LICENSE_H__ +#define __MALI_KERNEL_LICENSE_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#define MALI_KERNEL_LINUX_LICENSE "Proprietary" +#define MALI_LICENSE_IS_GPL 0 + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_KERNEL_LICENSE_H__ */ diff --git a/drivers/gpu/arm/mali/linux/mali_devfreq.c b/drivers/gpu/arm/mali/linux/mali_devfreq.c new file mode 100755 index 0000000000000..0b0ba1481eed7 --- /dev/null +++ b/drivers/gpu/arm/mali/linux/mali_devfreq.c @@ -0,0 +1,310 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_osk_mali.h" +#include "mali_kernel_common.h" + +#include +#include +#include +#include +#ifdef CONFIG_DEVFREQ_THERMAL +#include +#endif + +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) +#include +#else /* Linux >= 3.13 */ +/* In 3.13 the OPP include header file, types, and functions were all + * renamed. Use the old filename for the include, and define the new names to + * the old, when an old kernel is detected. + */ +#include +#define dev_pm_opp opp +#define dev_pm_opp_get_voltage opp_get_voltage +#define dev_pm_opp_get_opp_count opp_get_opp_count +#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil +#endif /* Linux >= 3.13 */ + +#include "mali_pm_metrics.h" + +static int +mali_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags) +{ + struct mali_device *mdev = dev_get_drvdata(dev); + struct dev_pm_opp *opp; + unsigned long freq = 0; + unsigned long voltage; + int err; + + freq = *target_freq; + + rcu_read_lock(); + opp = devfreq_recommended_opp(dev, &freq, flags); + voltage = dev_pm_opp_get_voltage(opp); + rcu_read_unlock(); + if (IS_ERR_OR_NULL(opp)) { + MALI_PRINT_ERROR(("Failed to get opp (%ld)\n", PTR_ERR(opp))); + return PTR_ERR(opp); + } + + MALI_DEBUG_PRINT(2, ("mali_devfreq_target:set_freq = %lld flags = 0x%x\n", freq, flags)); + /* + * Only update if there is a change of frequency + */ + if (mdev->current_freq == freq) { + *target_freq = freq; + mali_pm_reset_dvfs_utilisation(mdev); + return 0; + } + +#ifdef CONFIG_REGULATOR + if (mdev->regulator && mdev->current_voltage != voltage + && mdev->current_freq < freq) { + err = regulator_set_voltage(mdev->regulator, voltage, voltage); + if (err) { + MALI_PRINT_ERROR(("Failed to increase voltage (%d)\n", err)); + return err; + } + } +#endif + + err = clk_set_rate(mdev->clock, freq); + if (err) { + MALI_PRINT_ERROR(("Failed to set clock %lu (target %lu)\n", freq, *target_freq)); + return err; + } + +#ifdef CONFIG_REGULATOR + if (mdev->regulator && 
mdev->current_voltage != voltage + && mdev->current_freq > freq) { + err = regulator_set_voltage(mdev->regulator, voltage, voltage); + if (err) { + MALI_PRINT_ERROR(("Failed to decrease voltage (%d)\n", err)); + return err; + } + } +#endif + + *target_freq = freq; + mdev->current_voltage = voltage; + mdev->current_freq = freq; + + mali_pm_reset_dvfs_utilisation(mdev); + + return err; +} + +static int +mali_devfreq_cur_freq(struct device *dev, unsigned long *freq) +{ + struct mali_device *mdev = dev_get_drvdata(dev); + + *freq = mdev->current_freq; + + MALI_DEBUG_PRINT(2, ("mali_devfreq_cur_freq: freq = %d \n", *freq)); + return 0; +} + +static int +mali_devfreq_status(struct device *dev, struct devfreq_dev_status *stat) +{ + struct mali_device *mdev = dev_get_drvdata(dev); + + stat->current_frequency = mdev->current_freq; + + mali_pm_get_dvfs_utilisation(mdev, + &stat->total_time, &stat->busy_time); + + stat->private_data = NULL; + +#ifdef CONFIG_DEVFREQ_THERMAL + memcpy(&mdev->devfreq->last_status, stat, sizeof(*stat)); +#endif + + return 0; +} + +/* setup platform specific opp in platform.c*/ +int __weak setup_opps(void) +{ + return 0; +} + +/* term platform specific opp in platform.c*/ +int __weak term_opps(struct device *dev) +{ + return 0; +} + +static int mali_devfreq_init_freq_table(struct mali_device *mdev, + struct devfreq_dev_profile *dp) +{ + int err, count; + int i = 0; + unsigned long freq = 0; + struct dev_pm_opp *opp; + + err = setup_opps(); + if (err) + return err; + + rcu_read_lock(); + count = dev_pm_opp_get_opp_count(mdev->dev); + if (count < 0) { + rcu_read_unlock(); + return count; + } + rcu_read_unlock(); + + MALI_DEBUG_PRINT(2, ("mali devfreq table count %d\n", count)); + + dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]), + GFP_KERNEL); + if (!dp->freq_table) + return -ENOMEM; + + rcu_read_lock(); + for (i = 0; i < count; i++, freq++) { + opp = dev_pm_opp_find_freq_ceil(mdev->dev, &freq); + if (IS_ERR(opp)) + break; + + 
dp->freq_table[i] = freq; + MALI_DEBUG_PRINT(2, ("mali devfreq table array[%d] = %d\n", i, freq)); + } + rcu_read_unlock(); + + if (count != i) + MALI_PRINT_ERROR(("Unable to enumerate all OPPs (%d!=%d)\n", + count, i)); + + dp->max_state = i; + + return 0; +} + +static void mali_devfreq_term_freq_table(struct mali_device *mdev) +{ + struct devfreq_dev_profile *dp = mdev->devfreq->profile; + + kfree(dp->freq_table); + term_opps(mdev->dev); +} + +static void mali_devfreq_exit(struct device *dev) +{ + struct mali_device *mdev = dev_get_drvdata(dev); + + mali_devfreq_term_freq_table(mdev); +} + +int mali_devfreq_init(struct mali_device *mdev) +{ +#ifdef CONFIG_DEVFREQ_THERMAL + struct devfreq_cooling_power *callbacks = NULL; + _mali_osk_device_data data; +#endif + struct devfreq_dev_profile *dp; + int err; + + MALI_DEBUG_PRINT(2, ("Init Mali devfreq\n")); + + if (!mdev->clock) + return -ENODEV; + + mdev->current_freq = clk_get_rate(mdev->clock); + + dp = &mdev->devfreq_profile; + + dp->initial_freq = mdev->current_freq; + dp->polling_ms = 100; + dp->target = mali_devfreq_target; + dp->get_dev_status = mali_devfreq_status; + dp->get_cur_freq = mali_devfreq_cur_freq; + dp->exit = mali_devfreq_exit; + + if (mali_devfreq_init_freq_table(mdev, dp)) + return -EFAULT; + + mdev->devfreq = devfreq_add_device(mdev->dev, dp, + "simple_ondemand", NULL); + if (IS_ERR(mdev->devfreq)) { + mali_devfreq_term_freq_table(mdev); + return PTR_ERR(mdev->devfreq); + } + + err = devfreq_register_opp_notifier(mdev->dev, mdev->devfreq); + if (err) { + MALI_PRINT_ERROR(("Failed to register OPP notifier (%d)\n", err)); + goto opp_notifier_failed; + } + +#ifdef CONFIG_DEVFREQ_THERMAL + /* Initilization last_status it will be used when first power allocate called */ + mdev->devfreq->last_status.current_frequency = mdev->current_freq; + + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + if (NULL != data.gpu_cooling_ops) { + callbacks = data.gpu_cooling_ops; + MALI_DEBUG_PRINT(2, ("Mali 
GPU Thermal: Callback handler installed \n")); + } + } + + if (callbacks) { + mdev->devfreq_cooling = of_devfreq_cooling_register_power( + mdev->dev->of_node, + mdev->devfreq, + callbacks); + if (IS_ERR_OR_NULL(mdev->devfreq_cooling)) { + err = PTR_ERR(mdev->devfreq_cooling); + MALI_PRINT_ERROR(("Failed to register cooling device (%d)\n", err)); + goto cooling_failed; + } else { + MALI_DEBUG_PRINT(2, ("Mali GPU Thermal Cooling installed \n")); + } + } +#endif + + return 0; + +#ifdef CONFIG_DEVFREQ_THERMAL +cooling_failed: + devfreq_unregister_opp_notifier(mdev->dev, mdev->devfreq); +#endif /* CONFIG_DEVFREQ_THERMAL */ +opp_notifier_failed: + err = devfreq_remove_device(mdev->devfreq); + if (err) + MALI_PRINT_ERROR(("Failed to terminate devfreq (%d)\n", err)); + else + mdev->devfreq = NULL; + + return err; +} + +void mali_devfreq_term(struct mali_device *mdev) +{ + int err; + + MALI_DEBUG_PRINT(2, ("Term Mali devfreq\n")); + +#ifdef CONFIG_DEVFREQ_THERMAL + devfreq_cooling_unregister(mdev->devfreq_cooling); +#endif + + devfreq_unregister_opp_notifier(mdev->dev, mdev->devfreq); + + err = devfreq_remove_device(mdev->devfreq); + if (err) + MALI_PRINT_ERROR(("Failed to terminate devfreq (%d)\n", err)); + else + mdev->devfreq = NULL; +} diff --git a/drivers/gpu/arm/mali/linux/mali_devfreq.h b/drivers/gpu/arm/mali/linux/mali_devfreq.h new file mode 100755 index 0000000000000..faf84f251d7dc --- /dev/null +++ b/drivers/gpu/arm/mali/linux/mali_devfreq.h @@ -0,0 +1,17 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ +#ifndef _MALI_DEVFREQ_H_ +#define _MALI_DEVFREQ_H_ + +int mali_devfreq_init(struct mali_device *mdev); + +void mali_devfreq_term(struct mali_device *mdev); + +#endif diff --git a/drivers/gpu/arm/mali/linux/mali_device_pause_resume.c b/drivers/gpu/arm/mali/linux/mali_device_pause_resume.c index 37076a2eaf671..cb2f702f15ae4 100755 --- a/drivers/gpu/arm/mali/linux/mali_device_pause_resume.c +++ b/drivers/gpu/arm/mali/linux/mali_device_pause_resume.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_kernel_linux.c b/drivers/gpu/arm/mali/linux/mali_kernel_linux.c index 874da42810aeb..e5d8dcbdefab8 100755 --- a/drivers/gpu/arm/mali/linux/mali_kernel_linux.c +++ b/drivers/gpu/arm/mali/linux/mali_kernel_linux.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -24,6 +24,8 @@ #include #include #include +#include +#include #include #include "mali_kernel_common.h" @@ -39,12 +41,14 @@ #include "mali_memory.h" #include "mali_memory_dma_buf.h" #include "mali_memory_manager.h" +#include "mali_memory_swap_alloc.h" #if defined(CONFIG_MALI400_INTERNAL_PROFILING) #include "mali_profiling_internal.h" #endif #if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS) #include "mali_osk_profiling.h" #include "mali_dvfs_policy.h" + static int is_first_resume = 1; /*Store the clk and vol for boot/insmod and mali_resume*/ static struct mali_gpu_clk_item mali_gpu_clk[2]; @@ -61,20 +65,36 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(mali_hw_counter); EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counters); #endif /* CONFIG_TRACEPOINTS */ +#ifdef CONFIG_MALI_DEVFREQ +#include "mali_devfreq.h" +#include "mali_osk_mali.h" + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +#include +#else +/* In 3.13 the OPP include header file, types, and functions were all + * renamed. Use the old filename for the include, and define the new names to + * the old, when an old kernel is detected. 
+ */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) +#include +#else +#include +#endif /* Linux >= 3.13*/ +#define dev_pm_opp_of_add_table of_init_opp_table +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) +#define dev_pm_opp_of_remove_table of_free_opp_table +#endif /* Linux >= 3.19 */ +#endif /* Linux >= 4.4.0 */ +#endif + /* from the __malidrv_build_info.c file that is generated during build */ extern const char *__malidrv_build_info(void); extern void mali_post_init(void); extern int mali_pdev_dts_init(struct platform_device* mali_gpu_device); extern int mpgpu_class_init(void); extern void mpgpu_class_exit(void); - -int mali_page_fault = 0; -module_param(mali_page_fault, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */ -MODULE_PARM_DESC(mali_page_fault, "mali_page_fault"); - -int pp_hardware_reset = 0; -module_param(pp_hardware_reset, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */ -MODULE_PARM_DESC(pp_hardware_reset, "mali_hardware_reset"); + /* Module parameter to control log level */ int mali_debug_level = 2; module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */ @@ -114,6 +134,10 @@ extern int mali_max_pp_cores_group_2; module_param(mali_max_pp_cores_group_2, int, S_IRUSR | S_IRGRP | S_IROTH); MODULE_PARM_DESC(mali_max_pp_cores_group_2, "Limit the number of PP cores to use from second PP group (Mali-450 only)."); +extern unsigned int mali_mem_swap_out_threshold_value; +module_param(mali_mem_swap_out_threshold_value, uint, S_IRUSR | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(mali_mem_swap_out_threshold_value, "Threshold value used to limit how much swappable memory cached in Mali driver."); + #if defined(CONFIG_MALI_DVFS) /** the max fps the same as display vsync default 60, can set by module insert parameter */ extern int mali_max_system_fps; @@ -211,7 +235,7 @@ static struct of_device_id base_dt_ids[] = { {.compatible = "arm,mali-300"}, {.compatible = 
"arm,mali-400"}, {.compatible = "arm,mali-450"}, - {.compatible = "arm,mali-utgard"}, + {.compatible = "arm,mali-470"}, {}, }; @@ -376,7 +400,7 @@ int mali_module_init(void) int err = 0; MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n", _MALI_API_VERSION)); - //MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__)); + MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__)); MALI_DEBUG_PRINT(2, ("Driver revision: %s\n", SVN_REV_STRING)); #if MALI_ENABLE_CPU_CYCLES @@ -433,7 +457,7 @@ int mali_module_init(void) #endif MALI_PRINT(("Mali device driver loaded\n")); - + mpgpu_class_init(); return 0; /* Success */ @@ -465,14 +489,28 @@ void mali_module_exit(void) #if defined(CONFIG_MALI400_INTERNAL_PROFILING) _mali_internal_profiling_term(); #endif - mpgpu_class_exit(); MALI_PRINT(("Mali device driver unloaded\n")); } +#ifdef CONFIG_MALI_DEVFREQ +struct mali_device *mali_device_alloc(void) +{ + return kzalloc(sizeof(struct mali_device), GFP_KERNEL); +} + +void mali_device_free(struct mali_device *mdev) +{ + kfree(mdev); +} +#endif + static int mali_probe(struct platform_device *pdev) { int err; +#ifdef CONFIG_MALI_DEVFREQ + struct mali_device *mdev; +#endif MALI_DEBUG_PRINT(2, ("mali_probe(): Called for platform device %s\n", pdev->name)); @@ -489,10 +527,66 @@ static int mali_probe(struct platform_device *pdev) err = mali_platform_device_init(mali_platform_device); if (0 != err) { MALI_PRINT_ERROR(("mali_probe(): Failed to initialize platform device.")); + mali_platform_device = NULL; return -EFAULT; } #endif +#ifdef CONFIG_MALI_DEVFREQ + mdev = mali_device_alloc(); + if (!mdev) { + MALI_PRINT_ERROR(("Can't allocate mali device private data\n")); + return -ENOMEM; + } + + mdev->dev = &pdev->dev; + dev_set_drvdata(mdev->dev, mdev); + + /*Initilization clock and regulator*/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \ + && defined(CONFIG_REGULATOR) + mdev->regulator = 
regulator_get_optional(mdev->dev, "mali"); + if (IS_ERR_OR_NULL(mdev->regulator)) { + MALI_DEBUG_PRINT(2, ("Continuing without Mali regulator control\n")); + mdev->regulator = NULL; + /* Allow probe to continue without regulator */ + } +#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) && defined(CONFIG_OF) \ + && defined(CONFIG_PM_OPP) + /* Register the OPPs if they are available in device tree */ + if (dev_pm_opp_of_add_table(mdev->dev) < 0) + MALI_DEBUG_PRINT(3, ("OPP table not found\n")); +#endif + + /* Need to name the gpu clock "clk_mali" in the device tree */ + mdev->clock = clk_get(mdev->dev, "clk_mali"); + if (IS_ERR_OR_NULL(mdev->clock)) { + MALI_DEBUG_PRINT(2, ("Continuing without Mali clock control\n")); + mdev->clock = NULL; + /* Allow probe to continue without clock. */ + } else { + err = clk_prepare_enable(mdev->clock); + if (err) { + MALI_PRINT_ERROR(("Failed to prepare and enable clock (%d)\n", err)); + goto clock_prepare_failed; + } + } + + /* initilize pm metrics related */ + if (mali_pm_metrics_init(mdev) < 0) { + MALI_DEBUG_PRINT(2, ("mali pm metrics init failed\n")); + goto pm_metrics_init_failed; + } + + if (mali_devfreq_init(mdev) < 0) { + MALI_DEBUG_PRINT(2, ("mali devfreq init failed\n")); + goto devfreq_init_failed; + } +#endif + + if (_MALI_OSK_ERR_OK == _mali_osk_wq_init()) { /* Initialize the Mali GPU HW specified by pdev */ if (_MALI_OSK_ERR_OK == mali_initialize_subsystems()) { @@ -521,17 +615,67 @@ static int mali_probe(struct platform_device *pdev) _mali_osk_wq_term(); } +#ifdef CONFIG_MALI_DEVFREQ + mali_devfreq_term(mdev); +devfreq_init_failed: + mali_pm_metrics_term(mdev); +pm_metrics_init_failed: + clk_disable_unprepare(mdev->clock); +clock_prepare_failed: + clk_put(mdev->clock); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(CONFIG_OF) \ + && defined(CONFIG_PM_OPP) + dev_pm_opp_of_remove_table(mdev->dev); +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 
12, 0)) && defined(CONFIG_OF) \ + && defined(CONFIG_REGULATOR) + regulator_put(mdev->regulator); +#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */ + mali_device_free(mdev); +#endif + +#ifdef CONFIG_MALI_DT + mali_platform_device_deinit(mali_platform_device); +#endif mali_platform_device = NULL; return -EFAULT; } static int mali_remove(struct platform_device *pdev) { +#ifdef CONFIG_MALI_DEVFREQ + struct mali_device *mdev = dev_get_drvdata(&pdev->dev); +#endif + MALI_DEBUG_PRINT(2, ("mali_remove() called for platform device %s\n", pdev->name)); mali_sysfs_unregister(); mali_miscdevice_unregister(); mali_terminate_subsystems(); _mali_osk_wq_term(); + +#ifdef CONFIG_MALI_DEVFREQ + mali_devfreq_term(mdev); + + mali_pm_metrics_term(mdev); + + if (mdev->clock) { + clk_disable_unprepare(mdev->clock); + clk_put(mdev->clock); + mdev->clock = NULL; + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(CONFIG_OF) \ + && defined(CONFIG_PM_OPP) + dev_pm_opp_of_remove_table(mdev->dev); +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \ + && defined(CONFIG_REGULATOR) + regulator_put(mdev->regulator); +#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */ + mali_device_free(mdev); +#endif + #ifdef CONFIG_MALI_DT mali_platform_device_deinit(mali_platform_device); #endif @@ -563,6 +707,17 @@ static void mali_miscdevice_unregister(void) static int mali_driver_suspend_scheduler(struct device *dev) { +#ifdef CONFIG_MALI_DEVFREQ + struct mali_device *mdev = dev_get_drvdata(dev); + if (!mdev) + return -ENODEV; +#endif + +#if defined(CONFIG_MALI_DEVFREQ) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + devfreq_suspend_device(mdev->devfreq); +#endif + mali_pm_os_suspend(MALI_TRUE); /* Tracing the frequency and voltage after mali is suspended */ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | @@ -576,6 +731,12 @@ static int mali_driver_suspend_scheduler(struct device *dev) static int mali_driver_resume_scheduler(struct device 
*dev) { +#ifdef CONFIG_MALI_DEVFREQ + struct mali_device *mdev = dev_get_drvdata(dev); + if (!mdev) + return -ENODEV; +#endif + /* Tracing the frequency and voltage after mali is resumed */ #if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS) /* Just call mali_get_current_gpu_clk_item() once,to record current clk info.*/ @@ -591,12 +752,24 @@ static int mali_driver_resume_scheduler(struct device *dev) 0, 0, 0); #endif mali_pm_os_resume(); + +#if defined(CONFIG_MALI_DEVFREQ) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + devfreq_resume_device(mdev->devfreq); +#endif + return 0; } #ifdef CONFIG_PM_RUNTIME static int mali_driver_runtime_suspend(struct device *dev) { +#ifdef CONFIG_MALI_DEVFREQ + struct mali_device *mdev = dev_get_drvdata(dev); + if (!mdev) + return -ENODEV; +#endif + if (MALI_TRUE == mali_pm_runtime_suspend()) { /* Tracing the frequency and voltage after mali is suspended */ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | @@ -606,6 +779,12 @@ static int mali_driver_runtime_suspend(struct device *dev) 0, 0, 0, 0); +#if defined(CONFIG_MALI_DEVFREQ) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + MALI_DEBUG_PRINT(4, ("devfreq_suspend_device: stop devfreq monitor\n")); + devfreq_suspend_device(mdev->devfreq); +#endif + return 0; } else { return -EBUSY; @@ -614,6 +793,12 @@ static int mali_driver_runtime_suspend(struct device *dev) static int mali_driver_runtime_resume(struct device *dev) { +#ifdef CONFIG_MALI_DEVFREQ + struct mali_device *mdev = dev_get_drvdata(dev); + if (!mdev) + return -ENODEV; +#endif + /* Tracing the frequency and voltage after mali is resumed */ #if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS) /* Just call mali_get_current_gpu_clk_item() once,to record current clk info.*/ @@ -630,6 +815,12 @@ static int mali_driver_runtime_resume(struct device *dev) #endif mali_pm_runtime_resume(); + +#if defined(CONFIG_MALI_DEVFREQ) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 
8, 0)) + MALI_DEBUG_PRINT(4, ("devfreq_resume_device: start devfreq monitor\n")); + devfreq_resume_device(mdev->devfreq); +#endif return 0; } @@ -661,6 +852,8 @@ static int mali_open(struct inode *inode, struct file *filp) /* link in our session data */ filp->private_data = (void *)session_data; + filp->f_mapping = mali_mem_swap_get_global_swap_file()->f_mapping; + return 0; } @@ -761,6 +954,11 @@ static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, err = request_high_priority_wrapper(session_data, (_mali_uk_request_high_priority_s __user *)arg); break; + case MALI_IOC_PENDING_SUBMIT: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pending_submit_s), sizeof(u64))); + err = pending_submit_wrapper(session_data, (_mali_uk_pending_submit_s __user *)arg); + break; + #if defined(CONFIG_MALI400_PROFILING) case MALI_IOC_PROFILING_ADD_EVENT: BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_add_event_s), sizeof(u64))); @@ -772,23 +970,29 @@ static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg); break; - - case MALI_IOC_PROFILING_MEMORY_USAGE_GET: - BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_memory_usage_get_s), sizeof(u64))); - err = profiling_memory_usage_get_wrapper(session_data, (_mali_uk_profiling_memory_usage_get_s __user *)arg); + case MALI_IOC_PROFILING_STREAM_FD_GET: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_stream_fd_get_s), sizeof(u64))); + err = profiling_get_stream_fd_wrapper(session_data, (_mali_uk_profiling_stream_fd_get_s __user *)arg); break; + case MALI_IOC_PROILING_CONTROL_SET: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_control_set_s), sizeof(u64))); + err = profiling_control_set_wrapper(session_data, (_mali_uk_profiling_control_set_s __user *)arg); + break; #else case MALI_IOC_PROFILING_ADD_EVENT: /* FALL-THROUGH */ case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: /* FALL-THROUGH */ 
- case MALI_IOC_PROFILING_MEMORY_USAGE_GET: /* FALL-THROUGH */ MALI_DEBUG_PRINT(2, ("Profiling not supported\n")); err = -ENOTTY; break; - #endif + case MALI_IOC_PROFILING_MEMORY_USAGE_GET: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_memory_usage_get_s), sizeof(u64))); + err = mem_usage_get_wrapper(session_data, (_mali_uk_profiling_memory_usage_get_s __user *)arg); + break; + case MALI_IOC_MEM_ALLOC: BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_alloc_mem_s), sizeof(u64))); err = mem_alloc_wrapper(session_data, (_mali_uk_alloc_mem_s __user *)arg); @@ -809,6 +1013,21 @@ static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, err = mem_unbind_wrapper(session_data, (_mali_uk_unbind_mem_s __user *)arg); break; + case MALI_IOC_MEM_COW: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_cow_mem_s), sizeof(u64))); + err = mem_cow_wrapper(session_data, (_mali_uk_cow_mem_s __user *)arg); + break; + + case MALI_IOC_MEM_COW_MODIFY_RANGE: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_cow_modify_range_s), sizeof(u64))); + err = mem_cow_modify_range_wrapper(session_data, (_mali_uk_cow_modify_range_s __user *)arg); + break; + + case MALI_IOC_MEM_RESIZE: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_mem_resize_s), sizeof(u64))); + err = mem_resize_mem_wrapper(session_data, (_mali_uk_mem_resize_s __user *)arg); + break; + case MALI_IOC_MEM_WRITE_SAFE: BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_mem_write_safe_s), sizeof(u64))); err = mem_write_safe_wrapper(session_data, (_mali_uk_mem_write_safe_s __user *)arg); diff --git a/drivers/gpu/arm/mali/linux/mali_kernel_linux.h b/drivers/gpu/arm/mali/linux/mali_kernel_linux.h index f6042712d1e3a..e87879a8b8047 100755 --- a/drivers/gpu/arm/mali/linux/mali_kernel_linux.h +++ b/drivers/gpu/arm/mali/linux/mali_kernel_linux.h @@ -20,9 +20,15 @@ extern "C" { #include #include "mali_kernel_license.h" #include "mali_osk_types.h" +#include extern struct platform_device *mali_platform_device; +/* After 3.19.0 kenrel droped 
CONFIG_PM_RUNTIME define,define by ourself */ +#if defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) +#define CONFIG_PM_RUNTIME 1 +#endif + #ifdef __cplusplus } #endif diff --git a/drivers/gpu/arm/mali/linux/mali_kernel_sysfs.c b/drivers/gpu/arm/mali/linux/mali_kernel_sysfs.c index ecde82dd29a04..3b50af6056678 100755 --- a/drivers/gpu/arm/mali/linux/mali_kernel_sysfs.c +++ b/drivers/gpu/arm/mali/linux/mali_kernel_sysfs.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -880,10 +880,17 @@ static const struct file_operations profiling_events_human_readable_fops = { static int memory_debugfs_show(struct seq_file *s, void *private_data) { - seq_printf(s, " %-25s %-10s %-25s %-10s %-15s %-15s %-10s %-10s\n"\ - "=======================================================================================================================================\n", - "Name (:bytes)", "pid", "pid-name", "mali_mem", "max_mali_mem", +#ifdef MALI_MEM_SWAP_TRACKING + seq_printf(s, " %-25s %-10s %-10s %-15s %-15s %-10s %-10s %-10s \n"\ + "=================================================================================================================================\n", + "Name (:bytes)", "pid", "mali_mem", "max_mali_mem", + "external_mem", "ump_mem", "dma_mem", "swap_mem"); +#else + seq_printf(s, " %-25s %-10s %-10s %-15s %-15s %-10s %-10s \n"\ + "========================================================================================================================\n", + "Name (:bytes)", "pid", "mali_mem", "max_mali_mem", "external_mem", "ump_mem", "dma_mem"); +#endif mali_session_memory_tracking(s); return 0; } @@ -1136,6 +1143,9 @@ 
static ssize_t version_read(struct file *filp, char __user *buf, size_t count, l case _MALI_PRODUCT_ID_MALI450: r = snprintf(buffer, 64, "Mali-450 MP\n"); break; + case _MALI_PRODUCT_ID_MALI470: + r = snprintf(buffer, 64, "Mali-470 MP\n"); + break; case _MALI_PRODUCT_ID_UNKNOWN: return -EINVAL; break; diff --git a/drivers/gpu/arm/mali/linux/mali_kernel_sysfs.h b/drivers/gpu/arm/mali/linux/mali_kernel_sysfs.h index a36a0cea99727..9dad7f24ccadf 100755 --- a/drivers/gpu/arm/mali/linux/mali_kernel_sysfs.h +++ b/drivers/gpu/arm/mali/linux/mali_kernel_sysfs.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_linux_trace.h b/drivers/gpu/arm/mali/linux/mali_linux_trace.h index c6cd2bfb72179..2c91ddcd82e54 100755 --- a/drivers/gpu/arm/mali/linux/mali_linux_trace.h +++ b/drivers/gpu/arm/mali/linux/mali_linux_trace.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/mali/linux/mali_memory.c b/drivers/gpu/arm/mali/linux/mali_memory.c index f370094f4a671..95f04ed022366 100755 --- a/drivers/gpu/arm/mali/linux/mali_memory.c +++ b/drivers/gpu/arm/mali/linux/mali_memory.c @@ -27,11 +27,18 @@ #include "mali_memory_util.h" #include "mali_memory_virtual.h" #include "mali_memory_manager.h" +#include "mali_memory_cow.h" +#include "mali_memory_swap_alloc.h" +#include "mali_memory_defer_bind.h" +#if defined(CONFIG_DMA_SHARED_BUFFER) +#include "mali_memory_secure.h" +#endif extern unsigned int mali_dedicated_mem_size; extern unsigned int mali_shared_mem_size; -/* session->memory_lock must be held when calling this function */ +#define MALI_VM_NUM_FAULT_PREFETCH (0x8) + static void mali_mem_vma_open(struct vm_area_struct *vma) { mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data; @@ -52,9 +59,75 @@ static void mali_mem_vma_close(struct vm_area_struct *vma) static int mali_mem_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { - /* Not support yet */ - MALI_DEBUG_ASSERT(0); - return 0; + mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data; + mali_mem_backend *mem_bkend = NULL; + int ret; + int prefetch_num = MALI_VM_NUM_FAULT_PREFETCH; + + unsigned long address = (unsigned long)vmf->virtual_address; + MALI_DEBUG_ASSERT(alloc->backend_handle); + MALI_DEBUG_ASSERT((unsigned long)alloc->cpu_mapping.addr <= address); + + /* Get backend memory & Map on CPU */ + mutex_lock(&mali_idr_mutex); + if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) { + MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n")); + mutex_unlock(&mali_idr_mutex); + return VM_FAULT_SIGBUS; + } + mutex_unlock(&mali_idr_mutex); + MALI_DEBUG_ASSERT(mem_bkend->type == alloc->type); + + if ((mem_bkend->type == MALI_MEM_COW && (MALI_MEM_BACKEND_FLAG_SWAP_COWED != + (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) && + (mem_bkend->flags & 
MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE)) { + /*check if use page fault to do COW*/ + MALI_DEBUG_PRINT(4, ("mali_vma_fault: do cow allocate on demand!, address=0x%x\n", address)); + mutex_lock(&mem_bkend->mutex); + ret = mali_mem_cow_allocate_on_demand(mem_bkend, + (address - vma->vm_start) / PAGE_SIZE); + mutex_unlock(&mem_bkend->mutex); + + if (ret != _MALI_OSK_ERR_OK) { + return VM_FAULT_OOM; + } + prefetch_num = 1; + + /* handle COW modified range cpu mapping + we zap the mapping in cow_modify_range, it will trigger page fault + when CPU access it, so here we map it to CPU*/ + mutex_lock(&mem_bkend->mutex); + ret = mali_mem_cow_cpu_map_pages_locked(mem_bkend, vma, address, prefetch_num); + mutex_unlock(&mem_bkend->mutex); + + if (unlikely(ret != _MALI_OSK_ERR_OK)) { + return VM_FAULT_SIGBUS; + } + } else if ((mem_bkend->type == MALI_MEM_SWAP) || + (mem_bkend->type == MALI_MEM_COW && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) { + u32 offset_in_bkend = (address - vma->vm_start) / PAGE_SIZE; + int ret = _MALI_OSK_ERR_OK; + + mutex_lock(&mem_bkend->mutex); + if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE) { + ret = mali_mem_swap_cow_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page); + } else { + ret = mali_mem_swap_allocate_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page); + } + mutex_unlock(&mem_bkend->mutex); + + if (ret != _MALI_OSK_ERR_OK) { + MALI_DEBUG_PRINT(2, ("Mali swap memory page fault process failed, address=0x%x\n", address)); + return VM_FAULT_OOM; + } else { + return VM_FAULT_LOCKED; + } + } else { + MALI_PRINT_ERROR(("Mali vma fault! 
It never happen, indicating some logic errors in caller.\n")); + /*NOT support yet or OOM*/ + return VM_FAULT_OOM; + } + return VM_FAULT_NOPAGE; } static struct vm_operations_struct mali_kernel_vm_ops = { @@ -82,7 +155,7 @@ int mali_mmap(struct file *filp, struct vm_area_struct *vma) u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT; struct mali_vma_node *mali_vma_node = NULL; mali_mem_backend *mem_bkend = NULL; - int ret; + int ret = -EFAULT; session = (struct mali_session_data *)filp->private_data; if (NULL == session) { @@ -94,25 +167,8 @@ int mali_mmap(struct file *filp, struct vm_area_struct *vma) (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT), (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags)); - /* Set some bits which indicate that, the memory is IO memory, meaning - * that no paging is to be performed and the memory should not be - * included in crash dumps. And that the memory is reserved, meaning - * that it's present and can never be paged out (see also previous - * entry) - */ - vma->vm_flags |= VM_IO; - vma->vm_flags |= VM_DONTCOPY; - vma->vm_flags |= VM_PFNMAP; -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0) - vma->vm_flags |= VM_RESERVED; -#else - vma->vm_flags |= VM_DONTDUMP; - vma->vm_flags |= VM_DONTEXPAND; -#endif - - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); - vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */ + /* do not need to anything in vm open/close now */ /* find mali allocation structure by vaddress*/ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0); @@ -121,6 +177,7 @@ int mali_mmap(struct file *filp, struct vm_area_struct *vma) MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start); if (unlikely(mali_addr != mali_vma_node->vm_node.start)) { /* only allow to use start address for mmap */ + MALI_DEBUG_PRINT(1, ("mali_addr != mali_vma_node->vm_node.start\n")); return -EFAULT; } } else { @@ -130,6 +187,11 @@ int mali_mmap(struct 
file *filp, struct vm_area_struct *vma) mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start; + if (mali_alloc->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) { + MALI_DEBUG_PRINT(1, ("ERROR : trying to access varying memory by CPU!\n")); + return -EFAULT; + } + /* Get backend memory & Map on CPU */ mutex_lock(&mali_idr_mutex); if (!(mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle))) { @@ -139,22 +201,77 @@ int mali_mmap(struct file *filp, struct vm_area_struct *vma) } mutex_unlock(&mali_idr_mutex); + if (!(MALI_MEM_SWAP == mali_alloc->type || + (MALI_MEM_COW == mali_alloc->type && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) { + /* Set some bits which indicate that, the memory is IO memory, meaning + * that no paging is to be performed and the memory should not be + * included in crash dumps. And that the memory is reserved, meaning + * that it's present and can never be paged out (see also previous + * entry) + */ + vma->vm_flags |= VM_IO; + vma->vm_flags |= VM_DONTCOPY; + vma->vm_flags |= VM_PFNMAP; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0) + vma->vm_flags |= VM_RESERVED; +#else + vma->vm_flags |= VM_DONTDUMP; + vma->vm_flags |= VM_DONTEXPAND; +#endif + } else if (MALI_MEM_SWAP == mali_alloc->type) { + vma->vm_pgoff = mem_bkend->start_idx; + } + + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + vma->vm_ops = &mali_kernel_vm_ops; + + mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start; + + /* If it's a copy-on-write mapping, map to read only */ + if (!(vma->vm_flags & VM_WRITE)) { + MALI_DEBUG_PRINT(4, ("mmap allocation with read only !\n")); + /* add VM_WRITE for do_page_fault will check this when a write fault */ + vma->vm_flags |= VM_WRITE | VM_READ; + vma->vm_page_prot = PAGE_READONLY; + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE; + goto out; + } + if (mem_bkend->type == MALI_MEM_OS) { - ret = 
mali_mem_os_cpu_map(&mem_bkend->os_mem, vma); + ret = mali_mem_os_cpu_map(mem_bkend, vma); + } else if (mem_bkend->type == MALI_MEM_COW && + (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) { + ret = mali_mem_cow_cpu_map(mem_bkend, vma); } else if (mem_bkend->type == MALI_MEM_BLOCK) { ret = mali_mem_block_cpu_map(mem_bkend, vma); + } else if ((mem_bkend->type == MALI_MEM_SWAP) || (mem_bkend->type == MALI_MEM_COW && + (MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) { + /*For swappable memory, CPU page table will be created by page fault handler. */ + ret = 0; + } else if (mem_bkend->type == MALI_MEM_SECURE) { +#if defined(CONFIG_DMA_SHARED_BUFFER) + ret = mali_mem_secure_cpu_map(mem_bkend, vma); +#else + MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory\n")); + return -EFAULT; +#endif } else { /* Not support yet*/ - MALI_DEBUG_ASSERT(0); - ret = -EFAULT; + MALI_DEBUG_PRINT_ERROR(("Invalid type of backend memory! 
\n")); + return -EFAULT; } - if (ret != 0) + if (ret != 0) { + MALI_DEBUG_PRINT(1, ("ret != 0\n")); return -EFAULT; - + } +out: MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == mali_alloc->magic); vma->vm_private_data = (void *)mali_alloc; + mali_alloc->cpu_mapping.vma = vma; + mali_allocation_ref(mali_alloc); return 0; @@ -176,6 +293,23 @@ _mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor) return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start, size); } +_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size) +{ + u32 old_size = descriptor->psize; + struct mali_session_data *session = descriptor->session; + + MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic); + + if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) { + new_size += MALI_MMU_PAGE_SIZE; + } + + if (new_size > old_size) { + MALI_DEBUG_ASSERT(new_size <= descriptor->mali_vma_node.vm_node.size); + return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start + old_size, new_size - old_size); + } + return _MALI_OSK_ERR_OK; +} void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags) { @@ -221,6 +355,12 @@ _mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session_ _MALI_OSK_LOCK_ORDER_MEM_SESSION); if (NULL == session_data->memory_lock) { + MALI_ERROR(_MALI_OSK_ERR_FAULT); + } + + session_data->cow_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0); + if (NULL == session_data->cow_lock) { + _mali_osk_mutex_term(session_data->memory_lock); _mali_osk_free(session_data); MALI_ERROR(_MALI_OSK_ERR_FAULT); } @@ -246,19 +386,33 @@ void mali_memory_session_end(struct mali_session_data *session) /* Free the lock */ _mali_osk_mutex_term(session->memory_lock); - + _mali_osk_mutex_term(session->cow_lock); return; } _mali_osk_errcode_t mali_memory_initialize(void) { + 
_mali_osk_errcode_t err; + idr_init(&mali_backend_idr); mutex_init(&mali_idr_mutex); - return mali_mem_os_init(); + + err = mali_mem_swap_init(); + if (err != _MALI_OSK_ERR_OK) { + return err; + } + err = mali_mem_os_init(); + if (_MALI_OSK_ERR_OK == err) { + err = mali_mem_defer_bind_manager_init(); + } + + return err; } void mali_memory_terminate(void) { + mali_mem_swap_term(); + mali_mem_defer_bind_manager_destory(); mali_mem_os_term(); if (mali_memory_have_dedicated_memory()) { mali_mem_block_allocator_destroy(); @@ -266,3 +420,112 @@ void mali_memory_terminate(void) } +struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type) +{ + mali_page_node *page_node = NULL; + + page_node = kzalloc(sizeof(mali_page_node), GFP_KERNEL); + MALI_DEBUG_ASSERT(NULL != page_node); + + if (page_node) { + page_node->type = type; + INIT_LIST_HEAD(&page_node->list); + } + + return page_node; +} + +void _mali_page_node_ref(struct mali_page_node *node) +{ + if (node->type == MALI_PAGE_NODE_OS) { + /* add ref to this page */ + get_page(node->page); + } else if (node->type == MALI_PAGE_NODE_BLOCK) { + mali_mem_block_add_ref(node); + } else if (node->type == MALI_PAGE_NODE_SWAP) { + atomic_inc(&node->swap_it->ref_count); + } else { + MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n")); + } +} + +void _mali_page_node_unref(struct mali_page_node *node) +{ + if (node->type == MALI_PAGE_NODE_OS) { + /* unref to this page */ + put_page(node->page); + } else if (node->type == MALI_PAGE_NODE_BLOCK) { + mali_mem_block_dec_ref(node); + } else { + MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! 
\n")); + } +} + + +void _mali_page_node_add_page(struct mali_page_node *node, struct page *page) +{ + MALI_DEBUG_ASSERT(MALI_PAGE_NODE_OS == node->type); + node->page = page; +} + + +void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item) +{ + MALI_DEBUG_ASSERT(MALI_PAGE_NODE_SWAP == node->type); + node->swap_it = item; +} + +void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item) +{ + MALI_DEBUG_ASSERT(MALI_PAGE_NODE_BLOCK == node->type); + node->blk_it = item; +} + + +int _mali_page_node_get_ref_count(struct mali_page_node *node) +{ + if (node->type == MALI_PAGE_NODE_OS) { + /* get ref count of this page */ + return page_count(node->page); + } else if (node->type == MALI_PAGE_NODE_BLOCK) { + return mali_mem_block_get_ref_count(node); + } else if (node->type == MALI_PAGE_NODE_SWAP) { + return atomic_read(&node->swap_it->ref_count); + } else { + MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n")); + } + return -1; +} + + +dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node) +{ + if (node->type == MALI_PAGE_NODE_OS) { + return page_private(node->page); + } else if (node->type == MALI_PAGE_NODE_BLOCK) { + return _mali_blk_item_get_phy_addr(node->blk_it); + } else if (node->type == MALI_PAGE_NODE_SWAP) { + return node->swap_it->dma_addr; + } else { + MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n")); + } + return 0; +} + + +unsigned long _mali_page_node_get_pfn(struct mali_page_node *node) +{ + if (node->type == MALI_PAGE_NODE_OS) { + return page_to_pfn(node->page); + } else if (node->type == MALI_PAGE_NODE_BLOCK) { + /* get phy addr for BLOCK page*/ + return _mali_blk_item_get_pfn(node->blk_it); + } else if (node->type == MALI_PAGE_NODE_SWAP) { + return page_to_pfn(node->swap_it->page); + } else { + MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! 
\n")); + } + return 0; +} + + diff --git a/drivers/gpu/arm/mali/linux/mali_memory.h b/drivers/gpu/arm/mali/linux/mali_memory.h index 9ad047cb66bee..e5e4f66ec02e4 100755 --- a/drivers/gpu/arm/mali/linux/mali_memory.h +++ b/drivers/gpu/arm/mali/linux/mali_memory.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -87,6 +87,18 @@ void mali_memory_session_end(struct mali_session_data *session); */ _mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor); +/** @brief Resize Mali page tables for mapping + * + * This function will Resize the Mali page tables for mapping the memory + * described by \a descriptor. + * + * Page tables will be reference counted and allocated, if not yet present. + * + * @param descriptor Pointer to the memory descriptor to the mapping + * @param new_size The new size of descriptor + */ +_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size); + /** @brief Free Mali page tables for mapping * * This function will unmap pages from Mali memory and free the page tables @@ -98,7 +110,6 @@ _mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor); */ void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags); - /** @brief Parse resource and prepare the OS memory allocator * * @param size Maximum size to allocate for Mali GPU. 
@@ -115,4 +126,18 @@ _mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size); _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size); +struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type); + +void _mali_page_node_ref(struct mali_page_node *node); +void _mali_page_node_unref(struct mali_page_node *node); +void _mali_page_node_add_page(struct mali_page_node *node, struct page *page); + +void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item); + +void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item); + +int _mali_page_node_get_ref_count(struct mali_page_node *node); +dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node); +unsigned long _mali_page_node_get_pfn(struct mali_page_node *node); + #endif /* __MALI_MEMORY_H__ */ diff --git a/drivers/gpu/arm/mali/linux/mali_memory_block_alloc.c b/drivers/gpu/arm/mali/linux/mali_memory_block_alloc.c index 22a7fdf49e18c..deb2ee84f6e59 100755 --- a/drivers/gpu/arm/mali/linux/mali_memory_block_alloc.c +++ b/drivers/gpu/arm/mali/linux/mali_memory_block_alloc.c @@ -1,9 +1,9 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. - * + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * + * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ @@ -15,80 +15,6 @@ #include -struct mali_block_node *_mali_block_node_allocate(mali_page_node_type type) -{ - mali_block_node *block_node = NULL; - - block_node = kzalloc(sizeof(mali_block_node), GFP_KERNEL); - MALI_DEBUG_ASSERT(NULL != block_node); - - if (block_node) { - block_node->type = type; - INIT_LIST_HEAD(&block_node->list); - } - - return block_node; -} - -void _mali_block_node_ref(struct mali_block_node *node) -{ - if (node->type == MALI_PAGE_NODE_BLOCK) { - mali_mem_block_add_ref(node); - } else - MALI_DEBUG_ASSERT(0); -} - -void _mali_block_node_unref(struct mali_block_node *node) -{ - if (node->type == MALI_PAGE_NODE_BLOCK) { - mali_mem_block_dec_ref(node); - } else - MALI_DEBUG_ASSERT(0); -} - - - -void _mali_block_node_add_block_item(struct mali_block_node *node, mali_block_item *item) -{ - MALI_DEBUG_ASSERT(MALI_PAGE_NODE_BLOCK == node->type); - node->blk_it = item; -} - - -int _mali_block_node_get_ref_count(struct mali_block_node *node) -{ - if (node->type == MALI_PAGE_NODE_BLOCK) { - return mali_mem_block_get_ref_count(node); - } else { - MALI_DEBUG_ASSERT(0); - } - return -1; -} - - -dma_addr_t _mali_block_node_get_phy_addr(struct mali_block_node *node) -{ - if (node->type == MALI_PAGE_NODE_BLOCK) { - return _mali_blk_item_get_phy_addr(node->blk_it); - } else { - MALI_DEBUG_ASSERT(0); - } - return 0; -} - - -unsigned long _mali_block_node_get_pfn(struct mali_block_node *node) -{ - if (node->type == MALI_PAGE_NODE_BLOCK) { - /* get phy addr for BLOCK page*/ - return _mali_blk_item_get_pfn(node->blk_it); - } else { - MALI_DEBUG_ASSERT(0); - } - return 0; -} - - static mali_block_allocator *mali_mem_block_gobal_allocator = NULL; unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item) @@ -103,7 +29,7 @@ unsigned long _mali_blk_item_get_pfn(mali_block_item *item) } -u32 mali_mem_block_get_ref_count(mali_block_node *node) +u32 mali_mem_block_get_ref_count(mali_page_node *node) { MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK); return 
(node->blk_it->phy_addr & MALI_BLOCK_REF_MASK); @@ -114,7 +40,7 @@ u32 mali_mem_block_get_ref_count(mali_block_node *node) * It not atomic, so it need to get sp_lock before call this function */ -u32 mali_mem_block_add_ref(mali_block_node *node) +u32 mali_mem_block_add_ref(mali_page_node *node) { MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK); MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) < MALI_BLOCK_MAX_REF_COUNT); @@ -124,7 +50,7 @@ u32 mali_mem_block_add_ref(mali_block_node *node) /* Decase the refence count * It not atomic, so it need to get sp_lock before call this function */ -u32 mali_mem_block_dec_ref(mali_block_node *node) +u32 mali_mem_block_dec_ref(mali_page_node *node) { MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK); MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) > 0); @@ -137,7 +63,7 @@ static mali_block_allocator *mali_mem_block_allocator_create(u32 base_address, u mali_block_allocator *info; u32 usable_size; u32 num_blocks; - mali_block_node *m_node; + mali_page_node *m_node; mali_block_item *mali_blk_items = NULL; int i = 0; @@ -166,10 +92,10 @@ static mali_block_allocator *mali_mem_block_allocator_create(u32 base_address, u /* add block information*/ mali_blk_items[i].phy_addr = base_address + (i * MALI_BLOCK_SIZE); /* add to free list */ - m_node = _mali_block_node_allocate(MALI_PAGE_NODE_BLOCK); + m_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK); if (m_node == NULL) goto fail; - _mali_block_node_add_block_item(m_node, &(mali_blk_items[i])); + _mali_page_node_add_block_item(m_node, &(mali_blk_items[i])); list_add_tail(&m_node->list, &info->free); atomic_add(1, &info->free_num); } @@ -183,7 +109,7 @@ static mali_block_allocator *mali_mem_block_allocator_create(u32 base_address, u void mali_mem_block_allocator_destroy(void) { - struct mali_block_node *m_page, *m_tmp; + struct mali_page_node *m_page, *m_tmp; mali_block_allocator *info = mali_mem_block_gobal_allocator; MALI_DEBUG_ASSERT_POINTER(info); 
MALI_DEBUG_PRINT(4, ("Memory block destroy !\n")); @@ -201,22 +127,24 @@ void mali_mem_block_allocator_destroy(void) _mali_osk_free(info); } -void mali_mem_block_release(mali_mem_backend *mem_bkend) +u32 mali_mem_block_release(mali_mem_backend *mem_bkend) { - mali_mem_allocation *alloc = mem_bkend->mali_allocation;; - MALI_DEBUG_PRINT(4, ("BLOCK Mem: Release size = 0x%x\n", mem_bkend->size)); - + mali_mem_allocation *alloc = mem_bkend->mali_allocation; + u32 free_pages_nr = 0; MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK); /* Unmap the memory from the mali virtual address space. */ mali_mem_block_mali_unmap(alloc); - mali_mem_block_free(&mem_bkend->block_mem); + mutex_lock(&mem_bkend->mutex); + free_pages_nr = mali_mem_block_free(&mem_bkend->block_mem); + mutex_unlock(&mem_bkend->mutex); + return free_pages_nr; } int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size) { - struct mali_block_node *m_page, *m_tmp; + struct mali_page_node *m_page, *m_tmp; size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE; mali_block_allocator *info = mali_mem_block_gobal_allocator; MALI_DEBUG_ASSERT_POINTER(info); @@ -235,7 +163,7 @@ int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size) list_move(&m_page->list, &block_mem->pfns); block_mem->count++; atomic_dec(&info->free_num); - _mali_block_node_ref(m_page); + _mali_page_node_ref(m_page); } else { break; } @@ -251,62 +179,99 @@ int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size) return 0; } -void mali_mem_block_free(mali_mem_block_mem *block_mem) +u32 mali_mem_block_free(mali_mem_block_mem *block_mem) { - mali_mem_block_free_list(&block_mem->pfns); - MALI_DEBUG_PRINT(4, ("BLOCK Mem free : size = 0x%x\n", block_mem->count * _MALI_OSK_MALI_PAGE_SIZE)); + u32 free_pages_nr = 0; + + free_pages_nr = mali_mem_block_free_list(&block_mem->pfns); + MALI_DEBUG_PRINT(4, ("BLOCK Mem free : allocated size = 0x%x, free size = 0x%x\n", block_mem->count * _MALI_OSK_MALI_PAGE_SIZE, + 
free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE)); block_mem->count = 0; MALI_DEBUG_ASSERT(list_empty(&block_mem->pfns)); + + return free_pages_nr; } -void mali_mem_block_free_list(struct list_head *list) +u32 mali_mem_block_free_list(struct list_head *list) { - struct mali_block_node *m_page, *m_tmp; + struct mali_page_node *m_page, *m_tmp; mali_block_allocator *info = mali_mem_block_gobal_allocator; + u32 free_pages_nr = 0; if (info) { spin_lock(&info->sp_lock); list_for_each_entry_safe(m_page, m_tmp , list, list) { + if (1 == _mali_page_node_get_ref_count(m_page)) { + free_pages_nr++; + } mali_mem_block_free_node(m_page); } spin_unlock(&info->sp_lock); } + return free_pages_nr; } /* free the node,*/ -void mali_mem_block_free_node(struct mali_block_node *node) +void mali_mem_block_free_node(struct mali_page_node *node) { mali_block_allocator *info = mali_mem_block_gobal_allocator; /* only handle BLOCK node */ if (node->type == MALI_PAGE_NODE_BLOCK && info) { /*Need to make this atomic?*/ - if (1 == _mali_block_node_get_ref_count(node)) { + if (1 == _mali_page_node_get_ref_count(node)) { /*Move to free list*/ - _mali_block_node_unref(node); + _mali_page_node_unref(node); list_move_tail(&node->list, &info->free); atomic_add(1, &info->free_num); } else { - _mali_block_node_unref(node); + _mali_page_node_unref(node); list_del(&node->list); kfree(node); } } } +/* unref the node, but not free it */ +_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node) +{ + mali_block_allocator *info = mali_mem_block_gobal_allocator; + mali_page_node *new_node; + + /* only handle BLOCK node */ + if (node->type == MALI_PAGE_NODE_BLOCK && info) { + /*Need to make this atomic?*/ + if (1 == _mali_page_node_get_ref_count(node)) { + /* allocate a new node, Add to free list, keep the old node*/ + _mali_page_node_unref(node); + new_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK); + if (new_node) { + memcpy(new_node, node, sizeof(mali_page_node)); + list_add(&new_node->list, 
&info->free); + atomic_add(1, &info->free_num); + } else + return _MALI_OSK_ERR_FAULT; + + } else { + _mali_page_node_unref(node); + } + } + return _MALI_OSK_ERR_OK; +} + int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props) { struct mali_page_directory *pagedir = session->page_directory; - struct mali_block_node *m_page; + struct mali_page_node *m_page; dma_addr_t phys; u32 virt = vaddr; u32 prop = props; list_for_each_entry(m_page, &block_mem->pfns, list) { MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK); - phys = _mali_block_node_get_phy_addr(m_page); + phys = _mali_page_node_get_dma_addr(m_page); #if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) /* Verify that the "physical" address is 32-bit and * usable for Mali, when on a system with bus addresses @@ -330,7 +295,6 @@ void mali_mem_block_mali_unmap(mali_mem_allocation *alloc) mali_session_memory_lock(session); mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start, alloc->flags); - session->mali_mem_array[alloc->type] -= alloc->psize; mali_session_memory_unlock(session); } @@ -340,12 +304,12 @@ int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *v int ret; mali_mem_block_mem *block_mem = &mem_bkend->block_mem; unsigned long addr = vma->vm_start; - struct mali_block_node *m_page; + struct mali_page_node *m_page; MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK); list_for_each_entry(m_page, &block_mem->pfns, list) { MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK); - ret = vm_insert_pfn(vma, addr, _mali_block_node_get_pfn(m_page)); + ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page)); if (unlikely(0 != ret)) { return -EFAULT; diff --git a/drivers/gpu/arm/mali/linux/mali_memory_block_alloc.h b/drivers/gpu/arm/mali/linux/mali_memory_block_alloc.h index 65282a6a2fc36..3e11ef25888e3 100755 --- a/drivers/gpu/arm/mali/linux/mali_memory_block_alloc.h +++ 
b/drivers/gpu/arm/mali/linux/mali_memory_block_alloc.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2013, 2015-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -37,10 +37,10 @@ typedef struct mali_block_allocator { unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item); unsigned long _mali_blk_item_get_pfn(mali_block_item *item); -u32 mali_mem_block_get_ref_count(mali_block_node *node); -u32 mali_mem_block_add_ref(mali_block_node *node); -u32 mali_mem_block_dec_ref(mali_block_node *node); -void mali_mem_block_release(mali_mem_backend *mem_bkend); +u32 mali_mem_block_get_ref_count(mali_page_node *node); +u32 mali_mem_block_add_ref(mali_page_node *node); +u32 mali_mem_block_dec_ref(mali_page_node *node); +u32 mali_mem_block_release(mali_mem_backend *mem_bkend); int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size); int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props); void mali_mem_block_mali_unmap(mali_mem_allocation *alloc); @@ -48,10 +48,11 @@ void mali_mem_block_mali_unmap(mali_mem_allocation *alloc); int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma); _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size); mali_bool mali_memory_have_dedicated_memory(void); -void mali_mem_block_free(mali_mem_block_mem *block_mem); -void mali_mem_block_free_list(struct list_head *list); -void mali_mem_block_free_node(struct mali_block_node *node); +u32 mali_mem_block_free(mali_mem_block_mem *block_mem); +u32 mali_mem_block_free_list(struct list_head *list); +void mali_mem_block_free_node(struct mali_page_node *node); void 
mali_mem_block_allocator_destroy(void); +_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node); u32 mali_mem_block_allocator_stat(void); #endif /* __MALI_BLOCK_ALLOCATOR_H__ */ diff --git a/drivers/gpu/arm/mali/linux/mali_memory_cow.c b/drivers/gpu/arm/mali/linux/mali_memory_cow.c new file mode 100755 index 0000000000000..0a11f4b934bee --- /dev/null +++ b/drivers/gpu/arm/mali/linux/mali_memory_cow.c @@ -0,0 +1,776 @@ +/* + * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_ARM +#include +#endif +#include + +#include "mali_memory.h" +#include "mali_kernel_common.h" +#include "mali_uk_types.h" +#include "mali_osk.h" +#include "mali_kernel_linux.h" +#include "mali_memory_cow.h" +#include "mali_memory_block_alloc.h" +#include "mali_memory_swap_alloc.h" + +/** +* allocate pages for COW backend and flush cache +*/ +static struct page *mali_mem_cow_alloc_page(void) + +{ + mali_mem_os_mem os_mem; + struct mali_page_node *node; + struct page *new_page; + + int ret = 0; + /* allocate pages from os mem */ + ret = mali_mem_os_alloc_pages(&os_mem, _MALI_OSK_MALI_PAGE_SIZE); + + if (ret) { + return NULL; + } + + MALI_DEBUG_ASSERT(1 == os_mem.count); + + node = _MALI_OSK_CONTAINER_OF(os_mem.pages.next, struct mali_page_node, list); + new_page = node->page; + node->page = NULL; + list_del(&node->list); + kfree(node); + + return new_page; +} + + +static struct list_head *_mali_memory_cow_get_node_list(mali_mem_backend 
*target_bk, + u32 target_offset, + u32 target_size) +{ + MALI_DEBUG_ASSERT(MALI_MEM_OS == target_bk->type || MALI_MEM_COW == target_bk->type || + MALI_MEM_BLOCK == target_bk->type || MALI_MEM_SWAP == target_bk->type); + + if (MALI_MEM_OS == target_bk->type) { + MALI_DEBUG_ASSERT(&target_bk->os_mem); + MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->os_mem.count); + return &target_bk->os_mem.pages; + } else if (MALI_MEM_COW == target_bk->type) { + MALI_DEBUG_ASSERT(&target_bk->cow_mem); + MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->cow_mem.count); + return &target_bk->cow_mem.pages; + } else if (MALI_MEM_BLOCK == target_bk->type) { + MALI_DEBUG_ASSERT(&target_bk->block_mem); + MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->block_mem.count); + return &target_bk->block_mem.pfns; + } else if (MALI_MEM_SWAP == target_bk->type) { + MALI_DEBUG_ASSERT(&target_bk->swap_mem); + MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->swap_mem.count); + return &target_bk->swap_mem.pages; + } + + return NULL; +} + +/** +* Do COW for os memory - support do COW for memory from bank memory +* The range_start/size can be zero, which means it will call cow_modify_range +* latter. 
+* This function allocate new pages for COW backend from os mem for a modified range +* It will keep the page which not in the modified range and Add ref to it +* +* @target_bk - target allocation's backend(the allocation need to do COW) +* @target_offset - the offset in target allocation to do COW(for support COW a memory allocated from memory_bank, 4K align) +* @target_size - size of target allocation to do COW (for support memory bank) +* @backend -COW backend +* @range_start - offset of modified range (4K align) +* @range_size - size of modified range +*/ +_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk, + u32 target_offset, + u32 target_size, + mali_mem_backend *backend, + u32 range_start, + u32 range_size) +{ + mali_mem_cow *cow = &backend->cow_mem; + struct mali_page_node *m_page, *m_tmp, *page_node; + int target_page = 0; + struct page *new_page; + struct list_head *pages = NULL; + + pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size); + + if (NULL == pages) { + MALI_DEBUG_PRINT_ERROR(("No memory page need to cow ! 
\n")); + return _MALI_OSK_ERR_FAULT; + } + + MALI_DEBUG_ASSERT(0 == cow->count); + + INIT_LIST_HEAD(&cow->pages); + mutex_lock(&target_bk->mutex); + list_for_each_entry_safe(m_page, m_tmp, pages, list) { + /* add page from (target_offset,target_offset+size) to cow backend */ + if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) && + (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) { + + /* allocate a new page node, alway use OS memory for COW */ + page_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS); + + if (NULL == page_node) { + mutex_unlock(&target_bk->mutex); + goto error; + } + + INIT_LIST_HEAD(&page_node->list); + + /* check if in the modified range*/ + if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) && + (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) { + /* need to allocate a new page */ + /* To simplify the case, All COW memory is allocated from os memory ?*/ + new_page = mali_mem_cow_alloc_page(); + + if (NULL == new_page) { + kfree(page_node); + mutex_unlock(&target_bk->mutex); + goto error; + } + + _mali_page_node_add_page(page_node, new_page); + } else { + /*Add Block memory case*/ + if (m_page->type != MALI_PAGE_NODE_BLOCK) { + _mali_page_node_add_page(page_node, m_page->page); + } else { + page_node->type = MALI_PAGE_NODE_BLOCK; + _mali_page_node_add_block_item(page_node, m_page->blk_it); + } + + /* add ref to this page */ + _mali_page_node_ref(m_page); + } + + /* add it to COW backend page list */ + list_add_tail(&page_node->list, &cow->pages); + cow->count++; + } + target_page++; + } + mutex_unlock(&target_bk->mutex); + return _MALI_OSK_ERR_OK; +error: + mali_mem_cow_release(backend, MALI_FALSE); + return _MALI_OSK_ERR_FAULT; +} + +_mali_osk_errcode_t mali_memory_cow_swap_memory(mali_mem_backend *target_bk, + u32 target_offset, + u32 target_size, + mali_mem_backend *backend, + u32 range_start, + u32 range_size) +{ + mali_mem_cow *cow = &backend->cow_mem; + struct 
mali_page_node *m_page, *m_tmp, *page_node; + int target_page = 0; + struct mali_swap_item *swap_item; + struct list_head *pages = NULL; + + pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size); + if (NULL == pages) { + MALI_DEBUG_PRINT_ERROR(("No swap memory page need to cow ! \n")); + return _MALI_OSK_ERR_FAULT; + } + + MALI_DEBUG_ASSERT(0 == cow->count); + + INIT_LIST_HEAD(&cow->pages); + mutex_lock(&target_bk->mutex); + + backend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN; + + list_for_each_entry_safe(m_page, m_tmp, pages, list) { + /* add page from (target_offset,target_offset+size) to cow backend */ + if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) && + (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) { + + /* allocate a new page node, use swap memory for COW memory swap cowed flag. */ + page_node = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP); + + if (NULL == page_node) { + mutex_unlock(&target_bk->mutex); + goto error; + } + + /* check if in the modified range*/ + if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) && + (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) { + /* need to allocate a new page */ + /* To simplify the case, All COW memory is allocated from os memory ?*/ + swap_item = mali_mem_swap_alloc_swap_item(); + + if (NULL == swap_item) { + kfree(page_node); + mutex_unlock(&target_bk->mutex); + goto error; + } + + swap_item->idx = mali_mem_swap_idx_alloc(); + + if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) { + MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW.\n")); + kfree(page_node); + kfree(swap_item); + mutex_unlock(&target_bk->mutex); + goto error; + } + + _mali_page_node_add_swap_item(page_node, swap_item); + } else { + _mali_page_node_add_swap_item(page_node, m_page->swap_it); + + /* add ref to this page */ + _mali_page_node_ref(m_page); + } + + list_add_tail(&page_node->list, &cow->pages); + cow->count++; + } + 
target_page++; + } + mutex_unlock(&target_bk->mutex); + + return _MALI_OSK_ERR_OK; +error: + mali_mem_swap_release(backend, MALI_FALSE); + return _MALI_OSK_ERR_FAULT; + +} + + +_mali_osk_errcode_t _mali_mem_put_page_node(mali_page_node *node) +{ + if (node->type == MALI_PAGE_NODE_OS) { + return mali_mem_os_put_page(node->page); + } else if (node->type == MALI_PAGE_NODE_BLOCK) { + return mali_mem_block_unref_node(node); + } else if (node->type == MALI_PAGE_NODE_SWAP) { + return _mali_mem_swap_put_page_node(node); + } else + MALI_DEBUG_ASSERT(0); + return _MALI_OSK_ERR_FAULT; +} + + +/** +* Modify a range of a exist COW backend +* @backend -COW backend +* @range_start - offset of modified range (4K align) +* @range_size - size of modified range(in byte) +*/ +_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend, + u32 range_start, + u32 range_size) +{ + mali_mem_allocation *alloc = NULL; + struct mali_session_data *session; + mali_mem_cow *cow = &backend->cow_mem; + struct mali_page_node *m_page, *m_tmp; + LIST_HEAD(pages); + struct page *new_page; + u32 count = 0; + s32 change_pages_nr = 0; + _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK; + + if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + + alloc = backend->mali_allocation; + MALI_DEBUG_ASSERT_POINTER(alloc); + + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + + MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type); + MALI_DEBUG_ASSERT(((range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE) <= cow->count); + + mutex_lock(&backend->mutex); + + /* free pages*/ + list_for_each_entry_safe(m_page, m_tmp, &cow->pages, list) { + + /* check if in the modified range*/ + if ((count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) && + (count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) { + if (MALI_PAGE_NODE_SWAP != m_page->type) { + new_page = 
mali_mem_cow_alloc_page(); + + if (NULL == new_page) { + goto error; + } + if (1 != _mali_page_node_get_ref_count(m_page)) + change_pages_nr++; + /* unref old page*/ + _mali_osk_mutex_wait(session->cow_lock); + if (_mali_mem_put_page_node(m_page)) { + __free_page(new_page); + _mali_osk_mutex_signal(session->cow_lock); + goto error; + } + _mali_osk_mutex_signal(session->cow_lock); + /* add new page*/ + /* always use OS for COW*/ + m_page->type = MALI_PAGE_NODE_OS; + _mali_page_node_add_page(m_page, new_page); + } else { + struct mali_swap_item *swap_item; + + swap_item = mali_mem_swap_alloc_swap_item(); + + if (NULL == swap_item) { + goto error; + } + + swap_item->idx = mali_mem_swap_idx_alloc(); + + if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) { + MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW modify range.\n")); + kfree(swap_item); + goto error; + } + + if (1 != _mali_page_node_get_ref_count(m_page)) { + change_pages_nr++; + } + + if (_mali_mem_put_page_node(m_page)) { + mali_mem_swap_free_swap_item(swap_item); + goto error; + } + + _mali_page_node_add_swap_item(m_page, swap_item); + } + } + count++; + } + cow->change_pages_nr = change_pages_nr; + + MALI_DEBUG_ASSERT(MALI_MEM_COW == alloc->type); + + /* ZAP cpu mapping(modified range), and do cpu mapping here if need */ + if (NULL != alloc->cpu_mapping.vma) { + MALI_DEBUG_ASSERT(0 != alloc->backend_handle); + MALI_DEBUG_ASSERT(NULL != alloc->cpu_mapping.vma); + MALI_DEBUG_ASSERT(alloc->cpu_mapping.vma->vm_end - alloc->cpu_mapping.vma->vm_start >= range_size); + + if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) { + zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size); + + ret = mali_mem_cow_cpu_map_pages_locked(backend, alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size / _MALI_OSK_MALI_PAGE_SIZE); + + if (unlikely(ret != _MALI_OSK_ERR_OK)) { + MALI_DEBUG_PRINT(2, 
("mali_memory_cow_modify_range: cpu mapping failed !\n")); + ret = _MALI_OSK_ERR_FAULT; + } + } else { + /* used to trigger page fault for swappable cowed memory. */ + alloc->cpu_mapping.vma->vm_flags |= VM_PFNMAP; + alloc->cpu_mapping.vma->vm_flags |= VM_MIXEDMAP; + + zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size); + /* delete this flag to let swappble is ummapped regard to stauct page not page frame. */ + alloc->cpu_mapping.vma->vm_flags &= ~VM_PFNMAP; + alloc->cpu_mapping.vma->vm_flags &= ~VM_MIXEDMAP; + } + } + +error: + mutex_unlock(&backend->mutex); + return ret; + +} + + +/** +* Allocate pages for COW backend +* @alloc -allocation for COW allocation +* @target_bk - target allocation's backend(the allocation need to do COW) +* @target_offset - the offset in target allocation to do COW(for support COW a memory allocated from memory_bank, 4K align) +* @target_size - size of target allocation to do COW (for support memory bank)(in byte) +* @backend -COW backend +* @range_start - offset of modified range (4K align) +* @range_size - size of modified range(in byte) +*/ +_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk, + u32 target_offset, + u32 target_size, + mali_mem_backend *backend, + u32 range_start, + u32 range_size) +{ + struct mali_session_data *session = backend->mali_allocation->session; + + MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS); + + /* size & offset must be a multiple of the system page size */ + if (target_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + if (target_offset % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + + /* check backend type */ + MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type); + + switch (target_bk->type) { + case MALI_MEM_OS: + case 
MALI_MEM_BLOCK: + return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size); + break; + case MALI_MEM_COW: + if (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) { + return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size); + } else { + return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size); + } + break; + case MALI_MEM_SWAP: + return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size); + break; + case MALI_MEM_EXTERNAL: + /*NOT support yet*/ + MALI_DEBUG_PRINT_ERROR(("External physical memory not supported ! \n")); + return _MALI_OSK_ERR_UNSUPPORTED; + break; + case MALI_MEM_DMA_BUF: + /*NOT support yet*/ + MALI_DEBUG_PRINT_ERROR(("DMA buffer not supported ! \n")); + return _MALI_OSK_ERR_UNSUPPORTED; + break; + case MALI_MEM_UMP: + /*NOT support yet*/ + MALI_DEBUG_PRINT_ERROR(("UMP buffer not supported ! \n")); + return _MALI_OSK_ERR_UNSUPPORTED; + break; + default: + /*Not support yet*/ + MALI_DEBUG_PRINT_ERROR(("Invalid memory type not supported ! 
\n")); + return _MALI_OSK_ERR_UNSUPPORTED; + break; + } + return _MALI_OSK_ERR_OK; +} + + +/** +* Map COW backend memory to mali +* Support OS/BLOCK for mali_page_node +*/ +int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size) +{ + mali_mem_allocation *cow_alloc; + struct mali_page_node *m_page; + struct mali_session_data *session; + struct mali_page_directory *pagedir; + u32 virt, start; + + cow_alloc = mem_bkend->mali_allocation; + virt = cow_alloc->mali_vma_node.vm_node.start; + start = virt; + + MALI_DEBUG_ASSERT_POINTER(mem_bkend); + MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type); + MALI_DEBUG_ASSERT_POINTER(cow_alloc); + + session = cow_alloc->session; + pagedir = session->page_directory; + MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS); + list_for_each_entry(m_page, &mem_bkend->cow_mem.pages, list) { + if ((virt - start >= range_start) && (virt - start < range_start + range_size)) { + dma_addr_t phys = _mali_page_node_get_dma_addr(m_page); +#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) + MALI_DEBUG_ASSERT(0 == (phys >> 32)); +#endif + mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, + MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT); + } + virt += MALI_MMU_PAGE_SIZE; + } + return 0; +} + +/** +* Map COW backend to cpu +* support OS/BLOCK memory +*/ +int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma) +{ + mali_mem_cow *cow = &mem_bkend->cow_mem; + struct mali_page_node *m_page; + int ret; + unsigned long addr = vma->vm_start; + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW); + + list_for_each_entry(m_page, &cow->pages, list) { + /* We should use vm_insert_page, but it does a dcache + * flush which makes it way slower than remap_pfn_range or vm_insert_pfn. 
+ ret = vm_insert_page(vma, addr, page); + */ + ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page)); + + if (unlikely(0 != ret)) { + return ret; + } + addr += _MALI_OSK_MALI_PAGE_SIZE; + } + + return 0; +} + +/** +* Map some pages(COW backend) to CPU vma@vaddr +*@ mem_bkend - COW backend +*@ vma +*@ vaddr -start CPU vaddr mapped to +*@ num - max number of pages to map to CPU vaddr +*/ +_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend, + struct vm_area_struct *vma, + unsigned long vaddr, + int num) +{ + mali_mem_cow *cow = &mem_bkend->cow_mem; + struct mali_page_node *m_page; + int ret; + int offset; + int count ; + unsigned long vstart = vma->vm_start; + count = 0; + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW); + MALI_DEBUG_ASSERT(0 == vaddr % _MALI_OSK_MALI_PAGE_SIZE); + MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE); + offset = (vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE; + + list_for_each_entry(m_page, &cow->pages, list) { + if ((count >= offset) && (count < offset + num)) { + ret = vm_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page)); + + if (unlikely(0 != ret)) { + if (count == offset) { + return _MALI_OSK_ERR_FAULT; + } else { + /* ret is EBUSY when page isn't in modify range, but now it's OK*/ + return _MALI_OSK_ERR_OK; + } + } + vaddr += _MALI_OSK_MALI_PAGE_SIZE; + } + count++; + } + return _MALI_OSK_ERR_OK; +} + +/** +* Release COW backend memory +* free it directly(put_page--unref page), not put into pool +*/ +u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped) +{ + mali_mem_allocation *alloc; + struct mali_session_data *session; + u32 free_pages_nr = 0; + MALI_DEBUG_ASSERT_POINTER(mem_bkend); + MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type); + alloc = mem_bkend->mali_allocation; + MALI_DEBUG_ASSERT_POINTER(alloc); + + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + + if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != 
(MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags)) { + /* Unmap the memory from the mali virtual address space. */ + if (MALI_TRUE == is_mali_mapped) + mali_mem_os_mali_unmap(alloc); + /* free cow backend list*/ + _mali_osk_mutex_wait(session->cow_lock); + free_pages_nr = mali_mem_os_free(&mem_bkend->cow_mem.pages, mem_bkend->cow_mem.count, MALI_TRUE); + _mali_osk_mutex_signal(session->cow_lock); + + free_pages_nr += mali_mem_block_free_list(&mem_bkend->cow_mem.pages); + + MALI_DEBUG_ASSERT(list_empty(&mem_bkend->cow_mem.pages)); + } else { + free_pages_nr = mali_mem_swap_release(mem_bkend, is_mali_mapped); + } + + + MALI_DEBUG_PRINT(4, ("COW Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->cow_mem.count * _MALI_OSK_MALI_PAGE_SIZE, + free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE)); + + mem_bkend->cow_mem.count = 0; + return free_pages_nr; +} + + +/* Dst node could os node or swap node. */ +void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node) +{ + void *dst, *src; + struct page *dst_page; + dma_addr_t dma_addr; + + MALI_DEBUG_ASSERT(src_node != NULL); + MALI_DEBUG_ASSERT(dst_node != NULL); + MALI_DEBUG_ASSERT(dst_node->type == MALI_PAGE_NODE_OS + || dst_node->type == MALI_PAGE_NODE_SWAP); + + if (dst_node->type == MALI_PAGE_NODE_OS) { + dst_page = dst_node->page; + } else { + dst_page = dst_node->swap_it->page; + } + + dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(dst_node), + _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL); + + /* map it , and copy the content*/ + dst = kmap_atomic(dst_page); + + if (src_node->type == MALI_PAGE_NODE_OS || + src_node->type == MALI_PAGE_NODE_SWAP) { + struct page *src_page; + + if (src_node->type == MALI_PAGE_NODE_OS) { + src_page = src_node->page; + } else { + src_page = src_node->swap_it->page; + } + + /* Clear and invaliate cache */ + /* In ARM architecture, speculative read may pull stale data into L1 cache + * for kernel linear mapping page table. 
DMA_BIDIRECTIONAL could + * invalidate the L1 cache so that following read get the latest data + */ + dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(src_node), + _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL); + + src = kmap_atomic(src_page); + memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE); + kunmap_atomic(src); + dma_addr = dma_map_page(&mali_platform_device->dev, src_page, + 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL); + + if (src_node->type == MALI_PAGE_NODE_SWAP) { + src_node->swap_it->dma_addr = dma_addr; + } + } else if (src_node->type == MALI_PAGE_NODE_BLOCK) { + /* + * use ioremap to map src for BLOCK memory + */ + src = ioremap_nocache(_mali_page_node_get_dma_addr(src_node), _MALI_OSK_MALI_PAGE_SIZE); + memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE); + iounmap(src); + } + kunmap_atomic(dst); + dma_addr = dma_map_page(&mali_platform_device->dev, dst_page, + 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + + if (dst_node->type == MALI_PAGE_NODE_SWAP) { + dst_node->swap_it->dma_addr = dma_addr; + } +} + + +/* +* allocate page on demand when CPU access it, +* THis used in page fault handler +*/ +_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page) +{ + struct page *new_page = NULL; + struct mali_page_node *new_node = NULL; + int i = 0; + struct mali_page_node *m_page, *found_node = NULL; + struct mali_session_data *session = NULL; + mali_mem_cow *cow = &mem_bkend->cow_mem; + MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type); + MALI_DEBUG_ASSERT(offset_page < mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE); + MALI_DEBUG_PRINT(4, ("mali_mem_cow_allocate_on_demand !, offset_page =0x%x\n", offset_page)); + + /* allocate new page here */ + new_page = mali_mem_cow_alloc_page(); + if (!new_page) + return _MALI_OSK_ERR_NOMEM; + + new_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS); + if (!new_node) { + __free_page(new_page); + return _MALI_OSK_ERR_NOMEM; + } + + /* find the page in backend*/ + 
list_for_each_entry(m_page, &cow->pages, list) { + if (i == offset_page) { + found_node = m_page; + break; + } + i++; + } + MALI_DEBUG_ASSERT(found_node); + if (NULL == found_node) { + __free_page(new_page); + kfree(new_node); + return _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + + _mali_page_node_add_page(new_node, new_page); + + /* Copy the src page's content to new page */ + _mali_mem_cow_copy_page(found_node, new_node); + + MALI_DEBUG_ASSERT_POINTER(mem_bkend->mali_allocation); + session = mem_bkend->mali_allocation->session; + MALI_DEBUG_ASSERT_POINTER(session); + if (1 != _mali_page_node_get_ref_count(found_node)) { + atomic_add(1, &session->mali_mem_allocated_pages); + if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) { + session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } + mem_bkend->cow_mem.change_pages_nr++; + } + + _mali_osk_mutex_wait(session->cow_lock); + if (_mali_mem_put_page_node(found_node)) { + __free_page(new_page); + kfree(new_node); + _mali_osk_mutex_signal(session->cow_lock); + return _MALI_OSK_ERR_NOMEM; + } + _mali_osk_mutex_signal(session->cow_lock); + + list_replace(&found_node->list, &new_node->list); + + kfree(found_node); + + /* map to GPU side*/ + _mali_osk_mutex_wait(session->memory_lock); + mali_mem_cow_mali_map(mem_bkend, offset_page * _MALI_OSK_MALI_PAGE_SIZE, _MALI_OSK_MALI_PAGE_SIZE); + _mali_osk_mutex_signal(session->memory_lock); + return _MALI_OSK_ERR_OK; +} diff --git a/drivers/gpu/arm/mali/linux/mali_memory_cow.h b/drivers/gpu/arm/mali/linux/mali_memory_cow.h new file mode 100755 index 0000000000000..9b9e834915264 --- /dev/null +++ b/drivers/gpu/arm/mali/linux/mali_memory_cow.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_MEMORY_COW_H__ +#define __MALI_MEMORY_COW_H__ + +#include "mali_osk.h" +#include "mali_session.h" +#include "mali_memory_types.h" + +int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma); +_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend, + struct vm_area_struct *vma, + unsigned long vaddr, + int num); + +_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk, + u32 target_offset, + u32 target_size, + mali_mem_backend *backend, + u32 range_start, + u32 range_size); + +_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend, + u32 range_start, + u32 range_size); + +_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk, + u32 target_offset, + u32 target_size, + mali_mem_backend *backend, + u32 range_start, + u32 range_size); + +void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node); + +int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size); +u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped); +_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page); +#endif + diff --git a/drivers/gpu/arm/mali/linux/mali_memory_defer_bind.c b/drivers/gpu/arm/mali/linux/mali_memory_defer_bind.c new file mode 100755 index 0000000000000..6d84558d0faca --- /dev/null +++ b/drivers/gpu/arm/mali/linux/mali_memory_defer_bind.c @@ -0,0 +1,262 @@ +/* + * Copyright (C) 2013-2015 ARM Limited. 
All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_ARM +#include +#endif +#include + +#include "mali_memory.h" +#include "mali_kernel_common.h" +#include "mali_uk_types.h" +#include "mali_osk.h" +#include "mali_kernel_linux.h" +#include "mali_memory_defer_bind.h" +#include "mali_executor.h" +#include "mali_osk.h" +#include "mali_scheduler.h" +#include "mali_gp_job.h" + +mali_defer_bind_manager *mali_dmem_man = NULL; + +static u32 mali_dmem_get_gp_varying_size(struct mali_gp_job *gp_job) +{ + return gp_job->required_varying_memsize / _MALI_OSK_MALI_PAGE_SIZE; +} + +_mali_osk_errcode_t mali_mem_defer_bind_manager_init(void) +{ + mali_dmem_man = _mali_osk_calloc(1, sizeof(struct mali_defer_bind_manager)); + if (!mali_dmem_man) + return _MALI_OSK_ERR_NOMEM; + + atomic_set(&mali_dmem_man->num_used_pages, 0); + atomic_set(&mali_dmem_man->num_dmem, 0); + + return _MALI_OSK_ERR_OK; +} + + +void mali_mem_defer_bind_manager_destory(void) +{ + if (mali_dmem_man) { + MALI_DEBUG_ASSERT(0 == atomic_read(&mali_dmem_man->num_dmem)); + kfree(mali_dmem_man); + } + mali_dmem_man = NULL; +} + + +/*allocate pages from OS memory*/ +_mali_osk_errcode_t mali_mem_defer_alloc_mem(u32 require, struct mali_session_data *session, mali_defer_mem_block *dblock) +{ + int retval = 0; + u32 num_pages = require; + mali_mem_os_mem os_mem; + + retval = mali_mem_os_alloc_pages(&os_mem, num_pages * _MALI_OSK_MALI_PAGE_SIZE); + + /* add to free pages list */ + if (0 == retval) { + MALI_DEBUG_PRINT(4, 
("mali_mem_defer_alloc_mem ,,*** pages allocate = 0x%x \n", num_pages)); + list_splice(&os_mem.pages, &dblock->free_pages); + atomic_add(os_mem.count, &dblock->num_free_pages); + atomic_add(os_mem.count, &session->mali_mem_allocated_pages); + if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) { + session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } + return _MALI_OSK_ERR_OK; + } else + return _MALI_OSK_ERR_FAULT; +} + +_mali_osk_errcode_t mali_mem_prepare_mem_for_job(struct mali_gp_job *next_gp_job, mali_defer_mem_block *dblock) +{ + u32 require_page; + + if (!next_gp_job) + return _MALI_OSK_ERR_FAULT; + + require_page = mali_dmem_get_gp_varying_size(next_gp_job); + + MALI_DEBUG_PRINT(4, ("mali_mem_defer_prepare_mem_work, require alloc page 0x%x\n", + require_page)); + /* allocate more pages from OS */ + if (_MALI_OSK_ERR_OK != mali_mem_defer_alloc_mem(require_page, next_gp_job->session, dblock)) { + MALI_DEBUG_PRINT(1, ("ERROR##mali_mem_defer_prepare_mem_work, allocate page failed!!")); + return _MALI_OSK_ERR_NOMEM; + } + + next_gp_job->bind_flag = MALI_DEFER_BIND_MEMORY_PREPARED; + + return _MALI_OSK_ERR_OK; +} + + +/* do preparetion for allocation before defer bind */ +_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list, u32 *required_varying_memsize) +{ + mali_mem_backend *mem_bkend = NULL; + struct mali_backend_bind_list *bk_list = _mali_osk_calloc(1, sizeof(struct mali_backend_bind_list)); + if (NULL == bk_list) + return _MALI_OSK_ERR_FAULT; + + INIT_LIST_HEAD(&bk_list->node); + /* Get backend memory */ + mutex_lock(&mali_idr_mutex); + if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) { + MALI_DEBUG_PRINT(1, ("Can't find memory backend in defer bind!\n")); + mutex_unlock(&mali_idr_mutex); + _mali_osk_free(bk_list); + return _MALI_OSK_ERR_FAULT; + } + 
mutex_unlock(&mali_idr_mutex); + + /* If the mem backend has already been bound, no need to bind again.*/ + if (mem_bkend->os_mem.count > 0) { + _mali_osk_free(bk_list); + return _MALI_OSK_ERR_OK; + } + + MALI_DEBUG_PRINT(4, ("bind_allocation_prepare:: allocation =%x vaddr=0x%x!\n", alloc, alloc->mali_vma_node.vm_node.start)); + + INIT_LIST_HEAD(&mem_bkend->os_mem.pages); + + bk_list->bkend = mem_bkend; + bk_list->vaddr = alloc->mali_vma_node.vm_node.start; + bk_list->session = alloc->session; + bk_list->page_num = mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE; + *required_varying_memsize += mem_bkend->size; + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS); + + /* add to job to do list */ + list_add(&bk_list->node, list); + + return _MALI_OSK_ERR_OK; +} + + + +/* bind phyiscal memory to allocation +This function will be called in IRQ handler*/ +static _mali_osk_errcode_t mali_mem_defer_bind_allocation(struct mali_backend_bind_list *bk_node, + struct list_head *pages) +{ + struct mali_session_data *session = bk_node->session; + mali_mem_backend *mem_bkend = bk_node->bkend; + MALI_DEBUG_PRINT(4, ("mali_mem_defer_bind_allocation, bind bkend = %x page num=0x%x vaddr=%x session=%x\n", mem_bkend, bk_node->page_num, bk_node->vaddr, session)); + + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS); + list_splice(pages, &mem_bkend->os_mem.pages); + mem_bkend->os_mem.count = bk_node->page_num; + + if (mem_bkend->type == MALI_MEM_OS) { + mali_mem_os_mali_map(&mem_bkend->os_mem, session, bk_node->vaddr, 0, + mem_bkend->os_mem.count, MALI_MMU_FLAGS_DEFAULT); + } + smp_wmb(); + bk_node->flag = MALI_DEFER_BIND_MEMORY_BINDED; + mem_bkend->flags &= ~MALI_MEM_BACKEND_FLAG_NOT_BINDED; + mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_BINDED; + return _MALI_OSK_ERR_OK; +} + + +static struct list_head *mali_mem_defer_get_free_page_list(u32 count, struct list_head *pages, mali_defer_mem_block *dblock) +{ + int i = 0; + struct mali_page_node *m_page, *m_tmp; + + if 
(atomic_read(&dblock->num_free_pages) < count) { + return NULL; + } else { + list_for_each_entry_safe(m_page, m_tmp, &dblock->free_pages, list) { + if (i < count) { + list_move_tail(&m_page->list, pages); + } else { + break; + } + i++; + } + MALI_DEBUG_ASSERT(i == count); + atomic_sub(count, &dblock->num_free_pages); + return pages; + } +} + + +/* called in job start IOCTL to bind physical memory for each allocations +@ bk_list backend list to do defer bind +@ pages page list to do this bind +@ count number of pages +*/ +_mali_osk_errcode_t mali_mem_defer_bind(struct mali_gp_job *gp, + struct mali_defer_mem_block *dmem_block) +{ + struct mali_defer_mem *dmem = NULL; + struct mali_backend_bind_list *bkn, *bkn_tmp; + LIST_HEAD(pages); + + if (gp->required_varying_memsize != (atomic_read(&dmem_block->num_free_pages) * _MALI_OSK_MALI_PAGE_SIZE)) { + MALI_DEBUG_PRINT_ERROR(("#BIND: The memsize of varying buffer not match to the pagesize of the dmem_block!!## \n")); + return _MALI_OSK_ERR_FAULT; + } + + MALI_DEBUG_PRINT(4, ("#BIND: GP job=%x## \n", gp)); + dmem = (mali_defer_mem *)_mali_osk_calloc(1, sizeof(struct mali_defer_mem)); + if (dmem) { + INIT_LIST_HEAD(&dmem->node); + gp->dmem = dmem; + } else { + return _MALI_OSK_ERR_NOMEM; + } + + atomic_add(1, &mali_dmem_man->num_dmem); + /* for each bk_list backend, do bind */ + list_for_each_entry_safe(bkn, bkn_tmp , &gp->vary_todo, node) { + INIT_LIST_HEAD(&pages); + if (likely(mali_mem_defer_get_free_page_list(bkn->page_num, &pages, dmem_block))) { + list_del(&bkn->node); + mali_mem_defer_bind_allocation(bkn, &pages); + _mali_osk_free(bkn); + } else { + /* not enough memory will not happen */ + MALI_DEBUG_PRINT_ERROR(("#BIND: NOT enough memory when binded !!## \n")); + _mali_osk_free(gp->dmem); + return _MALI_OSK_ERR_NOMEM; + } + } + + if (!list_empty(&gp->vary_todo)) { + MALI_DEBUG_PRINT_ERROR(("#BIND: The deferbind backend list isn't empty !!## \n")); + _mali_osk_free(gp->dmem); + return _MALI_OSK_ERR_FAULT; + } + + 
dmem->flag = MALI_DEFER_BIND_MEMORY_BINDED; + + return _MALI_OSK_ERR_OK; +} + +void mali_mem_defer_dmem_free(struct mali_gp_job *gp) +{ + if (gp->dmem) { + atomic_dec(&mali_dmem_man->num_dmem); + _mali_osk_free(gp->dmem); + } +} + diff --git a/drivers/gpu/arm/mali/linux/mali_memory_defer_bind.h b/drivers/gpu/arm/mali/linux/mali_memory_defer_bind.h new file mode 100755 index 0000000000000..98f6bebde995c --- /dev/null +++ b/drivers/gpu/arm/mali/linux/mali_memory_defer_bind.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#ifndef __MALI_MEMORY_DEFER_BIND_H_ +#define __MALI_MEMORY_DEFER_BIND_H_ + + +#include "mali_osk.h" +#include "mali_session.h" + +#include +#include +#include +#include +#include + + +#include "mali_memory_types.h" +#include "mali_memory_os_alloc.h" +#include "mali_uk_types.h" + +struct mali_gp_job; + +typedef struct mali_defer_mem { + struct list_head node; /*dlist node in bind manager */ + u32 flag; +} mali_defer_mem; + + +typedef struct mali_defer_mem_block { + struct list_head free_pages; /* page pool */ + atomic_t num_free_pages; +} mali_defer_mem_block; + +/* varying memory list need to bind */ +typedef struct mali_backend_bind_list { + struct list_head node; + struct mali_mem_backend *bkend; + u32 vaddr; + u32 page_num; + struct mali_session_data *session; + u32 flag; +} mali_backend_bind_lists; + + +typedef struct mali_defer_bind_manager { + atomic_t num_used_pages; + atomic_t num_dmem; +} mali_defer_bind_manager; + +_mali_osk_errcode_t 
mali_mem_defer_bind_manager_init(void); +void mali_mem_defer_bind_manager_destory(void); +_mali_osk_errcode_t mali_mem_defer_bind(struct mali_gp_job *gp, struct mali_defer_mem_block *dmem_block); +_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list, u32 *required_varying_memsize); +_mali_osk_errcode_t mali_mem_prepare_mem_for_job(struct mali_gp_job *next_gp_job, mali_defer_mem_block *dblock); +void mali_mem_defer_dmem_free(struct mali_gp_job *gp); + +#endif diff --git a/drivers/gpu/arm/mali/linux/mali_memory_dma_buf.c b/drivers/gpu/arm/mali/linux/mali_memory_dma_buf.c index 83890b786a519..f0d1e07be544d 100755 --- a/drivers/gpu/arm/mali/linux/mali_memory_dma_buf.c +++ b/drivers/gpu/arm/mali/linux/mali_memory_dma_buf.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -29,35 +29,6 @@ #include "mali_memory_virtual.h" #include "mali_pp_job.h" -static void mali_dma_buf_unmap(mali_mem_allocation *alloc, struct mali_dma_buf_attachment *mem); - -void mali_mem_dma_buf_release(mali_mem_backend *mem_backend) -{ - struct mali_dma_buf_attachment *mem; - MALI_DEBUG_ASSERT_POINTER(mem_backend); - MALI_DEBUG_ASSERT(MALI_MEM_DMA_BUF == mem_backend->type); - - mem = mem_backend->dma_buf.attachment; - MALI_DEBUG_ASSERT_POINTER(mem); - MALI_DEBUG_ASSERT_POINTER(mem->attachment); - MALI_DEBUG_ASSERT_POINTER(mem->buf); - MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release attachment %p\n", mem)); - -#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) - MALI_DEBUG_ASSERT_POINTER(mem_backend->mali_allocation); - /* We mapped implicitly on attach, so we need to unmap on release */ - mali_dma_buf_unmap(mem_backend->mali_allocation, mem); -#endif - /* Wait for buffer to become unmapped */ - wait_event(mem->wait_queue, !mem->is_mapped); - MALI_DEBUG_ASSERT(!mem->is_mapped); - - dma_buf_detach(mem->buf, mem->attachment); - dma_buf_put(mem->buf); - - _mali_osk_free(mem); -} - /* * Map DMA buf attachment \a mem into \a session at virtual address \a virt. 
*/ @@ -138,7 +109,6 @@ static int mali_dma_buf_map(mali_mem_backend *mem_backend) } mem->is_mapped = MALI_TRUE; - session->mali_mem_array[mem_backend->type] += mem_backend->size; mali_session_memory_unlock(session); /* Wake up any thread waiting for buffer to become mapped */ wake_up_all(&mem->wait_queue); @@ -170,7 +140,6 @@ static void mali_dma_buf_unmap(mali_mem_allocation *alloc, struct mali_dma_buf_a alloc->flags); } mem->is_mapped = MALI_FALSE; - alloc->session->mali_mem_array[alloc->type] -= alloc->psize; } mali_session_memory_unlock(alloc->session); /* Wake up any thread waiting for buffer to become unmapped */ @@ -302,7 +271,7 @@ int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_ge return 0; } -_mali_osk_errcode_t mali_memory_bind_dma_buf(mali_mem_allocation *alloc, +_mali_osk_errcode_t mali_mem_bind_dma_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, int fd, u32 flags) { @@ -372,11 +341,29 @@ _mali_osk_errcode_t mali_memory_bind_dma_buf(mali_mem_allocation *alloc, return _MALI_OSK_ERR_FAULT; } - -void mali_memory_unbind_dma_buf(mali_mem_backend *mem_backend) +void mali_mem_unbind_dma_buf(mali_mem_backend *mem_backend) { + struct mali_dma_buf_attachment *mem; MALI_DEBUG_ASSERT_POINTER(mem_backend); - mali_mem_dma_buf_release(mem_backend); -} + MALI_DEBUG_ASSERT(MALI_MEM_DMA_BUF == mem_backend->type); + mem = mem_backend->dma_buf.attachment; + MALI_DEBUG_ASSERT_POINTER(mem); + MALI_DEBUG_ASSERT_POINTER(mem->attachment); + MALI_DEBUG_ASSERT_POINTER(mem->buf); + MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release attachment %p\n", mem)); +#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) + MALI_DEBUG_ASSERT_POINTER(mem_backend->mali_allocation); + /* We mapped implicitly on attach, so we need to unmap on release */ + mali_dma_buf_unmap(mem_backend->mali_allocation, mem); +#endif + /* Wait for buffer to become unmapped */ + wait_event(mem->wait_queue, !mem->is_mapped); + MALI_DEBUG_ASSERT(!mem->is_mapped); + + 
dma_buf_detach(mem->buf, mem->attachment); + dma_buf_put(mem->buf); + + _mali_osk_free(mem); +} diff --git a/drivers/gpu/arm/mali/linux/mali_memory_dma_buf.h b/drivers/gpu/arm/mali/linux/mali_memory_dma_buf.h index e8682ee8bb6d8..37b6f6b436806 100755 --- a/drivers/gpu/arm/mali/linux/mali_memory_dma_buf.h +++ b/drivers/gpu/arm/mali/linux/mali_memory_dma_buf.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -35,14 +35,12 @@ struct mali_dma_buf_attachment { int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *arg); -void mali_mem_dma_buf_release(mali_mem_backend *mem_backend); +void mali_mem_unbind_dma_buf(mali_mem_backend *mem_backend); -_mali_osk_errcode_t mali_memory_bind_dma_buf(mali_mem_allocation *alloc, +_mali_osk_errcode_t mali_mem_bind_dma_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, int fd, u32 flags); -void mali_memory_unbind_dma_buf(mali_mem_backend *mem_backend); - #if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) int mali_dma_buf_map_job(struct mali_pp_job *job); void mali_dma_buf_unmap_job(struct mali_pp_job *job); diff --git a/drivers/gpu/arm/mali/linux/mali_memory_external.c b/drivers/gpu/arm/mali/linux/mali_memory_external.c index ff2a5ae9311ac..c0097f6706f8b 100755 --- a/drivers/gpu/arm/mali/linux/mali_memory_external.c +++ b/drivers/gpu/arm/mali/linux/mali_memory_external.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. 
* * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -15,7 +15,7 @@ #include "mali_mem_validation.h" #include "mali_uk_types.h" -void mali_mem_external_release(mali_mem_backend *mem_backend) +void mali_mem_unbind_ext_buf(mali_mem_backend *mem_backend) { mali_mem_allocation *alloc; struct mali_session_data *session; @@ -29,11 +29,10 @@ void mali_mem_external_release(mali_mem_backend *mem_backend) mali_session_memory_lock(session); mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start, alloc->flags); - session->mali_mem_array[mem_backend->type] -= mem_backend->size; mali_session_memory_unlock(session); } -_mali_osk_errcode_t mali_memory_bind_ext_mem(mali_mem_allocation *alloc, +_mali_osk_errcode_t mali_mem_bind_ext_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32 phys_addr, u32 flag) @@ -83,16 +82,8 @@ _mali_osk_errcode_t mali_memory_bind_ext_mem(mali_mem_allocation *alloc, ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n", phys_addr, (phys_addr + size - 1), virt)); - session->mali_mem_array[mem_backend->type] += mem_backend->size; mali_session_memory_unlock(session); MALI_SUCCESS; } - -void mali_memory_unbind_ext_mem(mali_mem_backend *mem_backend) -{ - MALI_DEBUG_ASSERT_POINTER(mem_backend); - mali_mem_external_release(mem_backend); -} - diff --git a/drivers/gpu/arm/mali/linux/mali_memory_external.h b/drivers/gpu/arm/mali/linux/mali_memory_external.h old mode 100644 new mode 100755 index 9e9a9e907d454..ba387b8a270fe --- a/drivers/gpu/arm/mali/linux/mali_memory_external.h +++ b/drivers/gpu/arm/mali/linux/mali_memory_external.h @@ -1,10 +1,10 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. - * + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. 
+ * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * + * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ @@ -16,16 +16,11 @@ extern "C" { #endif -_mali_osk_errcode_t mali_memory_bind_ext_mem(mali_mem_allocation *alloc, +_mali_osk_errcode_t mali_mem_bind_ext_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32 phys_addr, u32 flag); - -void mali_memory_unbind_ext_mem(mali_mem_backend *mem_backend); - -void mali_mem_external_release(mali_mem_backend *mem_backend); - - +void mali_mem_unbind_ext_buf(mali_mem_backend *mem_backend); #ifdef __cplusplus } diff --git a/drivers/gpu/arm/mali/linux/mali_memory_manager.c b/drivers/gpu/arm/mali/linux/mali_memory_manager.c old mode 100644 new mode 100755 index 510c82e2b393e..178abcfae37da --- a/drivers/gpu/arm/mali/linux/mali_memory_manager.c +++ b/drivers/gpu/arm/mali/linux/mali_memory_manager.c @@ -1,9 +1,9 @@ /* * Copyright (C) 2013-2015 ARM Limited. All rights reserved. - * + * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * + * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ @@ -15,6 +15,8 @@ #include #include #include +#include + #include #if defined(CONFIG_DMA_SHARED_BUFFER) #include @@ -29,6 +31,7 @@ #include "mali_memory_os_alloc.h" #if defined(CONFIG_DMA_SHARED_BUFFER) #include "mali_memory_dma_buf.h" +#include "mali_memory_secure.h" #endif #if defined(CONFIG_MALI400_UMP) #include "mali_memory_ump.h" @@ -37,9 +40,10 @@ #include "mali_memory_virtual.h" #include "mali_memory_util.h" #include "mali_memory_external.h" +#include "mali_memory_cow.h" #include "mali_memory_block_alloc.h" - -#define MALI_S32_MAX 0x7fffffff +#include "mali_ukk.h" +#include "mali_memory_swap_alloc.h" /* * New memory system interface @@ -61,11 +65,11 @@ int mali_memory_manager_init(struct mali_allocation_manager *mgr) /* init RB tree */ mgr->allocation_mgr_rb = RB_ROOT; - mgr->mali_allocation_nr = 0; + mgr->mali_allocation_num = 0; return 0; } -/* deinit allocation manager +/* Deinit allocation manager * Do some check for debug */ void mali_memory_manager_uninit(struct mali_allocation_manager *mgr) @@ -95,27 +99,26 @@ static mali_mem_allocation *mali_mem_allocation_struct_create(struct mali_sessio mali_allocation->session = session; INIT_LIST_HEAD(&mali_allocation->list); - kref_init(&mali_allocation->ref); + _mali_osk_atomic_init(&mali_allocation->mem_alloc_refcount, 1); /** *add to session list */ mutex_lock(&session->allocation_mgr.list_mutex); list_add_tail(&mali_allocation->list, &session->allocation_mgr.head); - session->allocation_mgr.mali_allocation_nr++; + session->allocation_mgr.mali_allocation_num++; mutex_unlock(&session->allocation_mgr.list_mutex); return mali_allocation; } - void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc) { MALI_DEBUG_ASSERT_POINTER(alloc); MALI_DEBUG_ASSERT_POINTER(alloc->session); mutex_lock(&alloc->session->allocation_mgr.list_mutex); list_del(&alloc->list); - alloc->session->allocation_mgr.mali_allocation_nr--; + alloc->session->allocation_mgr.mali_allocation_num--; 
mutex_unlock(&alloc->session->allocation_mgr.list_mutex); kfree(alloc); @@ -133,6 +136,11 @@ int mali_mem_backend_struct_create(mali_mem_backend **backend, u32 psize) } mem_backend = *backend; mem_backend->size = psize; + mutex_init(&mem_backend->mutex); + INIT_LIST_HEAD(&mem_backend->list); + mem_backend->using_count = 0; + + /* link backend with id */ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) again: @@ -164,6 +172,7 @@ int mali_mem_backend_struct_create(mali_mem_backend **backend, u32 psize) return index; } + static void mali_mem_backend_struct_destory(mali_mem_backend **backend, s32 backend_handle) { mali_mem_backend *mem_backend = *backend; @@ -175,11 +184,177 @@ static void mali_mem_backend_struct_destory(mali_mem_backend **backend, s32 back *backend = NULL; } +mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address) +{ + struct mali_vma_node *mali_vma_node = NULL; + mali_mem_backend *mem_bkend = NULL; + mali_mem_allocation *mali_alloc = NULL; + MALI_DEBUG_ASSERT_POINTER(session); + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_address, 0); + if (NULL == mali_vma_node) { + MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_search:vma node was NULL\n")); + return NULL; + } + mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); + /* Get backend memory & Map on CPU */ + mutex_lock(&mali_idr_mutex); + mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle); + mutex_unlock(&mali_idr_mutex); + MALI_DEBUG_ASSERT(NULL != mem_bkend); + return mem_bkend; +} + +static _mali_osk_errcode_t mali_mem_resize(struct mali_session_data *session, mali_mem_backend *mem_backend, u32 physical_size) +{ + _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT; + int retval = 0; + mali_mem_allocation *mali_allocation = NULL; + mali_mem_os_mem tmp_os_mem; + s32 change_page_count; + + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT_POINTER(mem_backend); + MALI_DEBUG_PRINT(4, 
(" mali_mem_resize_memory called! \n")); + MALI_DEBUG_ASSERT(0 == physical_size % MALI_MMU_PAGE_SIZE); + + mali_allocation = mem_backend->mali_allocation; + MALI_DEBUG_ASSERT_POINTER(mali_allocation); + + MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE & mali_allocation->flags); + MALI_DEBUG_ASSERT(MALI_MEM_OS == mali_allocation->type); + + mutex_lock(&mem_backend->mutex); + + /* Do resize*/ + if (physical_size > mem_backend->size) { + u32 add_size = physical_size - mem_backend->size; + + MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE); + + /* Allocate new pages from os mem */ + retval = mali_mem_os_alloc_pages(&tmp_os_mem, add_size); + + if (retval) { + if (-ENOMEM == retval) { + ret = _MALI_OSK_ERR_NOMEM; + } else { + ret = _MALI_OSK_ERR_FAULT; + } + MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory allocation failed !\n")); + goto failed_alloc_memory; + } + + MALI_DEBUG_ASSERT(tmp_os_mem.count == add_size / MALI_MMU_PAGE_SIZE); + + /* Resize the memory of the backend */ + ret = mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count); + + if (ret) { + MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory resizing failed !\n")); + goto failed_resize_pages; + } + + /*Resize cpu mapping */ + if (NULL != mali_allocation->cpu_mapping.vma) { + ret = mali_mem_os_resize_cpu_map_locked(mem_backend, mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + mem_backend->size, add_size); + if (unlikely(ret != _MALI_OSK_ERR_OK)) { + MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: cpu mapping failed !\n")); + goto failed_cpu_map; + } + } + + /* Resize mali mapping */ + _mali_osk_mutex_wait(session->memory_lock); + ret = mali_mem_mali_map_resize(mali_allocation, physical_size); + + if (ret) { + MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_resize: mali map resize fail !\n")); + goto failed_gpu_map; + } + + ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, mali_allocation->mali_vma_node.vm_node.start, + mali_allocation->psize / 
MALI_MMU_PAGE_SIZE, add_size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties); + if (ret) { + MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: mali mapping failed !\n")); + goto failed_gpu_map; + } + + _mali_osk_mutex_signal(session->memory_lock); + } else { + u32 dec_size, page_count; + u32 vaddr = 0; + INIT_LIST_HEAD(&tmp_os_mem.pages); + tmp_os_mem.count = 0; + + dec_size = mem_backend->size - physical_size; + MALI_DEBUG_ASSERT(0 == dec_size % MALI_MMU_PAGE_SIZE); + + page_count = dec_size / MALI_MMU_PAGE_SIZE; + vaddr = mali_allocation->mali_vma_node.vm_node.start + physical_size; + + /* Resize the memory of the backend */ + ret = mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, physical_size / MALI_MMU_PAGE_SIZE, page_count); + + if (ret) { + MALI_DEBUG_PRINT(4, ("_mali_ukk_mem_resize: mali map resize failed!\n")); + goto failed_resize_pages; + } + + /* Resize mali map */ + _mali_osk_mutex_wait(session->memory_lock); + mali_mem_mali_map_free(session, dec_size, vaddr, mali_allocation->flags); + _mali_osk_mutex_signal(session->memory_lock); + + /* Zap cpu mapping */ + if (0 != mali_allocation->cpu_mapping.addr) { + MALI_DEBUG_ASSERT(NULL != mali_allocation->cpu_mapping.vma); + zap_vma_ptes(mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + physical_size, dec_size); + } + + /* Free those extra pages */ + mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE); + } + + /* Resize memory allocation and memory backend */ + change_page_count = (s32)(physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE; + mali_allocation->psize = physical_size; + mem_backend->size = physical_size; + mutex_unlock(&mem_backend->mutex); + + if (change_page_count > 0) { + atomic_add(change_page_count, &session->mali_mem_allocated_pages); + if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) { + session->max_mali_mem_allocated_size = 
atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } + + } else { + atomic_sub((s32)(-change_page_count), &session->mali_mem_allocated_pages); + } + + return _MALI_OSK_ERR_OK; + +failed_gpu_map: + _mali_osk_mutex_signal(session->memory_lock); +failed_cpu_map: + if (physical_size > mem_backend->size) { + mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, mem_backend->size / MALI_MMU_PAGE_SIZE, + (physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE); + } else { + mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count); + } +failed_resize_pages: + if (0 != tmp_os_mem.count) + mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE); +failed_alloc_memory: + + mutex_unlock(&mem_backend->mutex); + return ret; +} + /* Set GPU MMU properties */ static void _mali_memory_gpu_map_property_set(u32 *properties, u32 flags) { - if (_MALI_MEMORY_GPU_READ_ALLOCATE & flags) { *properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE; } else { @@ -187,6 +362,38 @@ static void _mali_memory_gpu_map_property_set(u32 *properties, u32 flags) } } +_mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size) +{ + mali_mem_backend *mem_backend = NULL; + _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT; + mali_mem_allocation *mali_allocation = NULL; + u32 new_physical_size; + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE); + + /* Get the memory backend that need to be resize. 
*/ + mem_backend = mali_mem_backend_struct_search(session, mali_addr); + + if (NULL == mem_backend) { + MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n")); + return ret; + } + + mali_allocation = mem_backend->mali_allocation; + + MALI_DEBUG_ASSERT_POINTER(mali_allocation); + + new_physical_size = add_size + mem_backend->size; + + if (new_physical_size > (mali_allocation->mali_vma_node.vm_node.size)) + return ret; + + MALI_DEBUG_ASSERT(new_physical_size != mem_backend->size); + + ret = mali_mem_resize(session, mem_backend, new_physical_size); + + return ret; +} /** * function@_mali_ukk_mem_allocate - allocate mali memory @@ -203,18 +410,17 @@ _mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args) MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize)); /* Check if the address is allocated - * Can we trust User mode? */ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0); + if (unlikely(mali_vma_node)) { - /* Not support yet */ - MALI_DEBUG_ASSERT(0); + MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n")); return _MALI_OSK_ERR_FAULT; } - /** *create mali memory allocation */ + mali_allocation = mali_mem_allocation_struct_create(session); if (mali_allocation == NULL) { @@ -224,8 +430,19 @@ _mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args) mali_allocation->psize = args->psize; mali_allocation->vsize = args->vsize; - /* check if have dedicated memory */ - if (MALI_TRUE == mali_memory_have_dedicated_memory()) { + /* MALI_MEM_OS if need to support mem resize, + * or MALI_MEM_BLOCK if have dedicated memory, + * or MALI_MEM_OS, + * or MALI_MEM_SWAP. 
+ */ + if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) { + mali_allocation->type = MALI_MEM_SWAP; + } else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) { + mali_allocation->type = MALI_MEM_OS; + mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE; + } else if (args->flags & _MALI_MEMORY_ALLOCATE_SECURE) { + mali_allocation->type = MALI_MEM_SECURE; + } else if (MALI_TRUE == mali_memory_have_dedicated_memory()) { mali_allocation->type = MALI_MEM_BLOCK; } else { mali_allocation->type = MALI_MEM_OS; @@ -239,89 +456,140 @@ _mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args) mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node); - /* check if need to allocate backend */ - if (mali_allocation->psize == 0) - return _MALI_OSK_ERR_OK; + mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize); + if (mali_allocation->backend_handle < 0) { + ret = _MALI_OSK_ERR_NOMEM; + MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n")); + goto failed_alloc_backend; + } - /** - *allocate physical backend & pages - */ - if (likely(mali_allocation->psize > 0)) { - mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize); - if (mali_allocation->backend_handle < 0) { - ret = _MALI_OSK_ERR_NOMEM; - MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! 
\n")); - goto failed_alloc_backend; + + mem_backend->mali_allocation = mali_allocation; + mem_backend->type = mali_allocation->type; + + mali_allocation->mali_mapping.addr = args->gpu_vaddr; + + /* set gpu mmu propery */ + _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags); + /* do prepare for MALI mapping */ + if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) { + _mali_osk_mutex_wait(session->memory_lock); + + ret = mali_mem_mali_map_prepare(mali_allocation); + if (0 != ret) { + _mali_osk_mutex_signal(session->memory_lock); + goto failed_prepare_map; } + _mali_osk_mutex_signal(session->memory_lock); + } - mem_backend->mali_allocation = mali_allocation; - mem_backend->type = mali_allocation->type; + if (mali_allocation->psize == 0) { + mem_backend->os_mem.count = 0; + INIT_LIST_HEAD(&mem_backend->os_mem.pages); + goto done; + } - if (mem_backend->type == MALI_MEM_OS) { - retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size); - } else if (mem_backend->type == MALI_MEM_BLOCK) { - /* try to allocated from BLOCK memory first, then try OS memory if failed.*/ - if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) { - retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size); - mem_backend->type = MALI_MEM_OS; - mali_allocation->type = MALI_MEM_OS; + if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) { + mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND; + mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED; + /* init for defer bind backend*/ + mem_backend->os_mem.count = 0; + INIT_LIST_HEAD(&mem_backend->os_mem.pages); + + goto done; + } + + if (likely(mali_allocation->psize > 0)) { + + if (MALI_MEM_SECURE == mem_backend->type) { +#if defined(CONFIG_DMA_SHARED_BUFFER) + ret = mali_mem_secure_attach_dma_buf(&mem_backend->secure_mem, mem_backend->size, args->secure_shared_fd); + if (_MALI_OSK_ERR_OK != ret) { + MALI_DEBUG_PRINT(1, 
("Failed to attach dma buf for secure memory! \n")); + goto failed_alloc_pages; } +#else + ret = _MALI_OSK_ERR_UNSUPPORTED; + MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory! \n")); + goto failed_alloc_pages; +#endif } else { - /* ONLY support mem_os type */ - MALI_DEBUG_ASSERT(0); - } - if (retval) { - ret = _MALI_OSK_ERR_NOMEM; - MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n")); - goto failed_alloc_pages; + /** + *allocate physical memory + */ + if (mem_backend->type == MALI_MEM_OS) { + retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size); + } else if (mem_backend->type == MALI_MEM_BLOCK) { + /* try to allocated from BLOCK memory first, then try OS memory if failed.*/ + if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) { + retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size); + mem_backend->type = MALI_MEM_OS; + mali_allocation->type = MALI_MEM_OS; + } + } else if (MALI_MEM_SWAP == mem_backend->type) { + retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx); + } else { + /* ONLY support mem_os type */ + MALI_DEBUG_ASSERT(0); + } + + if (retval) { + ret = _MALI_OSK_ERR_NOMEM; + MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n")); + goto failed_alloc_pages; + } } } /** *map to GPU side */ - mali_allocation->mali_mapping.addr = args->gpu_vaddr; - - /* set gpu mmu propery */ - _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags); - if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) { _mali_osk_mutex_wait(session->memory_lock); /* Map on Mali */ - ret = mali_mem_mali_map_prepare(mali_allocation); - if (0 != ret) { - MALI_DEBUG_PRINT(1, (" prepare map fail! 
\n")); - goto failed_gpu_map; - } - /* only support os memory type now */ + if (mem_backend->type == MALI_MEM_OS) { - mali_mem_os_mali_map(mem_backend, args->gpu_vaddr, - mali_allocation->mali_mapping.properties); + ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0, + mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties); + } else if (mem_backend->type == MALI_MEM_BLOCK) { mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr, mali_allocation->mali_mapping.properties); - } else { - /* Not support yet */ + } else if (mem_backend->type == MALI_MEM_SWAP) { + ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr, + mali_allocation->mali_mapping.properties); + } else if (mem_backend->type == MALI_MEM_SECURE) { +#if defined(CONFIG_DMA_SHARED_BUFFER) + ret = mali_mem_secure_mali_map(&mem_backend->secure_mem, session, args->gpu_vaddr, mali_allocation->mali_mapping.properties); +#endif + } else { /* unsupport type */ MALI_DEBUG_ASSERT(0); } - session->mali_mem_array[mem_backend->type] += mem_backend->size; - if (session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK] > session->max_mali_mem_allocated) { - session->max_mali_mem_allocated = session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK]; - } + _mali_osk_mutex_signal(session->memory_lock); } +done: + if (MALI_MEM_OS == mem_backend->type) { + atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages); + } else if (MALI_MEM_BLOCK == mem_backend->type) { + atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages); + } else if (MALI_MEM_SECURE == mem_backend->type) { + atomic_add(mem_backend->secure_mem.count, &session->mali_mem_allocated_pages); + } else { + MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type); + atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages); + atomic_add(mem_backend->swap_mem.count, 
&session->mali_mem_array[mem_backend->type]); + } + if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) { + session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } return _MALI_OSK_ERR_OK; -failed_gpu_map: - _mali_osk_mutex_signal(session->memory_lock); - if (mem_backend->type == MALI_MEM_OS) { - mali_mem_os_free(&mem_backend->os_mem); - } else { - mali_mem_block_free(&mem_backend->block_mem); - } failed_alloc_pages: + mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags); +failed_prepare_map: mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle); failed_alloc_backend: @@ -341,13 +609,16 @@ _mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args) /* find mali allocation structure by vaddress*/ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, vaddr, 0); - + if (NULL == mali_vma_node) { + MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_free: invalid addr: 0x%x\n", vaddr)); + return _MALI_OSK_ERR_INVALID_ARGS; + } MALI_DEBUG_ASSERT(NULL != mali_vma_node); mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); if (mali_alloc) /* check ref_count */ - mali_allocation_unref(&mali_alloc); + args->free_pages_nr = mali_allocation_unref(&mali_alloc); return _MALI_OSK_ERR_OK; } @@ -406,8 +677,8 @@ _mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args) #if defined(CONFIG_MALI400_UMP) mali_allocation->type = MALI_MEM_UMP; mem_backend->type = MALI_MEM_UMP; - ret = mali_memory_bind_ump_buf(mali_allocation, mem_backend, - args->mem_union.bind_ump.secure_id, args->mem_union.bind_ump.flags); + ret = mali_mem_bind_ump_buf(mali_allocation, mem_backend, + args->mem_union.bind_ump.secure_id, args->mem_union.bind_ump.flags); if (_MALI_OSK_ERR_OK != ret) { MALI_DEBUG_PRINT(1, ("Bind ump buf failed\n")); goto 
Failed_bind_backend; @@ -421,8 +692,8 @@ _mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args) #if defined(CONFIG_DMA_SHARED_BUFFER) mali_allocation->type = MALI_MEM_DMA_BUF; mem_backend->type = MALI_MEM_DMA_BUF; - ret = mali_memory_bind_dma_buf(mali_allocation, mem_backend, - args->mem_union.bind_dma_buf.mem_fd, args->mem_union.bind_dma_buf.flags); + ret = mali_mem_bind_dma_buf(mali_allocation, mem_backend, + args->mem_union.bind_dma_buf.mem_fd, args->mem_union.bind_dma_buf.flags); if (_MALI_OSK_ERR_OK != ret) { MALI_DEBUG_PRINT(1, ("Bind dma buf failed\n")); goto Failed_bind_backend; @@ -434,14 +705,15 @@ _mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args) break; case _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY: /* not allowed */ - MALI_DEBUG_ASSERT(0); + MALI_DEBUG_PRINT_ERROR(("Mali internal memory type not supported !\n")); + goto Failed_bind_backend; break; case _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY: mali_allocation->type = MALI_MEM_EXTERNAL; mem_backend->type = MALI_MEM_EXTERNAL; - ret = mali_memory_bind_ext_mem(mali_allocation, mem_backend, args->mem_union.bind_ext_memory.phys_addr, - args->mem_union.bind_ext_memory.flags); + ret = mali_mem_bind_ext_buf(mali_allocation, mem_backend, args->mem_union.bind_ext_memory.phys_addr, + args->mem_union.bind_ext_memory.flags); if (_MALI_OSK_ERR_OK != ret) { MALI_DEBUG_PRINT(1, ("Bind external buf failed\n")); goto Failed_bind_backend; @@ -450,16 +722,19 @@ _mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args) case _MALI_MEMORY_BIND_BACKEND_EXT_COW: /* not allowed */ - MALI_DEBUG_ASSERT(0); + MALI_DEBUG_PRINT_ERROR(("External cow memory type not supported !\n")); + goto Failed_bind_backend; break; default: - MALI_DEBUG_ASSERT(0); + MALI_DEBUG_PRINT_ERROR(("Invalid memory type not supported !\n")); + goto Failed_bind_backend; break; } + MALI_DEBUG_ASSERT(0 == mem_backend->size % MALI_MMU_PAGE_SIZE); + atomic_add(mem_backend->size / MALI_MMU_PAGE_SIZE, 
&session->mali_mem_array[mem_backend->type]); return _MALI_OSK_ERR_OK; - Failed_bind_backend: mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle); @@ -493,8 +768,6 @@ _mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args) mali_allocation = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); } else { MALI_DEBUG_ASSERT(NULL != mali_vma_node); - /* Not support yet */ - MALI_DEBUG_ASSERT(0); return _MALI_OSK_ERR_INVALID_ARGS; } @@ -504,8 +777,6 @@ _mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args) return _MALI_OSK_ERR_OK; } - - /* * Function _mali_ukk_mem_cow -- COW for an allocation * This function allocate new pages for a range (range, range+size) of allocation @@ -514,30 +785,209 @@ _mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args) _mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args) { _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT; + mali_mem_backend *target_backend = NULL; + mali_mem_backend *mem_backend = NULL; + struct mali_vma_node *mali_vma_node = NULL; + mali_mem_allocation *mali_allocation = NULL; + + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + /* Get the target backend for cow */ + target_backend = mali_mem_backend_struct_search(session, args->target_handle); - /* create new alloction if needed */ + if (NULL == target_backend || 0 == target_backend->size) { + MALI_DEBUG_ASSERT_POINTER(target_backend); + MALI_DEBUG_ASSERT(0 != target_backend->size); + return ret; + } + + /*Cow not support resized mem */ + MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE != (MALI_MEM_FLAG_CAN_RESIZE & target_backend->mali_allocation->flags)); + + /* Check if the new mali address is allocated */ + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->vaddr, 0); + + if (unlikely(mali_vma_node)) { + MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! 
\n")); + return ret; + } + + /* create new alloction for COW*/ + mali_allocation = mali_mem_allocation_struct_create(session); + if (mali_allocation == NULL) { + MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to create allocation struct!\n")); + return _MALI_OSK_ERR_NOMEM; + } + mali_allocation->psize = args->target_size; + mali_allocation->vsize = args->target_size; + mali_allocation->type = MALI_MEM_COW; + + /*add allocation node to RB tree for index*/ + mali_allocation->mali_vma_node.vm_node.start = args->vaddr; + mali_allocation->mali_vma_node.vm_node.size = mali_allocation->vsize; + mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node); - /* Get the target allocation and it's backend*/ + /* create new backend for COW memory */ + mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize); + if (mali_allocation->backend_handle < 0) { + ret = _MALI_OSK_ERR_NOMEM; + MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n")); + goto failed_alloc_backend; + } + mem_backend->mali_allocation = mali_allocation; + mem_backend->type = mali_allocation->type; + + if (target_backend->type == MALI_MEM_SWAP || + (MALI_MEM_COW == target_backend->type && (MALI_MEM_BACKEND_FLAG_SWAP_COWED & target_backend->flags))) { + mem_backend->flags |= MALI_MEM_BACKEND_FLAG_SWAP_COWED; + /** + * CoWed swap backends couldn't be mapped as non-linear vma, because if one + * vma is set with flag VM_NONLINEAR, the vma->vm_private_data will be used by kernel, + * while in mali driver, we use this variable to store the pointer of mali_allocation, so there + * is a conflict. 
+ * To resolve this problem, we have to do some fake things, we reserved about 64MB + * space from index 0, there isn't really page's index will be set from 0 to (64MB>>PAGE_SHIFT_NUM), + * and all of CoWed swap memory backends' start_idx will be assigned with 0, and these + * backends will be mapped as linear and will add to priority tree of global swap file, while + * these vmas will never be found by using normal page->index, these pages in those vma + * also couldn't be swapped out. + */ + mem_backend->start_idx = 0; + } - /* allocate new pages from os mem for modified range */ + /* Add the target backend's cow count, also allocate new pages for COW backend from os mem + *for a modified range and keep the page which not in the modified range and Add ref to it + */ + MALI_DEBUG_PRINT(3, ("Cow mapping: target_addr: 0x%x; cow_addr: 0x%x, size: %u\n", target_backend->mali_allocation->mali_vma_node.vm_node.start, + mali_allocation->mali_vma_node.vm_node.start, mali_allocation->mali_vma_node.vm_node.size)); + ret = mali_memory_do_cow(target_backend, args->target_offset, args->target_size, mem_backend, args->range_start, args->range_size); + if (_MALI_OSK_ERR_OK != ret) { + MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to cow!\n")); + goto failed_do_cow; + } - /* fill the COW backend, all pages for this allocation - * including the new page for modified range and pages not modified in old allocation. - * Do Add ref to pages from target allocation + /** + *map to GPU side */ + mali_allocation->mali_mapping.addr = args->vaddr; + /* set gpu mmu propery */ + _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags); + + _mali_osk_mutex_wait(session->memory_lock); + /* Map on Mali */ + ret = mali_mem_mali_map_prepare(mali_allocation); + if (0 != ret) { + MALI_DEBUG_PRINT(1, (" prepare map fail! 
\n")); + goto failed_gpu_map; + } + + if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) { + mali_mem_cow_mali_map(mem_backend, 0, mem_backend->size); + } + + _mali_osk_mutex_signal(session->memory_lock); + + mutex_lock(&target_backend->mutex); + target_backend->flags |= MALI_MEM_BACKEND_FLAG_COWED; + mutex_unlock(&target_backend->mutex); + + atomic_add(args->range_size / MALI_MMU_PAGE_SIZE, &session->mali_mem_allocated_pages); + if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) { + session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } + return _MALI_OSK_ERR_OK; +failed_gpu_map: + _mali_osk_mutex_signal(session->memory_lock); + mali_mem_cow_release(mem_backend, MALI_FALSE); + mem_backend->cow_mem.count = 0; +failed_do_cow: + mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle); +failed_alloc_backend: + mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node); + mali_mem_allocation_struct_destory(mali_allocation); - /* map it to GPU side */ return ret; } -/** -* attach a backend to an exist mali allocation -*/ +_mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args) +{ + _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT; + mali_mem_backend *mem_backend = NULL; + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_cow_modify_range called! \n")); + /* Get the backend that need to be modified. 
*/ + mem_backend = mali_mem_backend_struct_search(session, args->vaddr); -/** -* deattach a backend from an exist mali allocation -*/ + if (NULL == mem_backend || 0 == mem_backend->size) { + MALI_DEBUG_ASSERT_POINTER(mem_backend); + MALI_DEBUG_ASSERT(0 != mem_backend->size); + return ret; + } + + MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_backend->type); + + ret = mali_memory_cow_modify_range(mem_backend, args->range_start, args->size); + args->change_pages_nr = mem_backend->cow_mem.change_pages_nr; + if (_MALI_OSK_ERR_OK != ret) + return ret; + _mali_osk_mutex_wait(session->memory_lock); + if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) { + mali_mem_cow_mali_map(mem_backend, args->range_start, args->size); + } + _mali_osk_mutex_signal(session->memory_lock); + + atomic_add(args->change_pages_nr, &session->mali_mem_allocated_pages); + if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) { + session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } + + return _MALI_OSK_ERR_OK; +} + + +_mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args) +{ + mali_mem_backend *mem_backend = NULL; + _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT; + + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n")); + MALI_DEBUG_ASSERT(0 == args->psize % MALI_MMU_PAGE_SIZE); + + /* Get the memory backend that need to be resize. 
*/ + mem_backend = mali_mem_backend_struct_search(session, args->vaddr); + + if (NULL == mem_backend) { + MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n")); + return ret; + } + + MALI_DEBUG_ASSERT(args->psize != mem_backend->size); + + ret = mali_mem_resize(session, mem_backend, args->psize); + + return ret; +} + +_mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args) +{ + args->memory_usage = _mali_ukk_report_memory_usage(); + if (0 != args->vaddr) { + mali_mem_backend *mem_backend = NULL; + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + /* Get the backend that need to be modified. */ + mem_backend = mali_mem_backend_struct_search(session, args->vaddr); + if (NULL == mem_backend) { + MALI_DEBUG_ASSERT_POINTER(mem_backend); + return _MALI_OSK_ERR_FAULT; + } + if (MALI_MEM_COW == mem_backend->type) + args->change_pages_nr = mem_backend->cow_mem.change_pages_nr; + } + return _MALI_OSK_ERR_OK; +} diff --git a/drivers/gpu/arm/mali/linux/mali_memory_manager.h b/drivers/gpu/arm/mali/linux/mali_memory_manager.h old mode 100644 new mode 100755 index e44c032c19573..0ced28026f3f0 --- a/drivers/gpu/arm/mali/linux/mali_memory_manager.h +++ b/drivers/gpu/arm/mali/linux/mali_memory_manager.h @@ -1,47 +1,51 @@ -/* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. - * +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * + * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ - -#ifndef __MALI_MEMORY_MANAGER_H__ -#define __MALI_MEMORY_MANAGER_H__ - -#include "mali_osk.h" -#include -#include -#include -#include -#include -#include "mali_memory_types.h" -#include "mali_memory_os_alloc.h" -#include "mali_uk_types.h" - -struct mali_allocation_manager { - rwlock_t vm_lock; - struct rb_root allocation_mgr_rb; - struct list_head head; - struct mutex list_mutex; - u32 mali_allocation_nr; -}; - -extern struct idr mali_backend_idr; -extern struct mutex mali_idr_mutex; - -int mali_memory_manager_init(struct mali_allocation_manager *mgr); -void mali_memory_manager_uninit(struct mali_allocation_manager *mgr); - -void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc); - -_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args); -_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args); -_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args); -_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args); - - -#endif - + */ + +#ifndef __MALI_MEMORY_MANAGER_H__ +#define __MALI_MEMORY_MANAGER_H__ + +#include "mali_osk.h" +#include +#include +#include +#include +#include +#include "mali_memory_types.h" +#include "mali_memory_os_alloc.h" +#include "mali_uk_types.h" + +struct mali_allocation_manager { + rwlock_t vm_lock; + struct rb_root allocation_mgr_rb; + struct list_head head; + struct mutex list_mutex; + u32 mali_allocation_num; +}; + +extern struct idr mali_backend_idr; +extern struct mutex mali_idr_mutex; + +int mali_memory_manager_init(struct mali_allocation_manager *mgr); +void mali_memory_manager_uninit(struct mali_allocation_manager *mgr); + +void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc); +_mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size); +mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address); +_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s 
*args); +_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args); +_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args); +_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args); +_mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args); +_mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args); +_mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args); +_mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args); + +#endif + diff --git a/drivers/gpu/arm/mali/linux/mali_memory_os_alloc.c b/drivers/gpu/arm/mali/linux/mali_memory_os_alloc.c index 106cb0bd1b40e..90bc2f6d83f53 100755 --- a/drivers/gpu/arm/mali/linux/mali_memory_os_alloc.c +++ b/drivers/gpu/arm/mali/linux/mali_memory_os_alloc.c @@ -46,21 +46,8 @@ static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct #endif #endif static void mali_mem_os_trim_pool(struct work_struct *work); -static void mali_mem_os_free_page(struct mali_page_node *m_page); - -static struct mali_mem_os_allocator { - spinlock_t pool_lock; - struct list_head pool_pages; - size_t pool_count; - - atomic_t allocated_pages; - size_t allocation_limit; - - struct shrinker shrinker; - struct delayed_work timed_shrinker; - struct workqueue_struct *wq; -} mali_mem_os_allocator = { +struct mali_mem_os_allocator mali_mem_os_allocator = { .pool_lock = __SPIN_LOCK_UNLOCKED(pool_lock), .pool_pages = LIST_HEAD_INIT(mali_mem_os_allocator.pool_pages), .pool_count = 0, @@ -84,36 +71,87 @@ static struct mali_mem_os_allocator { #endif }; -void mali_mem_os_free(mali_mem_os_mem *os_mem) +u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag) { LIST_HEAD(pages); - - atomic_sub(os_mem->count, &mali_mem_os_allocator.allocated_pages); + struct mali_page_node *m_page, *m_tmp; + u32 free_pages_nr = 0; + + if (MALI_TRUE == cow_flag) { + list_for_each_entry_safe(m_page, m_tmp, os_pages, list) { + 
/*only handle OS node here */ + if (m_page->type == MALI_PAGE_NODE_OS) { + if (1 == _mali_page_node_get_ref_count(m_page)) { + list_move(&m_page->list, &pages); + atomic_sub(1, &mali_mem_os_allocator.allocated_pages); + free_pages_nr ++; + } else { + _mali_page_node_unref(m_page); + m_page->page = NULL; + list_del(&m_page->list); + kfree(m_page); + } + } + } + } else { + list_cut_position(&pages, os_pages, os_pages->prev); + atomic_sub(pages_count, &mali_mem_os_allocator.allocated_pages); + free_pages_nr = pages_count; + } /* Put pages on pool. */ - list_cut_position(&pages, &os_mem->pages, os_mem->pages.prev); - spin_lock(&mali_mem_os_allocator.pool_lock); - list_splice(&pages, &mali_mem_os_allocator.pool_pages); - mali_mem_os_allocator.pool_count += os_mem->count; - + mali_mem_os_allocator.pool_count += free_pages_nr; spin_unlock(&mali_mem_os_allocator.pool_lock); if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) { MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count)); queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES); } + return free_pages_nr; } /** -* free memory without put it into page pool +* put page without put it into page pool +*/ +_mali_osk_errcode_t mali_mem_os_put_page(struct page *page) +{ + MALI_DEBUG_ASSERT_POINTER(page); + if (1 == page_count(page)) { + atomic_sub(1, &mali_mem_os_allocator.allocated_pages); + dma_unmap_page(&mali_platform_device->dev, page_private(page), + _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + ClearPagePrivate(page); + } + put_page(page); + return _MALI_OSK_ERR_OK; +} -void mali_mem_os_free_not_pooled(mali_mem_os_mem *os_mem) +_mali_osk_errcode_t mali_mem_os_resize_pages(mali_mem_os_mem *mem_from, mali_mem_os_mem *mem_to, u32 start_page, u32 page_count) { + struct mali_page_node *m_page, *m_tmp; + u32 i = 0; + MALI_DEBUG_ASSERT_POINTER(mem_from); + MALI_DEBUG_ASSERT_POINTER(mem_to); 
+ + if (mem_from->count < start_page + page_count) { + return _MALI_OSK_ERR_INVALID_ARGS; + } + + list_for_each_entry_safe(m_page, m_tmp, &mem_from->pages, list) { + if (i >= start_page && i < start_page + page_count) { + list_move_tail(&m_page->list, &mem_to->pages); + mem_from->count--; + mem_to->count++; + } + i++; + } + + return _MALI_OSK_ERR_OK; } -*/ + int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size) { @@ -162,18 +200,23 @@ int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size) /* Allocate new pages, if needed. */ for (i = 0; i < remaining; i++) { dma_addr_t dma_addr; - struct mali_page_node *new_page_node = NULL; gfp_t flags = __GFP_ZERO | __GFP_NORETRY | __GFP_NOWARN | __GFP_COLD; int err; #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE) flags |= GFP_HIGHUSER; #else - /* After 3.15.0 kernel use ZONE_DMA replace ZONE_DMA32 */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) +#ifdef CONFIG_ZONE_DMA32 flags |= GFP_DMA32; #else +#ifdef CONFIG_ZONE_DMA flags |= GFP_DMA; +#else + /* arm64 utgard only work on < 4G, but the kernel + * didn't provide method to allocte memory < 4G + */ + MALI_DEBUG_ASSERT(0); +#endif #endif #endif @@ -183,7 +226,7 @@ int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size) /* Calculate the number of pages actually allocated, and free them. 
*/ os_mem->count = (page_count - remaining) + i; atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages); - mali_mem_os_free(os_mem); + mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE); return -ENOMEM; } @@ -198,7 +241,7 @@ int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size) __free_page(new_page); os_mem->count = (page_count - remaining) + i; atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages); - mali_mem_os_free(os_mem); + mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE); return -EFAULT; } @@ -206,8 +249,8 @@ int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size) SetPagePrivate(new_page); set_page_private(new_page, dma_addr); - new_page_node = kmalloc(sizeof(mali_page_node), GFP_KERNEL); - if (unlikely(NULL == new_page_node)) { + m_page = _mali_page_node_allocate(MALI_PAGE_NODE_OS); + if (unlikely(NULL == m_page)) { MALI_PRINT_ERROR(("OS Mem: Can't allocate mali_page node! \n")); dma_unmap_page(&mali_platform_device->dev, page_private(new_page), _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); @@ -215,13 +258,12 @@ int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size) __free_page(new_page); os_mem->count = (page_count - remaining) + i; atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages); - mali_mem_os_free(os_mem); + mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE); return -EFAULT; } - new_page_node->page = new_page; - INIT_LIST_HEAD(&new_page_node->list); + m_page->page = new_page; - list_add_tail(&new_page_node->list, &os_mem->pages); + list_add_tail(&m_page->list, &os_mem->pages); } atomic_add(page_count, &mali_mem_os_allocator.allocated_pages); @@ -235,38 +277,66 @@ int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size) } -void mali_mem_os_mali_map(mali_mem_backend *mem_bkend, u32 vaddr, u32 props) +_mali_osk_errcode_t mali_mem_os_mali_map(mali_mem_os_mem *os_mem, struct mali_session_data *session, u32 vaddr, u32 start_page, u32 mapping_pgae_num, u32 props) { - 
struct mali_session_data *session; - struct mali_page_directory *pagedir; + struct mali_page_directory *pagedir = session->page_directory; struct mali_page_node *m_page; - u32 virt = vaddr; + u32 virt; u32 prop = props; - MALI_DEBUG_ASSERT_POINTER(mem_bkend); - MALI_DEBUG_ASSERT_POINTER(mem_bkend->mali_allocation); - - session = mem_bkend->mali_allocation->session; MALI_DEBUG_ASSERT_POINTER(session); - pagedir = session->page_directory; + MALI_DEBUG_ASSERT_POINTER(os_mem); + + MALI_DEBUG_ASSERT(start_page <= os_mem->count); + MALI_DEBUG_ASSERT((start_page + mapping_pgae_num) <= os_mem->count); + + if ((start_page + mapping_pgae_num) == os_mem->count) { - list_for_each_entry(m_page, &mem_bkend->os_mem.pages, list) { - dma_addr_t phys = page_private(m_page->page); + virt = vaddr + MALI_MMU_PAGE_SIZE * (start_page + mapping_pgae_num); + list_for_each_entry_reverse(m_page, &os_mem->pages, list) { + + virt -= MALI_MMU_PAGE_SIZE; + if (mapping_pgae_num > 0) { + dma_addr_t phys = page_private(m_page->page); #if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) - /* Verify that the "physical" address is 32-bit and - * usable for Mali, when on a system with bus addresses - * wider than 32-bit. */ - MALI_DEBUG_ASSERT(0 == (phys >> 32)); + /* Verify that the "physical" address is 32-bit and + * usable for Mali, when on a system with bus addresses + * wider than 32-bit. 
*/ + MALI_DEBUG_ASSERT(0 == (phys >> 32)); #endif + mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop); + } else { + break; + } + mapping_pgae_num--; + } + + } else { + u32 i = 0; + virt = vaddr; + list_for_each_entry(m_page, &os_mem->pages, list) { + + if (i >= start_page) { + dma_addr_t phys = page_private(m_page->page); - mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop); - virt += MALI_MMU_PAGE_SIZE; +#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) + /* Verify that the "physical" address is 32-bit and + * usable for Mali, when on a system with bus addresses + * wider than 32-bit. */ + MALI_DEBUG_ASSERT(0 == (phys >> 32)); +#endif + mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop); + } + i++; + virt += MALI_MMU_PAGE_SIZE; + } } + return _MALI_OSK_ERR_OK; } -static void mali_mem_os_mali_unmap(mali_mem_allocation *alloc) +void mali_mem_os_mali_unmap(mali_mem_allocation *alloc) { struct mali_session_data *session; MALI_DEBUG_ASSERT_POINTER(alloc); @@ -276,16 +346,17 @@ static void mali_mem_os_mali_unmap(mali_mem_allocation *alloc) mali_session_memory_lock(session); mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start, alloc->flags); - session->mali_mem_array[alloc->type] -= alloc->psize; mali_session_memory_unlock(session); } -int mali_mem_os_cpu_map(mali_mem_os_mem *os_mem, struct vm_area_struct *vma) +int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma) { + mali_mem_os_mem *os_mem = &mem_bkend->os_mem; struct mali_page_node *m_page; struct page *page; int ret; unsigned long addr = vma->vm_start; + MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type); list_for_each_entry(m_page, &os_mem->pages, list) { /* We should use vm_insert_page, but it does a dcache @@ -304,21 +375,110 @@ int mali_mem_os_cpu_map(mali_mem_os_mem *os_mem, struct vm_area_struct *vma) return 0; } -void mali_mem_os_release(mali_mem_backend 
*mem_bkend) +_mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bkend, struct vm_area_struct *vma, unsigned long start_vaddr, u32 mappig_size) +{ + mali_mem_os_mem *os_mem = &mem_bkend->os_mem; + struct mali_page_node *m_page; + int ret; + int offset; + int mapping_page_num; + int count ; + + unsigned long vstart = vma->vm_start; + count = 0; + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS); + MALI_DEBUG_ASSERT(0 == start_vaddr % _MALI_OSK_MALI_PAGE_SIZE); + MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE); + offset = (start_vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE; + MALI_DEBUG_ASSERT(offset <= os_mem->count); + mapping_page_num = mappig_size / _MALI_OSK_MALI_PAGE_SIZE; + MALI_DEBUG_ASSERT((offset + mapping_page_num) <= os_mem->count); + + if ((offset + mapping_page_num) == os_mem->count) { + + unsigned long vm_end = start_vaddr + mappig_size; + + list_for_each_entry_reverse(m_page, &os_mem->pages, list) { + + vm_end -= _MALI_OSK_MALI_PAGE_SIZE; + if (mapping_page_num > 0) { + ret = vm_insert_pfn(vma, vm_end, page_to_pfn(m_page->page)); + + if (unlikely(0 != ret)) { + /*will return -EBUSY If the page has already been mapped into table, but it's OK*/ + if (-EBUSY == ret) { + break; + } else { + MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, offset is %d,page_count is %d\n", + ret, offset + mapping_page_num, os_mem->count)); + } + return _MALI_OSK_ERR_FAULT; + } + } else { + break; + } + mapping_page_num--; + + } + } else { + + list_for_each_entry(m_page, &os_mem->pages, list) { + if (count >= offset) { + + ret = vm_insert_pfn(vma, vstart, page_to_pfn(m_page->page)); + + if (unlikely(0 != ret)) { + /*will return -EBUSY If the page has already been mapped into table, but it's OK*/ + if (-EBUSY == ret) { + break; + } else { + MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, count is %d, offset is %d,page_count is %d\n", + ret, count, offset, os_mem->count)); + } 
+ return _MALI_OSK_ERR_FAULT; + } + } + count++; + vstart += _MALI_OSK_MALI_PAGE_SIZE; + } + } + return _MALI_OSK_ERR_OK; +} + +u32 mali_mem_os_release(mali_mem_backend *mem_bkend) { mali_mem_allocation *alloc; + struct mali_session_data *session; + u32 free_pages_nr = 0; MALI_DEBUG_ASSERT_POINTER(mem_bkend); MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type); alloc = mem_bkend->mali_allocation; MALI_DEBUG_ASSERT_POINTER(alloc); + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + /* Unmap the memory from the mali virtual address space. */ mali_mem_os_mali_unmap(alloc); - + mutex_lock(&mem_bkend->mutex); /* Free pages */ - mali_mem_os_free(&mem_bkend->os_mem); + if (MALI_MEM_BACKEND_FLAG_COWED & mem_bkend->flags) { + /* Lock to avoid the free race condition for the cow shared memory page node. */ + _mali_osk_mutex_wait(session->cow_lock); + free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_TRUE); + _mali_osk_mutex_signal(session->cow_lock); + } else { + free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_FALSE); + } + mutex_unlock(&mem_bkend->mutex); + + MALI_DEBUG_PRINT(4, ("OS Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->os_mem.count * _MALI_OSK_MALI_PAGE_SIZE, + free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE)); + + mem_bkend->os_mem.count = 0; + return free_pages_nr; } @@ -401,17 +561,19 @@ void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt) } } -static void mali_mem_os_free_page(struct mali_page_node *m_page) +void mali_mem_os_free_page_node(struct mali_page_node *m_page) { struct page *page = m_page->page; - BUG_ON(page_count(page) != 1); - - dma_unmap_page(&mali_platform_device->dev, page_private(page), - _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); - - ClearPagePrivate(page); + MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_OS); + if (1 == page_count(page)) { + dma_unmap_page(&mali_platform_device->dev, page_private(page), + 
_MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + ClearPagePrivate(page); + } __free_page(page); + m_page->page = NULL; + list_del(&m_page->list); kfree(m_page); } @@ -530,7 +692,7 @@ static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags); list_for_each_entry_safe(m_page, m_tmp, &pages, list) { - mali_mem_os_free_page(m_page); + mali_mem_os_free_page_node(m_page); } if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) { @@ -576,7 +738,7 @@ static void mali_mem_os_trim_pool(struct work_struct *data) spin_unlock(&mali_mem_os_allocator.pool_lock); list_for_each_entry_safe(m_page, m_tmp, &pages, list) { - mali_mem_os_free_page(m_page); + mali_mem_os_free_page_node(m_page); } /* Release some pages from page table page pool */ @@ -617,7 +779,7 @@ void mali_mem_os_term(void) spin_lock(&mali_mem_os_allocator.pool_lock); list_for_each_entry_safe(m_page, m_tmp, &mali_mem_os_allocator.pool_pages, list) { - mali_mem_os_free_page(m_page); + mali_mem_os_free_page_node(m_page); --mali_mem_os_allocator.pool_count; } diff --git a/drivers/gpu/arm/mali/linux/mali_memory_os_alloc.h b/drivers/gpu/arm/mali/linux/mali_memory_os_alloc.h index 572b06742988e..b92fffe0dfecb 100755 --- a/drivers/gpu/arm/mali/linux/mali_memory_os_alloc.h +++ b/drivers/gpu/arm/mali/linux/mali_memory_os_alloc.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -14,26 +14,41 @@ #include "mali_osk.h" #include "mali_memory_types.h" + /** @brief Release Mali OS memory * * The session memory_lock must be held when calling this function. 
* * @param mem_bkend Pointer to the mali_mem_backend to release */ -void mali_mem_os_release(mali_mem_backend *mem_bkend); +u32 mali_mem_os_release(mali_mem_backend *mem_bkend); _mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping); void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt); _mali_osk_errcode_t mali_mem_os_init(void); + void mali_mem_os_term(void); + u32 mali_mem_os_stat(void); +void mali_mem_os_free_page_node(struct mali_page_node *m_page); + int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size); -void mali_mem_os_free(mali_mem_os_mem *os_mem); -void mali_mem_os_mali_map(mali_mem_backend *mem_bkend, u32 vaddr, u32 props); -int mali_mem_os_cpu_map(mali_mem_os_mem *os_mem, struct vm_area_struct *vma); +u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag); + +_mali_osk_errcode_t mali_mem_os_put_page(struct page *page); + +_mali_osk_errcode_t mali_mem_os_resize_pages(mali_mem_os_mem *mem_from, mali_mem_os_mem *mem_to, u32 start_page, u32 page_count); + +_mali_osk_errcode_t mali_mem_os_mali_map(mali_mem_os_mem *os_mem, struct mali_session_data *session, u32 vaddr, u32 start_page, u32 mapping_pgae_num, u32 props); + +void mali_mem_os_mali_unmap(mali_mem_allocation *alloc); + +int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma); + +_mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bkend, struct vm_area_struct *vma, unsigned long start_vaddr, u32 mappig_size); #endif /* __MALI_MEMORY_OS_ALLOC_H__ */ diff --git a/drivers/gpu/arm/mali/linux/mali_memory_secure.c b/drivers/gpu/arm/mali/linux/mali_memory_secure.c new file mode 100755 index 0000000000000..7856ae6c74cef --- /dev/null +++ b/drivers/gpu/arm/mali/linux/mali_memory_secure.c @@ -0,0 +1,169 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_kernel_common.h" +#include "mali_memory.h" +#include "mali_memory_secure.h" +#include "mali_osk.h" +#include +#include +#include + +_mali_osk_errcode_t mali_mem_secure_attach_dma_buf(mali_mem_secure *secure_mem, u32 size, int mem_fd) +{ + struct dma_buf *buf; + MALI_DEBUG_ASSERT_POINTER(secure_mem); + + /* get dma buffer */ + buf = dma_buf_get(mem_fd); + if (IS_ERR_OR_NULL(buf)) { + MALI_DEBUG_PRINT_ERROR(("Failed to get dma buf!\n")); + return _MALI_OSK_ERR_FAULT; + } + + if (size != buf->size) { + MALI_DEBUG_PRINT_ERROR(("The secure mem size not match to the dma buf size!\n")); + goto failed_alloc_mem; + } + + secure_mem->buf = buf; + secure_mem->attachment = dma_buf_attach(secure_mem->buf, &mali_platform_device->dev); + if (NULL == secure_mem->attachment) { + MALI_DEBUG_PRINT_ERROR(("Failed to get dma buf attachment!\n")); + goto failed_dma_attach; + } + + secure_mem->sgt = dma_buf_map_attachment(secure_mem->attachment, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(secure_mem->sgt)) { + MALI_DEBUG_PRINT_ERROR(("Failed to map dma buf attachment\n")); + goto failed_dma_map; + } + + secure_mem->count = size / MALI_MMU_PAGE_SIZE; + + return _MALI_OSK_ERR_OK; + +failed_dma_map: + dma_buf_detach(secure_mem->buf, secure_mem->attachment); +failed_dma_attach: +failed_alloc_mem: + dma_buf_put(buf); + return _MALI_OSK_ERR_FAULT; +} + +_mali_osk_errcode_t mali_mem_secure_mali_map(mali_mem_secure *secure_mem, struct mali_session_data *session, u32 vaddr, u32 props) +{ + struct mali_page_directory *pagedir; + struct scatterlist 
*sg; + u32 virt = vaddr; + u32 prop = props; + int i; + + MALI_DEBUG_ASSERT_POINTER(secure_mem); + MALI_DEBUG_ASSERT_POINTER(secure_mem->sgt); + MALI_DEBUG_ASSERT_POINTER(session); + + pagedir = session->page_directory; + + for_each_sg(secure_mem->sgt->sgl, sg, secure_mem->sgt->nents, i) { + u32 size = sg_dma_len(sg); + dma_addr_t phys = sg_dma_address(sg); + + /* sg must be page aligned. */ + MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE); + MALI_DEBUG_ASSERT(0 == (phys & ~(uintptr_t)0xFFFFFFFF)); + + mali_mmu_pagedir_update(pagedir, virt, phys, size, prop); + + MALI_DEBUG_PRINT(3, ("The secure mem physical address: 0x%x gpu virtual address: 0x%x! \n", phys, virt)); + virt += size; + } + + return _MALI_OSK_ERR_OK; +} + +void mali_mem_secure_mali_unmap(mali_mem_allocation *alloc) +{ + struct mali_session_data *session; + MALI_DEBUG_ASSERT_POINTER(alloc); + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + + mali_session_memory_lock(session); + mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start, + alloc->flags); + mali_session_memory_unlock(session); +} + + +int mali_mem_secure_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma) +{ + + int ret = 0; + struct scatterlist *sg; + mali_mem_secure *secure_mem = &mem_bkend->secure_mem; + unsigned long addr = vma->vm_start; + int i; + + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_SECURE); + + for_each_sg(secure_mem->sgt->sgl, sg, secure_mem->sgt->nents, i) { + phys_addr_t phys; + dma_addr_t dev_addr; + u32 size, j; + dev_addr = sg_dma_address(sg); +#if defined(CONFIG_ARM64) ||LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) + phys = dma_to_phys(&mali_platform_device->dev, dev_addr); +#else + phys = page_to_phys(pfn_to_page(dma_to_pfn(&mali_platform_device->dev, dev_addr))); +#endif + size = sg_dma_len(sg); + MALI_DEBUG_ASSERT(0 == size % _MALI_OSK_MALI_PAGE_SIZE); + + for (j = 0; j < size / _MALI_OSK_MALI_PAGE_SIZE; j++) { + ret = vm_insert_pfn(vma, addr, 
PFN_DOWN(phys)); + + if (unlikely(0 != ret)) { + return -EFAULT; + } + addr += _MALI_OSK_MALI_PAGE_SIZE; + phys += _MALI_OSK_MALI_PAGE_SIZE; + + MALI_DEBUG_PRINT(3, ("The secure mem physical address: 0x%x , cpu virtual address: 0x%x! \n", phys, addr)); + } + } + return ret; +} + +u32 mali_mem_secure_release(mali_mem_backend *mem_bkend) +{ + struct mali_mem_secure *mem; + mali_mem_allocation *alloc = mem_bkend->mali_allocation; + u32 free_pages_nr = 0; + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_SECURE); + + mem = &mem_bkend->secure_mem; + MALI_DEBUG_ASSERT_POINTER(mem->attachment); + MALI_DEBUG_ASSERT_POINTER(mem->buf); + MALI_DEBUG_ASSERT_POINTER(mem->sgt); + /* Unmap the memory from the mali virtual address space. */ + mali_mem_secure_mali_unmap(alloc); + mutex_lock(&mem_bkend->mutex); + dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL); + dma_buf_detach(mem->buf, mem->attachment); + dma_buf_put(mem->buf); + mutex_unlock(&mem_bkend->mutex); + + free_pages_nr = mem->count; + + return free_pages_nr; +} + + diff --git a/drivers/gpu/arm/mali/linux/mali_memory_secure.h b/drivers/gpu/arm/mali/linux/mali_memory_secure.h new file mode 100755 index 0000000000000..64f40d4fab04c --- /dev/null +++ b/drivers/gpu/arm/mali/linux/mali_memory_secure.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2010, 2013, 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_MEMORY_SECURE_H__ +#define __MALI_MEMORY_SECURE_H__ + +#include "mali_session.h" +#include "mali_memory.h" +#include + +#include "mali_memory_types.h" + +_mali_osk_errcode_t mali_mem_secure_attach_dma_buf(mali_mem_secure *secure_mem, u32 size, int mem_fd); + +_mali_osk_errcode_t mali_mem_secure_mali_map(mali_mem_secure *secure_mem, struct mali_session_data *session, u32 vaddr, u32 props); + +void mali_mem_secure_mali_unmap(mali_mem_allocation *alloc); + +int mali_mem_secure_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma); + +u32 mali_mem_secure_release(mali_mem_backend *mem_bkend); + +#endif /* __MALI_MEMORY_SECURE_H__ */ diff --git a/drivers/gpu/arm/mali/linux/mali_memory_swap_alloc.c b/drivers/gpu/arm/mali/linux/mali_memory_swap_alloc.c new file mode 100755 index 0000000000000..47933283680cb --- /dev/null +++ b/drivers/gpu/arm/mali/linux/mali_memory_swap_alloc.c @@ -0,0 +1,942 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mali_osk.h" +#include "mali_osk_mali.h" +#include "mali_memory.h" +#include "mali_memory_manager.h" +#include "mali_memory_virtual.h" +#include "mali_memory_cow.h" +#include "mali_ukk.h" +#include "mali_kernel_utilization.h" +#include "mali_memory_swap_alloc.h" + + +static struct _mali_osk_bitmap idx_mgr; +static struct file *global_swap_file; +static struct address_space *global_swap_space; +static _mali_osk_wq_work_t *mali_mem_swap_out_workq = NULL; +static u32 mem_backend_swapped_pool_size; +#ifdef MALI_MEM_SWAP_TRACKING +static u32 mem_backend_swapped_unlock_size; +#endif +/* Lock order: mem_backend_swapped_pool_lock > each memory backend's mutex lock. + * This lock used to protect mem_backend_swapped_pool_size and mem_backend_swapped_pool. */ +static struct mutex mem_backend_swapped_pool_lock; +static struct list_head mem_backend_swapped_pool; + +extern struct mali_mem_os_allocator mali_mem_os_allocator; + +#define MALI_SWAP_LOW_MEM_DEFAULT_VALUE (60*1024*1024) +#define MALI_SWAP_INVALIDATE_MALI_ADDRESS (0) /* Used to mark the given memory cookie is invalidate. */ +#define MALI_SWAP_GLOBAL_SWAP_FILE_SIZE (0xFFFFFFFF) +#define MALI_SWAP_GLOBAL_SWAP_FILE_INDEX ((MALI_SWAP_GLOBAL_SWAP_FILE_SIZE) >> PAGE_CACHE_SHIFT) +#define MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE (1 << 15) /* Reserved for CoW nonlinear swap backend memory, the space size is 128MB. */ + +unsigned int mali_mem_swap_out_threshold_value = MALI_SWAP_LOW_MEM_DEFAULT_VALUE; + +/** + * We have two situations to do shrinking things, one is we met low GPU utilization which shows GPU needn't touch too + * swappable backends in short time, and the other one is we add new swappable backends, the total pool size exceed + * the threshold value of the swapped pool size. 
+ */ +typedef enum { + MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION = 100, + MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS = 257, +} _mali_mem_swap_pool_shrink_type_t; + +static void mali_mem_swap_swapped_bkend_pool_check_for_low_utilization(void *arg); + +_mali_osk_errcode_t mali_mem_swap_init(void) +{ + gfp_t flags = __GFP_NORETRY | __GFP_NOWARN; + + if (_MALI_OSK_ERR_OK != _mali_osk_bitmap_init(&idx_mgr, MALI_SWAP_GLOBAL_SWAP_FILE_INDEX, MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE)) { + return _MALI_OSK_ERR_NOMEM; + } + + global_swap_file = shmem_file_setup("mali_swap", MALI_SWAP_GLOBAL_SWAP_FILE_SIZE, VM_NORESERVE); + if (IS_ERR(global_swap_file)) { + _mali_osk_bitmap_term(&idx_mgr); + return _MALI_OSK_ERR_NOMEM; + } + + global_swap_space = global_swap_file->f_path.dentry->d_inode->i_mapping; + + mali_mem_swap_out_workq = _mali_osk_wq_create_work(mali_mem_swap_swapped_bkend_pool_check_for_low_utilization, NULL); + if (NULL == mali_mem_swap_out_workq) { + _mali_osk_bitmap_term(&idx_mgr); + fput(global_swap_file); + return _MALI_OSK_ERR_NOMEM; + } + +#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE) + flags |= GFP_HIGHUSER; +#else +#ifdef CONFIG_ZONE_DMA32 + flags |= GFP_DMA32; +#else +#ifdef CONFIG_ZONE_DMA + flags |= GFP_DMA; +#else + /* arm64 utgard only work on < 4G, but the kernel + * didn't provide method to allocte memory < 4G + */ + MALI_DEBUG_ASSERT(0); +#endif +#endif +#endif + + /* When we use shmem_read_mapping_page to allocate/swap-in, it will + * use these flags to allocate new page if need.*/ + mapping_set_gfp_mask(global_swap_space, flags); + + mem_backend_swapped_pool_size = 0; +#ifdef MALI_MEM_SWAP_TRACKING + mem_backend_swapped_unlock_size = 0; +#endif + mutex_init(&mem_backend_swapped_pool_lock); + INIT_LIST_HEAD(&mem_backend_swapped_pool); + + MALI_DEBUG_PRINT(2, ("Mali SWAP: Swap out threshold vaule is %uM\n", mali_mem_swap_out_threshold_value >> 20)); + + return _MALI_OSK_ERR_OK; +} + +void mali_mem_swap_term(void) +{ + 
_mali_osk_bitmap_term(&idx_mgr); + + fput(global_swap_file); + + _mali_osk_wq_delete_work(mali_mem_swap_out_workq); + + MALI_DEBUG_ASSERT(list_empty(&mem_backend_swapped_pool)); + MALI_DEBUG_ASSERT(0 == mem_backend_swapped_pool_size); + + return; +} + +struct file *mali_mem_swap_get_global_swap_file(void) +{ + return global_swap_file; +} + +/* Judge if swappable backend in swapped pool. */ +static mali_bool mali_memory_swap_backend_in_swapped_pool(mali_mem_backend *mem_bkend) +{ + MALI_DEBUG_ASSERT_POINTER(mem_bkend); + + return !list_empty(&mem_bkend->list); +} + +void mali_memory_swap_list_backend_delete(mali_mem_backend *mem_bkend) +{ + MALI_DEBUG_ASSERT_POINTER(mem_bkend); + + mutex_lock(&mem_backend_swapped_pool_lock); + mutex_lock(&mem_bkend->mutex); + + if (MALI_FALSE == mali_memory_swap_backend_in_swapped_pool(mem_bkend)) { + mutex_unlock(&mem_bkend->mutex); + mutex_unlock(&mem_backend_swapped_pool_lock); + return; + } + + MALI_DEBUG_ASSERT(!list_empty(&mem_bkend->list)); + + list_del_init(&mem_bkend->list); + + mutex_unlock(&mem_bkend->mutex); + + mem_backend_swapped_pool_size -= mem_bkend->size; + + mutex_unlock(&mem_backend_swapped_pool_lock); +} + +static void mali_mem_swap_out_page_node(mali_page_node *page_node) +{ + MALI_DEBUG_ASSERT(page_node); + + dma_unmap_page(&mali_platform_device->dev, page_node->swap_it->dma_addr, + _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + set_page_dirty(page_node->swap_it->page); + page_cache_release(page_node->swap_it->page); +} + +void mali_mem_swap_unlock_single_mem_backend(mali_mem_backend *mem_bkend) +{ + mali_page_node *m_page; + + MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_bkend->mutex)); + + if (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN)) { + return; + } + + mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN; + + list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) { + mali_mem_swap_out_page_node(m_page); + } + + return; +} + +static void 
mali_mem_swap_unlock_partial_locked_mem_backend(mali_mem_backend *mem_bkend, mali_page_node *page_node) +{ + mali_page_node *m_page; + + MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_bkend->mutex)); + + list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) { + if (m_page == page_node) { + break; + } + mali_mem_swap_out_page_node(m_page); + } +} + +static void mali_mem_swap_swapped_bkend_pool_shrink(_mali_mem_swap_pool_shrink_type_t shrink_type) +{ + mali_mem_backend *bkend, *tmp_bkend; + long system_free_size; + u32 last_gpu_utilization, gpu_utilization_threshold_value, temp_swap_out_threshold_value; + + MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_backend_swapped_pool_lock)); + + if (MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION == shrink_type) { + /** + * When we met that system memory is very low and Mali locked swappable memory size is less than + * threshold value, and at the same time, GPU load is very low and don't need high performance, + * at this condition, we can unlock more swap memory backend from swapped backends pool. + */ + gpu_utilization_threshold_value = MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION; + temp_swap_out_threshold_value = (mali_mem_swap_out_threshold_value >> 2); + } else { + /* When we add swappable memory backends to swapped pool, we need to think that we couldn't + * hold too much swappable backends in Mali driver, and also we need considering performance. + * So there is a balance for swapping out memory backend, we should follow the following conditions: + * 1. Total memory size in global mem backend swapped pool is more than the defined threshold value. + * 2. System level free memory size is less than the defined threshold value. + * 3. Please note that GPU utilization problem isn't considered in this condition. + */ + gpu_utilization_threshold_value = MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS; + temp_swap_out_threshold_value = mali_mem_swap_out_threshold_value; + } + + /* Get system free pages number. 
*/ + system_free_size = global_page_state(NR_FREE_PAGES) * PAGE_SIZE; + last_gpu_utilization = _mali_ukk_utilization_gp_pp(); + + if ((last_gpu_utilization < gpu_utilization_threshold_value) + && (system_free_size < mali_mem_swap_out_threshold_value) + && (mem_backend_swapped_pool_size > temp_swap_out_threshold_value)) { + list_for_each_entry_safe(bkend, tmp_bkend, &mem_backend_swapped_pool, list) { + if (mem_backend_swapped_pool_size <= temp_swap_out_threshold_value) { + break; + } + + mutex_lock(&bkend->mutex); + + /* check if backend is in use. */ + if (0 < bkend->using_count) { + mutex_unlock(&bkend->mutex); + continue; + } + + mali_mem_swap_unlock_single_mem_backend(bkend); + list_del_init(&bkend->list); + mem_backend_swapped_pool_size -= bkend->size; +#ifdef MALI_MEM_SWAP_TRACKING + mem_backend_swapped_unlock_size += bkend->size; +#endif + mutex_unlock(&bkend->mutex); + } + } + + return; +} + +static void mali_mem_swap_swapped_bkend_pool_check_for_low_utilization(void *arg) +{ + MALI_IGNORE(arg); + + mutex_lock(&mem_backend_swapped_pool_lock); + + mali_mem_swap_swapped_bkend_pool_shrink(MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION); + + mutex_unlock(&mem_backend_swapped_pool_lock); +} + +/** + * After PP job finished, we add all of swappable memory backend used by this PP + * job to the tail of the global swapped pool, and if the total size of swappable memory is more than threshold + * value, we also need to shrink the swapped pool start from the head of the list. 
+ */ +void mali_memory_swap_list_backend_add(mali_mem_backend *mem_bkend) +{ + mutex_lock(&mem_backend_swapped_pool_lock); + mutex_lock(&mem_bkend->mutex); + + if (mali_memory_swap_backend_in_swapped_pool(mem_bkend)) { + MALI_DEBUG_ASSERT(!list_empty(&mem_bkend->list)); + + list_del_init(&mem_bkend->list); + list_add_tail(&mem_bkend->list, &mem_backend_swapped_pool); + mutex_unlock(&mem_bkend->mutex); + mutex_unlock(&mem_backend_swapped_pool_lock); + return; + } + + list_add_tail(&mem_bkend->list, &mem_backend_swapped_pool); + + mutex_unlock(&mem_bkend->mutex); + mem_backend_swapped_pool_size += mem_bkend->size; + + mali_mem_swap_swapped_bkend_pool_shrink(MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS); + + mutex_unlock(&mem_backend_swapped_pool_lock); + return; +} + + +u32 mali_mem_swap_idx_alloc(void) +{ + return _mali_osk_bitmap_alloc(&idx_mgr); +} + +void mali_mem_swap_idx_free(u32 idx) +{ + _mali_osk_bitmap_free(&idx_mgr, idx); +} + +static u32 mali_mem_swap_idx_range_alloc(u32 count) +{ + u32 index; + + index = _mali_osk_bitmap_alloc_range(&idx_mgr, count); + + return index; +} + +static void mali_mem_swap_idx_range_free(u32 idx, int num) +{ + _mali_osk_bitmap_free_range(&idx_mgr, idx, num); +} + +struct mali_swap_item *mali_mem_swap_alloc_swap_item(void) +{ + mali_swap_item *swap_item; + + swap_item = kzalloc(sizeof(mali_swap_item), GFP_KERNEL); + + if (NULL == swap_item) { + return NULL; + } + + atomic_set(&swap_item->ref_count, 1); + swap_item->page = NULL; + atomic_add(1, &mali_mem_os_allocator.allocated_pages); + + return swap_item; +} + +void mali_mem_swap_free_swap_item(mali_swap_item *swap_item) +{ + struct inode *file_node; + long long start, end; + + /* If this swap item is shared, we just reduce the reference counter. 
*/ + if (0 == atomic_dec_return(&swap_item->ref_count)) { + file_node = global_swap_file->f_path.dentry->d_inode; + start = swap_item->idx; + start = start << 12; + end = start + PAGE_SIZE; + + shmem_truncate_range(file_node, start, (end - 1)); + + mali_mem_swap_idx_free(swap_item->idx); + + atomic_sub(1, &mali_mem_os_allocator.allocated_pages); + + kfree(swap_item); + } +} + +/* Used to allocate new swap item for new memory allocation and cow page for write. */ +struct mali_page_node *_mali_mem_swap_page_node_allocate(void) +{ + struct mali_page_node *m_page; + + m_page = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP); + + if (NULL == m_page) { + return NULL; + } + + m_page->swap_it = mali_mem_swap_alloc_swap_item(); + + if (NULL == m_page->swap_it) { + kfree(m_page); + return NULL; + } + + return m_page; +} + +_mali_osk_errcode_t _mali_mem_swap_put_page_node(struct mali_page_node *m_page) +{ + + mali_mem_swap_free_swap_item(m_page->swap_it); + + return _MALI_OSK_ERR_OK; +} + +void _mali_mem_swap_page_node_free(struct mali_page_node *m_page) +{ + _mali_mem_swap_put_page_node(m_page); + + kfree(m_page); + + return; +} + +u32 mali_mem_swap_free(mali_mem_swap *swap_mem) +{ + struct mali_page_node *m_page, *m_tmp; + u32 free_pages_nr = 0; + + MALI_DEBUG_ASSERT_POINTER(swap_mem); + + list_for_each_entry_safe(m_page, m_tmp, &swap_mem->pages, list) { + MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_SWAP); + + /* free the page node and release the swap item, if the ref count is 1, + * then need also free the swap item. 
*/ + list_del(&m_page->list); + if (1 == _mali_page_node_get_ref_count(m_page)) { + free_pages_nr++; + } + + _mali_mem_swap_page_node_free(m_page); + } + + return free_pages_nr; +} + +static u32 mali_mem_swap_cow_free(mali_mem_cow *cow_mem) +{ + struct mali_page_node *m_page, *m_tmp; + u32 free_pages_nr = 0; + + MALI_DEBUG_ASSERT_POINTER(cow_mem); + + list_for_each_entry_safe(m_page, m_tmp, &cow_mem->pages, list) { + MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_SWAP); + + /* free the page node and release the swap item, if the ref count is 1, + * then need also free the swap item. */ + list_del(&m_page->list); + if (1 == _mali_page_node_get_ref_count(m_page)) { + free_pages_nr++; + } + + _mali_mem_swap_page_node_free(m_page); + } + + return free_pages_nr; +} + +u32 mali_mem_swap_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped) +{ + mali_mem_allocation *alloc; + u32 free_pages_nr = 0; + + MALI_DEBUG_ASSERT_POINTER(mem_bkend); + alloc = mem_bkend->mali_allocation; + MALI_DEBUG_ASSERT_POINTER(alloc); + + if (is_mali_mapped) { + mali_mem_swap_mali_unmap(alloc); + } + + mali_memory_swap_list_backend_delete(mem_bkend); + + mutex_lock(&mem_bkend->mutex); + /* To make sure the given memory backend was unlocked from Mali side, + * and then free this memory block. 
*/ + mali_mem_swap_unlock_single_mem_backend(mem_bkend); + mutex_unlock(&mem_bkend->mutex); + + if (MALI_MEM_SWAP == mem_bkend->type) { + free_pages_nr = mali_mem_swap_free(&mem_bkend->swap_mem); + } else { + free_pages_nr = mali_mem_swap_cow_free(&mem_bkend->cow_mem); + } + + return free_pages_nr; +} + +mali_bool mali_mem_swap_in_page_node(struct mali_page_node *page_node) +{ + MALI_DEBUG_ASSERT(NULL != page_node); + + page_node->swap_it->page = shmem_read_mapping_page(global_swap_space, page_node->swap_it->idx); + + if (IS_ERR(page_node->swap_it->page)) { + MALI_DEBUG_PRINT_ERROR(("SWAP Mem: failed to swap in page with index: %d.\n", page_node->swap_it->idx)); + return MALI_FALSE; + } + + /* Ensure page is flushed from CPU caches. */ + page_node->swap_it->dma_addr = dma_map_page(&mali_platform_device->dev, page_node->swap_it->page, + 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + + return MALI_TRUE; +} + +int mali_mem_swap_alloc_pages(mali_mem_swap *swap_mem, u32 size, u32 *bkend_idx) +{ + size_t page_count = PAGE_ALIGN(size) / PAGE_SIZE; + struct mali_page_node *m_page; + long system_free_size; + u32 i, index; + mali_bool ret; + + MALI_DEBUG_ASSERT(NULL != swap_mem); + MALI_DEBUG_ASSERT(NULL != bkend_idx); + MALI_DEBUG_ASSERT(page_count <= MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE); + + if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) { + MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. 
Currently allocated: %lu, max limit %lu\n", + size, + atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE, + mali_mem_os_allocator.allocation_limit)); + return _MALI_OSK_ERR_NOMEM; + } + + INIT_LIST_HEAD(&swap_mem->pages); + swap_mem->count = page_count; + index = mali_mem_swap_idx_range_alloc(page_count); + + if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == index) { + MALI_PRINT_ERROR(("Mali Swap: Failed to allocate continuous index for swappable Mali memory.")); + return _MALI_OSK_ERR_FAULT; + } + + for (i = 0; i < page_count; i++) { + m_page = _mali_mem_swap_page_node_allocate(); + + if (NULL == m_page) { + MALI_DEBUG_PRINT_ERROR(("SWAP Mem: Failed to allocate mali page node.")); + swap_mem->count = i; + + mali_mem_swap_free(swap_mem); + mali_mem_swap_idx_range_free(index + i, page_count - i); + return _MALI_OSK_ERR_FAULT; + } + + m_page->swap_it->idx = index + i; + + ret = mali_mem_swap_in_page_node(m_page); + + if (MALI_FALSE == ret) { + MALI_DEBUG_PRINT_ERROR(("SWAP Mem: Allocate new page from SHMEM file failed.")); + _mali_mem_swap_page_node_free(m_page); + mali_mem_swap_idx_range_free(index + i + 1, page_count - i - 1); + + swap_mem->count = i; + mali_mem_swap_free(swap_mem); + return _MALI_OSK_ERR_NOMEM; + } + + list_add_tail(&m_page->list, &swap_mem->pages); + } + + system_free_size = global_page_state(NR_FREE_PAGES) * PAGE_SIZE; + + if ((system_free_size < mali_mem_swap_out_threshold_value) + && (mem_backend_swapped_pool_size > (mali_mem_swap_out_threshold_value >> 2)) + && mali_utilization_enabled()) { + _mali_osk_wq_schedule_work(mali_mem_swap_out_workq); + } + + *bkend_idx = index; + return 0; +} + +void mali_mem_swap_mali_unmap(mali_mem_allocation *alloc) +{ + struct mali_session_data *session; + + MALI_DEBUG_ASSERT_POINTER(alloc); + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + + mali_session_memory_lock(session); + mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start, + 
alloc->flags); + mali_session_memory_unlock(session); +} + + +/* Insert these pages from shmem to mali page table*/ +_mali_osk_errcode_t mali_mem_swap_mali_map(mali_mem_swap *swap_mem, struct mali_session_data *session, u32 vaddr, u32 props) +{ + struct mali_page_directory *pagedir = session->page_directory; + struct mali_page_node *m_page; + dma_addr_t phys; + u32 virt = vaddr; + u32 prop = props; + + list_for_each_entry(m_page, &swap_mem->pages, list) { + MALI_DEBUG_ASSERT(NULL != m_page->swap_it->page); + phys = m_page->swap_it->dma_addr; + + mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop); + virt += MALI_MMU_PAGE_SIZE; + } + + return _MALI_OSK_ERR_OK; +} + +int mali_mem_swap_in_pages(struct mali_pp_job *job) +{ + u32 num_memory_cookies; + struct mali_session_data *session; + struct mali_vma_node *mali_vma_node = NULL; + mali_mem_allocation *mali_alloc = NULL; + mali_mem_backend *mem_bkend = NULL; + struct mali_page_node *m_page; + mali_bool swap_in_success = MALI_TRUE; + int i; + + MALI_DEBUG_ASSERT_POINTER(job); + + num_memory_cookies = mali_pp_job_num_memory_cookies(job); + session = mali_pp_job_get_session(job); + + MALI_DEBUG_ASSERT_POINTER(session); + + for (i = 0; i < num_memory_cookies; i++) { + + u32 mali_addr = mali_pp_job_get_memory_cookie(job, i); + + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0); + if (NULL == mali_vma_node) { + job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS; + swap_in_success = MALI_FALSE; + MALI_PRINT_ERROR(("SWAP Mem: failed to find mali_vma_node through Mali address: 0x%08x.\n", mali_addr)); + continue; + } + + mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); + MALI_DEBUG_ASSERT(NULL != mali_alloc); + + if (MALI_MEM_SWAP != mali_alloc->type && + MALI_MEM_COW != mali_alloc->type) { + continue; + } + + /* Get backend memory & Map on GPU */ + mutex_lock(&mali_idr_mutex); + mem_bkend = idr_find(&mali_backend_idr, 
mali_alloc->backend_handle); + mutex_unlock(&mali_idr_mutex); + MALI_DEBUG_ASSERT(NULL != mem_bkend); + + /* We neednot hold backend's lock here, race safe.*/ + if ((MALI_MEM_COW == mem_bkend->type) && + (!(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) { + continue; + } + + mutex_lock(&mem_bkend->mutex); + + /* When swap_in_success is MALI_FALSE, it means this job has memory backend that could not be swapped in, + * and it will be aborted in mali scheduler, so here, we just mark those memory cookies which + * should not be swapped out when delete job to invalide */ + if (MALI_FALSE == swap_in_success) { + job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS; + mutex_unlock(&mem_bkend->mutex); + continue; + } + + /* Before swap in, checking if this memory backend has been swapped in by the latest flushed jobs. */ + ++mem_bkend->using_count; + + if (1 < mem_bkend->using_count) { + MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN != (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags)); + mutex_unlock(&mem_bkend->mutex); + continue; + } + + if (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN != (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags)) { + mutex_unlock(&mem_bkend->mutex); + continue; + } + + + list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) { + if (MALI_FALSE == mali_mem_swap_in_page_node(m_page)) { + /* Don't have enough memory to swap in page, so release pages have already been swapped + * in and then mark this pp job to be fail. 
*/ + mali_mem_swap_unlock_partial_locked_mem_backend(mem_bkend, m_page); + swap_in_success = MALI_FALSE; + break; + } + } + + if (swap_in_success) { +#ifdef MALI_MEM_SWAP_TRACKING + mem_backend_swapped_unlock_size -= mem_bkend->size; +#endif + _mali_osk_mutex_wait(session->memory_lock); + mali_mem_swap_mali_map(&mem_bkend->swap_mem, session, mali_alloc->mali_mapping.addr, mali_alloc->mali_mapping.properties); + _mali_osk_mutex_signal(session->memory_lock); + + /* Remove the unlock flag from mem backend flags, mark this backend has been swapped in. */ + mem_bkend->flags &= ~(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN); + mutex_unlock(&mem_bkend->mutex); + } else { + --mem_bkend->using_count; + /* Marking that this backend is not swapped in, need not to be processed anymore. */ + job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS; + mutex_unlock(&mem_bkend->mutex); + } + } + + job->swap_status = swap_in_success ? MALI_SWAP_IN_SUCC : MALI_SWAP_IN_FAIL; + + return _MALI_OSK_ERR_OK; +} + +int mali_mem_swap_out_pages(struct mali_pp_job *job) +{ + u32 num_memory_cookies; + struct mali_session_data *session; + struct mali_vma_node *mali_vma_node = NULL; + mali_mem_allocation *mali_alloc = NULL; + mali_mem_backend *mem_bkend = NULL; + int i; + + MALI_DEBUG_ASSERT_POINTER(job); + + num_memory_cookies = mali_pp_job_num_memory_cookies(job); + session = mali_pp_job_get_session(job); + + MALI_DEBUG_ASSERT_POINTER(session); + + + for (i = 0; i < num_memory_cookies; i++) { + u32 mali_addr = mali_pp_job_get_memory_cookie(job, i); + + if (MALI_SWAP_INVALIDATE_MALI_ADDRESS == mali_addr) { + continue; + } + + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0); + + if (NULL == mali_vma_node) { + MALI_PRINT_ERROR(("SWAP Mem: failed to find mali_vma_node through Mali address: 0x%08x.\n", mali_addr)); + continue; + } + + mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); + MALI_DEBUG_ASSERT(NULL != mali_alloc); + + if 
(MALI_MEM_SWAP != mali_alloc->type && + MALI_MEM_COW != mali_alloc->type) { + continue; + } + + mutex_lock(&mali_idr_mutex); + mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle); + mutex_unlock(&mali_idr_mutex); + MALI_DEBUG_ASSERT(NULL != mem_bkend); + + /* We neednot hold backend's lock here, race safe.*/ + if ((MALI_MEM_COW == mem_bkend->type) && + (!(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) { + continue; + } + + mutex_lock(&mem_bkend->mutex); + + MALI_DEBUG_ASSERT(0 < mem_bkend->using_count); + + /* Reducing the using_count of mem backend means less pp job are using this memory backend, + * if this count get to zero, it means no pp job is using it now, could put it to swap out list. */ + --mem_bkend->using_count; + + if (0 < mem_bkend->using_count) { + mutex_unlock(&mem_bkend->mutex); + continue; + } + mutex_unlock(&mem_bkend->mutex); + + mali_memory_swap_list_backend_add(mem_bkend); + } + + return _MALI_OSK_ERR_OK; +} + +int mali_mem_swap_allocate_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep) +{ + struct mali_page_node *m_page, *found_node = NULL; + struct page *found_page; + mali_mem_swap *swap = NULL; + mali_mem_cow *cow = NULL; + dma_addr_t dma_addr; + u32 i = 0; + + if (MALI_MEM_SWAP == mem_bkend->type) { + swap = &mem_bkend->swap_mem; + list_for_each_entry(m_page, &swap->pages, list) { + if (i == offset) { + found_node = m_page; + break; + } + i++; + } + } else { + MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type); + MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags)); + + cow = &mem_bkend->cow_mem; + list_for_each_entry(m_page, &cow->pages, list) { + if (i == offset) { + found_node = m_page; + break; + } + i++; + } + } + + if (NULL == found_node) { + return _MALI_OSK_ERR_FAULT; + } + + found_page = shmem_read_mapping_page(global_swap_space, found_node->swap_it->idx); + + if (!IS_ERR(found_page)) { + lock_page(found_page); + dma_addr = 
dma_map_page(&mali_platform_device->dev, found_page, + 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + dma_unmap_page(&mali_platform_device->dev, dma_addr, + _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + + *pagep = found_page; + } else { + return _MALI_OSK_ERR_NOMEM; + } + + return _MALI_OSK_ERR_OK; +} + +int mali_mem_swap_cow_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep) +{ + struct mali_page_node *m_page, *found_node = NULL, *new_node = NULL; + mali_mem_cow *cow = NULL; + u32 i = 0; + + MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type); + MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)); + MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN == (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags)); + MALI_DEBUG_ASSERT(!mali_memory_swap_backend_in_swapped_pool(mem_bkend)); + + cow = &mem_bkend->cow_mem; + list_for_each_entry(m_page, &cow->pages, list) { + if (i == offset) { + found_node = m_page; + break; + } + i++; + } + + if (NULL == found_node) { + return _MALI_OSK_ERR_FAULT; + } + + new_node = _mali_mem_swap_page_node_allocate(); + + if (NULL == new_node) { + return _MALI_OSK_ERR_FAULT; + } + + new_node->swap_it->idx = mali_mem_swap_idx_alloc(); + + if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == new_node->swap_it->idx) { + MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW on demand.\n")); + kfree(new_node->swap_it); + kfree(new_node); + return _MALI_OSK_ERR_FAULT; + } + + if (MALI_FALSE == mali_mem_swap_in_page_node(new_node)) { + _mali_mem_swap_page_node_free(new_node); + return _MALI_OSK_ERR_FAULT; + } + + /* swap in found node for copy in kernel. 
*/ + if (MALI_FALSE == mali_mem_swap_in_page_node(found_node)) { + mali_mem_swap_out_page_node(new_node); + _mali_mem_swap_page_node_free(new_node); + return _MALI_OSK_ERR_FAULT; + } + + _mali_mem_cow_copy_page(found_node, new_node); + + list_replace(&found_node->list, &new_node->list); + + if (1 != _mali_page_node_get_ref_count(found_node)) { + atomic_add(1, &mem_bkend->mali_allocation->session->mali_mem_allocated_pages); + if (atomic_read(&mem_bkend->mali_allocation->session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > mem_bkend->mali_allocation->session->max_mali_mem_allocated_size) { + mem_bkend->mali_allocation->session->max_mali_mem_allocated_size = atomic_read(&mem_bkend->mali_allocation->session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } + mem_bkend->cow_mem.change_pages_nr++; + } + + mali_mem_swap_out_page_node(found_node); + _mali_mem_swap_page_node_free(found_node); + + /* When swap in the new page node, we have called dma_map_page for this page.\n */ + dma_unmap_page(&mali_platform_device->dev, new_node->swap_it->dma_addr, + _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + + lock_page(new_node->swap_it->page); + + *pagep = new_node->swap_it->page; + + return _MALI_OSK_ERR_OK; +} + +#ifdef MALI_MEM_SWAP_TRACKING +void mali_mem_swap_tracking(u32 *swap_pool_size, u32 *unlock_size) +{ + *swap_pool_size = mem_backend_swapped_pool_size; + *unlock_size = mem_backend_swapped_unlock_size; +} +#endif diff --git a/drivers/gpu/arm/mali/linux/mali_memory_swap_alloc.h b/drivers/gpu/arm/mali/linux/mali_memory_swap_alloc.h new file mode 100755 index 0000000000000..3624d710d78b9 --- /dev/null +++ b/drivers/gpu/arm/mali/linux/mali_memory_swap_alloc.h @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_MEMORY_SWAP_ALLOC_H__ +#define __MALI_MEMORY_SWAP_ALLOC_H__ + +#include "mali_osk.h" +#include "mali_session.h" + +#include "mali_memory_types.h" +#include "mali_pp_job.h" + +/** + * Initialize memory swapping module. + */ +_mali_osk_errcode_t mali_mem_swap_init(void); + +void mali_mem_swap_term(void); + +/** + * Return global share memory file to other modules. + */ +struct file *mali_mem_swap_get_global_swap_file(void); + +/** + * Unlock the given memory backend and pages in it could be swapped out by kernel. + */ +void mali_mem_swap_unlock_single_mem_backend(mali_mem_backend *mem_bkend); + +/** + * Remove the given memory backend from global swap list. + */ +void mali_memory_swap_list_backend_delete(mali_mem_backend *mem_bkend); + +/** + * Add the given memory backend to global swap list. + */ +void mali_memory_swap_list_backend_add(mali_mem_backend *mem_bkend); + +/** + * Allocate 1 index from bitmap used as page index in global swap file. + */ +u32 mali_mem_swap_idx_alloc(void); + +void mali_mem_swap_idx_free(u32 idx); + +/** + * Allocate a new swap item without page index. + */ +struct mali_swap_item *mali_mem_swap_alloc_swap_item(void); + +/** + * Free a swap item, truncate the corresponding space in page cache and free index of page. + */ +void mali_mem_swap_free_swap_item(mali_swap_item *swap_item); + +/** + * Allocate a page node with swap item. 
+ */ +struct mali_page_node *_mali_mem_swap_page_node_allocate(void); + +/** + * Reduce the reference count of given page node and if return 0, just free this page node. + */ +_mali_osk_errcode_t _mali_mem_swap_put_page_node(struct mali_page_node *m_page); + +void _mali_mem_swap_page_node_free(struct mali_page_node *m_page); + +/** + * Free a swappable memory backend. + */ +u32 mali_mem_swap_free(mali_mem_swap *swap_mem); + +/** + * Ummap and free. + */ +u32 mali_mem_swap_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped); + +/** + * Read in a page from global swap file with the pre-allcated page index. + */ +mali_bool mali_mem_swap_in_page_node(struct mali_page_node *page_node); + +int mali_mem_swap_alloc_pages(mali_mem_swap *swap_mem, u32 size, u32 *bkend_idx); + +_mali_osk_errcode_t mali_mem_swap_mali_map(mali_mem_swap *swap_mem, struct mali_session_data *session, u32 vaddr, u32 props); + +void mali_mem_swap_mali_unmap(mali_mem_allocation *alloc); + +/** + * When pp job created, we need swap in all of memory backend needed by this pp job. + */ +int mali_mem_swap_in_pages(struct mali_pp_job *job); + +/** + * Put all of memory backends used this pp job to the global swap list. + */ +int mali_mem_swap_out_pages(struct mali_pp_job *job); + +/** + * This will be called in page fault to process CPU read&write. + */ +int mali_mem_swap_allocate_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep) ; + +/** + * Used to process cow on demand for swappable memory backend. 
+ */ +int mali_mem_swap_cow_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep); + +#ifdef MALI_MEM_SWAP_TRACKING +void mali_mem_swap_tracking(u32 *swap_pool_size, u32 *unlock_size); +#endif +#endif /* __MALI_MEMORY_SWAP_ALLOC_H__ */ + diff --git a/drivers/gpu/arm/mali/linux/mali_memory_types.h b/drivers/gpu/arm/mali/linux/mali_memory_types.h index 5376f77b28a65..66e5e4d710b6e 100755 --- a/drivers/gpu/arm/mali/linux/mali_memory_types.h +++ b/drivers/gpu/arm/mali/linux/mali_memory_types.h @@ -11,8 +11,10 @@ #ifndef __MALI_MEMORY_TYPES_H__ #define __MALI_MEMORY_TYPES_H__ +#include + #if defined(CONFIG_MALI400_UMP) -#include "ump_kernel_interface.h" +#include #endif typedef u32 mali_address_t; @@ -20,9 +22,12 @@ typedef u32 mali_address_t; typedef enum mali_mem_type { MALI_MEM_OS, MALI_MEM_EXTERNAL, + MALI_MEM_SWAP, MALI_MEM_DMA_BUF, MALI_MEM_UMP, MALI_MEM_BLOCK, + MALI_MEM_COW, + MALI_MEM_SECURE, MALI_MEM_TYPE_MAX, } mali_mem_type; @@ -33,20 +38,32 @@ typedef struct mali_block_item { unsigned long phy_addr; } mali_block_item; +/** + * idx is used to locate the given page in the address space of swap file. + * ref_count is used to mark how many memory backends are using this item. 
+ */ +typedef struct mali_swap_item { + u32 idx; + atomic_t ref_count; + struct page *page; + dma_addr_t dma_addr; +} mali_swap_item; + typedef enum mali_page_node_type { MALI_PAGE_NODE_OS, MALI_PAGE_NODE_BLOCK, + MALI_PAGE_NODE_SWAP, } mali_page_node_type; -typedef struct mali_block_node { - struct list_head list; - mali_block_item *blk_it; /*pointer to block item*/ - u32 type; -} mali_block_node; - typedef struct mali_page_node { struct list_head list; - struct page *page; + union { + struct page *page; + mali_block_item *blk_it; /*pointer to block item*/ + mali_swap_item *swap_it; + }; + + u32 type; } mali_page_node; typedef struct mali_mem_os_mem { @@ -90,7 +107,7 @@ typedef struct mali_mem_virt_mali_mapping { typedef struct mali_mem_virt_cpu_mapping { void __user *addr; - u32 ref; + struct vm_area_struct *vma; } mali_mem_virt_cpu_mapping; #define MALI_MEM_ALLOCATION_VALID_MAGIC 0xdeda110c @@ -112,22 +129,10 @@ typedef struct mali_vma_node { typedef struct mali_mem_allocation { MALI_DEBUG_CODE(u32 magic); mali_mem_type type; /**< Type of memory */ - int id; /**< ID in the descriptor map for this allocation */ - - u32 size; /**< Size of the allocation */ u32 flags; /**< Flags for this allocation */ struct mali_session_data *session; /**< Pointer to session that owns the allocation */ - /* Union selected by type. 
*/ - union { - mali_mem_os_mem os_mem; /**< MALI_MEM_OS */ - mali_mem_external ext_mem; /**< MALI_MEM_EXTERNAL */ - mali_mem_dma_buf dma_buf; /**< MALI_MEM_DMA_BUF */ - mali_mem_ump ump_mem; /**< MALI_MEM_UMP */ - mali_mem_block_mem block_mem; /**< MALI_MEM_BLOCK */ - }; - mali_mem_virt_cpu_mapping cpu_mapping; /**< CPU mapping */ mali_mem_virt_mali_mapping mali_mapping; /**< Mali mapping */ @@ -137,17 +142,53 @@ typedef struct mali_mem_allocation { u32 psize; /* physical backend memory size*/ struct list_head list; s32 backend_handle; /* idr for mem_backend */ - struct kref ref; + _mali_osk_atomic_t mem_alloc_refcount; } mali_mem_allocation; +struct mali_mem_os_allocator { + spinlock_t pool_lock; + struct list_head pool_pages; + size_t pool_count; + + atomic_t allocated_pages; + size_t allocation_limit; + + struct shrinker shrinker; + struct delayed_work timed_shrinker; + struct workqueue_struct *wq; +}; /* COW backend memory type */ typedef struct mali_mem_cow { struct list_head pages; /**< all pages for this cow backend allocation, including new allocated pages for modified range*/ u32 count; /**< number of pages */ + s32 change_pages_nr; } mali_mem_cow; +typedef struct mali_mem_swap { + struct list_head pages; + u32 count; +} mali_mem_swap; + +typedef struct mali_mem_secure { +#if defined(CONFIG_DMA_SHARED_BUFFER) + struct dma_buf *buf; + struct dma_buf_attachment *attachment; + struct sg_table *sgt; +#endif + u32 count; +} mali_mem_secure; + +#define MALI_MEM_BACKEND_FLAG_COWED (0x1) /* COW has happen on this backend */ +#define MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE (0x2) /* This is an COW backend, mapped as not allowed cpu to write */ +#define MALI_MEM_BACKEND_FLAG_SWAP_COWED (0x4) /* Mark the given backend is cowed from swappable memory. */ +/* Mark this backend is not swapped_in in MALI driver, and before using it, + * we should swap it in and set up corresponding page table. 
*/ +#define MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN (0x8) +#define MALI_MEM_BACKEND_FLAG_NOT_BINDED (0x1 << 5) /* this backend it not back with physical memory, used for defer bind */ +#define MALI_MEM_BACKEND_FLAG_BINDED (0x1 << 6) /* this backend it back with physical memory, used for defer bind */ + typedef struct mali_mem_backend { mali_mem_type type; /**< Type of backend memory */ u32 flags; /**< Flags for this allocation */ @@ -160,11 +201,19 @@ typedef struct mali_mem_backend { mali_mem_ump ump_mem; /**< MALI_MEM_UMP */ mali_mem_block_mem block_mem; /**< MALI_MEM_BLOCK */ mali_mem_cow cow_mem; + mali_mem_swap swap_mem; + mali_mem_secure secure_mem; }; mali_mem_allocation *mali_allocation; + struct mutex mutex; + mali_mem_type cow_type; + + struct list_head list; /**< Used to link swappable memory backend to the global swappable list */ + int using_count; /**< Mark how many PP jobs are using this memory backend */ + u32 start_idx; /**< If the correspondign vma of this backend is linear, this value will be used to set vma->vm_pgoff */ } mali_mem_backend; -#define MALI_MEM_FLAG_MALI_GUARD_PAGE (1 << 0) +#define MALI_MEM_FLAG_MALI_GUARD_PAGE (_MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) #define MALI_MEM_FLAG_DONT_CPU_MAP (1 << 1) - +#define MALI_MEM_FLAG_CAN_RESIZE (_MALI_MEMORY_ALLOCATE_RESIZEABLE) #endif /* __MALI_MEMORY_TYPES__ */ diff --git a/drivers/gpu/arm/mali/linux/mali_memory_ump.c b/drivers/gpu/arm/mali/linux/mali_memory_ump.c index 207ac9196a09a..2b1da79c19a1b 100755 --- a/drivers/gpu/arm/mali/linux/mali_memory_ump.c +++ b/drivers/gpu/arm/mali/linux/mali_memory_ump.c @@ -1,9 +1,9 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. - * + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. + * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
- * + * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ @@ -14,7 +14,7 @@ #include "mali_session.h" #include "mali_kernel_linux.h" #include "mali_memory.h" -#include "ump_kernel_interface.h" +#include static int mali_mem_ump_map(mali_mem_backend *mem_backend) { @@ -90,7 +90,6 @@ static int mali_mem_ump_map(mali_mem_backend *mem_backend) offset += _MALI_OSK_MALI_PAGE_SIZE; } - session->mali_mem_array[mem_backend->type] += mem_backend->size; mali_session_memory_unlock(session); _mali_osk_free(ump_blocks); return 0; @@ -105,12 +104,10 @@ static void mali_mem_ump_unmap(mali_mem_allocation *alloc) mali_session_memory_lock(session); mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start, alloc->flags); - - session->mali_mem_array[alloc->type] -= alloc->psize; mali_session_memory_unlock(session); } -int mali_memory_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32 secure_id, u32 flags) +int mali_mem_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32 secure_id, u32 flags) { ump_dd_handle ump_mem; int ret; @@ -140,7 +137,7 @@ int mali_memory_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_b return _MALI_OSK_ERR_OK; } -void mali_mem_ump_release(mali_mem_backend *mem_backend) +void mali_mem_unbind_ump_buf(mali_mem_backend *mem_backend) { ump_dd_handle ump_mem; mali_mem_allocation *alloc; @@ -155,10 +152,3 @@ void mali_mem_ump_release(mali_mem_backend *mem_backend) ump_dd_reference_release(ump_mem); } -int mali_memory_unbind_ump_buf(mali_mem_backend *mem_backend) -{ - MALI_DEBUG_ASSERT_POINTER(mem_backend); - mali_mem_ump_release(mem_backend); - return _MALI_OSK_ERR_OK; -} - diff --git a/drivers/gpu/arm/mali/linux/mali_memory_ump.h b/drivers/gpu/arm/mali/linux/mali_memory_ump.h old mode 100644 new mode 100755 index 0f4aa198e28d7..7f03a49d8732e --- 
a/drivers/gpu/arm/mali/linux/mali_memory_ump.h +++ b/drivers/gpu/arm/mali/linux/mali_memory_ump.h @@ -1,9 +1,9 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. - * + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * + * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ @@ -19,9 +19,8 @@ extern "C" { #include "mali_osk.h" #include "mali_memory.h" -int mali_memory_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32 secure_id, u32 flags); -int mali_memory_unbind_ump_buf(mali_mem_backend *mem_backend); -void mali_mem_ump_release(mali_mem_backend *mem_backend); +int mali_mem_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32 secure_id, u32 flags); +void mali_mem_unbind_ump_buf(mali_mem_backend *mem_backend); #ifdef __cplusplus } diff --git a/drivers/gpu/arm/mali/linux/mali_memory_util.c b/drivers/gpu/arm/mali/linux/mali_memory_util.c old mode 100644 new mode 100755 index 5947cafd967da..d5b39a962905c --- a/drivers/gpu/arm/mali/linux/mali_memory_util.c +++ b/drivers/gpu/arm/mali/linux/mali_memory_util.c @@ -1,9 +1,9 @@ /* * Copyright (C) 2013-2015 ARM Limited. All rights reserved. - * + * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * + * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ @@ -25,6 +25,7 @@ #include "mali_memory_os_alloc.h" #if defined(CONFIG_DMA_SHARED_BUFFER) #include "mali_memory_dma_buf.h" +#include "mali_memory_secure.h" #endif #if defined(CONFIG_MALI400_UMP) #include "mali_memory_ump.h" @@ -32,19 +33,19 @@ #include "mali_memory_external.h" #include "mali_memory_manager.h" #include "mali_memory_virtual.h" +#include "mali_memory_cow.h" #include "mali_memory_block_alloc.h" +#include "mali_memory_swap_alloc.h" + + /** *function @_mali_free_allocation_mem - free a memory allocation -* support backend type: -* MALI_MEM_OS -* maybe COW later? */ -static void _mali_free_allocation_mem(struct kref *kref) +static u32 _mali_free_allocation_mem(mali_mem_allocation *mali_alloc) { mali_mem_backend *mem_bkend = NULL; - - mali_mem_allocation *mali_alloc = container_of(kref, struct mali_mem_allocation, ref); + u32 free_pages_nr = 0; struct mali_session_data *session = mali_alloc->session; MALI_DEBUG_PRINT(4, (" _mali_free_allocation_mem, psize =0x%x! \n", mali_alloc->psize)); @@ -55,39 +56,66 @@ static void _mali_free_allocation_mem(struct kref *kref) mutex_lock(&mali_idr_mutex); mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle); mutex_unlock(&mali_idr_mutex); - MALI_DEBUG_ASSERT(NULL != mem_bkend); switch (mem_bkend->type) { case MALI_MEM_OS: - mali_mem_os_release(mem_bkend); + free_pages_nr = mali_mem_os_release(mem_bkend); + atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages); break; case MALI_MEM_UMP: #if defined(CONFIG_MALI400_UMP) - mali_mem_ump_release(mem_bkend); + mali_mem_unbind_ump_buf(mem_bkend); + atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]); #else - MALI_DEBUG_PRINT(2, ("DMA not supported\n")); + MALI_DEBUG_PRINT(1, ("UMP not supported\n")); #endif break; case MALI_MEM_DMA_BUF: #if defined(CONFIG_DMA_SHARED_BUFFER) - mali_mem_dma_buf_release(mem_bkend); + mali_mem_unbind_dma_buf(mem_bkend); + atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, 
&session->mali_mem_array[mem_bkend->type]); #else - MALI_DEBUG_PRINT(2, ("DMA not supported\n")); + MALI_DEBUG_PRINT(1, ("DMA not supported\n")); #endif break; case MALI_MEM_EXTERNAL: - mali_mem_external_release(mem_bkend); + mali_mem_unbind_ext_buf(mem_bkend); + atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]); break; + case MALI_MEM_BLOCK: - mali_mem_block_release(mem_bkend); + free_pages_nr = mali_mem_block_release(mem_bkend); + atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages); + break; + + case MALI_MEM_COW: + if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) { + free_pages_nr = mali_mem_swap_release(mem_bkend, MALI_TRUE); + } else { + free_pages_nr = mali_mem_cow_release(mem_bkend, MALI_TRUE); + } + atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages); + break; + case MALI_MEM_SWAP: + free_pages_nr = mali_mem_swap_release(mem_bkend, MALI_TRUE); + atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages); + atomic_sub(free_pages_nr, &session->mali_mem_array[mem_bkend->type]); + break; + case MALI_MEM_SECURE: +#if defined(CONFIG_DMA_SHARED_BUFFER) + free_pages_nr = mali_mem_secure_release(mem_bkend); + atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages); +#else + MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory\n")); +#endif break; default: MALI_DEBUG_PRINT(1, ("mem type %d is not in the mali_mem_type enum.\n", mem_bkend->type)); break; } - /* remove backend memory idex */ + /*Remove backend memory idex */ mutex_lock(&mali_idr_mutex); idr_remove(&mali_backend_idr, mali_alloc->backend_handle); mutex_unlock(&mali_idr_mutex); @@ -96,29 +124,30 @@ static void _mali_free_allocation_mem(struct kref *kref) /* remove memory allocation */ mali_vma_offset_remove(&session->allocation_mgr, &mali_alloc->mali_vma_node); mali_mem_allocation_struct_destory(mali_alloc); - + return free_pages_nr; } - - /** * ref_count for allocation */ -void mali_allocation_unref(struct 
mali_mem_allocation **alloc) +u32 mali_allocation_unref(struct mali_mem_allocation **alloc) { + u32 free_pages_nr = 0; mali_mem_allocation *mali_alloc = *alloc; *alloc = NULL; - kref_put(&mali_alloc->ref, _mali_free_allocation_mem); + if (0 == _mali_osk_atomic_dec_return(&mali_alloc->mem_alloc_refcount)) { + free_pages_nr = _mali_free_allocation_mem(mali_alloc); + } + return free_pages_nr; } void mali_allocation_ref(struct mali_mem_allocation *alloc) { - kref_get(&alloc->ref); + _mali_osk_atomic_inc(&alloc->mem_alloc_refcount); } void mali_free_session_allocations(struct mali_session_data *session) { - struct mali_mem_allocation *entry, *next; MALI_DEBUG_PRINT(4, (" mali_free_session_allocations! \n")); @@ -127,4 +156,3 @@ void mali_free_session_allocations(struct mali_session_data *session) mali_allocation_unref(&entry); } } - diff --git a/drivers/gpu/arm/mali/linux/mali_memory_util.h b/drivers/gpu/arm/mali/linux/mali_memory_util.h old mode 100644 new mode 100755 index 419e05f099395..b3cc7e9eaa620 --- a/drivers/gpu/arm/mali/linux/mali_memory_util.h +++ b/drivers/gpu/arm/mali/linux/mali_memory_util.h @@ -1,17 +1,20 @@ -/* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. - * +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * + * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ - - -void mali_allocation_unref(struct mali_mem_allocation **alloc); - -void mali_allocation_ref(struct mali_mem_allocation *alloc); - -void mali_free_session_allocations(struct mali_session_data *session); - + */ + +#ifndef __MALI_MEMORY_UTIL_H__ +#define __MALI_MEMORY_UTIL_H__ + +u32 mali_allocation_unref(struct mali_mem_allocation **alloc); + +void mali_allocation_ref(struct mali_mem_allocation *alloc); + +void mali_free_session_allocations(struct mali_session_data *session); + +#endif diff --git a/drivers/gpu/arm/mali/linux/mali_memory_virtual.c b/drivers/gpu/arm/mali/linux/mali_memory_virtual.c old mode 100644 new mode 100755 index 535feffb82256..cb85ca23737d4 --- a/drivers/gpu/arm/mali/linux/mali_memory_virtual.c +++ b/drivers/gpu/arm/mali/linux/mali_memory_virtual.c @@ -1,128 +1,127 @@ -/* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. - * +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * + * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ - -#include -#include -#include -#include -#include -#include -#include - -#include "mali_osk.h" -#include "mali_osk_mali.h" -#include "mali_kernel_linux.h" -#include "mali_scheduler.h" -#include "mali_memory_os_alloc.h" -#include "mali_memory_manager.h" -#include "mali_memory_virtual.h" - - -/** -*internal helper to link node into the rb-tree -*/ -static inline void _mali_vma_offset_add_rb(struct mali_allocation_manager *mgr, - struct mali_vma_node *node) -{ - struct rb_node **iter = &mgr->allocation_mgr_rb.rb_node; - struct rb_node *parent = NULL; - struct mali_vma_node *iter_node; - - while (likely(*iter)) { - parent = *iter; - iter_node = rb_entry(*iter, struct mali_vma_node, vm_rb); - - if (node->vm_node.start < iter_node->vm_node.start) - iter = &(*iter)->rb_left; - else if (node->vm_node.start > iter_node->vm_node.start) - iter = &(*iter)->rb_right; - else - /* Not support yet */ - MALI_DEBUG_ASSERT(0); - } - - rb_link_node(&node->vm_rb, parent, iter); - rb_insert_color(&node->vm_rb, &mgr->allocation_mgr_rb); -} - -/** - * mali_vma_offset_add() - Add offset node to RB Tree - */ -int mali_vma_offset_add(struct mali_allocation_manager *mgr, - struct mali_vma_node *node) -{ - int ret = 0; - write_lock(&mgr->vm_lock); - - if (node->vm_node.allocated) { - goto out; - } - - _mali_vma_offset_add_rb(mgr, node); - /* set to allocated */ - node->vm_node.allocated = 1; - -out: - write_unlock(&mgr->vm_lock); - return ret; -} - -/** - * mali_vma_offset_remove() - Remove offset node from RB tree - */ -void mali_vma_offset_remove(struct mali_allocation_manager *mgr, - struct mali_vma_node *node) -{ - write_lock(&mgr->vm_lock); - - if (node->vm_node.allocated) { - rb_erase(&node->vm_rb, &mgr->allocation_mgr_rb); - memset(&node->vm_node, 0, sizeof(node->vm_node)); - } - write_unlock(&mgr->vm_lock); -} - -/** -* mali_vma_offset_search - Search the node in RB tree -*/ -struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr, - unsigned long start, 
unsigned long pages) -{ - struct mali_vma_node *node, *best; - struct rb_node *iter; - unsigned long offset; - read_lock(&mgr->vm_lock); - - iter = mgr->allocation_mgr_rb.rb_node; - best = NULL; - - while (likely(iter)) { - node = rb_entry(iter, struct mali_vma_node, vm_rb); - offset = node->vm_node.start; - if (start >= offset) { - iter = iter->rb_right; - best = node; - if (start == offset) - break; - } else { - iter = iter->rb_left; - } - } - - if (best) { - offset = best->vm_node.start + best->vm_node.size; - if (offset <= start + pages) - best = NULL; - } - read_unlock(&mgr->vm_lock); - - return best; -} - + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "mali_osk.h" +#include "mali_osk_mali.h" +#include "mali_kernel_linux.h" +#include "mali_scheduler.h" +#include "mali_memory_os_alloc.h" +#include "mali_memory_manager.h" +#include "mali_memory_virtual.h" + + +/** +*internal helper to link node into the rb-tree +*/ +static inline void _mali_vma_offset_add_rb(struct mali_allocation_manager *mgr, + struct mali_vma_node *node) +{ + struct rb_node **iter = &mgr->allocation_mgr_rb.rb_node; + struct rb_node *parent = NULL; + struct mali_vma_node *iter_node; + + while (likely(*iter)) { + parent = *iter; + iter_node = rb_entry(*iter, struct mali_vma_node, vm_rb); + + if (node->vm_node.start < iter_node->vm_node.start) + iter = &(*iter)->rb_left; + else if (node->vm_node.start > iter_node->vm_node.start) + iter = &(*iter)->rb_right; + else + MALI_DEBUG_ASSERT(0); + } + + rb_link_node(&node->vm_rb, parent, iter); + rb_insert_color(&node->vm_rb, &mgr->allocation_mgr_rb); +} + +/** + * mali_vma_offset_add() - Add offset node to RB Tree + */ +int mali_vma_offset_add(struct mali_allocation_manager *mgr, + struct mali_vma_node *node) +{ + int ret = 0; + write_lock(&mgr->vm_lock); + + if (node->vm_node.allocated) { + goto out; + } + + _mali_vma_offset_add_rb(mgr, node); + /* set to allocated */ + node->vm_node.allocated = 1; + +out: + 
write_unlock(&mgr->vm_lock); + return ret; +} + +/** + * mali_vma_offset_remove() - Remove offset node from RB tree + */ +void mali_vma_offset_remove(struct mali_allocation_manager *mgr, + struct mali_vma_node *node) +{ + write_lock(&mgr->vm_lock); + + if (node->vm_node.allocated) { + rb_erase(&node->vm_rb, &mgr->allocation_mgr_rb); + memset(&node->vm_node, 0, sizeof(node->vm_node)); + } + write_unlock(&mgr->vm_lock); +} + +/** +* mali_vma_offset_search - Search the node in RB tree +*/ +struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr, + unsigned long start, unsigned long pages) +{ + struct mali_vma_node *node, *best; + struct rb_node *iter; + unsigned long offset; + read_lock(&mgr->vm_lock); + + iter = mgr->allocation_mgr_rb.rb_node; + best = NULL; + + while (likely(iter)) { + node = rb_entry(iter, struct mali_vma_node, vm_rb); + offset = node->vm_node.start; + if (start >= offset) { + iter = iter->rb_right; + best = node; + if (start == offset) + break; + } else { + iter = iter->rb_left; + } + } + + if (best) { + offset = best->vm_node.start + best->vm_node.size; + if (offset <= start + pages) + best = NULL; + } + read_unlock(&mgr->vm_lock); + + return best; +} + diff --git a/drivers/gpu/arm/mali/linux/mali_memory_virtual.h b/drivers/gpu/arm/mali/linux/mali_memory_virtual.h old mode 100644 new mode 100755 index 1f64c5e4a55dc..9815f28f7ae11 --- a/drivers/gpu/arm/mali/linux/mali_memory_virtual.h +++ b/drivers/gpu/arm/mali/linux/mali_memory_virtual.h @@ -1,35 +1,35 @@ -/* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. - * +/* + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. + * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
- * + * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ -#ifndef __MALI_GPU_VMEM_H__ -#define __MALI_GPU_VMEM_H__ - -#include "mali_osk.h" -#include "mali_session.h" -#include -#include -#include -#include -#include -#include "mali_memory_types.h" -#include "mali_memory_os_alloc.h" -#include "mali_memory_manager.h" - - - -int mali_vma_offset_add(struct mali_allocation_manager *mgr, - struct mali_vma_node *node); - -void mali_vma_offset_remove(struct mali_allocation_manager *mgr, - struct mali_vma_node *node); - -struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr, - unsigned long start, unsigned long pages); - -#endif + */ +#ifndef __MALI_GPU_VMEM_H__ +#define __MALI_GPU_VMEM_H__ + +#include "mali_osk.h" +#include "mali_session.h" +#include +#include +#include +#include +#include +#include "mali_memory_types.h" +#include "mali_memory_os_alloc.h" +#include "mali_memory_manager.h" + + + +int mali_vma_offset_add(struct mali_allocation_manager *mgr, + struct mali_vma_node *node); + +void mali_vma_offset_remove(struct mali_allocation_manager *mgr, + struct mali_vma_node *node); + +struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr, + unsigned long start, unsigned long pages); + +#endif diff --git a/drivers/gpu/arm/mali/linux/mali_osk_atomics.c b/drivers/gpu/arm/mali/linux/mali_osk_atomics.c index ba630e2730cf3..03f4421ef1eab 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_atomics.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_atomics.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved. 
* * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_osk_bitmap.c b/drivers/gpu/arm/mali/linux/mali_osk_bitmap.c new file mode 100755 index 0000000000000..67cc7e42b83f6 --- /dev/null +++ b/drivers/gpu/arm/mali/linux/mali_osk_bitmap.c @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2010, 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_osk_bitmap.c + * Implementation of the OS abstraction layer for the kernel device driver + */ + +#include +#include +#include +#include +#include +#include "common/mali_kernel_common.h" +#include "mali_osk_types.h" +#include "mali_osk.h" + +u32 _mali_osk_bitmap_alloc(struct _mali_osk_bitmap *bitmap) +{ + u32 obj; + + MALI_DEBUG_ASSERT_POINTER(bitmap); + + _mali_osk_spinlock_lock(bitmap->lock); + + obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->reserve); + + if (obj < bitmap->max) { + set_bit(obj, bitmap->table); + } else { + obj = -1; + } + + if (obj != -1) + --bitmap->avail; + _mali_osk_spinlock_unlock(bitmap->lock); + + return obj; +} + +void _mali_osk_bitmap_free(struct _mali_osk_bitmap *bitmap, u32 obj) +{ + MALI_DEBUG_ASSERT_POINTER(bitmap); + + _mali_osk_bitmap_free_range(bitmap, obj, 1); +} + +u32 _mali_osk_bitmap_alloc_range(struct _mali_osk_bitmap *bitmap, int cnt) +{ + u32 obj; + + MALI_DEBUG_ASSERT_POINTER(bitmap); + + if (0 >= cnt) { 
+ return -1; + } + + if (1 == cnt) { + return _mali_osk_bitmap_alloc(bitmap); + } + + _mali_osk_spinlock_lock(bitmap->lock); + obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, + bitmap->last, cnt, 0); + + if (obj >= bitmap->max) { + obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, + bitmap->reserve, cnt, 0); + } + + if (obj < bitmap->max) { + bitmap_set(bitmap->table, obj, cnt); + + bitmap->last = (obj + cnt); + if (bitmap->last >= bitmap->max) { + bitmap->last = bitmap->reserve; + } + } else { + obj = -1; + } + + if (obj != -1) { + bitmap->avail -= cnt; + } + + _mali_osk_spinlock_unlock(bitmap->lock); + + return obj; +} + +u32 _mali_osk_bitmap_avail(struct _mali_osk_bitmap *bitmap) +{ + MALI_DEBUG_ASSERT_POINTER(bitmap); + + return bitmap->avail; +} + +void _mali_osk_bitmap_free_range(struct _mali_osk_bitmap *bitmap, u32 obj, int cnt) +{ + MALI_DEBUG_ASSERT_POINTER(bitmap); + + _mali_osk_spinlock_lock(bitmap->lock); + bitmap_clear(bitmap->table, obj, cnt); + bitmap->last = min(bitmap->last, obj); + + bitmap->avail += cnt; + _mali_osk_spinlock_unlock(bitmap->lock); +} + +int _mali_osk_bitmap_init(struct _mali_osk_bitmap *bitmap, u32 num, u32 reserve) +{ + MALI_DEBUG_ASSERT_POINTER(bitmap); + MALI_DEBUG_ASSERT(reserve <= num); + + bitmap->reserve = reserve; + bitmap->last = reserve; + bitmap->max = num; + bitmap->avail = num - reserve; + bitmap->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_UNORDERED, _MALI_OSK_LOCK_ORDER_FIRST); + if (!bitmap->lock) { + return _MALI_OSK_ERR_NOMEM; + } + bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * + sizeof(long), GFP_KERNEL); + if (!bitmap->table) { + _mali_osk_spinlock_term(bitmap->lock); + return _MALI_OSK_ERR_NOMEM; + } + + return _MALI_OSK_ERR_OK; +} + +void _mali_osk_bitmap_term(struct _mali_osk_bitmap *bitmap) +{ + MALI_DEBUG_ASSERT_POINTER(bitmap); + + if (NULL != bitmap->lock) { + _mali_osk_spinlock_term(bitmap->lock); + } + + if (NULL != bitmap->table) { + kfree(bitmap->table); + } +} + 
diff --git a/drivers/gpu/arm/mali/linux/mali_osk_irq.c b/drivers/gpu/arm/mali/linux/mali_osk_irq.c index 6876b6db20190..4a75845ab044d 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_irq.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_irq.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -13,12 +13,7 @@ * Implementation of the OS abstraction layer for the kernel device driver */ -#include -#include -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 29)) -#include -#endif -#include /* For memory allocation */ +#include /* For memory allocation */ #include #include #include @@ -35,16 +30,6 @@ typedef struct _mali_osk_irq_t_struct { typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *); static irqreturn_t irq_handler_upper_half(int port_name, void *dev_id); /* , struct pt_regs *regs*/ -#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6 -u32 get_irqnum(struct _mali_osk_irq_t_struct* irq) -{ - if (irq) - return irq->irqnum; - else - return 0; -} -#endif - #if defined(DEBUG) struct test_interrupt_data { @@ -168,7 +153,7 @@ _mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandle #if defined(DEBUG) /* Verify that the configured interrupt settings are working */ if (_MALI_OSK_ERR_OK != test_interrupt(irqnum, trigger_func, ack_func, probe_data, description)) { - MALI_DEBUG_PRINT(2, ("Test of IRQ handler for core '%s' failed\n", description)); + MALI_DEBUG_PRINT(2, ("Test of IRQ(%d) handler for core '%s' failed\n", irqnum, description)); kfree(irq_object); return NULL; } diff --git a/drivers/gpu/arm/mali/linux/mali_osk_locks.c b/drivers/gpu/arm/mali/linux/mali_osk_locks.c index 50c0a9d238190..4fa93981858e1 
100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_locks.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_locks.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_osk_locks.h b/drivers/gpu/arm/mali/linux/mali_osk_locks.h index aa32a81e7496b..a05586f0e12bd 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_locks.h +++ b/drivers/gpu/arm/mali/linux/mali_osk_locks.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_osk_low_level_mem.c b/drivers/gpu/arm/mali/linux/mali_osk_low_level_mem.c index c14e872a7d639..bc713a175d203 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_low_level_mem.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_low_level_mem.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/mali/linux/mali_osk_mali.c b/drivers/gpu/arm/mali/linux/mali_osk_mali.c index f5b72e6398953..23b2c1778333e 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_mali.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_mali.c @@ -24,6 +24,15 @@ #include "mali_osk.h" /* kernel side OS functions */ #include "mali_kernel_linux.h" +static mali_bool mali_secure_mode_enabled = MALI_FALSE; +static mali_bool mali_secure_mode_supported = MALI_FALSE; + +/* Function that init the mali gpu secure mode */ +void (*mali_secure_mode_deinit)(void) = NULL; +/* Function that enable the mali gpu secure mode */ +int (*mali_secure_mode_enable)(void) = NULL; +/* Function that disable the mali gpu secure mode */ +int (*mali_secure_mode_disable)(void) = NULL; #ifdef CONFIG_MALI_DT @@ -52,6 +61,11 @@ #define MALI_OSK_RESOURCE_L2_LOCATION_START 20 #define MALI_OSK_RESOURCE_l2_LOCATION_END 22 +/** + * DMA unit location. + */ +#define MALI_OSK_RESOURCE_DMA_LOCATION 26 + static _mali_osk_resource_t mali_osk_resource_bank[MALI_OSK_MAX_RESOURCE_NUMBER] = { {.description = "Mali_GP", .base = MALI_OFFSET_GP, .irq_name = "IRQGP",}, {.description = "Mali_GP_MMU", .base = MALI_OFFSET_GP_MMU, .irq_name = "IRQGPMMU",}, @@ -82,20 +96,36 @@ static _mali_osk_resource_t mali_osk_resource_bank[MALI_OSK_MAX_RESOURCE_NUMBER] {.description = "Mali_DMA", .base = MALI_OFFSET_DMA,}, }; +static int _mali_osk_get_compatible_name(const char **out_string) +{ + struct device_node *node = mali_platform_device->dev.of_node; + + MALI_DEBUG_ASSERT(NULL != node); + + return of_property_read_string(node, "compatible", out_string); +} + _mali_osk_errcode_t _mali_osk_resource_initialize(void) { - mali_bool mali_is_450 = MALI_FALSE; + mali_bool mali_is_450 = MALI_FALSE, mali_is_470 = MALI_FALSE; int i, pp_core_num = 0, l2_core_num = 0; struct resource *res; + const char *compatible_name = NULL; + + if (0 == _mali_osk_get_compatible_name(&compatible_name)) { + if (0 == strncmp(compatible_name, "arm,mali-450", 
strlen("arm,mali-450"))) { + mali_is_450 = MALI_TRUE; + MALI_DEBUG_PRINT(2, ("mali-450 device tree detected.")); + } else if (0 == strncmp(compatible_name, "arm,mali-470", strlen("arm,mali-470"))) { + mali_is_470 = MALI_TRUE; + MALI_DEBUG_PRINT(2, ("mali-470 device tree detected.")); + } + } for (i = 0; i < MALI_OSK_RESOURCE_WITH_IRQ_NUMBER; i++) { res = platform_get_resource_byname(mali_platform_device, IORESOURCE_IRQ, mali_osk_resource_bank[i].irq_name); if (res) { mali_osk_resource_bank[i].irq = res->start; - if (0 == strncmp("Mali_PP_Broadcast", mali_osk_resource_bank[i].description, - strlen(mali_osk_resource_bank[i].description))) { - mali_is_450 = MALI_TRUE; - } } else { mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS; } @@ -117,14 +147,15 @@ _mali_osk_errcode_t _mali_osk_resource_initialize(void) /** * we can caculate the number of l2 cache core according the number of pp core number - * and device type(mali400/mali450). + * and device type(mali400/mali450/mali470). */ - if (mali_is_450 && 4 < pp_core_num) { - l2_core_num = 3; - } else if (mali_is_450 && 4 >= pp_core_num) { - l2_core_num = 2; - } else { - l2_core_num = 1; + l2_core_num = 1; + if (mali_is_450) { + if (pp_core_num > 4) { + l2_core_num = 3; + } else if (pp_core_num <= 4) { + l2_core_num = 2; + } } for (i = MALI_OSK_RESOURCE_l2_LOCATION_END; i > MALI_OSK_RESOURCE_L2_LOCATION_START + l2_core_num - 1; i--) { @@ -132,12 +163,15 @@ _mali_osk_errcode_t _mali_osk_resource_initialize(void) } /* If device is not mali-450 type, we have to remove related resource from resource bank. 
*/ - if (!mali_is_450) { + if (!(mali_is_450 || mali_is_470)) { for (i = MALI_OSK_RESOURCE_l2_LOCATION_END + 1; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) { mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS; } } + if (mali_is_470) + mali_osk_resource_bank[MALI_OSK_RESOURCE_DMA_LOCATION].base = MALI_OSK_INVALID_RESOURCE_ADDRESS; + return _MALI_OSK_ERR_OK; } @@ -325,22 +359,18 @@ _mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data) return _MALI_OSK_ERR_ITEM_NOT_FOUND; } -u32 _mali_osk_l2_resource_count(void) +u32 _mali_osk_identify_gpu_resource(void) { - u32 l2_core_num = 0; - - if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE0, NULL)) - l2_core_num++; - if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE1, NULL)) - l2_core_num++; - - if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE2, NULL)) - l2_core_num++; + /* Mali 450 */ + return 0x450; - MALI_DEBUG_ASSERT(0 < l2_core_num); + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_DLBU, NULL)) + /* Mali 470 */ + return 0x470; - return l2_core_num; + /* Mali 400 */ + return 0x400; } mali_bool _mali_osk_shared_interrupts(void) @@ -367,3 +397,95 @@ mali_bool _mali_osk_shared_interrupts(void) return MALI_FALSE; } + +_mali_osk_errcode_t _mali_osk_gpu_secure_mode_init(void) +{ + _mali_osk_device_data data = { 0, }; + + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + if ((NULL != data.secure_mode_init) && (NULL != data.secure_mode_deinit) + && (NULL != data.secure_mode_enable) && (NULL != data.secure_mode_disable)) { + int err = data.secure_mode_init(); + if (err) { + MALI_DEBUG_PRINT(1, ("Failed to init gpu secure mode.\n")); + return _MALI_OSK_ERR_FAULT; + } + + mali_secure_mode_deinit = data.secure_mode_deinit; + mali_secure_mode_enable = data.secure_mode_enable; + mali_secure_mode_disable = data.secure_mode_disable; + + mali_secure_mode_supported = MALI_TRUE; + mali_secure_mode_enabled = MALI_FALSE; 
+ return _MALI_OSK_ERR_OK; + } + } + MALI_DEBUG_PRINT(3, ("GPU secure mode not supported.\n")); + return _MALI_OSK_ERR_UNSUPPORTED; + +} + +_mali_osk_errcode_t _mali_osk_gpu_secure_mode_deinit(void) +{ + if (NULL != mali_secure_mode_deinit) { + mali_secure_mode_deinit(); + mali_secure_mode_enabled = MALI_FALSE; + mali_secure_mode_supported = MALI_FALSE; + return _MALI_OSK_ERR_OK; + } + MALI_DEBUG_PRINT(3, ("GPU secure mode not supported.\n")); + return _MALI_OSK_ERR_UNSUPPORTED; + +} + + +_mali_osk_errcode_t _mali_osk_gpu_secure_mode_enable(void) +{ + /* the mali executor lock must be held before enter this function. */ + + MALI_DEBUG_ASSERT(MALI_FALSE == mali_secure_mode_enabled); + + if (NULL != mali_secure_mode_enable) { + if (mali_secure_mode_enable()) { + MALI_DEBUG_PRINT(1, ("Failed to enable gpu secure mode.\n")); + return _MALI_OSK_ERR_FAULT; + } + mali_secure_mode_enabled = MALI_TRUE; + return _MALI_OSK_ERR_OK; + } + MALI_DEBUG_PRINT(1, ("GPU secure mode not supported.\n")); + return _MALI_OSK_ERR_UNSUPPORTED; +} + +_mali_osk_errcode_t _mali_osk_gpu_secure_mode_disable(void) +{ + /* the mali executor lock must be held before enter this function. 
*/ + + MALI_DEBUG_ASSERT(MALI_TRUE == mali_secure_mode_enabled); + + if (NULL != mali_secure_mode_disable) { + if (mali_secure_mode_disable()) { + MALI_DEBUG_PRINT(1, ("Failed to disable gpu secure mode.\n")); + return _MALI_OSK_ERR_FAULT; + } + mali_secure_mode_enabled = MALI_FALSE; + + return _MALI_OSK_ERR_OK; + + } + MALI_DEBUG_PRINT(1, ("GPU secure mode not supported.\n")); + return _MALI_OSK_ERR_UNSUPPORTED; + +} + +mali_bool _mali_osk_gpu_secure_mode_is_enabled(void) +{ + return mali_secure_mode_enabled; +} + +mali_bool _mali_osk_gpu_secure_mode_is_supported(void) +{ + return mali_secure_mode_supported; +} + + diff --git a/drivers/gpu/arm/mali/linux/mali_osk_math.c b/drivers/gpu/arm/mali/linux/mali_osk_math.c index 085ce76f7665f..3f06723ad062c 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_math.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_math.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_osk_memory.c b/drivers/gpu/arm/mali/linux/mali_osk_memory.c index 390e613e186da..ad5494d338d3a 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_memory.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_memory.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2011, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/mali/linux/mali_osk_misc.c b/drivers/gpu/arm/mali/linux/mali_osk_misc.c index 0a619e3fc27e2..7dda2834c544a 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_misc.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_misc.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -42,17 +42,6 @@ u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...) return res; } -void _mali_osk_ctxprintf(_mali_osk_print_ctx *print_ctx, const char *fmt, ...) -{ - va_list args; - char buf[512]; - - va_start(args, fmt); - vscnprintf(buf, 512, fmt, args); - seq_printf(print_ctx, buf); - va_end(args); -} - void _mali_osk_abort(void) { /* make a simple fault by dereferencing a NULL pointer */ diff --git a/drivers/gpu/arm/mali/linux/mali_osk_notification.c b/drivers/gpu/arm/mali/linux/mali_osk_notification.c index e66fe83f35571..58678417b34f4 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_notification.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_notification.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/mali/linux/mali_osk_pm.c b/drivers/gpu/arm/mali/linux/mali_osk_pm.c index 21180d33fe757..860ce925e77fa 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_pm.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_pm.c @@ -15,6 +15,7 @@ #include +#include "mali_kernel_linux.h" #ifdef CONFIG_PM_RUNTIME #include #endif /* CONFIG_PM_RUNTIME */ @@ -22,7 +23,6 @@ #include #include "mali_osk.h" #include "mali_kernel_common.h" -#include "mali_kernel_linux.h" /* Can NOT run in atomic context */ _mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void) diff --git a/drivers/gpu/arm/mali/linux/mali_osk_profiling.c b/drivers/gpu/arm/mali/linux/mali_osk_profiling.c index a9b22e9e3556f..35d1abcb49643 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_profiling.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_profiling.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -7,8 +7,12 @@ * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ - +#include #include +#include +#include +#include +#include #include #include "mali_kernel_common.h" @@ -22,19 +26,770 @@ #include "mali_l2_cache.h" #include "mali_user_settings_db.h" #include "mali_executor.h" +#include "mali_memory_manager.h" + +#define MALI_PROFILING_STREAM_DATA_DEFAULT_SIZE 100 +#define MALI_PROFILING_STREAM_HOLD_TIME 1000000 /*1 ms */ + +#define MALI_PROFILING_STREAM_BUFFER_SIZE (1 << 12) +#define MALI_PROFILING_STREAM_BUFFER_NUM 100 + +/** + * Define the mali profiling stream struct. 
+ */ +typedef struct mali_profiling_stream { + u8 data[MALI_PROFILING_STREAM_BUFFER_SIZE]; + u32 used_size; + struct list_head list; +} mali_profiling_stream; + +typedef struct mali_profiling_stream_list { + spinlock_t spin_lock; + struct list_head free_list; + struct list_head queue_list; +} mali_profiling_stream_list; + +static const char mali_name[] = "4xx"; +static const char utgard_setup_version[] = "ANNOTATE_SETUP 1\n"; + +static u32 profiling_sample_rate = 0; +static u32 first_sw_counter_index = 0; + +static mali_bool l2_cache_counter_if_enabled = MALI_FALSE; +static u32 num_counters_enabled = 0; +static u32 mem_counters_enabled = 0; + +static _mali_osk_atomic_t stream_fd_if_used; + +static wait_queue_head_t stream_fd_wait_queue; +static mali_profiling_counter *global_mali_profiling_counters = NULL; +static u32 num_global_mali_profiling_counters = 0; + +static mali_profiling_stream_list *global_mali_stream_list = NULL; +static mali_profiling_stream *mali_counter_stream = NULL; +static mali_profiling_stream *mali_core_activity_stream = NULL; +static u64 mali_core_activity_stream_dequeue_time = 0; +static spinlock_t mali_activity_lock; +static u32 mali_activity_cores_num = 0; +static struct hrtimer profiling_sampling_timer; + +const char *_mali_mem_counter_descriptions[] = _MALI_MEM_COUTNER_DESCRIPTIONS; +const char *_mali_special_counter_descriptions[] = _MALI_SPCIAL_COUNTER_DESCRIPTIONS; + +static u32 current_profiling_pid = 0; + +static void _mali_profiling_stream_list_destory(mali_profiling_stream_list *profiling_stream_list) +{ + mali_profiling_stream *profiling_stream, *tmp_profiling_stream; + MALI_DEBUG_ASSERT_POINTER(profiling_stream_list); + + list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &profiling_stream_list->free_list, list) { + list_del(&profiling_stream->list); + kfree(profiling_stream); + } + + list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &profiling_stream_list->queue_list, list) { + 
list_del(&profiling_stream->list); + kfree(profiling_stream); + } + + kfree(profiling_stream_list); +} + +static void _mali_profiling_global_stream_list_free(void) +{ + mali_profiling_stream *profiling_stream, *tmp_profiling_stream; + unsigned long irq_flags; + + MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags); + list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &global_mali_stream_list->queue_list, list) { + profiling_stream->used_size = 0; + list_move(&profiling_stream->list, &global_mali_stream_list->free_list); + } + spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags); +} + +static _mali_osk_errcode_t _mali_profiling_global_stream_list_dequeue(struct list_head *stream_list, mali_profiling_stream **new_mali_profiling_stream) +{ + unsigned long irq_flags; + _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK; + MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + MALI_DEBUG_ASSERT_POINTER(stream_list); + + spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags); + + if (!list_empty(stream_list)) { + *new_mali_profiling_stream = list_entry(stream_list->next, mali_profiling_stream, list); + list_del_init(&(*new_mali_profiling_stream)->list); + } else { + ret = _MALI_OSK_ERR_NOMEM; + } + + spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags); + + return ret; +} + +static void _mali_profiling_global_stream_list_queue(struct list_head *stream_list, mali_profiling_stream *current_mali_profiling_stream) +{ + unsigned long irq_flags; + MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + MALI_DEBUG_ASSERT_POINTER(stream_list); + + spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags); + list_add_tail(¤t_mali_profiling_stream->list, stream_list); + spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags); +} + +static mali_bool _mali_profiling_global_stream_queue_list_if_empty(void) +{ + 
MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + return list_empty(&global_mali_stream_list->queue_list); +} + +static u32 _mali_profiling_global_stream_queue_list_next_size(void) +{ + unsigned long irq_flags; + u32 size = 0; + MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + + spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags); + if (!list_empty(&global_mali_stream_list->queue_list)) { + mali_profiling_stream *next_mali_profiling_stream = + list_entry(global_mali_stream_list->queue_list.next, mali_profiling_stream, list); + size = next_mali_profiling_stream->used_size; + } + spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags); + return size; +} + +/* The mali profiling stream file operations functions. */ +static ssize_t _mali_profiling_stream_read( + struct file *filp, + char __user *buffer, + size_t size, + loff_t *f_pos); + +static unsigned int _mali_profiling_stream_poll(struct file *filp, poll_table *wait); + +static int _mali_profiling_stream_release(struct inode *inode, struct file *filp); + +/* The timeline stream file operations structure. 
*/ +static const struct file_operations mali_profiling_stream_fops = { + .release = _mali_profiling_stream_release, + .read = _mali_profiling_stream_read, + .poll = _mali_profiling_stream_poll, +}; + +static ssize_t _mali_profiling_stream_read( + struct file *filp, + char __user *buffer, + size_t size, + loff_t *f_pos) +{ + u32 copy_len = 0; + mali_profiling_stream *current_mali_profiling_stream; + u32 used_size; + MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + + while (!_mali_profiling_global_stream_queue_list_if_empty()) { + used_size = _mali_profiling_global_stream_queue_list_next_size(); + if (used_size <= ((u32)size - copy_len)) { + current_mali_profiling_stream = NULL; + _mali_profiling_global_stream_list_dequeue(&global_mali_stream_list->queue_list, + ¤t_mali_profiling_stream); + MALI_DEBUG_ASSERT_POINTER(current_mali_profiling_stream); + if (copy_to_user(&buffer[copy_len], current_mali_profiling_stream->data, current_mali_profiling_stream->used_size)) { + current_mali_profiling_stream->used_size = 0; + _mali_profiling_global_stream_list_queue(&global_mali_stream_list->free_list, current_mali_profiling_stream); + return -EFAULT; + } + copy_len += current_mali_profiling_stream->used_size; + current_mali_profiling_stream->used_size = 0; + _mali_profiling_global_stream_list_queue(&global_mali_stream_list->free_list, current_mali_profiling_stream); + } else { + break; + } + } + return (ssize_t)copy_len; +} + +static unsigned int _mali_profiling_stream_poll(struct file *filp, poll_table *wait) +{ + poll_wait(filp, &stream_fd_wait_queue, wait); + if (!_mali_profiling_global_stream_queue_list_if_empty()) + return POLLIN; + return 0; +} + +static int _mali_profiling_stream_release(struct inode *inode, struct file *filp) +{ + _mali_osk_atomic_init(&stream_fd_if_used, 0); + return 0; +} + +/* The funs for control packet and stream data.*/ +static void _mali_profiling_set_packet_size(unsigned char *const buf, const u32 size) +{ + u32 i; + + for (i = 0; i < 
sizeof(size); ++i) + buf[i] = (size >> 8 * i) & 0xFF; +} + +static u32 _mali_profiling_get_packet_size(unsigned char *const buf) +{ + u32 i; + u32 size = 0; + for (i = 0; i < sizeof(size); ++i) + size |= (u32)buf[i] << 8 * i; + return size; +} + +static u32 _mali_profiling_read_packet_int(unsigned char *const buf, u32 *const pos, u32 const packet_size) +{ + u64 int_value = 0; + u8 shift = 0; + u8 byte_value = ~0; + + while ((byte_value & 0x80) != 0) { + if ((*pos) >= packet_size) + return -1; + byte_value = buf[*pos]; + *pos += 1; + int_value |= (u32)(byte_value & 0x7f) << shift; + shift += 7; + } + + if (shift < 8 * sizeof(int_value) && (byte_value & 0x40) != 0) { + int_value |= -(1 << shift); + } + + return int_value; +} + +static u32 _mali_profiling_pack_int(u8 *const buf, u32 const buf_size, u32 const pos, s32 value) +{ + u32 add_bytes = 0; + int more = 1; + while (more) { + /* low order 7 bits of val */ + char byte_value = value & 0x7f; + value >>= 7; + + if ((value == 0 && (byte_value & 0x40) == 0) || (value == -1 && (byte_value & 0x40) != 0)) { + more = 0; + } else { + byte_value |= 0x80; + } + + if ((pos + add_bytes) >= buf_size) + return 0; + buf[pos + add_bytes] = byte_value; + add_bytes++; + } + + return add_bytes; +} + +static int _mali_profiling_pack_long(uint8_t *const buf, u32 const buf_size, u32 const pos, s64 val) +{ + int add_bytes = 0; + int more = 1; + while (more) { + /* low order 7 bits of x */ + char byte_value = val & 0x7f; + val >>= 7; + + if ((val == 0 && (byte_value & 0x40) == 0) || (val == -1 && (byte_value & 0x40) != 0)) { + more = 0; + } else { + byte_value |= 0x80; + } + + MALI_DEBUG_ASSERT((pos + add_bytes) < buf_size); + buf[pos + add_bytes] = byte_value; + add_bytes++; + } + + return add_bytes; +} + +static void _mali_profiling_stream_add_counter(mali_profiling_stream *profiling_stream, s64 current_time, u32 key, u32 counter_value) +{ + u32 add_size = STREAM_HEADER_SIZE; + MALI_DEBUG_ASSERT_POINTER(profiling_stream); + 
MALI_DEBUG_ASSERT((profiling_stream->used_size) < MALI_PROFILING_STREAM_BUFFER_SIZE); + + profiling_stream->data[profiling_stream->used_size] = STREAM_HEADER_COUNTER_VALUE; + + add_size += _mali_profiling_pack_long(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE, + profiling_stream->used_size + add_size, current_time); + add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE, + profiling_stream->used_size + add_size, (s32)0); + add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE, + profiling_stream->used_size + add_size, (s32)key); + add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE, + profiling_stream->used_size + add_size, (s32)counter_value); + + _mali_profiling_set_packet_size(profiling_stream->data + profiling_stream->used_size + 1, + add_size - STREAM_HEADER_SIZE); + + profiling_stream->used_size += add_size; +} + +/* The callback function for sampling timer.*/ +static enum hrtimer_restart _mali_profiling_sampling_counters(struct hrtimer *timer) +{ + u32 counter_index; + s64 current_time; + MALI_DEBUG_ASSERT_POINTER(global_mali_profiling_counters); + MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + + MALI_DEBUG_ASSERT(NULL == mali_counter_stream); + if (_MALI_OSK_ERR_OK == _mali_profiling_global_stream_list_dequeue( + &global_mali_stream_list->free_list, &mali_counter_stream)) { + + MALI_DEBUG_ASSERT_POINTER(mali_counter_stream); + MALI_DEBUG_ASSERT(0 == mali_counter_stream->used_size); + + /* Capture l2 cache counter values if enabled */ + if (MALI_TRUE == l2_cache_counter_if_enabled) { + int i, j = 0; + _mali_profiling_l2_counter_values l2_counters_values; + _mali_profiling_get_l2_counters(&l2_counters_values); + + for (i = COUNTER_L2_0_C0; i <= COUNTER_L2_2_C1; i++) { + if (0 == (j % 2)) + _mali_osk_profiling_record_global_counters(i, l2_counters_values.cores[j / 2].value0); + else + 
_mali_osk_profiling_record_global_counters(i, l2_counters_values.cores[j / 2].value1); + j++; + } + } + + current_time = (s64)_mali_osk_boot_time_get_ns(); + + /* Add all enabled counter values into stream */ + for (counter_index = 0; counter_index < num_global_mali_profiling_counters; counter_index++) { + /* No need to sample these couners here. */ + if (global_mali_profiling_counters[counter_index].enabled) { + if ((global_mali_profiling_counters[counter_index].counter_id >= FIRST_MEM_COUNTER && + global_mali_profiling_counters[counter_index].counter_id <= LAST_MEM_COUNTER) + || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_VP_ACTIVITY) + || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_FP_ACTIVITY) + || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_FILMSTRIP)) { + + continue; + } + + if (global_mali_profiling_counters[counter_index].counter_id >= COUNTER_L2_0_C0 && + global_mali_profiling_counters[counter_index].counter_id <= COUNTER_L2_2_C1) { + + u32 prev_val = global_mali_profiling_counters[counter_index].prev_counter_value; + + _mali_profiling_stream_add_counter(mali_counter_stream, current_time, global_mali_profiling_counters[counter_index].key, + global_mali_profiling_counters[counter_index].current_counter_value - prev_val); + + prev_val = global_mali_profiling_counters[counter_index].current_counter_value; + + global_mali_profiling_counters[counter_index].prev_counter_value = prev_val; + } else { + + if (global_mali_profiling_counters[counter_index].counter_id == COUNTER_TOTAL_ALLOC_PAGES) { + u32 total_alloc_mem = _mali_ukk_report_memory_usage(); + global_mali_profiling_counters[counter_index].current_counter_value = total_alloc_mem / _MALI_OSK_MALI_PAGE_SIZE; + } + _mali_profiling_stream_add_counter(mali_counter_stream, current_time, global_mali_profiling_counters[counter_index].key, + global_mali_profiling_counters[counter_index].current_counter_value); + if 
(global_mali_profiling_counters[counter_index].counter_id < FIRST_SPECIAL_COUNTER) + global_mali_profiling_counters[counter_index].current_counter_value = 0; + } + } + } + _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_counter_stream); + mali_counter_stream = NULL; + } else { + MALI_DEBUG_PRINT(1, ("Not enough mali profiling stream buffer!\n")); + } + + wake_up_interruptible(&stream_fd_wait_queue); + + /*Enable the sampling timer again*/ + if (0 != num_counters_enabled && 0 != profiling_sample_rate) { + hrtimer_forward_now(&profiling_sampling_timer, ns_to_ktime(profiling_sample_rate)); + return HRTIMER_RESTART; + } + return HRTIMER_NORESTART; +} + +static void _mali_profiling_sampling_core_activity_switch(int counter_id, int core, u32 activity, u32 pid) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&mali_activity_lock, irq_flags); + if (activity == 0) + mali_activity_cores_num--; + else + mali_activity_cores_num++; + spin_unlock_irqrestore(&mali_activity_lock, irq_flags); + + if (NULL != global_mali_profiling_counters) { + int i ; + for (i = 0; i < num_global_mali_profiling_counters; i++) { + if (counter_id == global_mali_profiling_counters[i].counter_id && global_mali_profiling_counters[i].enabled) { + u64 current_time = _mali_osk_boot_time_get_ns(); + u32 add_size = STREAM_HEADER_SIZE; + + if (NULL != mali_core_activity_stream) { + if ((mali_core_activity_stream_dequeue_time + MALI_PROFILING_STREAM_HOLD_TIME < current_time) || + (MALI_PROFILING_STREAM_DATA_DEFAULT_SIZE > MALI_PROFILING_STREAM_BUFFER_SIZE + - mali_core_activity_stream->used_size)) { + _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_core_activity_stream); + mali_core_activity_stream = NULL; + wake_up_interruptible(&stream_fd_wait_queue); + } + } + + if (NULL == mali_core_activity_stream) { + if (_MALI_OSK_ERR_OK == _mali_profiling_global_stream_list_dequeue( + &global_mali_stream_list->free_list, 
&mali_core_activity_stream)) { + mali_core_activity_stream_dequeue_time = current_time; + } else { + MALI_DEBUG_PRINT(1, ("Not enough mali profiling stream buffer!\n")); + wake_up_interruptible(&stream_fd_wait_queue); + break; + } + + } + + mali_core_activity_stream->data[mali_core_activity_stream->used_size] = STREAM_HEADER_CORE_ACTIVITY; + + add_size += _mali_profiling_pack_long(mali_core_activity_stream->data, + MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, (s64)current_time); + add_size += _mali_profiling_pack_int(mali_core_activity_stream->data, + MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, core); + add_size += _mali_profiling_pack_int(mali_core_activity_stream->data, + MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, (s32)global_mali_profiling_counters[i].key); + add_size += _mali_profiling_pack_int(mali_core_activity_stream->data, + MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, activity); + add_size += _mali_profiling_pack_int(mali_core_activity_stream->data, + MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, pid); + + _mali_profiling_set_packet_size(mali_core_activity_stream->data + mali_core_activity_stream->used_size + 1, + add_size - STREAM_HEADER_SIZE); + + mali_core_activity_stream->used_size += add_size; + + if (0 == mali_activity_cores_num) { + _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_core_activity_stream); + mali_core_activity_stream = NULL; + wake_up_interruptible(&stream_fd_wait_queue); + } + + break; + } + } + } +} + +static mali_bool _mali_profiling_global_counters_init(void) +{ + int core_id, counter_index, counter_number, counter_id; + u32 num_l2_cache_cores; + u32 num_pp_cores; + u32 num_gp_cores = 1; + + MALI_DEBUG_ASSERT(NULL == global_mali_profiling_counters); + num_pp_cores = mali_pp_get_glob_num_pp_cores(); + 
num_l2_cache_cores = mali_l2_cache_core_get_glob_num_l2_cores(); + + num_global_mali_profiling_counters = 3 * (num_gp_cores + num_pp_cores) + 2 * num_l2_cache_cores + + MALI_PROFILING_SW_COUNTERS_NUM + + MALI_PROFILING_SPECIAL_COUNTERS_NUM + + MALI_PROFILING_MEM_COUNTERS_NUM; + global_mali_profiling_counters = _mali_osk_calloc(num_global_mali_profiling_counters, sizeof(mali_profiling_counter)); + + if (NULL == global_mali_profiling_counters) + return MALI_FALSE; + + counter_index = 0; + /*Vertex processor counters */ + for (core_id = 0; core_id < num_gp_cores; core_id ++) { + global_mali_profiling_counters[counter_index].counter_id = ACTIVITY_VP_0 + core_id; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_VP_%d_active", mali_name, core_id); + + for (counter_number = 0; counter_number < 2; counter_number++) { + counter_index++; + global_mali_profiling_counters[counter_index].counter_id = COUNTER_VP_0_C0 + (2 * core_id) + counter_number; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_VP_%d_cnt%d", mali_name, core_id, counter_number); + } + } + + /* Fragment processors' counters */ + for (core_id = 0; core_id < num_pp_cores; core_id++) { + counter_index++; + global_mali_profiling_counters[counter_index].counter_id = ACTIVITY_FP_0 + core_id; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_FP_%d_active", mali_name, core_id); + + for (counter_number = 0; counter_number < 2; counter_number++) { + counter_index++; + global_mali_profiling_counters[counter_index].counter_id = COUNTER_FP_0_C0 + (2 * core_id) + counter_number; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + 
sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_FP_%d_cnt%d", mali_name, core_id, counter_number); + } + } + + /* L2 Cache counters */ + for (core_id = 0; core_id < num_l2_cache_cores; core_id++) { + for (counter_number = 0; counter_number < 2; counter_number++) { + counter_index++; + global_mali_profiling_counters[counter_index].counter_id = COUNTER_L2_0_C0 + (2 * core_id) + counter_number; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_L2_%d_cnt%d", mali_name, core_id, counter_number); + } + } + + /* Now set up the software counter entries */ + for (counter_id = FIRST_SW_COUNTER; counter_id <= LAST_SW_COUNTER; counter_id++) { + counter_index++; + + if (0 == first_sw_counter_index) + first_sw_counter_index = counter_index; + + global_mali_profiling_counters[counter_index].counter_id = counter_id; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_SW_%d", mali_name, counter_id - FIRST_SW_COUNTER); + } + + /* Now set up the special counter entries */ + for (counter_id = FIRST_SPECIAL_COUNTER; counter_id <= LAST_SPECIAL_COUNTER; counter_id++) { + + counter_index++; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_%s", + mali_name, _mali_special_counter_descriptions[counter_id - FIRST_SPECIAL_COUNTER]); + + global_mali_profiling_counters[counter_index].counter_id = counter_id; + } + + /* Now set up the mem counter entries*/ + for (counter_id = FIRST_MEM_COUNTER; counter_id <= LAST_MEM_COUNTER; counter_id++) { + + counter_index++; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_%s", + 
mali_name, _mali_mem_counter_descriptions[counter_id - FIRST_MEM_COUNTER]); + + global_mali_profiling_counters[counter_index].counter_id = counter_id; + } + + MALI_DEBUG_ASSERT((counter_index + 1) == num_global_mali_profiling_counters); + + return MALI_TRUE; +} + +void _mali_profiling_notification_mem_counter(struct mali_session_data *session, u32 counter_id, u32 key, int enable) +{ + + MALI_DEBUG_ASSERT_POINTER(session); + + if (NULL != session) { + _mali_osk_notification_t *notification; + _mali_osk_notification_queue_t *queue; + + queue = session->ioctl_queue; + MALI_DEBUG_ASSERT(NULL != queue); + + notification = _mali_osk_notification_create(_MALI_NOTIFICATION_ANNOTATE_PROFILING_MEM_COUNTER, + sizeof(_mali_uk_annotate_profiling_mem_counter_s)); + + if (NULL != notification) { + _mali_uk_annotate_profiling_mem_counter_s *data = notification->result_buffer; + data->counter_id = counter_id; + data->key = key; + data->enable = enable; + + _mali_osk_notification_queue_send(queue, notification); + } else { + MALI_PRINT_ERROR(("Failed to create notification object!\n")); + } + } else { + MALI_PRINT_ERROR(("Failed to find the right session!\n")); + } +} + +void _mali_profiling_notification_enable(struct mali_session_data *session, u32 sampling_rate, int enable) +{ + MALI_DEBUG_ASSERT_POINTER(session); + + if (NULL != session) { + _mali_osk_notification_t *notification; + _mali_osk_notification_queue_t *queue; + + queue = session->ioctl_queue; + MALI_DEBUG_ASSERT(NULL != queue); + + notification = _mali_osk_notification_create(_MALI_NOTIFICATION_ANNOTATE_PROFILING_ENABLE, + sizeof(_mali_uk_annotate_profiling_enable_s)); + + if (NULL != notification) { + _mali_uk_annotate_profiling_enable_s *data = notification->result_buffer; + data->sampling_rate = sampling_rate; + data->enable = enable; + + _mali_osk_notification_queue_send(queue, notification); + } else { + MALI_PRINT_ERROR(("Failed to create notification object!\n")); + } + } else { + MALI_PRINT_ERROR(("Failed to 
find the right session!\n")); + } +} + _mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start) { + int i; + mali_profiling_stream *new_mali_profiling_stream = NULL; + mali_profiling_stream_list *new_mali_profiling_stream_list = NULL; if (MALI_TRUE == auto_start) { mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE); } + /*Init the global_mali_stream_list*/ + MALI_DEBUG_ASSERT(NULL == global_mali_stream_list); + new_mali_profiling_stream_list = (mali_profiling_stream_list *)kmalloc(sizeof(mali_profiling_stream_list), GFP_KERNEL); + + if (NULL == new_mali_profiling_stream_list) { + return _MALI_OSK_ERR_NOMEM; + } + + spin_lock_init(&new_mali_profiling_stream_list->spin_lock); + INIT_LIST_HEAD(&new_mali_profiling_stream_list->free_list); + INIT_LIST_HEAD(&new_mali_profiling_stream_list->queue_list); + + spin_lock_init(&mali_activity_lock); + mali_activity_cores_num = 0; + + for (i = 0; i < MALI_PROFILING_STREAM_BUFFER_NUM; i++) { + new_mali_profiling_stream = (mali_profiling_stream *)kmalloc(sizeof(mali_profiling_stream), GFP_KERNEL); + if (NULL == new_mali_profiling_stream) { + _mali_profiling_stream_list_destory(new_mali_profiling_stream_list); + return _MALI_OSK_ERR_NOMEM; + } + + INIT_LIST_HEAD(&new_mali_profiling_stream->list); + new_mali_profiling_stream->used_size = 0; + list_add_tail(&new_mali_profiling_stream->list, &new_mali_profiling_stream_list->free_list); + + } + + _mali_osk_atomic_init(&stream_fd_if_used, 0); + init_waitqueue_head(&stream_fd_wait_queue); + + hrtimer_init(&profiling_sampling_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + + profiling_sampling_timer.function = _mali_profiling_sampling_counters; + + global_mali_stream_list = new_mali_profiling_stream_list; + return _MALI_OSK_ERR_OK; } void _mali_osk_profiling_term(void) { - /* Nothing to do */ + if (0 != profiling_sample_rate) { + hrtimer_cancel(&profiling_sampling_timer); + profiling_sample_rate = 0; + } + _mali_osk_atomic_term(&stream_fd_if_used); + + if 
(NULL != global_mali_profiling_counters) { + _mali_osk_free(global_mali_profiling_counters); + global_mali_profiling_counters = NULL; + num_global_mali_profiling_counters = 0; + } + + if (NULL != global_mali_stream_list) { + _mali_profiling_stream_list_destory(global_mali_stream_list); + global_mali_stream_list = NULL; + } + +} + +void _mali_osk_profiling_stop_sampling(u32 pid) +{ + if (pid == current_profiling_pid) { + + int i; + /* Reset all counter states when closing connection.*/ + for (i = 0; i < num_global_mali_profiling_counters; ++i) { + _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, MALI_HW_CORE_NO_COUNTER); + global_mali_profiling_counters[i].enabled = 0; + global_mali_profiling_counters[i].prev_counter_value = 0; + global_mali_profiling_counters[i].current_counter_value = 0; + } + l2_cache_counter_if_enabled = MALI_FALSE; + num_counters_enabled = 0; + mem_counters_enabled = 0; + _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 0); + _mali_profiling_control(SW_COUNTER_ENABLE, 0); + /* Delete sampling timer when closing connection. */ + if (0 != profiling_sample_rate) { + hrtimer_cancel(&profiling_sampling_timer); + profiling_sample_rate = 0; + } + current_profiling_pid = 0; + } +} + +void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4) +{ + /*Record the freq & volt to global_mali_profiling_counters here. 
*/ + if (0 != profiling_sample_rate) { + u32 channel; + u32 state; + channel = (event_id >> 16) & 0xFF; + state = ((event_id >> 24) & 0xF) << 24; + + switch (state) { + case MALI_PROFILING_EVENT_TYPE_SINGLE: + if ((MALI_PROFILING_EVENT_CHANNEL_GPU >> 16) == channel) { + u32 reason = (event_id & 0xFFFF); + if (MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE == reason) { + _mali_osk_profiling_record_global_counters(COUNTER_FREQUENCY, data0); + _mali_osk_profiling_record_global_counters(COUNTER_VOLTAGE, data1); + } + } + break; + case MALI_PROFILING_EVENT_TYPE_START: + if ((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) == channel) { + _mali_profiling_sampling_core_activity_switch(COUNTER_VP_ACTIVITY, 0, 1, data1); + } else if (channel >= (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) && + (MALI_PROFILING_EVENT_CHANNEL_PP7 >> 16) >= channel) { + u32 core_id = channel - (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16); + _mali_profiling_sampling_core_activity_switch(COUNTER_FP_ACTIVITY, core_id, 1, data1); + } + break; + case MALI_PROFILING_EVENT_TYPE_STOP: + if ((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) == channel) { + _mali_profiling_sampling_core_activity_switch(COUNTER_VP_ACTIVITY, 0, 0, 0); + } else if (channel >= (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) && + (MALI_PROFILING_EVENT_CHANNEL_PP7 >> 16) >= channel) { + u32 core_id = channel - (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16); + _mali_profiling_sampling_core_activity_switch(COUNTER_FP_ACTIVITY, core_id, 0, 0); + } + break; + default: + break; + } + } + trace_mali_timeline_event(event_id, data0, data1, data2, data3, data4); } void _mali_osk_profiling_report_sw_counters(u32 *counters) @@ -42,9 +797,17 @@ void _mali_osk_profiling_report_sw_counters(u32 *counters) trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters); } -void _mali_osk_profiling_memory_usage_get(u32 *memory_usage) +void _mali_osk_profiling_record_global_counters(int counter_id, u32 value) { - *memory_usage = 
_mali_ukk_report_memory_usage(); + if (NULL != global_mali_profiling_counters) { + int i ; + for (i = 0; i < num_global_mali_profiling_counters; i++) { + if (counter_id == global_mali_profiling_counters[i].counter_id && global_mali_profiling_counters[i].enabled) { + global_mali_profiling_counters[i].current_counter_value = value; + break; + } + } + } } _mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args) @@ -61,12 +824,279 @@ _mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s * _mali_osk_profiling_report_sw_counters(counters); + if (NULL != global_mali_profiling_counters) { + int i; + for (i = 0; i < MALI_PROFILING_SW_COUNTERS_NUM; i ++) { + if (global_mali_profiling_counters[first_sw_counter_index + i].enabled) { + global_mali_profiling_counters[first_sw_counter_index + i].current_counter_value = *(counters + i); + } + } + } + return _MALI_OSK_ERR_OK; } -_mali_osk_errcode_t _mali_ukk_profiling_memory_usage_get(_mali_uk_profiling_memory_usage_get_s *args) +_mali_osk_errcode_t _mali_ukk_profiling_stream_fd_get(_mali_uk_profiling_stream_fd_get_s *args) { - _mali_osk_profiling_memory_usage_get(&args->memory_usage); + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + MALI_DEBUG_ASSERT_POINTER(session); + + if (1 == _mali_osk_atomic_inc_return(&stream_fd_if_used)) { + + s32 fd = anon_inode_getfd("[mali_profiling_stream]", &mali_profiling_stream_fops, + session, + O_RDONLY | O_CLOEXEC); + + args->stream_fd = fd; + if (0 > fd) { + _mali_osk_atomic_dec(&stream_fd_if_used); + return _MALI_OSK_ERR_FAULT; + } + args->stream_fd = fd; + } else { + _mali_osk_atomic_dec(&stream_fd_if_used); + args->stream_fd = -1; + return _MALI_OSK_ERR_BUSY; + } + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_ukk_profiling_control_set(_mali_uk_profiling_control_set_s *args) +{ + u32 control_packet_size; + u32 output_buffer_size; + + struct mali_session_data *session = (struct 
mali_session_data *)(uintptr_t)args->ctx; + MALI_DEBUG_ASSERT_POINTER(session); + + if (NULL == global_mali_profiling_counters && MALI_FALSE == _mali_profiling_global_counters_init()) { + MALI_PRINT_ERROR(("Failed to create global_mali_profiling_counters.\n")); + return _MALI_OSK_ERR_FAULT; + } + + control_packet_size = args->control_packet_size; + output_buffer_size = args->response_packet_size; + + if (0 != control_packet_size) { + u8 control_type; + u8 *control_packet_data; + u8 *response_packet_data; + u32 version_length = sizeof(utgard_setup_version) - 1; + + control_packet_data = (u8 *)(uintptr_t)args->control_packet_data; + MALI_DEBUG_ASSERT_POINTER(control_packet_data); + response_packet_data = (u8 *)(uintptr_t)args->response_packet_data; + MALI_DEBUG_ASSERT_POINTER(response_packet_data); + + /*Decide if need to ignore Utgard setup version.*/ + if (control_packet_size >= version_length) { + if (0 == memcmp(control_packet_data, utgard_setup_version, version_length)) { + if (control_packet_size == version_length) { + args->response_packet_size = 0; + return _MALI_OSK_ERR_OK; + } else { + control_packet_data += version_length; + control_packet_size -= version_length; + } + } + } + + current_profiling_pid = _mali_osk_get_pid(); + + control_type = control_packet_data[0]; + switch (control_type) { + case PACKET_HEADER_COUNTERS_REQUEST: { + int i; + + if (PACKET_HEADER_SIZE > control_packet_size || + control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) { + MALI_PRINT_ERROR(("Wrong control packet size, type 0x%x,size 0x%x.\n", control_packet_data[0], control_packet_size)); + return _MALI_OSK_ERR_FAULT; + } + + /* Send supported counters */ + if (PACKET_HEADER_SIZE > output_buffer_size) + return _MALI_OSK_ERR_FAULT; + + *response_packet_data = PACKET_HEADER_COUNTERS_ACK; + args->response_packet_size = PACKET_HEADER_SIZE; + + for (i = 0; i < num_global_mali_profiling_counters; ++i) { + u32 name_size = 
strlen(global_mali_profiling_counters[i].counter_name); + + if ((args->response_packet_size + name_size + 1) > output_buffer_size) { + MALI_PRINT_ERROR(("Response packet data is too large..\n")); + return _MALI_OSK_ERR_FAULT; + } + + memcpy(response_packet_data + args->response_packet_size, + global_mali_profiling_counters[i].counter_name, name_size + 1); + + args->response_packet_size += (name_size + 1); + + if (global_mali_profiling_counters[i].counter_id == COUNTER_VP_ACTIVITY) { + args->response_packet_size += _mali_profiling_pack_int(response_packet_data, + output_buffer_size, args->response_packet_size, (s32)1); + } else if (global_mali_profiling_counters[i].counter_id == COUNTER_FP_ACTIVITY) { + args->response_packet_size += _mali_profiling_pack_int(response_packet_data, + output_buffer_size, args->response_packet_size, (s32)mali_pp_get_glob_num_pp_cores()); + } else { + args->response_packet_size += _mali_profiling_pack_int(response_packet_data, + output_buffer_size, args->response_packet_size, (s32) - 1); + } + } + + _mali_profiling_set_packet_size(response_packet_data + 1, args->response_packet_size); + break; + } + + case PACKET_HEADER_COUNTERS_ENABLE: { + int i; + u32 request_pos = PACKET_HEADER_SIZE; + mali_bool sw_counter_if_enabled = MALI_FALSE; + + if (PACKET_HEADER_SIZE > control_packet_size || + control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) { + MALI_PRINT_ERROR(("Wrong control packet size , type 0x%x,size 0x%x.\n", control_packet_data[0], control_packet_size)); + return _MALI_OSK_ERR_FAULT; + } + + /* Init all counter states before enable requested counters.*/ + for (i = 0; i < num_global_mali_profiling_counters; ++i) { + _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, MALI_HW_CORE_NO_COUNTER); + global_mali_profiling_counters[i].enabled = 0; + global_mali_profiling_counters[i].prev_counter_value = 0; + global_mali_profiling_counters[i].current_counter_value = 0; + + if 
(global_mali_profiling_counters[i].counter_id >= FIRST_MEM_COUNTER && + global_mali_profiling_counters[i].counter_id <= LAST_MEM_COUNTER) { + _mali_profiling_notification_mem_counter(session, global_mali_profiling_counters[i].counter_id, 0, 0); + } + } + + l2_cache_counter_if_enabled = MALI_FALSE; + num_counters_enabled = 0; + mem_counters_enabled = 0; + _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 0); + _mali_profiling_control(SW_COUNTER_ENABLE, 0); + _mali_profiling_notification_enable(session, 0, 0); + + /* Enable requested counters */ + while (request_pos < control_packet_size) { + u32 begin = request_pos; + u32 event; + u32 key; + + /* Check the counter name which should be ended with null */ + while (request_pos < control_packet_size && control_packet_data[request_pos] != '\0') { + ++request_pos; + } + + if (request_pos >= control_packet_size) + return _MALI_OSK_ERR_FAULT; + + ++request_pos; + event = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size); + key = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size); + + for (i = 0; i < num_global_mali_profiling_counters; ++i) { + u32 name_size = strlen((char *)(control_packet_data + begin)); + + if (strncmp(global_mali_profiling_counters[i].counter_name, (char *)(control_packet_data + begin), name_size) == 0) { + if (!sw_counter_if_enabled && (FIRST_SW_COUNTER <= global_mali_profiling_counters[i].counter_id + && global_mali_profiling_counters[i].counter_id <= LAST_SW_COUNTER)) { + sw_counter_if_enabled = MALI_TRUE; + _mali_profiling_control(SW_COUNTER_ENABLE, 1); + } + + if (COUNTER_FILMSTRIP == global_mali_profiling_counters[i].counter_id) { + _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1); + _mali_profiling_control(FBDUMP_CONTROL_RATE, event & 0xff); + _mali_profiling_control(FBDUMP_CONTROL_RESIZE_FACTOR, (event >> 8) & 0xff); + } + + if (global_mali_profiling_counters[i].counter_id >= FIRST_MEM_COUNTER && + 
global_mali_profiling_counters[i].counter_id <= LAST_MEM_COUNTER) { + _mali_profiling_notification_mem_counter(session, global_mali_profiling_counters[i].counter_id, + key, 1); + mem_counters_enabled++; + } + + global_mali_profiling_counters[i].counter_event = event; + global_mali_profiling_counters[i].key = key; + global_mali_profiling_counters[i].enabled = 1; + + _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, + global_mali_profiling_counters[i].counter_event); + num_counters_enabled++; + break; + } + } + + if (i == num_global_mali_profiling_counters) { + MALI_PRINT_ERROR(("Counter name does not match for type %u.\n", control_type)); + return _MALI_OSK_ERR_FAULT; + } + } + + if (PACKET_HEADER_SIZE <= output_buffer_size) { + *response_packet_data = PACKET_HEADER_ACK; + _mali_profiling_set_packet_size(response_packet_data + 1, PACKET_HEADER_SIZE); + args->response_packet_size = PACKET_HEADER_SIZE; + } else { + return _MALI_OSK_ERR_FAULT; + } + + break; + } + + case PACKET_HEADER_START_CAPTURE_VALUE: { + u32 live_rate; + u32 request_pos = PACKET_HEADER_SIZE; + + if (PACKET_HEADER_SIZE > control_packet_size || + control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) { + MALI_PRINT_ERROR(("Wrong control packet size , type 0x%x,size 0x%x.\n", control_packet_data[0], control_packet_size)); + return _MALI_OSK_ERR_FAULT; + } + + /* Read samping rate in nanoseconds and live rate, start capture.*/ + profiling_sample_rate = _mali_profiling_read_packet_int(control_packet_data, + &request_pos, control_packet_size); + + live_rate = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size); + + if (PACKET_HEADER_SIZE <= output_buffer_size) { + *response_packet_data = PACKET_HEADER_ACK; + _mali_profiling_set_packet_size(response_packet_data + 1, PACKET_HEADER_SIZE); + args->response_packet_size = PACKET_HEADER_SIZE; + } else { + return _MALI_OSK_ERR_FAULT; + } + + if (0 != num_counters_enabled && 0 != 
profiling_sample_rate) { + _mali_profiling_global_stream_list_free(); + if (mem_counters_enabled > 0) { + _mali_profiling_notification_enable(session, profiling_sample_rate, 1); + } + hrtimer_start(&profiling_sampling_timer, + ktime_set(profiling_sample_rate / 1000000000, profiling_sample_rate % 1000000000), + HRTIMER_MODE_REL_PINNED); + } + + break; + } + default: + MALI_PRINT_ERROR(("Unsupported profiling packet header type %u.\n", control_type)); + args->response_packet_size = 0; + return _MALI_OSK_ERR_FAULT; + } + } else { + _mali_osk_profiling_stop_sampling(current_profiling_pid); + _mali_profiling_notification_enable(session, 0, 0); + } + return _MALI_OSK_ERR_OK; } @@ -157,6 +1187,7 @@ int _mali_profiling_set_event(u32 counter_id, s32 event_id) u32 counter_src = (counter_id - COUNTER_L2_0_C0) & 1; mali_l2_cache_core_set_counter_src(l2_cache_core, counter_src, event_id); + l2_cache_counter_if_enabled = MALI_TRUE; } } else { return 0; /* Failure, unknown event */ diff --git a/drivers/gpu/arm/mali/linux/mali_osk_specific.h b/drivers/gpu/arm/mali/linux/mali_osk_specific.h index db034a5b3c707..adcca2937967b 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_specific.h +++ b/drivers/gpu/arm/mali/linux/mali_osk_specific.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_osk_time.c b/drivers/gpu/arm/mali/linux/mali_osk_time.c index 4deaa101e48f9..76876b61834f5 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_time.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_time.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved. 
+ * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_osk_timers.c b/drivers/gpu/arm/mali/linux/mali_osk_timers.c index 6bbaee749d64d..8ada2da8d386b 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_timers.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_timers.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_osk_wait_queue.c b/drivers/gpu/arm/mali/linux/mali_osk_wait_queue.c index 15d5ce250eb19..caa3abe109fb4 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_wait_queue.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_wait_queue.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_osk_wq.c b/drivers/gpu/arm/mali/linux/mali_osk_wq.c index 2c34c91a7922d..06afa04032c59 100755 --- a/drivers/gpu/arm/mali/linux/mali_osk_wq.c +++ b/drivers/gpu/arm/mali/linux/mali_osk_wq.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. 
All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_pmu_power_up_down.c b/drivers/gpu/arm/mali/linux/mali_pmu_power_up_down.c index 7414a3e706ca2..6a6c9f8c69067 100755 --- a/drivers/gpu/arm/mali/linux/mali_pmu_power_up_down.c +++ b/drivers/gpu/arm/mali/linux/mali_pmu_power_up_down.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_profiling_events.h b/drivers/gpu/arm/mali/linux/mali_profiling_events.h index 0b90e8c5cf26f..5e51095894e77 100755 --- a/drivers/gpu/arm/mali/linux/mali_profiling_events.h +++ b/drivers/gpu/arm/mali/linux/mali_profiling_events.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2012, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_profiling_gator_api.h b/drivers/gpu/arm/mali/linux/mali_profiling_gator_api.h index c98d127366bab..e37197138f8d8 100755 --- a/drivers/gpu/arm/mali/linux/mali_profiling_gator_api.h +++ b/drivers/gpu/arm/mali/linux/mali_profiling_gator_api.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2013, 2015 ARM Limited. All rights reserved. 
+ * Copyright (C) 2012-2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_profiling_internal.c b/drivers/gpu/arm/mali/linux/mali_profiling_internal.c index 1e460dc173cdb..918caa07fd3bb 100755 --- a/drivers/gpu/arm/mali/linux/mali_profiling_internal.c +++ b/drivers/gpu/arm/mali/linux/mali_profiling_internal.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -104,6 +104,7 @@ _mali_osk_errcode_t _mali_internal_profiling_start(u32 *limit) new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry)); if (NULL == new_profile_entries) { + _mali_osk_mutex_signal(lock); _mali_osk_vfree(new_profile_entries); return _MALI_OSK_ERR_NOMEM; } diff --git a/drivers/gpu/arm/mali/linux/mali_profiling_internal.h b/drivers/gpu/arm/mali/linux/mali_profiling_internal.h index 1c6f4da691d24..6e05ffdc927a2 100755 --- a/drivers/gpu/arm/mali/linux/mali_profiling_internal.h +++ b/drivers/gpu/arm/mali/linux/mali_profiling_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/mali/linux/mali_sync.c b/drivers/gpu/arm/mali/linux/mali_sync.c index 215ecc75a28cc..a0d227c78c4d1 100755 --- a/drivers/gpu/arm/mali/linux/mali_sync.c +++ b/drivers/gpu/arm/mali/linux/mali_sync.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -13,6 +13,7 @@ #include "mali_osk.h" #include "mali_kernel_common.h" #include "mali_timeline.h" +#include "mali_executor.h" #include #include @@ -125,6 +126,24 @@ static void timeline_free_pt(struct sync_pt *pt) static void timeline_release(struct sync_timeline *sync_timeline) { + struct mali_sync_timeline_container *mali_sync_tl = NULL; + struct mali_timeline *mali_tl = NULL; + + MALI_DEBUG_ASSERT_POINTER(sync_timeline); + + mali_sync_tl = to_mali_sync_tl_container(sync_timeline); + MALI_DEBUG_ASSERT_POINTER(mali_sync_tl); + + mali_tl = mali_sync_tl->timeline; + + /* always signaled timeline didn't have mali container */ + if (mali_tl) { + if (NULL != mali_tl->spinlock) { + mali_spinlock_reentrant_term(mali_tl->spinlock); + } + _mali_osk_free(mali_tl); + } + module_put(THIS_MODULE); } @@ -148,6 +167,42 @@ static void timeline_print_pt(struct seq_file *s, struct sync_pt *sync_pt) } } +static void timeline_print_obj(struct seq_file *s, struct sync_timeline *sync_tl) +{ + struct mali_sync_timeline_container *mali_sync_tl = NULL; + struct mali_timeline *mali_tl = NULL; + + MALI_DEBUG_ASSERT_POINTER(sync_tl); + + mali_sync_tl = to_mali_sync_tl_container(sync_tl); + MALI_DEBUG_ASSERT_POINTER(mali_sync_tl); + + mali_tl = mali_sync_tl->timeline; + + if (NULL != mali_tl) { + seq_printf(s, "oldest (%u) ", mali_tl->point_oldest); + seq_printf(s, "next (%u)", 
mali_tl->point_next); + seq_printf(s, "\n"); + +#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS) + { + u32 tid = _mali_osk_get_tid(); + struct mali_timeline_system *system = mali_tl->system; + + mali_spinlock_reentrant_wait(mali_tl->spinlock, tid); + if (!mali_tl->destroyed) { + mali_spinlock_reentrant_wait(system->spinlock, tid); + mali_timeline_debug_print_timeline(mali_tl, s); + mali_spinlock_reentrant_signal(system->spinlock, tid); + } + mali_spinlock_reentrant_signal(mali_tl->spinlock, tid); + + /* dump job queue status and group running status */ + mali_executor_status_dump(); + } +#endif + } +} #else static void timeline_pt_value_str(struct sync_pt *pt, char *str, int size) { @@ -170,22 +225,43 @@ static void timeline_pt_value_str(struct sync_pt *pt, char *str, int size) static void timeline_value_str(struct sync_timeline *timeline, char *str, int size) { - struct mali_sync_timeline_container *mali_sync_tl; + struct mali_sync_timeline_container *mali_sync_tl = NULL; + struct mali_timeline *mali_tl = NULL; MALI_DEBUG_ASSERT_POINTER(timeline); - MALI_DEBUG_ASSERT_POINTER(str); mali_sync_tl = to_mali_sync_tl_container(timeline); - MALI_DEBUG_ASSERT_POINTER(mali_sync_tl); - if (NULL != mali_sync_tl->timeline) { - _mali_osk_snprintf(str, size, "oldest (%u) next (%u)\n", mali_sync_tl->timeline->point_oldest, - mali_sync_tl->timeline->point_next); + mali_tl = mali_sync_tl->timeline; + + if (NULL != mali_tl) { + _mali_osk_snprintf(str, size, "oldest (%u) ", mali_tl->point_oldest); + _mali_osk_snprintf(str, size, "next (%u)", mali_tl->point_next); + _mali_osk_snprintf(str, size, "\n"); + +#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS) + { + u32 tid = _mali_osk_get_tid(); + struct mali_timeline_system *system = mali_tl->system; + + mali_spinlock_reentrant_wait(mali_tl->spinlock, tid); + if (!mali_tl->destroyed) { + mali_spinlock_reentrant_wait(system->spinlock, tid); + mali_timeline_debug_direct_print_timeline(mali_tl); + mali_spinlock_reentrant_signal(system->spinlock, tid); + 
} + mali_spinlock_reentrant_signal(mali_tl->spinlock, tid); + + /* dump job queue status and group running status */ + mali_executor_status_dump(); + } +#endif } } #endif + static struct sync_timeline_ops mali_timeline_ops = { .driver_name = "Mali", .dup = timeline_dup, @@ -195,6 +271,7 @@ static struct sync_timeline_ops mali_timeline_ops = { .release_obj = timeline_release, #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) .print_pt = timeline_print_pt, + .print_obj = timeline_print_obj, #else .pt_value_str = timeline_pt_value_str, .timeline_value_str = timeline_value_str, @@ -222,12 +299,6 @@ struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, return sync_tl; } -mali_bool mali_sync_timeline_is_ours(struct sync_timeline *sync_tl) -{ - MALI_DEBUG_ASSERT_POINTER(sync_tl); - return (sync_tl->ops == &mali_timeline_ops) ? MALI_TRUE : MALI_FALSE; -} - s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence) { s32 fd = -1; diff --git a/drivers/gpu/arm/mali/linux/mali_sync.h b/drivers/gpu/arm/mali/linux/mali_sync.h index 61c75368dfa3c..79efb8e652745 100755 --- a/drivers/gpu/arm/mali/linux/mali_sync.h +++ b/drivers/gpu/arm/mali/linux/mali_sync.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -41,14 +41,6 @@ struct mali_timeline; */ struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name); -/** - * Check if sync timeline belongs to Mali. - * - * @param sync_tl Sync timeline to check. - * @return MALI_TRUE if sync timeline belongs to Mali, MALI_FALSE if not. 
- */ -mali_bool mali_sync_timeline_is_ours(struct sync_timeline *sync_tl); - /** * Creates a file descriptor representing the sync fence. Will release sync fence if allocation of * file descriptor fails. diff --git a/drivers/gpu/arm/mali/linux/mali_uk_types.h b/drivers/gpu/arm/mali/linux/mali_uk_types.h index 1884cdbba4248..00ba28b143003 100755 --- a/drivers/gpu/arm/mali/linux/mali_uk_types.h +++ b/drivers/gpu/arm/mali/linux/mali_uk_types.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2012, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_ukk_core.c b/drivers/gpu/arm/mali/linux/mali_ukk_core.c index 215cd3659fa56..a6c43b712d46b 100755 --- a/drivers/gpu/arm/mali/linux/mali_ukk_core.c +++ b/drivers/gpu/arm/mali/linux/mali_ukk_core.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -130,3 +130,17 @@ int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_ return map_errcode(err); } + +int pending_submit_wrapper(struct mali_session_data *session_data, _mali_uk_pending_submit_s __user *uargs) +{ + _mali_uk_pending_submit_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + + kargs.ctx = (uintptr_t)session_data; + err = _mali_ukk_pending_submit(&kargs); + if (_MALI_OSK_ERR_OK != err) return map_errcode(err); + + return 0; +} diff --git a/drivers/gpu/arm/mali/linux/mali_ukk_gp.c b/drivers/gpu/arm/mali/linux/mali_ukk_gp.c index d4144c0f5e481..d7c5d2f973e53 100755 --- a/drivers/gpu/arm/mali/linux/mali_ukk_gp.c +++ b/drivers/gpu/arm/mali/linux/mali_ukk_gp.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_ukk_mem.c b/drivers/gpu/arm/mali/linux/mali_ukk_mem.c index 68d140d03fd4e..3dfe345f84831 100755 --- a/drivers/gpu/arm/mali/linux/mali_ukk_mem.c +++ b/drivers/gpu/arm/mali/linux/mali_ukk_mem.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -61,6 +61,10 @@ int mem_free_wrapper(struct mali_session_data *session_data, _mali_uk_free_mem_s return map_errcode(err); } + if (0 != put_user(kargs.free_pages_nr, &uargs->free_pages_nr)) { + return -EFAULT; + } + return 0; } @@ -109,6 +113,80 @@ int mem_unbind_wrapper(struct mali_session_data *session_data, _mali_uk_unbind_m } +int mem_cow_wrapper(struct mali_session_data *session_data, _mali_uk_cow_mem_s __user *uargs) +{ + _mali_uk_cow_mem_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_cow_mem_s))) { + return -EFAULT; + } + kargs.ctx = (uintptr_t)session_data; + + err = _mali_ukk_mem_cow(&kargs); + + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + if (0 != put_user(kargs.backend_handle, &uargs->backend_handle)) { + return -EFAULT; + } + + return 0; +} + +int mem_cow_modify_range_wrapper(struct mali_session_data *session_data, _mali_uk_cow_modify_range_s __user *uargs) +{ + _mali_uk_cow_modify_range_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_cow_modify_range_s))) { + return -EFAULT; + } + kargs.ctx = (uintptr_t)session_data; + + err = _mali_ukk_mem_cow_modify_range(&kargs); + + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + if (0 != put_user(kargs.change_pages_nr, &uargs->change_pages_nr)) { + return -EFAULT; + } + return 0; +} + + +int mem_resize_mem_wrapper(struct mali_session_data *session_data, _mali_uk_mem_resize_s __user *uargs) +{ + _mali_uk_mem_resize_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_resize_s))) { + return -EFAULT; + } + kargs.ctx = (uintptr_t)session_data; + + err = _mali_ukk_mem_resize(&kargs); 
+ + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + return 0; +} + int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs) { _mali_uk_mem_write_safe_s kargs; @@ -226,3 +304,30 @@ int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mal if (buffer) _mali_osk_vfree(buffer); return rc; } + +int mem_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs) +{ + _mali_osk_errcode_t err; + _mali_uk_profiling_memory_usage_get_s kargs; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) { + return -EFAULT; + } + + kargs.ctx = (uintptr_t)session_data; + err = _mali_ukk_mem_usage_get(&kargs); + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + kargs.ctx = (uintptr_t)NULL; /* prevent kernel address to be returned to user space */ + if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) { + return -EFAULT; + } + + return 0; +} + diff --git a/drivers/gpu/arm/mali/linux/mali_ukk_pp.c b/drivers/gpu/arm/mali/linux/mali_ukk_pp.c index 4c1c381cb90f9..2c59bc72b6ad2 100755 --- a/drivers/gpu/arm/mali/linux/mali_ukk_pp.c +++ b/drivers/gpu/arm/mali/linux/mali_ukk_pp.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/mali/linux/mali_ukk_profiling.c b/drivers/gpu/arm/mali/linux/mali_ukk_profiling.c index 62ed1d648c64a..2487bf3b505a5 100755 --- a/drivers/gpu/arm/mali/linux/mali_ukk_profiling.c +++ b/drivers/gpu/arm/mali/linux/mali_ukk_profiling.c @@ -37,28 +37,6 @@ int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk return 0; } -int profiling_memory_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs) -{ - _mali_osk_errcode_t err; - _mali_uk_profiling_memory_usage_get_s kargs; - - MALI_CHECK_NON_NULL(uargs, -EINVAL); - MALI_CHECK_NON_NULL(session_data, -EINVAL); - - kargs.ctx = (uintptr_t)session_data; - err = _mali_ukk_profiling_memory_usage_get(&kargs); - if (_MALI_OSK_ERR_OK != err) { - return map_errcode(err); - } - - kargs.ctx = (uintptr_t)NULL; /* prevent kernel address to be returned to user space */ - if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) { - return -EFAULT; - } - - return 0; -} - int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs) { _mali_uk_sw_counters_report_s kargs; @@ -103,3 +81,103 @@ int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, return 0; } + +int profiling_get_stream_fd_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stream_fd_get_s __user *uargs) +{ + _mali_uk_profiling_stream_fd_get_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_stream_fd_get_s))) { + return -EFAULT; + } + + kargs.ctx = (uintptr_t)session_data; + err = _mali_ukk_profiling_stream_fd_get(&kargs); + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_stream_fd_get_s))) { + return -EFAULT; + } + + return 0; +} + +int 
profiling_control_set_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_control_set_s __user *uargs) +{ + _mali_uk_profiling_control_set_s kargs; + _mali_osk_errcode_t err; + u8 *kernel_control_data = NULL; + u8 *kernel_response_data = NULL; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + + if (0 != get_user(kargs.control_packet_size, &uargs->control_packet_size)) return -EFAULT; + if (0 != get_user(kargs.response_packet_size, &uargs->response_packet_size)) return -EFAULT; + + kargs.ctx = (uintptr_t)session_data; + + + /* Sanity check about the size */ + if (kargs.control_packet_size > PAGE_SIZE || kargs.response_packet_size > PAGE_SIZE) + return -EINVAL; + + if (0 != kargs.control_packet_size) { + + if (0 == kargs.response_packet_size) + return -EINVAL; + + kernel_control_data = _mali_osk_calloc(1, kargs.control_packet_size); + if (NULL == kernel_control_data) { + return -ENOMEM; + } + + kernel_response_data = _mali_osk_calloc(1, kargs.response_packet_size); + if (NULL == kernel_response_data) { + _mali_osk_free(kernel_control_data); + return -ENOMEM; + } + + kargs.control_packet_data = (uintptr_t)kernel_control_data; + kargs.response_packet_data = (uintptr_t)kernel_response_data; + + if (0 != copy_from_user((void *)(uintptr_t)kernel_control_data, (void *)(uintptr_t)uargs->control_packet_data, kargs.control_packet_size)) { + _mali_osk_free(kernel_control_data); + _mali_osk_free(kernel_response_data); + return -EFAULT; + } + + err = _mali_ukk_profiling_control_set(&kargs); + if (_MALI_OSK_ERR_OK != err) { + _mali_osk_free(kernel_control_data); + _mali_osk_free(kernel_response_data); + return map_errcode(err); + } + + if (0 != kargs.response_packet_size && 0 != copy_to_user(((void *)(uintptr_t)uargs->response_packet_data), ((void *)(uintptr_t)kargs.response_packet_data), kargs.response_packet_size)) { + _mali_osk_free(kernel_control_data); + _mali_osk_free(kernel_response_data); + return -EFAULT; + } + + if (0 != put_user(kargs.response_packet_size, 
&uargs->response_packet_size)) { + _mali_osk_free(kernel_control_data); + _mali_osk_free(kernel_response_data); + return -EFAULT; + } + + _mali_osk_free(kernel_control_data); + _mali_osk_free(kernel_response_data); + } else { + + err = _mali_ukk_profiling_control_set(&kargs); + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + } + return 0; +} diff --git a/drivers/gpu/arm/mali/linux/mali_ukk_soft_job.c b/drivers/gpu/arm/mali/linux/mali_ukk_soft_job.c index 11c70060e489d..beeb7531a662c 100755 --- a/drivers/gpu/arm/mali/linux/mali_ukk_soft_job.c +++ b/drivers/gpu/arm/mali/linux/mali_ukk_soft_job.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_ukk_timeline.c b/drivers/gpu/arm/mali/linux/mali_ukk_timeline.c index 484d4041c8694..f32d8b0d9cad3 100755 --- a/drivers/gpu/arm/mali/linux/mali_ukk_timeline.c +++ b/drivers/gpu/arm/mali/linux/mali_ukk_timeline.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_ukk_vsync.c b/drivers/gpu/arm/mali/linux/mali_ukk_vsync.c index 487c2478df4d6..5b54fb9872410 100755 --- a/drivers/gpu/arm/mali/linux/mali_ukk_vsync.c +++ b/drivers/gpu/arm/mali/linux/mali_ukk_vsync.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 ARM Limited. All rights reserved. 
+ * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/linux/mali_ukk_wrappers.h b/drivers/gpu/arm/mali/linux/mali_ukk_wrappers.h index 9b03e4394e053..7e59346facace 100755 --- a/drivers/gpu/arm/mali/linux/mali_ukk_wrappers.h +++ b/drivers/gpu/arm/mali/linux/mali_ukk_wrappers.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -29,15 +29,19 @@ int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_ int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs); int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs); int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs); +int pending_submit_wrapper(struct mali_session_data *session_data, _mali_uk_pending_submit_s __user *uargs); int mem_alloc_wrapper(struct mali_session_data *session_data, _mali_uk_alloc_mem_s __user *uargs); int mem_free_wrapper(struct mali_session_data *session_data, _mali_uk_free_mem_s __user *uargs); int mem_bind_wrapper(struct mali_session_data *session_data, _mali_uk_bind_mem_s __user *uargs); int mem_unbind_wrapper(struct mali_session_data *session_data, _mali_uk_unbind_mem_s __user *uargs); - +int mem_cow_wrapper(struct mali_session_data *session_data, _mali_uk_cow_mem_s 
__user *uargs); +int mem_cow_modify_range_wrapper(struct mali_session_data *session_data, _mali_uk_cow_modify_range_s __user *uargs); +int mem_resize_mem_wrapper(struct mali_session_data *session_data, _mali_uk_mem_resize_s __user *uargs); int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs); int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs); int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs); +int mem_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs); int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs); int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs); @@ -56,7 +60,8 @@ int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs); int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs); -int profiling_memory_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs); +int profiling_get_stream_fd_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stream_fd_get_s __user *uargs); +int profiling_control_set_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_control_set_s __user *uargs); int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs); diff --git a/drivers/gpu/arm/mali/platform/mali_scaling.h b/drivers/gpu/arm/mali/platform/mali_scaling.h index 7fc2e8fd9703e..003fd28be634e 100644 --- 
a/drivers/gpu/arm/mali/platform/mali_scaling.h +++ b/drivers/gpu/arm/mali/platform/mali_scaling.h @@ -76,8 +76,6 @@ typedef struct mali_plat_info_t { u32 dvfs_table_size; mali_scale_info_t scale_info; - u32 maxclk_sysfs; - u32 maxpp_sysfs; /* set upper limit of pp or frequency, for THERMAL thermal or band width saving.*/ u32 limit_on; @@ -118,8 +116,6 @@ void flush_scaling_job(void); void get_mali_rt_clkpp(u32* clk, u32* pp); u32 set_mali_rt_clkpp(u32 clk, u32 pp, u32 flush); void revise_mali_rt(void); -/* get max gpu clk level of this chip*/ -int get_gpu_max_clk_level(void); /* get or set the scale mode. */ u32 get_mali_schel_mode(void); diff --git a/drivers/gpu/arm/mali/platform/meson_bu/mali_clock.c b/drivers/gpu/arm/mali/platform/meson_bu/mali_clock.c index 9e9911cab2d38..0f302288753c4 100644 --- a/drivers/gpu/arm/mali/platform/meson_bu/mali_clock.c +++ b/drivers/gpu/arm/mali/platform/meson_bu/mali_clock.c @@ -4,14 +4,12 @@ #include #include #include -#include #include "mali_scaling.h" #include "mali_clock.h" #ifndef AML_CLK_LOCK_ERROR #define AML_CLK_LOCK_ERROR 1 #endif -#define GXBBM_MAX_GPU_FREQ 700000000UL static unsigned gpu_dbg_level = 0; module_param(gpu_dbg_level, uint, 0644); @@ -28,9 +26,6 @@ MODULE_PARM_DESC(gpu_dbg_level, "gpu debug level"); gpu_dbg(1, "line(%d), clk_cntl=0x%08x\n" fmt, __LINE__, mplt_read(HHI_MALI_CLK_CNTL), ## arg);\ } while (0) -//disable print -#define _dev_info(...) 
- //static DEFINE_SPINLOCK(lock); static mali_plat_info_t* pmali_plat = NULL; //static u32 mali_extr_backup = 0; @@ -216,7 +211,6 @@ int mali_dt_info(struct platform_device *pdev, struct mali_plat_info_t *mpdata) mpdata->cfg_pp = 6; } mpdata->scale_info.maxpp = mpdata->cfg_pp; - mpdata->maxpp_sysfs = mpdata->cfg_pp; _dev_info(&pdev->dev, "max pp is %d\n", mpdata->scale_info.maxpp); ret = of_property_read_u32(gpu_dn,"min_pp", @@ -254,7 +248,34 @@ int mali_dt_info(struct platform_device *pdev, struct mali_plat_info_t *mpdata) of_get_property(gpu_dn, "tbl", &length); length = length /sizeof(u32); - _dev_info(&pdev->dev, "clock dvfs cfg table size is %d\n", length); + _dev_info(&pdev->dev, "clock dvfs table size is %d\n", length); + + ret = of_property_read_u32(gpu_dn,"max_clk", + &mpdata->cfg_clock); + if (ret) { + dev_notice(&pdev->dev, "max clk set %d\n", length-2); + mpdata->cfg_clock = length-2; + } + + mpdata->cfg_clock_bkup = mpdata->cfg_clock; + mpdata->scale_info.maxclk = mpdata->cfg_clock; + _dev_info(&pdev->dev, "max clk is %d\n", mpdata->scale_info.maxclk); + + ret = of_property_read_u32(gpu_dn,"turbo_clk", + &mpdata->turbo_clock); + if (ret) { + dev_notice(&pdev->dev, "turbo clk set to %d\n", length-1); + mpdata->turbo_clock = length-1; + } + _dev_info(&pdev->dev, "turbo clk is %d\n", mpdata->turbo_clock); + + ret = of_property_read_u32(gpu_dn,"def_clk", + &mpdata->def_clock); + if (ret) { + dev_notice(&pdev->dev, "default clk set to %d\n", length/2-1); + mpdata->def_clock = length/2 - 1; + } + _dev_info(&pdev->dev, "default clk is %d\n", mpdata->def_clock); mpdata->dvfs_table = devm_kzalloc(&pdev->dev, sizeof(struct mali_dvfs_threshold_table)*length, @@ -264,6 +285,7 @@ int mali_dt_info(struct platform_device *pdev, struct mali_plat_info_t *mpdata) dev_err(&pdev->dev, "failed to alloc dvfs table\n"); return -ENOMEM; } + mpdata->dvfs_table_size = length; mpdata->clk_sample = devm_kzalloc(&pdev->dev, sizeof(u32)*length, GFP_KERNEL); if (mpdata->clk_sample 
== NULL) { dev_err(&pdev->dev, "failed to alloc clk_sample table\n"); @@ -278,6 +300,7 @@ int mali_dt_info(struct platform_device *pdev, struct mali_plat_info_t *mpdata) } clk_item = mpdata->clk_items; // + of_property_for_each_u32(gpu_dn, "tbl", prop, p, u) { dvfs_clk_hdl = (phandle) u; gpu_clk_dn = of_find_node_by_phandle(dvfs_clk_hdl); @@ -285,14 +308,6 @@ int mali_dt_info(struct platform_device *pdev, struct mali_plat_info_t *mpdata) if (ret) { dev_notice(&pdev->dev, "read clk_freq failed\n"); } -#if 0 -#ifdef MESON_CPU_VERSION_OPS - if (is_meson_gxbbm_cpu()) { - if (dvfs_tbl->clk_freq >= GXBBM_MAX_GPU_FREQ) - continue; - } -#endif -#endif ret = of_property_read_string(gpu_clk_dn,"clk_parent", &dvfs_tbl->clk_parent); if (ret) { @@ -330,40 +345,11 @@ int mali_dt_info(struct platform_device *pdev, struct mali_plat_info_t *mpdata) clk_item ++; clk_sample ++; i++; - mpdata->dvfs_table_size ++; - } - - ret = of_property_read_u32(gpu_dn,"max_clk", - &mpdata->cfg_clock); - if (ret) { - dev_notice(&pdev->dev, "max clk set %d\n", mpdata->dvfs_table_size-2); - mpdata->cfg_clock = mpdata->dvfs_table_size-2; } - mpdata->cfg_clock_bkup = mpdata->cfg_clock; - mpdata->maxclk_sysfs = mpdata->cfg_clock; - mpdata->scale_info.maxclk = mpdata->cfg_clock; - _dev_info(&pdev->dev, "max clk is %d\n", mpdata->scale_info.maxclk); - - ret = of_property_read_u32(gpu_dn,"turbo_clk", - &mpdata->turbo_clock); - if (ret) { - dev_notice(&pdev->dev, "turbo clk set to %d\n", mpdata->dvfs_table_size-1); - mpdata->turbo_clock = mpdata->dvfs_table_size-1; - } - _dev_info(&pdev->dev, "turbo clk is %d\n", mpdata->turbo_clock); - - ret = of_property_read_u32(gpu_dn,"def_clk", - &mpdata->def_clock); - if (ret) { - dev_notice(&pdev->dev, "default clk set to %d\n", mpdata->dvfs_table_size/2-1); - mpdata->def_clock = mpdata->dvfs_table_size/2 - 1; - } - _dev_info(&pdev->dev, "default clk is %d\n", mpdata->def_clock); - dvfs_tbl = mpdata->dvfs_table; clk_sample = mpdata->clk_sample; - for (i = 0; i< 
mpdata->dvfs_table_size; i++) { + for (i = 0; i< length; i++) { _dev_info(&pdev->dev, "====================%d====================\n" "clk_freq=%10d, clk_parent=%9s, voltage=%d, keep_count=%d, threshod=<%d %d>, clk_sample=%d\n", i, @@ -373,7 +359,6 @@ int mali_dt_info(struct platform_device *pdev, struct mali_plat_info_t *mpdata) dvfs_tbl ++; clk_sample ++; } - _dev_info(&pdev->dev, "clock dvfs table size is %d\n", mpdata->dvfs_table_size); mpdata->clk_mali = devm_clk_get(&pdev->dev, "clk_mali"); mpdata->clk_mali_0 = devm_clk_get(&pdev->dev, "clk_mali_0"); diff --git a/drivers/gpu/arm/mali/platform/meson_bu/meson_main2.c b/drivers/gpu/arm/mali/platform/meson_bu/meson_main2.c index 8dd3dc4dd1ee3..98f29b72b385f 100644 --- a/drivers/gpu/arm/mali/platform/meson_bu/meson_main2.c +++ b/drivers/gpu/arm/mali/platform/meson_bu/meson_main2.c @@ -32,11 +32,7 @@ int mali_pm_statue = 0; extern void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data); u32 mali_gp_reset_fail = 0; -module_param(mali_gp_reset_fail, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */ -MODULE_PARM_DESC(mali_gp_reset_fail, "times of failed to reset GP"); u32 mali_core_timeout = 0; -module_param(mali_core_timeout, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */ -MODULE_PARM_DESC(mali_core_timeout, "timeout of failed to reset GP"); static struct mali_gpu_device_data mali_gpu_data = { diff --git a/drivers/gpu/arm/mali/platform/meson_bu/mpgpu.c b/drivers/gpu/arm/mali/platform/meson_bu/mpgpu.c index b4801092cf55f..a7ef47d282da5 100644 --- a/drivers/gpu/arm/mali/platform/meson_bu/mpgpu.c +++ b/drivers/gpu/arm/mali/platform/meson_bu/mpgpu.c @@ -29,7 +29,6 @@ #include #include #include -//#include "mali_pp_scheduler.h" #include "meson_main.h" #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 @@ -53,6 +52,8 @@ static ssize_t domain_stat_read(struct class *class, #define MAX_TOKEN 20 #define FULL_UTILIZATION 256 +extern int utilization_level; + static ssize_t 
mpgpu_write(struct class *class, struct class_attribute *attr, const char *buf, size_t count) { @@ -140,14 +141,16 @@ static ssize_t scale_mode_write(struct class *class, return count; } +static ssize_t utilization(struct class *class, struct class_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", utilization_level * 100 / 255); +} + static ssize_t max_pp_read(struct class *class, struct class_attribute *attr, char *buf) { mali_plat_info_t* pmali_plat = get_mali_plat_data(); - printk("maxpp:%d, maxpp_sysfs:%d, total=%d\n", - pmali_plat->scale_info.maxpp, pmali_plat->maxpp_sysfs, - pmali_plat->cfg_pp); - return sprintf(buf, "%d\n", pmali_plat->cfg_pp); + return sprintf(buf, "%d\n", pmali_plat->scale_info.maxpp); } static ssize_t max_pp_write(struct class *class, @@ -165,7 +168,6 @@ static ssize_t max_pp_write(struct class *class, if ((0 != ret) || (val > pmali_plat->cfg_pp) || (val < pinfo->minpp)) return -EINVAL; - pmali_plat->maxpp_sysfs = val; pinfo->maxpp = val; revise_mali_rt(); @@ -204,9 +206,7 @@ static ssize_t max_freq_read(struct class *class, struct class_attribute *attr, char *buf) { mali_plat_info_t* pmali_plat = get_mali_plat_data(); - printk("maxclk:%d, maxclk_sys:%d, max gpu level=%d\n", - pmali_plat->scale_info.maxclk, pmali_plat->maxclk_sysfs, get_gpu_max_clk_level()); - return sprintf(buf, "%d\n", get_gpu_max_clk_level()); + return sprintf(buf, "%d\n", pmali_plat->scale_info.maxclk); } static ssize_t max_freq_write(struct class *class, @@ -224,7 +224,6 @@ static ssize_t max_freq_write(struct class *class, if ((0 != ret) || (val > pmali_plat->cfg_clock) || (val < pinfo->minclk)) return -EINVAL; - pmali_plat->maxclk_sysfs = val; pinfo->maxclk = val; revise_mali_rt(); @@ -323,6 +322,7 @@ static struct class_attribute mali_class_attrs[] = { __ATTR(max_pp, 0644, max_pp_read, max_pp_write), __ATTR(cur_freq, 0644, freq_read, freq_write), __ATTR(cur_pp, 0644, current_pp_read, current_pp_write), + __ATTR(utilization, 0644, utilization, NULL), }; 
static struct class mpgpu_class = { diff --git a/drivers/gpu/arm/mali/platform/meson_bu/platform_gx.c b/drivers/gpu/arm/mali/platform/meson_bu/platform_gx.c index 2dfefadfa82a6..000bc2fa5295e 100644 --- a/drivers/gpu/arm/mali/platform/meson_bu/platform_gx.c +++ b/drivers/gpu/arm/mali/platform/meson_bu/platform_gx.c @@ -24,8 +24,6 @@ #ifdef CONFIG_GPU_THERMAL #include #include -#include -#include #endif #include #include @@ -34,7 +32,6 @@ #include "mali_scaling.h" #include "mali_clock.h" #include "meson_main.h" -#include "mali_executor.h" /* * For Meson 8 M2. @@ -42,8 +39,8 @@ */ static void mali_plat_preheat(void); static mali_plat_info_t mali_plat_data = { - .bst_gpu = 210, /* threshold for boosting gpu. */ - .bst_pp = 160, /* threshold for boosting PP. */ + .bst_gpu = 1, /* threshold for boosting gpu. */ + .bst_pp = 1, /* threshold for boosting PP. */ .have_switch = 1, .limit_on = 1, .plat_preheat = mali_plat_preheat, @@ -99,11 +96,6 @@ unsigned int get_mali_max_level(void) return mali_plat_data.dvfs_table_size - 1; } -int get_gpu_max_clk_level(void) -{ - return mali_plat_data.cfg_clock; -} - #ifdef CONFIG_GPU_THERMAL static void set_limit_mali_freq(u32 idx) { @@ -111,10 +103,6 @@ static void set_limit_mali_freq(u32 idx) return; if (idx > mali_plat_data.turbo_clock || idx < mali_plat_data.scale_info.minclk) return; - if (idx > mali_plat_data.maxclk_sysfs) { - printk("idx > max freq\n"); - return; - } mali_plat_data.scale_info.maxclk= idx; revise_mali_rt(); } @@ -123,11 +111,6 @@ static u32 get_limit_mali_freq(void) { return mali_plat_data.scale_info.maxclk; } - -static u32 get_mali_utilization(void) -{ - return (_mali_ukk_utilization_pp() * 100) / 256; -} #endif #ifdef CONFIG_GPU_THERMAL @@ -139,29 +122,12 @@ static u32 set_limit_pp_num(u32 num) if (num > mali_plat_data.cfg_pp || num < mali_plat_data.scale_info.minpp) goto quit; - - if (num > mali_plat_data.maxpp_sysfs) { - printk("pp > sysfs set pp\n"); - goto quit; - } - mali_plat_data.scale_info.maxpp = num; 
revise_mali_rt(); ret = 0; quit: return ret; } -static u32 mali_get_online_pp(void) -{ - unsigned int val; - mali_plat_info_t* pmali_plat = get_mali_plat_data(); - - val = readl(pmali_plat->reg_base_aobus + 0xf0) & 0xff; - if (val == 0x07) /* No pp is working */ - return 0; - - return mali_executor_get_num_cores_enabled(); -} #endif void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data); @@ -348,11 +314,7 @@ void mali_post_init(void) gcdev->get_gpu_max_level = get_mali_max_level; gcdev->set_gpu_freq_idx = set_limit_mali_freq; gcdev->get_gpu_current_max_level = get_limit_mali_freq; - gcdev->get_gpu_freq = get_mali_freq; - gcdev->get_gpu_loading = get_mali_utilization; - gcdev->get_online_pp = mali_get_online_pp; err = gpufreq_cooling_register(gcdev); - aml_thermal_min_update(gcdev->cool_dev); if (err < 0) printk("register GPU cooling error\n"); printk("gpu cooling register okay with err=%d\n",err); @@ -367,7 +329,6 @@ void mali_post_init(void) gccdev->max_gpu_core_num=mali_plat_data.cfg_pp; gccdev->set_max_pp_num=set_limit_pp_num; err = (int)gpucore_cooling_register(gccdev); - aml_thermal_min_update(gccdev->cool_dev); if (err < 0) printk("register GPU cooling error\n"); printk("gpu core cooling register okay with err=%d\n",err); diff --git a/drivers/gpu/arm/mali/platform/meson_bu/scaling.c b/drivers/gpu/arm/mali/platform/meson_bu/scaling.c index 54a6d66254bf4..bafec732d979f 100644 --- a/drivers/gpu/arm/mali/platform/meson_bu/scaling.c +++ b/drivers/gpu/arm/mali/platform/meson_bu/scaling.c @@ -25,6 +25,7 @@ #include "meson_main2.h" #include "mali_clock.h" +int utilization_level = 0; static int currentStep; #ifndef CONFIG_MALI_DVFS static int num_cores_enabled; @@ -327,9 +328,10 @@ static void mali_decide_next_status(struct mali_gpu_utilization_data *data, int* u32 ld_up, ld_down; u32 change_mode; + utilization_level = data->utilization_gpu; *pp_change_flag = 0; change_mode = 0; - utilization = data->utilization_gpu; + utilization = 255; 
scalingdbg(5, "line(%d), scaling_mode=%d, MALI_TURBO_MODE=%d, turbo=%d, maxclk=%d\n", __LINE__, scaling_mode, MALI_TURBO_MODE, @@ -405,16 +407,6 @@ static void mali_decide_next_status(struct mali_gpu_utilization_data *data, int* *pp_change_flag = -1; } } - - if (decided_fs_idx < 0 ) { - printk("gpu debug, next index below 0\n"); - decided_fs_idx = 0; - } - if (decided_fs_idx > pmali_plat->scale_info.maxclk) { - decided_fs_idx = pmali_plat->scale_info.maxclk; - printk("gpu debug, next index above max, set to %d\n", decided_fs_idx); - } - if (change_mode) mali_stay_count = pmali_plat->dvfs_table[decided_fs_idx].keep_count; *next_fs_idx = decided_fs_idx; @@ -475,7 +467,7 @@ void set_mali_schel_mode(u32 mode) MALI_DEBUG_ASSERT(mode < MALI_SCALING_MODE_MAX); if (mode >= MALI_SCALING_MODE_MAX) return; - scaling_mode = mode; + scaling_mode = MALI_TURBO_MODE; //disable thermal in turbo mode if (scaling_mode == MALI_TURBO_MODE) { diff --git a/drivers/gpu/arm/mali/platform/meson_m400/platform_mx.c b/drivers/gpu/arm/mali/platform/meson_m400/platform_mx.c index 3b30ec02752ad..0bc4cebc7e79f 100755 --- a/drivers/gpu/arm/mali/platform/meson_m400/platform_mx.c +++ b/drivers/gpu/arm/mali/platform/meson_m400/platform_mx.c @@ -229,11 +229,6 @@ void mali_core_scaling_term(void) } -int get_gpu_max_clk_level(void) -{ - return 0; -} - void mali_post_init(void) { } diff --git a/drivers/gpu/arm/mali/platform/mpgpu.c b/drivers/gpu/arm/mali/platform/mpgpu.c index 40575ff05a2be..f7c0decfe0af5 100755 --- a/drivers/gpu/arm/mali/platform/mpgpu.c +++ b/drivers/gpu/arm/mali/platform/mpgpu.c @@ -29,7 +29,6 @@ #include #include #include -#include "mali_pp_scheduler.h" #include "meson_main.h" #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 @@ -146,10 +145,7 @@ static ssize_t max_pp_read(struct class *class, struct class_attribute *attr, char *buf) { mali_plat_info_t* pmali_plat = get_mali_plat_data(); - printk("maxpp:%d, maxpp_sysfs:%d, total=%d\n", - pmali_plat->scale_info.maxpp, 
pmali_plat->maxpp_sysfs, - mali_pp_scheduler_get_num_cores_total()); - return sprintf(buf, "%d\n", mali_pp_scheduler_get_num_cores_total()); + return sprintf(buf, "%d\n", pmali_plat->scale_info.maxpp); } static ssize_t max_pp_write(struct class *class, @@ -167,7 +163,6 @@ static ssize_t max_pp_write(struct class *class, if ((0 != ret) || (val > pmali_plat->cfg_pp) || (val < pinfo->minpp)) return -EINVAL; - pmali_plat->maxpp_sysfs = val; pinfo->maxpp = val; revise_mali_rt(); @@ -206,9 +201,7 @@ static ssize_t max_freq_read(struct class *class, struct class_attribute *attr, char *buf) { mali_plat_info_t* pmali_plat = get_mali_plat_data(); - printk("maxclk:%d, maxclk_sys:%d, max gpu level=%d\n", - pmali_plat->scale_info.maxclk, pmali_plat->maxclk_sysfs, get_gpu_max_clk_level()); - return sprintf(buf, "%d\n", get_gpu_max_clk_level()); + return sprintf(buf, "%d\n", pmali_plat->scale_info.maxclk); } static ssize_t max_freq_write(struct class *class, @@ -226,7 +219,6 @@ static ssize_t max_freq_write(struct class *class, if ((0 != ret) || (val > pmali_plat->cfg_clock) || (val < pinfo->minclk)) return -EINVAL; - pmali_plat->maxclk_sysfs = val; pinfo->maxclk = val; revise_mali_rt(); diff --git a/drivers/gpu/arm/mali/regs/mali_200_regs.h b/drivers/gpu/arm/mali/regs/mali_200_regs.h index 5c072fb5b815d..c904ad23b28db 100755 --- a/drivers/gpu/arm/mali/regs/mali_200_regs.h +++ b/drivers/gpu/arm/mali/regs/mali_200_regs.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2012-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -25,12 +25,11 @@ enum mali200_mgmt_reg { MALI200_REG_ADDR_MGMT_INT_MASK = 0x1028, MALI200_REG_ADDR_MGMT_INT_STATUS = 0x102c, - MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW = 0x1044, - MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x1050, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x1080, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x1084, + MALI200_REG_ADDR_MGMT_PERF_CNT_0_LIMIT = 0x1088, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x108c, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x10a0, @@ -125,6 +124,8 @@ enum mali200_wb_unit_regs { #define MALI300_PP_PRODUCT_ID 0xCE07 #define MALI400_PP_PRODUCT_ID 0xCD07 #define MALI450_PP_PRODUCT_ID 0xCF07 +#define MALI470_PP_PRODUCT_ID 0xCF08 + #endif /* _MALI200_REGS_H_ */ diff --git a/drivers/gpu/arm/mali/regs/mali_gp_regs.h b/drivers/gpu/arm/mali/regs/mali_gp_regs.h index c5e6116f758f4..435953d46e8b5 100755 --- a/drivers/gpu/arm/mali/regs/mali_gp_regs.h +++ b/drivers/gpu/arm/mali/regs/mali_gp_regs.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2012-2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -31,13 +31,13 @@ typedef enum { MALIGP2_REG_ADDR_MGMT_INT_CLEAR = 0x28, MALIGP2_REG_ADDR_MGMT_INT_MASK = 0x2C, MALIGP2_REG_ADDR_MGMT_INT_STAT = 0x30, - MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW = 0x34, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x3C, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x40, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x44, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x48, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x4C, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x50, + MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_LIMIT = 0x54, MALIGP2_REG_ADDR_MGMT_STATUS = 0x68, MALIGP2_REG_ADDR_MGMT_VERSION = 0x6C, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ = 0x80, diff --git a/drivers/gpu/arm/mali/timestamp-arm11-cc/mali_timestamp.c b/drivers/gpu/arm/mali/timestamp-arm11-cc/mali_timestamp.c index a486e2f7684f7..f7f7baf4d8be3 100755 --- a/drivers/gpu/arm/mali/timestamp-arm11-cc/mali_timestamp.c +++ b/drivers/gpu/arm/mali/timestamp-arm11-cc/mali_timestamp.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2011, 2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2011, 2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/timestamp-arm11-cc/mali_timestamp.h b/drivers/gpu/arm/mali/timestamp-arm11-cc/mali_timestamp.h index 65f3ab274c09b..757f6433bf3d3 100755 --- a/drivers/gpu/arm/mali/timestamp-arm11-cc/mali_timestamp.h +++ b/drivers/gpu/arm/mali/timestamp-arm11-cc/mali_timestamp.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2011, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved. 
* * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/timestamp-default/mali_timestamp.c b/drivers/gpu/arm/mali/timestamp-default/mali_timestamp.c index a486e2f7684f7..f7f7baf4d8be3 100755 --- a/drivers/gpu/arm/mali/timestamp-default/mali_timestamp.c +++ b/drivers/gpu/arm/mali/timestamp-default/mali_timestamp.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2011, 2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2011, 2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/mali/timestamp-default/mali_timestamp.h b/drivers/gpu/arm/mali/timestamp-default/mali_timestamp.h index 8ba47060828f0..21700fe3216f8 100755 --- a/drivers/gpu/arm/mali/timestamp-default/mali_timestamp.h +++ b/drivers/gpu/arm/mali/timestamp-default/mali_timestamp.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2011, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/t83x/kernel/Documentation/devicetree/bindings/arm/mali-midgard.txt b/drivers/gpu/arm/t83x/kernel/Documentation/devicetree/bindings/arm/mali-midgard.txt deleted file mode 100755 index acd2c6d8f39c8..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/Documentation/devicetree/bindings/arm/mali-midgard.txt +++ /dev/null @@ -1,67 +0,0 @@ -# -# (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - -* ARM Mali Midgard devices - - -Required properties: - -- compatible : Should be mali, replacing digits with x from the back, -until malitxx, ending with arm,mali-midgard, the latter not optional. -- reg : Physical base address of the device and length of the register area. -- interrupts : Contains the three IRQ lines required by T-6xx devices -- interrupt-names : Contains the names of IRQ resources in the order they were -provided in the interrupts property. Must contain: "JOB, "MMU", "GPU". - -Optional: - -- clocks : Phandle to clock for the Mali T-6xx device. -- clock-names : Shall be "clk_mali". -- mali-supply : Phandle to regulator for the Mali device. Refer to -Documentation/devicetree/bindings/regulator/regulator.txt for details. -- operating-points : Refer to Documentation/devicetree/bindings/power/opp.txt -for details. -- snoop_enable_smc : SMC function ID to enable CCI snooping on the GPU port(s). -- snoop_disable_smc : SMC function ID to disable CCI snooping on the GPU port(s). -- jm_config : For T860/T880. Sets job manager configuration. 
An array containing: - - 1 to override the TIMESTAMP value, 0 otherwise. - - 1 to override clock gate, forcing them to be always on, 0 otherwise. - - 1 to enable job throttle, limiting the number of cores that can be started - simultaneously, 0 otherwise. - - Value between 0 and 63 (including). If job throttle is enabled, this is one - less than the number of cores that can be started simultaneously. - -Example for a Mali-T602: - -gpu@0xfc010000 { - compatible = "arm,malit602", "arm,malit60x", "arm,malit6xx", "arm,mali-midgard"; - reg = <0xfc010000 0x4000>; - interrupts = <0 36 4>, <0 37 4>, <0 38 4>; - interrupt-names = "JOB", "MMU", "GPU"; - - clocks = <&pclk_mali>; - clock-names = "clk_mali"; - mali-supply = <&vdd_mali>; - operating-points = < - /* KHz uV */ - 533000 1250000, - 450000 1150000, - 400000 1125000, - 350000 1075000, - 266000 1025000, - 160000 925000, - 100000 912500, - >; -}; diff --git a/drivers/gpu/arm/t83x/kernel/Documentation/dma-buf-test-exporter.txt b/drivers/gpu/arm/t83x/kernel/Documentation/dma-buf-test-exporter.txt deleted file mode 100755 index 4208010cc7883..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/Documentation/dma-buf-test-exporter.txt +++ /dev/null @@ -1,40 +0,0 @@ -# -# (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -===================== -dma-buf-test-exporter -===================== - -Overview --------- - -The dma-buf-test-exporter is a simple exporter of dma_buf objects. -It has a private API to allocate and manipulate the buffers which are represented as dma_buf fds. 
-The private API allows: -* simple allocation of physically non-contiguous buffers -* simple allocation of physically contiguous buffers -* query kernel side API usage stats (number of attachments, number of mappings, mmaps) -* failure mode configuration (fail attach, mapping, mmap) -* kernel side memset of buffers - -The buffers support all of the dma_buf API, including mmap. - -It supports being compiled as a module both in-tree and out-of-tree. - -See include/linux/dma-buf-test-exporter.h for the ioctl interface. -See Documentation/dma-buf-sharing.txt for details on dma_buf. - - diff --git a/drivers/gpu/arm/t83x/kernel/Documentation/kds.txt b/drivers/gpu/arm/t83x/kernel/Documentation/kds.txt deleted file mode 100755 index 639288c106b72..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/Documentation/kds.txt +++ /dev/null @@ -1,268 +0,0 @@ -# -# (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -============================== -kds - Kernel Dependency System -============================== - -Introduction ------------- -kds provides a mechanism for clients to atomically lock down multiple abstract resources. -This can be done either synchronously or asynchronously. -Abstract resources is used to allow a set of clients to use kds to control access to any -resource, an example is structured memory buffers. - -kds supports that buffer is locked for exclusive access and sharing of buffers. - -kds can be built as either a integrated feature of the kernel or as a module. 
-It supports being compiled as a module both in-tree and out-of-tree. - - -Concepts --------- -A core concept in kds is abstract resources. -A kds resource is just an abstraction for some client object, kds doesn't care what it is. -Typically EGL will consider UMP buffers as being a resource, thus each UMP buffer has -a kds resource for synchronization to the buffer. - -kds allows a client to create and destroy the abstract resource objects. -A new resource object is made available asap (it is just a simple malloc with some initializations), -while destroy it requires some external synchronization. - -The other core concept in kds is consumer of resources. -kds is requested to allow a client to consume a set of resources and the client will be notified when it can consume the resources. - -Exclusive access allows only one client to consume a resource. -Shared access permits multiple consumers to acceess a resource concurrently. - - -APIs ----- -kds provides simple resource allocate and destroy functions. -Clients use this to instantiate and control the lifetime of the resources kds manages. - -kds provides two ways to wait for resources: -- Asynchronous wait: the client specifies a function pointer to be called when wait is over -- Synchronous wait: Function blocks until access is gained. - -The synchronous API has a timeout for the wait. -The call can early out if a signal is delivered. - -After a client is done consuming the resource kds must be notified to release the resources and let some other client take ownership. -This is done via resource set release call. - -A Windows comparison: -kds implements WaitForMultipleObjectsEx(..., bWaitAll = TRUE, ...) but also has an asynchronous version in addition. -kds resources can be seen as being the same as NT object manager resources. - -Internals ---------- -kds guarantees atomicity when a set of resources is operated on. 
-This is implemented via a global resource lock which is taken by kds when it updates resource objects. - -Internally a resource in kds is a linked list head with some flags. - -When a consumer requests access to a set of resources it is queued on each of the resources. -The link from the consumer to the resources can be triggered. Once all links are triggered -the registered callback is called or the blocking function returns. -A link is considered triggered if it is the first on the list of consumers of a resource, -or if all the links ahead of it is marked as shared and itself is of the type shared. - -When the client is done consuming the consumer object is removed from the linked lists of -the resources and a potential new consumer becomes the head of the resources. -As we add and remove consumers atomically across all resources we can guarantee that -we never introduces a A->B + B->A type of loops/deadlocks. - - -kbase/base implementation -------------------------- -A HW job needs access to a set of shared resources. -EGL tracks this and encodes the set along with the atom in the ringbuffer. -EGL allocates a (k)base dep object to represent the dependency to the set of resources and encodes that along with the list of resources. -This dep object is use to create a dependency from a job chain(atom) to the resources it needs to run. -When kbase decodes the atom in the ringbuffer it finds the set of resources and calls kds to request all the needed resources. -As EGL needs to know when the kds request is delivered a new base event object is needed: atom enqueued. This event is only delivered for atoms which uses kds. -The callback kbase registers trigger the dependency object described which would trigger the existing JD system to release the job chain. -When the atom is done kds resource set release is call to release the resources. - -EGL will typically use exclusive access to the render target, while all buffers used as input can be marked as shared. 
- - -Buffer publish/vsync --------------------- -EGL will use a separate ioctl or DRM flip to request the flip. -If the LCD driver is integrated with kds EGL can do these operations early. -The LCD driver must then implement the ioctl or DRM flip to be asynchronous with kds async call. -The LCD driver binds a kds resource to each virtual buffer (2 buffers in case of double-buffering). -EGL will make a dependency to the target kds resource in the kbase atom. -After EGL receives a atom enqueued event it can ask the LCD driver to pan to the target kds resource. -When the atom is completed it'll release the resource and the LCD driver will get its callback. -In the callback it'll load the target buffer into the DMA unit of the LCD hardware. -The LCD driver will be the consumer of both buffers for a short period. -The LCD driver will call kds resource set release on the previous on-screen buffer when the next vsync/dma read end is handled. - -=============================================== -Kernel driver kds client design considerations -=============================================== - -Number of resources --------------------- - -The kds api allows a client to wait for ownership of a number of resources, where by the client does not take on ownership of any of the resources in the resource set -until all of the resources in the set are released. Consideration must be made with respect to performance, as waiting on large number of resources will incur -a greater overhead and may increase system latency. It may be worth considering how independent each of the resources are, for example if the same set of resources -are waited upon by each of the clients, then it may be possible to aggregate these into one resource that each client waits upon. 
- -Clients with shared access ---------------------------- - -The kds api allows a number of clients to gain shared access to a resource simultaneously, consideration must be made with respect to performance, large numbers of clients -wanting shared access can incur a performance penalty and may increase system latency, specifically when the clients are granted access. Having an excessively high -number of clients with shared access should be avoided, consideration should be made to the call back configuration being used. See Callbacks and Scenario 1 below. - -Callbacks ----------- - -Careful consideration must be made as to which callback type is most appropriate for kds clients, direct callbacks are called immediately from the context in which the -ownership of the resource is passed to the next waiter in the list. Where as when using deferred callbacks the callback is deferred and called from outside the context -that is relinquishing ownership, while this reduces the latency in the releasing clients context it does incur a cost as there is more latency between a resource -becoming free and the new client owning the resource callback being executed. - -Obviously direct callbacks have a performance advantage, as the call back is immediate and does not have to wait for the kernel to context switch to schedule in the -execution of the callback. - -However as the callback is immediate and within the context that is granting ownership it is important that the callback perform the MINIMUM amount of work necessary, -long call backs could cause poor system latency. Special care and attention must be taken if the direct callbacks can be called from IRQ handlers, such as when -kds_resource_set_release is called from an IRQ handler, in this case you have to avoid any calls that may sleep. 
- -Deferred contexts have the advantage that the call backs are deferred until they are scheduled by the kernel, therefore they are allowed to call functions that may sleep -and if scheduled from IRQ context not incur as much system latency as would be seen with direct callbacks from within the IRQ. - -Once the clients callback has been called, the client is considered to be owning the resource. Within the callback the client may only need to perform a small amount of work -before the client need to give up owner ship. The kds_resource_release function may be called from with in the call back, but consideration must be made when using direct -callbacks, with both respect to execution time and stack usage. Consider the example in Scenario 2 with direct callbacks: - -Scenario 1 - Shared client access - direct callbacks: - -Resources: X -Clients: A(S), B(S), C(S), D(S), E(E) -where: (S) = shared user, (E) = exclusive - -Clients kds callback handler: - -client__cb( p1, p2 ) -{ -} - -Where is either A,B,C,D - Queue |Owner -1. E Requests X exclusive | -2. E Owns X exclusive |E -3. A Requests X shared A|E -4. B Requests X shared BA|E -5. C Requests X shared CBA|E -6. D Requests X shared DCBA|E -7. E Releases X |DCBA -8. A Owns X shared |DCBA -9. B Owns X shared |DCBA -10. C Owns X shared |DCBA -11. D Owns X shared |DCBA - -At point 7 it is important to note that when E releases X; A,B,C and D become the new shared owners of X and the call back for each of the client(A,B,C,D) triggered, so consideration -must be made as to whether a direct or deferred callback is suitable, using direct callbacks would result in the call graph. - -Call graph when E releases X: - kds_resource_set_release( .. ) - +->client_A_cb( .. ) - +->client_B_cb( .. ) - +->client_C_cb( .. ) - +->client_D_cb( .. ) - - -Scenario 2 - Immediate resource release - direct callbacks: - -Resource: X -Clients: A, B, C, D - -Clients kds callback handler: - -client__cb( p1, p2 ) -{ - kds_resource_set_release( .. 
); -} - -Where is either A,B,C,D - -1. A Owns X exclusive -2. B Requests X exclusive (direct callback) -3. C Requests X exclusive (direct callback) -4. D Requests X exclusive (direct callback) -5. A Releases X - -Call graph when A releases X: - kds_resource_set_release( .. ) - +->client_B_cb( .. ) - +->kds_resource_set_release( .. ) - +->client_C_cb( .. ) - +->kds_resource_set_release( .. ) - +->client_D_cb( .. ) - -As can be seen when a client releases the resource, with direct call backs it is possible to create nested calls - -IRQ Considerations -------------------- - -Usage of kds_resource_release in IRQ handlers should be carefully considered. - -Things to keep in mind: - -1.) Are you using direct or deferred callbacks? -2.) How many resources are you releasing? -3.) How many shared waiters are pending on the resource? - -Releasing ownership and wait cancellation ------------------------------------------- - -Client Wait Cancellation -------------------------- - -It may be necessary in certain circumstances for the client to cancel the wait for kds resources for error handling, process termination etc. Cancellation is -performed using kds_resource_set_release or kds_resource_set_release_sync using the rset that was received from kds_async_waitall, kds_resource_set_release_sync -being used for waits which are using deferred callbacks. - -It is possible that while the request to cancel the wait is being issued by the client, the client is granted access to the resources. Normally after the client -has taken ownership and finishes with that resource, it will release ownership to signal other waiters which are pending, this causes a race with the cancellation. -To prevent KDS trying to remove a wait twice from the internal list and accessing memory that is potentially freed, it is very important that all releasers use the -same rset pointer. 
Here is a simplified example of bad usage that must be avoided in any client implementation: - -Senario 3 - Bad release from multiple contexts: - - This scenaro is highlighting bad usage of the kds API - - kds_resource_set * rset; - kds_resource_set * rset_copy; - - kds_async_waitall( &rset, ... ... ... ); - - /* Don't do this */ - rset_copy = rset; - -Context A: - kds_resource_set_release( &rset ) - -Context B: - kds_resource_set_release( &rset_copy ) - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/sconscript deleted file mode 100755 index 82126932de9a0..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/sconscript +++ /dev/null @@ -1,27 +0,0 @@ -# -# (C) COPYRIGHT 2012, 2014 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - -import os -import re -Import('env') - -linux_config_file = os.path.normpath(os.environ['KDIR']) + '/.config' -search_term = '^[\ ]*CONFIG_DMA_SHARED_BUFFER_USES_KDS[\ ]*=[\ ]*y' -for line in open(linux_config_file, 'r'): - if re.search(search_term, line): - SConscript( 'src/sconscript' ) - if env['tests'] and Glob('tests/sconscript'): - SConscript( 'tests/sconscript' ) - break diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/Kbuild deleted file mode 100755 index 68dad07ad6077..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/Kbuild +++ /dev/null @@ -1,18 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - -ifneq ($(CONFIG_DMA_SHARED_BUFFER),) -obj-m := dma_buf_lock.o -endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/Makefile b/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/Makefile deleted file mode 100755 index cf76132e6dde7..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/Makefile +++ /dev/null @@ -1,32 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. 
-# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - -# linux build system bootstrap for out-of-tree module - -# default to building for the host -ARCH ?= $(shell uname -m) - -ifeq ($(KDIR),) -$(error Must specify KDIR to point to the kernel to target)) -endif - -all: dma_buf_lock - -dma_buf_lock: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../include" - -clean: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/dma_buf_lock.c b/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/dma_buf_lock.c deleted file mode 100755 index 9613ffcfd6964..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/dma_buf_lock.c +++ /dev/null @@ -1,481 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "dma_buf_lock.h" - -/* Maximum number of buffers that a single handle can address */ -#define DMA_BUF_LOCK_BUF_MAX 32 - -#define DMA_BUF_LOCK_DEBUG 1 - -static dev_t dma_buf_lock_dev; -static struct cdev dma_buf_lock_cdev; -static struct class *dma_buf_lock_class; -static char dma_buf_lock_dev_name[] = "dma_buf_lock"; - -#ifdef HAVE_UNLOCKED_IOCTL -static long dma_buf_lock_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); -#else -static int dma_buf_lock_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -#endif - -static struct file_operations dma_buf_lock_fops = -{ - .owner = THIS_MODULE, -#ifdef HAVE_UNLOCKED_IOCTL - .unlocked_ioctl = dma_buf_lock_ioctl, -#else - .ioctl = dma_buf_lock_ioctl, -#endif - .compat_ioctl = dma_buf_lock_ioctl, -}; - -typedef struct dma_buf_lock_resource -{ - int *list_of_dma_buf_fds; /* List of buffers copied from userspace */ - atomic_t locked; /* Status of lock */ - struct dma_buf **dma_bufs; - struct kds_resource **kds_resources; /* List of KDS resources associated with buffers */ - struct kds_resource_set *resource_set; - unsigned long exclusive; /* Exclusive access bitmap */ - wait_queue_head_t wait; - struct kds_callback cb; - struct kref refcount; - struct list_head link; - int count; -} dma_buf_lock_resource; - -static LIST_HEAD(dma_buf_lock_resource_list); -static DEFINE_MUTEX(dma_buf_lock_mutex); - -static inline int is_dma_buf_lock_file(struct file *); -static void dma_buf_lock_dounlock(struct kref *ref); - -static int dma_buf_lock_handle_release(struct inode *inode, struct file *file) -{ - dma_buf_lock_resource *resource; - - if (!is_dma_buf_lock_file(file)) - return -EINVAL; - - resource = file->private_data; -#if DMA_BUF_LOCK_DEBUG - printk("dma_buf_lock_handle_release\n"); -#endif - 
mutex_lock(&dma_buf_lock_mutex); - kref_put(&resource->refcount, dma_buf_lock_dounlock); - mutex_unlock(&dma_buf_lock_mutex); - - return 0; -} - -static void dma_buf_lock_kds_callback(void *param1, void *param2) -{ - dma_buf_lock_resource *resource = param1; -#if DMA_BUF_LOCK_DEBUG - printk("dma_buf_lock_kds_callback\n"); -#endif - atomic_set(&resource->locked, 1); - - wake_up(&resource->wait); -} - -static unsigned int dma_buf_lock_handle_poll(struct file *file, - struct poll_table_struct *wait) -{ - dma_buf_lock_resource *resource; - unsigned int ret = 0; - - if (!is_dma_buf_lock_file(file)) - return POLLERR; - - resource = file->private_data; -#if DMA_BUF_LOCK_DEBUG - printk("dma_buf_lock_handle_poll\n"); -#endif - if (1 == atomic_read(&resource->locked)) - { - /* Resources have been locked */ - ret = POLLIN | POLLRDNORM; - if (resource->exclusive) - { - ret |= POLLOUT | POLLWRNORM; - } - } - else - { - if (!poll_does_not_wait(wait)) - { - poll_wait(file, &resource->wait, wait); - } - } -#if DMA_BUF_LOCK_DEBUG - printk("dma_buf_lock_handle_poll : return %i\n", ret); -#endif - return ret; -} - -static const struct file_operations dma_buf_lock_handle_fops = { - .release = dma_buf_lock_handle_release, - .poll = dma_buf_lock_handle_poll, -}; - -/* - * is_dma_buf_lock_file - Check if struct file* is associated with dma_buf_lock - */ -static inline int is_dma_buf_lock_file(struct file *file) -{ - return file->f_op == &dma_buf_lock_handle_fops; -} - - - -/* - * Start requested lock. - * - * Allocates required memory, copies dma_buf_fd list from userspace, - * acquires related KDS resources, and starts the lock. 
- */ -static int dma_buf_lock_dolock(dma_buf_lock_k_request *request) -{ - dma_buf_lock_resource *resource; - int size; - int fd; - int i; - int ret; - - if (NULL == request->list_of_dma_buf_fds) - { - return -EINVAL; - } - if (request->count <= 0) - { - return -EINVAL; - } - if (request->count > DMA_BUF_LOCK_BUF_MAX) - { - return -EINVAL; - } - if (request->exclusive != DMA_BUF_LOCK_NONEXCLUSIVE && - request->exclusive != DMA_BUF_LOCK_EXCLUSIVE) - { - return -EINVAL; - } - - resource = kzalloc(sizeof(dma_buf_lock_resource), GFP_KERNEL); - if (NULL == resource) - { - return -ENOMEM; - } - - atomic_set(&resource->locked, 0); - kref_init(&resource->refcount); - INIT_LIST_HEAD(&resource->link); - resource->count = request->count; - - /* Allocate space to store dma_buf_fds received from user space */ - size = request->count * sizeof(int); - resource->list_of_dma_buf_fds = kmalloc(size, GFP_KERNEL); - - if (NULL == resource->list_of_dma_buf_fds) - { - kfree(resource); - return -ENOMEM; - } - - /* Allocate space to store dma_buf pointers associated with dma_buf_fds */ - size = sizeof(struct dma_buf *) * request->count; - resource->dma_bufs = kmalloc(size, GFP_KERNEL); - - if (NULL == resource->dma_bufs) - { - kfree(resource->list_of_dma_buf_fds); - kfree(resource); - return -ENOMEM; - } - /* Allocate space to store kds_resources associated with dma_buf_fds */ - size = sizeof(struct kds_resource *) * request->count; - resource->kds_resources = kmalloc(size, GFP_KERNEL); - - if (NULL == resource->kds_resources) - { - kfree(resource->dma_bufs); - kfree(resource->list_of_dma_buf_fds); - kfree(resource); - return -ENOMEM; - } - - /* Copy requested list of dma_buf_fds from user space */ - size = request->count * sizeof(int); - if (0 != copy_from_user(resource->list_of_dma_buf_fds, (void __user *)request->list_of_dma_buf_fds, size)) - { - kfree(resource->list_of_dma_buf_fds); - kfree(resource->dma_bufs); - kfree(resource->kds_resources); - kfree(resource); - return -ENOMEM; - } 
-#if DMA_BUF_LOCK_DEBUG - for (i = 0; i < request->count; i++) - { - printk("dma_buf %i = %X\n", i, resource->list_of_dma_buf_fds[i]); - } -#endif - - /* Add resource to global list */ - mutex_lock(&dma_buf_lock_mutex); - - list_add(&resource->link, &dma_buf_lock_resource_list); - - mutex_unlock(&dma_buf_lock_mutex); - - for (i = 0; i < request->count; i++) - { - /* Convert fd into dma_buf structure */ - resource->dma_bufs[i] = dma_buf_get(resource->list_of_dma_buf_fds[i]); - - if (IS_ERR_VALUE(PTR_ERR(resource->dma_bufs[i]))) - { - mutex_lock(&dma_buf_lock_mutex); - kref_put(&resource->refcount, dma_buf_lock_dounlock); - mutex_unlock(&dma_buf_lock_mutex); - return -EINVAL; - } - - /*Get kds_resource associated with dma_buf */ - resource->kds_resources[i] = get_dma_buf_kds_resource(resource->dma_bufs[i]); - - if (NULL == resource->kds_resources[i]) - { - mutex_lock(&dma_buf_lock_mutex); - kref_put(&resource->refcount, dma_buf_lock_dounlock); - mutex_unlock(&dma_buf_lock_mutex); - return -EINVAL; - } -#if DMA_BUF_LOCK_DEBUG - printk("dma_buf_lock_dolock : dma_buf_fd %i dma_buf %X kds_resource %X\n", resource->list_of_dma_buf_fds[i], - (unsigned int)resource->dma_bufs[i], (unsigned int)resource->kds_resources[i]); -#endif - } - - kds_callback_init(&resource->cb, 1, dma_buf_lock_kds_callback); - init_waitqueue_head(&resource->wait); - - kref_get(&resource->refcount); - - /* Create file descriptor associated with lock request */ - fd = anon_inode_getfd("dma_buf_lock", &dma_buf_lock_handle_fops, - (void *)resource, 0); - if (fd < 0) - { - mutex_lock(&dma_buf_lock_mutex); - kref_put(&resource->refcount, dma_buf_lock_dounlock); - kref_put(&resource->refcount, dma_buf_lock_dounlock); - mutex_unlock(&dma_buf_lock_mutex); - return fd; - } - - resource->exclusive = request->exclusive; - - /* Start locking process */ - ret = kds_async_waitall(&resource->resource_set, - &resource->cb, resource, NULL, - request->count, &resource->exclusive, - resource->kds_resources); - - if 
(IS_ERR_VALUE(ret)) - { - put_unused_fd(fd); - - mutex_lock(&dma_buf_lock_mutex); - kref_put(&resource->refcount, dma_buf_lock_dounlock); - mutex_unlock(&dma_buf_lock_mutex); - - return ret; - } - -#if DMA_BUF_LOCK_DEBUG - printk("dma_buf_lock_dolock : complete\n"); -#endif - mutex_lock(&dma_buf_lock_mutex); - kref_put(&resource->refcount, dma_buf_lock_dounlock); - mutex_unlock(&dma_buf_lock_mutex); - - return fd; -} - -static void dma_buf_lock_dounlock(struct kref *ref) -{ - int i; - dma_buf_lock_resource *resource = container_of(ref, dma_buf_lock_resource, refcount); - - atomic_set(&resource->locked, 0); - - kds_callback_term(&resource->cb); - - kds_resource_set_release(&resource->resource_set); - - list_del(&resource->link); - - for (i = 0; i < resource->count; i++) - { - dma_buf_put(resource->dma_bufs[i]); - } - - kfree(resource->kds_resources); - kfree(resource->dma_bufs); - kfree(resource->list_of_dma_buf_fds); - kfree(resource); -} - -static int __init dma_buf_lock_init(void) -{ - int err; -#if DMA_BUF_LOCK_DEBUG - printk("dma_buf_lock_init\n"); -#endif - err = alloc_chrdev_region(&dma_buf_lock_dev, 0, 1, dma_buf_lock_dev_name); - - if (0 == err) - { - cdev_init(&dma_buf_lock_cdev, &dma_buf_lock_fops); - - err = cdev_add(&dma_buf_lock_cdev, dma_buf_lock_dev, 1); - - if (0 == err) - { - dma_buf_lock_class = class_create(THIS_MODULE, dma_buf_lock_dev_name); - if (IS_ERR(dma_buf_lock_class)) - { - err = PTR_ERR(dma_buf_lock_class); - } - else - { - struct device *mdev; - mdev = device_create(dma_buf_lock_class, NULL, dma_buf_lock_dev, NULL, dma_buf_lock_dev_name); - if (!IS_ERR(mdev)) - { - return 0; - } - - err = PTR_ERR(mdev); - class_destroy(dma_buf_lock_class); - } - cdev_del(&dma_buf_lock_cdev); - } - - unregister_chrdev_region(dma_buf_lock_dev, 1); - } -#if DMA_BUF_LOCK_DEBUG - printk("dma_buf_lock_init failed\n"); -#endif - return err; -} - -static void __exit dma_buf_lock_exit(void) -{ -#if DMA_BUF_LOCK_DEBUG - printk("dma_buf_lock_exit\n"); -#endif - - 
/* Unlock all outstanding references */ - while (1) - { - mutex_lock(&dma_buf_lock_mutex); - if (list_empty(&dma_buf_lock_resource_list)) - { - mutex_unlock(&dma_buf_lock_mutex); - break; - } - else - { - dma_buf_lock_resource *resource = list_entry(dma_buf_lock_resource_list.next, - dma_buf_lock_resource, link); - kref_put(&resource->refcount, dma_buf_lock_dounlock); - mutex_unlock(&dma_buf_lock_mutex); - } - } - - device_destroy(dma_buf_lock_class, dma_buf_lock_dev); - - class_destroy(dma_buf_lock_class); - - cdev_del(&dma_buf_lock_cdev); - - unregister_chrdev_region(dma_buf_lock_dev, 1); -} - -#ifdef HAVE_UNLOCKED_IOCTL -static long dma_buf_lock_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -#else -static int dma_buf_lock_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) -#endif -{ - dma_buf_lock_k_request request; - int size = _IOC_SIZE(cmd); - - if (_IOC_TYPE(cmd) != DMA_BUF_LOCK_IOC_MAGIC) - { - return -ENOTTY; - - } - if ((_IOC_NR(cmd) < DMA_BUF_LOCK_IOC_MINNR) || (_IOC_NR(cmd) > DMA_BUF_LOCK_IOC_MAXNR)) - { - return -ENOTTY; - } - - switch (cmd) - { - case DMA_BUF_LOCK_FUNC_LOCK_ASYNC: - if (size != sizeof(dma_buf_lock_k_request)) - { - return -ENOTTY; - } - if (copy_from_user(&request, (void __user *)arg, size)) - { - return -EFAULT; - } -#if DMA_BUF_LOCK_DEBUG - printk("DMA_BUF_LOCK_FUNC_LOCK_ASYNC - %i\n", request.count); -#endif - return dma_buf_lock_dolock(&request); - } - - return -ENOTTY; -} - -module_init(dma_buf_lock_init); -module_exit(dma_buf_lock_exit); - -MODULE_LICENSE("GPL"); - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/dma_buf_lock.h b/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/dma_buf_lock.h deleted file mode 100755 index e1b9348b9cfd5..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/dma_buf_lock.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifndef _DMA_BUF_LOCK_H -#define _DMA_BUF_LOCK_H - -typedef enum dma_buf_lock_exclusive -{ - DMA_BUF_LOCK_NONEXCLUSIVE = 0, - DMA_BUF_LOCK_EXCLUSIVE = -1 -} dma_buf_lock_exclusive; - -typedef struct dma_buf_lock_k_request -{ - int count; - int *list_of_dma_buf_fds; - int timeout; - dma_buf_lock_exclusive exclusive; -} dma_buf_lock_k_request; - -#define DMA_BUF_LOCK_IOC_MAGIC '~' - -#define DMA_BUF_LOCK_FUNC_LOCK_ASYNC _IOW(DMA_BUF_LOCK_IOC_MAGIC, 11, dma_buf_lock_k_request) - -#define DMA_BUF_LOCK_IOC_MINNR 11 -#define DMA_BUF_LOCK_IOC_MAXNR 11 - -#endif /* _DMA_BUF_LOCK_H */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/sconscript deleted file mode 100755 index 251c9a6f53d50..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_lock/src/sconscript +++ /dev/null @@ -1,36 +0,0 @@ -# -# (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - -import os -import re -Import('env') - -if env['v'] != '1': - env['MAKECOMSTR'] = '[MAKE] ${SOURCE.dir}' - -src = [Glob('#kernel/drivers/base/dma_buf_lock/src/*.c'), Glob('#kernel/drivers/base/dma_buf_lock/src/*.h'), Glob('#kernel/drivers/base/dma_buf_lock/src/K*')] - -if env.GetOption('clean') : - # Clean module - env.Execute(Action("make clean", '[CLEAN] dma_buf_lock')) - cmd = env.Command('$STATIC_LIB_PATH/dma_buf_lock.ko', src, []) - env.ProgTarget('dma_buf_lock', cmd) - -else: - # Build module - makeAction=Action("cd ${SOURCE.dir} && make dma_buf_lock && cp dma_buf_lock.ko $STATIC_LIB_PATH/", '$MAKECOMSTR') - cmd = env.Command('$STATIC_LIB_PATH/dma_buf_lock.ko', src, [makeAction]) - env.ProgTarget('dma_buf_lock', cmd) - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/Kbuild deleted file mode 100755 index 56b9f86d2d52a..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/Kbuild +++ /dev/null @@ -1,18 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - -ifneq ($(CONFIG_DMA_SHARED_BUFFER),) -obj-$(CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER) += dma-buf-test-exporter.o -endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/Kconfig b/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/Kconfig deleted file mode 100755 index 974f0c23dbd27..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/Kconfig +++ /dev/null @@ -1,20 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - -config DMA_SHARED_BUFFER_TEST_EXPORTER - tristate "Test exporter for the dma-buf framework" - depends on DMA_SHARED_BUFFER - help - This option enables the test exporter usable to help test importerts. diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/Makefile b/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/Makefile deleted file mode 100755 index 06e3d5c121a52..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -# -# (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - -# linux build system bootstrap for out-of-tree module - -# default to building for the host -ARCH ?= $(shell uname -m) - -ifeq ($(KDIR),) -$(error Must specify KDIR to point to the kernel to target)) -endif - -all: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../include" CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER=m - -clean: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/dma-buf-test-exporter.c b/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/dma-buf-test-exporter.c deleted file mode 100755 index 852c55075eb24..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/dma-buf-test-exporter.c +++ /dev/null @@ -1,635 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) -#include -#include -#endif - -struct dma_buf_te_alloc { - /* the real alloc */ - int nr_pages; - struct page **pages; - - /* the debug usage tracking */ - int nr_attached_devices; - int nr_device_mappings; - int nr_cpu_mappings; - - /* failure simulation */ - int fail_attach; - int fail_map; - int fail_mmap; - - bool contiguous; - dma_addr_t contig_dma_addr; - void *contig_cpu_addr; -}; - -static struct miscdevice te_device; - -static int dma_buf_te_attach(struct dma_buf *buf, struct device *dev, struct dma_buf_attachment *attachment) -{ - struct dma_buf_te_alloc *alloc; - alloc = buf->priv; - - if (alloc->fail_attach) - return -EFAULT; - - /* dma_buf is externally locked during call */ - alloc->nr_attached_devices++; - return 0; -} - -static void dma_buf_te_detach(struct dma_buf *buf, struct dma_buf_attachment *attachment) -{ - struct dma_buf_te_alloc *alloc; - alloc = buf->priv; - /* dma_buf is externally locked during call */ - - alloc->nr_attached_devices--; -} - -static struct sg_table *dma_buf_te_map(struct dma_buf_attachment *attachment, enum dma_data_direction direction) -{ - struct sg_table *sg; - struct scatterlist *iter; - struct dma_buf_te_alloc *alloc; - int i; - int ret; - - alloc = attachment->dmabuf->priv; - - if (alloc->fail_map) - return ERR_PTR(-ENOMEM); - -#if !(defined(ARCH_HAS_SG_CHAIN) || defined(CONFIG_ARCH_HAS_SG_CHAIN)) - /* if the ARCH can't chain we can't have allocs larger than a single sg can hold */ - if (alloc->nr_pages > SG_MAX_SINGLE_ALLOC) - return ERR_PTR(-EINVAL); -#endif - - sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); - if (!sg) - return ERR_PTR(-ENOMEM); - - /* from here we access the allocation object, so lock the dmabuf pointing to it */ - mutex_lock(&attachment->dmabuf->lock); - - if (alloc->contiguous) - ret = sg_alloc_table(sg, 1, GFP_KERNEL); - else - ret = 
sg_alloc_table(sg, alloc->nr_pages, GFP_KERNEL); - if (ret) { - mutex_unlock(&attachment->dmabuf->lock); - kfree(sg); - return ERR_PTR(ret); - } - - if (alloc->contiguous) { - sg_dma_len(sg->sgl) = alloc->nr_pages * PAGE_SIZE; - sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(alloc->contig_dma_addr)), alloc->nr_pages * PAGE_SIZE, 0); - sg_dma_address(sg->sgl) = alloc->contig_dma_addr; - } else { - for_each_sg(sg->sgl, iter, alloc->nr_pages, i) - sg_set_page(iter, alloc->pages[i], PAGE_SIZE, 0); - } - - if (!dma_map_sg(attachment->dev, sg->sgl, sg->nents, direction)) { - mutex_unlock(&attachment->dmabuf->lock); - sg_free_table(sg); - kfree(sg); - return ERR_PTR(-ENOMEM); - } - - alloc->nr_device_mappings++; - mutex_unlock(&attachment->dmabuf->lock); - return sg; -} - -static void dma_buf_te_unmap(struct dma_buf_attachment *attachment, - struct sg_table *sg, enum dma_data_direction direction) -{ - struct dma_buf_te_alloc *alloc; - - alloc = attachment->dmabuf->priv; - - dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, direction); - sg_free_table(sg); - kfree(sg); - - mutex_lock(&attachment->dmabuf->lock); - alloc->nr_device_mappings--; - mutex_unlock(&attachment->dmabuf->lock); -} - -static void dma_buf_te_release(struct dma_buf *buf) -{ - int i; - struct dma_buf_te_alloc *alloc; - alloc = buf->priv; - /* no need for locking */ - - if (alloc->contiguous) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - dma_free_attrs(te_device.this_device, - alloc->nr_pages * PAGE_SIZE, - alloc->contig_cpu_addr, alloc->contig_dma_addr, &attrs); -#else - dma_free_writecombine(te_device.this_device, - alloc->nr_pages * PAGE_SIZE, - alloc->contig_cpu_addr, alloc->contig_dma_addr); -#endif - } else { - for (i = 0; i < alloc->nr_pages; i++) - __free_page(alloc->pages[i]); - } - kfree(alloc->pages); - kfree(alloc); -} - - -static void dma_buf_te_mmap_open(struct vm_area_struct *vma) -{ - struct dma_buf 
*dma_buf; - struct dma_buf_te_alloc *alloc; - dma_buf = vma->vm_private_data; - alloc = dma_buf->priv; - - mutex_lock(&dma_buf->lock); - alloc->nr_cpu_mappings++; - mutex_unlock(&dma_buf->lock); -} - -static void dma_buf_te_mmap_close(struct vm_area_struct *vma) -{ - struct dma_buf *dma_buf; - struct dma_buf_te_alloc *alloc; - dma_buf = vma->vm_private_data; - alloc = dma_buf->priv; - - BUG_ON(alloc->nr_cpu_mappings <= 0); - mutex_lock(&dma_buf->lock); - alloc->nr_cpu_mappings--; - mutex_unlock(&dma_buf->lock); -} - -static int dma_buf_te_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -{ - struct dma_buf_te_alloc *alloc; - struct dma_buf *dmabuf; - struct page *pageptr; - - dmabuf = vma->vm_private_data; - alloc = dmabuf->priv; - - if (vmf->pgoff > alloc->nr_pages) - return VM_FAULT_SIGBUS; - - pageptr = alloc->pages[vmf->pgoff]; - - BUG_ON(!pageptr); - - get_page(pageptr); - vmf->page = pageptr; - - return 0; -} - -struct vm_operations_struct dma_buf_te_vm_ops = { - .open = dma_buf_te_mmap_open, - .close = dma_buf_te_mmap_close, - .fault = dma_buf_te_mmap_fault -}; - -static int dma_buf_te_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) -{ - struct dma_buf_te_alloc *alloc; - alloc = dmabuf->priv; - - if (alloc->fail_mmap) - return -ENOMEM; - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; -#else - vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTEXPAND; -#endif - vma->vm_ops = &dma_buf_te_vm_ops; - vma->vm_private_data = dmabuf; - - /* we fault in the pages on access */ - - /* call open to do the ref-counting */ - dma_buf_te_vm_ops.open(vma); - - return 0; -} - -static void *dma_buf_te_kmap_atomic(struct dma_buf *buf, unsigned long page_num) -{ - /* IGNORE */ - return NULL; -} - -static void *dma_buf_te_kmap(struct dma_buf *buf, unsigned long page_num) -{ - /* IGNORE */ - return NULL; -} - -static struct dma_buf_ops dma_buf_te_ops = { - /* real handlers */ - .attach = 
dma_buf_te_attach, - .detach = dma_buf_te_detach, - .map_dma_buf = dma_buf_te_map, - .unmap_dma_buf = dma_buf_te_unmap, - .release = dma_buf_te_release, - .mmap = dma_buf_te_mmap, - - /* nop handlers for mandatory functions we ignore */ - .kmap_atomic = dma_buf_te_kmap_atomic, - .kmap = dma_buf_te_kmap -}; - -static int do_dma_buf_te_ioctl_version(struct dma_buf_te_ioctl_version __user *buf) -{ - struct dma_buf_te_ioctl_version v; - - if (copy_from_user(&v, buf, sizeof(v))) - return -EFAULT; - - if (v.op != DMA_BUF_TE_ENQ) - return -EFAULT; - - v.op = DMA_BUF_TE_ACK; - v.major = DMA_BUF_TE_VER_MAJOR; - v.minor = DMA_BUF_TE_VER_MINOR; - - if (copy_to_user(buf, &v, sizeof(v))) - return -EFAULT; - else - return 0; -} - -static int do_dma_buf_te_ioctl_alloc(struct dma_buf_te_ioctl_alloc __user *buf, bool contiguous) -{ - struct dma_buf_te_ioctl_alloc alloc_req; - struct dma_buf_te_alloc *alloc; - struct dma_buf *dma_buf; - int i = 0; - int fd; - - if (copy_from_user(&alloc_req, buf, sizeof(alloc_req))) { - dev_err(te_device.this_device, "%s: couldn't get user data", __func__); - goto no_input; - } - - if (!alloc_req.size) { - dev_err(te_device.this_device, "%s: no size specified", __func__); - goto invalid_size; - } - -#if !(defined(ARCH_HAS_SG_CHAIN) || defined(CONFIG_ARCH_HAS_SG_CHAIN)) - /* Whilst it is possible to allocate larger buffer, we won't be able to - * map it during actual usage (mmap() still succeeds). We fail here so - * userspace code can deal with it early than having driver failure - * later on. 
*/ - if (alloc_req.size > SG_MAX_SINGLE_ALLOC) { - dev_err(te_device.this_device, "%s: buffer size of %llu pages exceeded the mapping limit of %lu pages", - __func__, alloc_req.size, SG_MAX_SINGLE_ALLOC); - goto invalid_size; - } -#endif - - alloc = kzalloc(sizeof(struct dma_buf_te_alloc), GFP_KERNEL); - if (NULL == alloc) { - dev_err(te_device.this_device, "%s: couldn't alloc object", __func__); - goto no_alloc_object; - } - - alloc->nr_pages = alloc_req.size; - alloc->contiguous = contiguous; - - alloc->pages = kzalloc(sizeof(struct page *) * alloc->nr_pages, GFP_KERNEL); - if (!alloc->pages) { - dev_err(te_device.this_device, - "%s: couldn't alloc %d page structures", __func__, - alloc->nr_pages); - goto free_alloc_object; - } - - if (contiguous) { - dma_addr_t dma_aux; - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - alloc->contig_cpu_addr = dma_alloc_attrs(te_device.this_device, - alloc->nr_pages * PAGE_SIZE, - &alloc->contig_dma_addr, - GFP_KERNEL | __GFP_ZERO, &attrs); -#else - alloc->contig_cpu_addr = dma_alloc_writecombine(te_device.this_device, - alloc->nr_pages * PAGE_SIZE, - &alloc->contig_dma_addr, - GFP_KERNEL | __GFP_ZERO); -#endif - if (!alloc->contig_cpu_addr) { - dev_err(te_device.this_device, "%s: couldn't alloc contiguous buffer %d pages", __func__, alloc->nr_pages); - goto free_page_struct; - } - dma_aux = alloc->contig_dma_addr; - for (i = 0; i < alloc->nr_pages; i++) { - alloc->pages[i] = pfn_to_page(PFN_DOWN(dma_aux)); - dma_aux += PAGE_SIZE; - } - } else { - for (i = 0; i < alloc->nr_pages; i++) { - alloc->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (NULL == alloc->pages[i]) { - dev_err(te_device.this_device, "%s: couldn't alloc page", __func__); - goto no_page; - } - } - } - - /* alloc ready, let's export it */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) - dma_buf = dma_buf_export(alloc, &dma_buf_te_ops, - alloc->nr_pages << PAGE_SHIFT, 
O_CLOEXEC|O_RDWR, NULL); -#else - dma_buf = dma_buf_export(alloc, &dma_buf_te_ops, - alloc->nr_pages << PAGE_SHIFT, O_CLOEXEC|O_RDWR); -#endif - - if (IS_ERR_OR_NULL(dma_buf)) { - dev_err(te_device.this_device, "%s: couldn't export dma_buf", __func__); - goto no_export; - } - - /* get fd for buf */ - fd = dma_buf_fd(dma_buf, O_CLOEXEC); - - if (fd < 0) { - dev_err(te_device.this_device, "%s: couldn't get fd from dma_buf", __func__); - goto no_fd; - } - - return fd; - -no_fd: - dma_buf_put(dma_buf); -no_export: - /* i still valid */ -no_page: - if (contiguous) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - dma_free_attrs(te_device.this_device, - alloc->nr_pages * PAGE_SIZE, - alloc->contig_cpu_addr, alloc->contig_dma_addr, &attrs); -#else - dma_free_writecombine(te_device.this_device, - alloc->nr_pages * PAGE_SIZE, - alloc->contig_cpu_addr, alloc->contig_dma_addr); -#endif - } else { - while (i-- > 0) - __free_page(alloc->pages[i]); - } -free_page_struct: - kfree(alloc->pages); -free_alloc_object: - kfree(alloc); -no_alloc_object: -invalid_size: -no_input: - return -EFAULT; -} - -static int do_dma_buf_te_ioctl_status(struct dma_buf_te_ioctl_status __user *arg) -{ - struct dma_buf_te_ioctl_status status; - struct dma_buf *dmabuf; - struct dma_buf_te_alloc *alloc; - int res = -EINVAL; - - if (copy_from_user(&status, arg, sizeof(status))) - return -EFAULT; - - dmabuf = dma_buf_get(status.fd); - if (IS_ERR_OR_NULL(dmabuf)) - return -EINVAL; - - /* verify it's one of ours */ - if (dmabuf->ops != &dma_buf_te_ops) - goto err_have_dmabuf; - - /* ours, get the current status */ - alloc = dmabuf->priv; - - /* lock while reading status to take a snapshot */ - mutex_lock(&dmabuf->lock); - status.attached_devices = alloc->nr_attached_devices; - status.device_mappings = alloc->nr_device_mappings; - status.cpu_mappings = alloc->nr_cpu_mappings; - mutex_unlock(&dmabuf->lock); - - if 
(copy_to_user(arg, &status, sizeof(status))) - goto err_have_dmabuf; - - /* All OK */ - res = 0; - -err_have_dmabuf: - dma_buf_put(dmabuf); - return res; -} - -static int do_dma_buf_te_ioctl_set_failing(struct dma_buf_te_ioctl_set_failing __user *arg) -{ - struct dma_buf *dmabuf; - struct dma_buf_te_ioctl_set_failing f; - struct dma_buf_te_alloc *alloc; - int res = -EINVAL; - - if (copy_from_user(&f, arg, sizeof(f))) - return -EFAULT; - - dmabuf = dma_buf_get(f.fd); - if (IS_ERR_OR_NULL(dmabuf)) - return -EINVAL; - - /* verify it's one of ours */ - if (dmabuf->ops != &dma_buf_te_ops) - goto err_have_dmabuf; - - /* ours, set the fail modes */ - alloc = dmabuf->priv; - /* lock to set the fail modes atomically */ - mutex_lock(&dmabuf->lock); - alloc->fail_attach = f.fail_attach; - alloc->fail_map = f.fail_map; - alloc->fail_mmap = f.fail_mmap; - mutex_unlock(&dmabuf->lock); - - /* success */ - res = 0; - -err_have_dmabuf: - dma_buf_put(dmabuf); - return res; -} - -static u32 dma_te_buf_fill(struct dma_buf *dma_buf, unsigned int value) -{ - struct dma_buf_attachment *attachment; - struct sg_table *sgt; - struct scatterlist *sg; - unsigned int count; - unsigned int offset = 0; - int ret = 0; - int i; - - attachment = dma_buf_attach(dma_buf, te_device.this_device); - if (IS_ERR_OR_NULL(attachment)) - return -EBUSY; - - sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL); - if (IS_ERR_OR_NULL(sgt)) { - ret = PTR_ERR(sgt); - goto no_import; - } - - for_each_sg(sgt->sgl, sg, sgt->nents, count) { - ret = dma_buf_begin_cpu_access(dma_buf, offset, sg_dma_len(sg), DMA_BIDIRECTIONAL); - if (ret) - goto no_cpu_access; - for (i = 0; i < sg_dma_len(sg); i = i + PAGE_SIZE) { - void *addr; - - addr = dma_buf_kmap(dma_buf, i >> PAGE_SHIFT); - if (!addr) { - /* dma_buf_kmap is unimplemented in exynos and returns NULL */ - dma_buf_end_cpu_access(dma_buf, offset, sg_dma_len(sg), DMA_BIDIRECTIONAL); - ret = -EPERM; - goto no_cpu_access; - } - memset(addr, value, PAGE_SIZE); - 
dma_buf_kunmap(dma_buf, i >> PAGE_SHIFT, addr); - } - dma_buf_end_cpu_access(dma_buf, offset, sg_dma_len(sg), DMA_BIDIRECTIONAL); - offset += sg_dma_len(sg); - } - -no_cpu_access: - dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL); -no_import: - dma_buf_detach(dma_buf, attachment); - return ret; -} - -static int do_dma_buf_te_ioctl_fill(struct dma_buf_te_ioctl_fill __user *arg) -{ - - struct dma_buf *dmabuf; - struct dma_buf_te_ioctl_fill f; - int ret; - - if (copy_from_user(&f, arg, sizeof(f))) - return -EFAULT; - - dmabuf = dma_buf_get(f.fd); - if (IS_ERR_OR_NULL(dmabuf)) - return -EINVAL; - - ret = dma_te_buf_fill(dmabuf, f.value); - dma_buf_put(dmabuf); - - return ret; -} - -static long dma_buf_te_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - switch (cmd) { - case DMA_BUF_TE_VERSION: - return do_dma_buf_te_ioctl_version((struct dma_buf_te_ioctl_version __user *)arg); - case DMA_BUF_TE_ALLOC: - return do_dma_buf_te_ioctl_alloc((struct dma_buf_te_ioctl_alloc __user *)arg, false); - case DMA_BUF_TE_ALLOC_CONT: - return do_dma_buf_te_ioctl_alloc((struct dma_buf_te_ioctl_alloc __user *)arg, true); - case DMA_BUF_TE_QUERY: - return do_dma_buf_te_ioctl_status((struct dma_buf_te_ioctl_status __user *)arg); - case DMA_BUF_TE_SET_FAILING: - return do_dma_buf_te_ioctl_set_failing((struct dma_buf_te_ioctl_set_failing __user *)arg); - case DMA_BUF_TE_FILL: - return do_dma_buf_te_ioctl_fill((struct dma_buf_te_ioctl_fill __user *)arg); - default: - return -ENOTTY; - } -} - -static const struct file_operations dma_buf_te_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = dma_buf_te_ioctl, - .compat_ioctl = dma_buf_te_ioctl, -}; - -static int __init dma_buf_te_init(void) -{ - int res; - te_device.minor = MISC_DYNAMIC_MINOR; - te_device.name = "dma_buf_te"; - te_device.fops = &dma_buf_te_fops; - - res = misc_register(&te_device); - if (res) { - printk(KERN_WARNING"Misc device registration failed of 'dma_buf_te'\n"); - return res; - } - 
te_device.this_device->coherent_dma_mask = DMA_BIT_MASK(32); - - dev_info(te_device.this_device, "dma_buf_te ready\n"); - return 0; - -} - -static void __exit dma_buf_te_exit(void) -{ - misc_deregister(&te_device); -} - -module_init(dma_buf_te_init); -module_exit(dma_buf_te_exit); -MODULE_LICENSE("GPL"); - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/sconscript deleted file mode 100755 index bfb8a99f9d45d..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/dma_buf_test_exporter/sconscript +++ /dev/null @@ -1,36 +0,0 @@ -# -# (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - - -import os -Import('env') - -if env['v'] != '1': - env['MAKECOMSTR'] = '[MAKE] ${SOURCE.dir}' - -src = [Glob('#kernel/drivers/base/dma_buf_test_exporter/*.c'), Glob('#kernel/include/linux/*.h'), Glob('#kernel/drivers/base/dma_buf_test_exporter/K*')] - -env.Append( CPPPATH = '#kernel/include' ) - -if env.GetOption('clean') : - env.Execute(Action("make clean", '[CLEAN] dma-buf-test-exporter')) - cmd = env.Command('$STATIC_LIB_PATH/dma-buf-test-exporter.ko', src, []) - env.ProgTarget('dma-buf-test-exporter', cmd) -else: - makeAction=Action("cd ${SOURCE.dir} && make && ( ( [ -f dma-buf-test-exporter.ko ] && cp dma-buf-test-exporter.ko $STATIC_LIB_PATH/ ) || touch $STATIC_LIB_PATH/dma-buf-test-exporter.ko)", '$MAKECOMSTR') - cmd = env.Command('$STATIC_LIB_PATH/dma-buf-test-exporter.ko', src, [makeAction]) - env.ProgTarget('dma-buf-test-exporter', cmd) - - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/kds/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/base/kds/Kbuild deleted file mode 100755 index a88acd8fdce85..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/kds/Kbuild +++ /dev/null @@ -1,18 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - - -obj-$(CONFIG_KDS) += kds.o -obj-$(CONFIG_KDS_TEST) += kds_test.o diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/kds/Kconfig b/drivers/gpu/arm/t83x/kernel/drivers/base/kds/Kconfig deleted file mode 100755 index 5f96165f67d53..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/kds/Kconfig +++ /dev/null @@ -1,20 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -config KDS - tristate "Kernel dependency system" - help - This option enables the generic kernel dependency system diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/kds/Makefile b/drivers/gpu/arm/t83x/kernel/drivers/base/kds/Makefile deleted file mode 100755 index 364d1515c1ae4..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/kds/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# -# (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - - -# linux build system bootstrap for out-of-tree module - -# default to building for the host -ARCH ?= $(shell uname -m) -CONFIG_KDS_TEST ?= n - -ifeq ($(KDIR),) -$(error Must specify KDIR to point to the kernel to target)) -endif - -all: kds - -kds: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../include" CONFIG_KDS=m - -kds_test: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../include" CONFIG_KDS_TEST=m - -clean: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/kds/kds.c b/drivers/gpu/arm/t83x/kernel/drivers/base/kds/kds.c deleted file mode 100755 index 62612d414d0e7..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/kds/kds.c +++ /dev/null @@ -1,559 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define KDS_LINK_TRIGGERED (1u << 0) -#define KDS_LINK_EXCLUSIVE (1u << 1) - -#define KDS_INVALID (void *)-2 -#define KDS_RESOURCE (void *)-1 - -struct kds_resource_set -{ - unsigned long num_resources; - unsigned long pending; - struct kds_callback *cb; - void *callback_parameter; - void *callback_extra_parameter; - struct list_head callback_link; - struct work_struct callback_work; - atomic_t cb_queued; - /* This resource set will be freed when there are no pending - * callbacks */ - struct kref refcount; - - /* This is only initted when kds_waitall() is called. */ - wait_queue_head_t wake; - - struct kds_link resources[0]; - -}; - -static DEFINE_SPINLOCK(kds_lock); - -static void __resource_set_release(struct kref *ref) -{ - struct kds_resource_set *rset = container_of(ref, - struct kds_resource_set, refcount); - - kfree(rset); -} - -int kds_callback_init(struct kds_callback *cb, int direct, kds_callback_fn user_cb) -{ - int ret = 0; - - cb->direct = direct; - cb->user_cb = user_cb; - - if (!direct) - { - cb->wq = alloc_workqueue("kds", WQ_UNBOUND | WQ_HIGHPRI, WQ_UNBOUND_MAX_ACTIVE); - if (!cb->wq) - ret = -ENOMEM; - } - else - { - cb->wq = NULL; - } - - return ret; -} -EXPORT_SYMBOL(kds_callback_init); - -void kds_callback_term(struct kds_callback *cb) -{ - if (!cb->direct) - { - BUG_ON(!cb->wq); - destroy_workqueue(cb->wq); - } - else - { - BUG_ON(cb->wq); - } -} - -EXPORT_SYMBOL(kds_callback_term); - -static void kds_do_user_callback(struct kds_resource_set *rset) -{ - rset->cb->user_cb(rset->callback_parameter, rset->callback_extra_parameter); -} - -static void kds_queued_callback(struct work_struct *work) -{ - struct kds_resource_set *rset; - rset = container_of(work, struct kds_resource_set, callback_work); - - atomic_dec(&rset->cb_queued); - - kds_do_user_callback(rset); -} - -static void kds_callback_perform(struct 
kds_resource_set *rset) -{ - if (rset->cb->direct) - kds_do_user_callback(rset); - else - { - int result; - - atomic_inc(&rset->cb_queued); - - result = queue_work(rset->cb->wq, &rset->callback_work); - /* if we got a 0 return it means we've triggered the same rset twice! */ - WARN_ON(!result); - } -} - -void kds_resource_init(struct kds_resource * const res) -{ - BUG_ON(!res); - INIT_LIST_HEAD(&res->waiters.link); - res->waiters.parent = KDS_RESOURCE; -} -EXPORT_SYMBOL(kds_resource_init); - -int kds_resource_term(struct kds_resource *res) -{ - unsigned long lflags; - BUG_ON(!res); - spin_lock_irqsave(&kds_lock, lflags); - if (!list_empty(&res->waiters.link)) - { - spin_unlock_irqrestore(&kds_lock, lflags); - printk(KERN_ERR "ERROR: KDS resource is still in use\n"); - return -EBUSY; - } - res->waiters.parent = KDS_INVALID; - spin_unlock_irqrestore(&kds_lock, lflags); - return 0; -} -EXPORT_SYMBOL(kds_resource_term); - -int kds_async_waitall( - struct kds_resource_set ** const pprset, - struct kds_callback *cb, - void *callback_parameter, - void *callback_extra_parameter, - int number_resources, - unsigned long *exclusive_access_bitmap, - struct kds_resource **resource_list) -{ - struct kds_resource_set *rset = NULL; - unsigned long lflags; - int i; - int triggered; - int err = -EFAULT; - - BUG_ON(!pprset); - BUG_ON(!resource_list); - BUG_ON(!cb); - - WARN_ONCE(number_resources > 10, "Waiting on a high numbers of resources may increase latency, see documentation."); - - rset = kmalloc(sizeof(*rset) + number_resources * sizeof(struct kds_link), GFP_KERNEL); - if (!rset) - { - return -ENOMEM; - } - - rset->num_resources = number_resources; - rset->pending = number_resources; - rset->cb = cb; - rset->callback_parameter = callback_parameter; - rset->callback_extra_parameter = callback_extra_parameter; - INIT_LIST_HEAD(&rset->callback_link); - INIT_WORK(&rset->callback_work, kds_queued_callback); - atomic_set(&rset->cb_queued, 0); - kref_init(&rset->refcount); - - for (i 
= 0; i < number_resources; i++) - { - INIT_LIST_HEAD(&rset->resources[i].link); - rset->resources[i].parent = rset; - } - - spin_lock_irqsave(&kds_lock, lflags); - - for (i = 0; i < number_resources; i++) - { - unsigned long link_state = 0; - - if (test_bit(i, exclusive_access_bitmap)) - { - link_state |= KDS_LINK_EXCLUSIVE; - } - - /* no-one else waiting? */ - if (list_empty(&resource_list[i]->waiters.link)) - { - link_state |= KDS_LINK_TRIGGERED; - rset->pending--; - } - /* Adding a non-exclusive and the current tail is a triggered non-exclusive? */ - else if (((link_state & KDS_LINK_EXCLUSIVE) == 0) && - (((list_entry(resource_list[i]->waiters.link.prev, struct kds_link, link)->state & (KDS_LINK_EXCLUSIVE | KDS_LINK_TRIGGERED)) == KDS_LINK_TRIGGERED))) - { - link_state |= KDS_LINK_TRIGGERED; - rset->pending--; - } - rset->resources[i].state = link_state; - - /* avoid double wait (hang) */ - if (!list_empty(&resource_list[i]->waiters.link)) - { - /* adding same rset again? */ - if (list_entry(resource_list[i]->waiters.link.prev, struct kds_link, link)->parent == rset) - { - goto roll_back; - } - } - list_add_tail(&rset->resources[i].link, &resource_list[i]->waiters.link); - } - - triggered = (rset->pending == 0); - - /* set the pointer before the callback is called so it sees it */ - *pprset = rset; - - spin_unlock_irqrestore(&kds_lock, lflags); - - if (triggered) - { - /* all resources obtained, trigger callback */ - kds_callback_perform(rset); - } - - return 0; - -roll_back: - /* roll back */ - while (i-- > 0) - { - list_del(&rset->resources[i].link); - } - err = -EINVAL; - - spin_unlock_irqrestore(&kds_lock, lflags); - kfree(rset); - return err; -} -EXPORT_SYMBOL(kds_async_waitall); - -static void wake_up_sync_call(void *callback_parameter, void *callback_extra_parameter) -{ - wait_queue_head_t *wait = (wait_queue_head_t *)callback_parameter; - wake_up(wait); -} - -static struct kds_callback sync_cb = -{ - wake_up_sync_call, - 1, - NULL, -}; - -struct 
kds_resource_set *kds_waitall( - int number_resources, - unsigned long *exclusive_access_bitmap, - struct kds_resource **resource_list, - unsigned long jiffies_timeout) -{ - struct kds_resource_set *rset; - unsigned long lflags; - int i; - int triggered = 0; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); - - rset = kmalloc(sizeof(*rset) + number_resources * sizeof(struct kds_link), GFP_KERNEL); - if (!rset) - return rset; - - rset->num_resources = number_resources; - rset->pending = number_resources; - init_waitqueue_head(&rset->wake); - INIT_LIST_HEAD(&rset->callback_link); - INIT_WORK(&rset->callback_work, kds_queued_callback); - atomic_set(&rset->cb_queued, 0); - kref_init(&rset->refcount); - - spin_lock_irqsave(&kds_lock, lflags); - - for (i = 0; i < number_resources; i++) - { - unsigned long link_state = 0; - - if (test_bit(i, exclusive_access_bitmap)) - { - link_state |= KDS_LINK_EXCLUSIVE; - } - - if (list_empty(&resource_list[i]->waiters.link)) - { - link_state |= KDS_LINK_TRIGGERED; - rset->pending--; - } - /* Adding a non-exclusive and the current tail is a triggered non-exclusive? */ - else if (((link_state & KDS_LINK_EXCLUSIVE) == 0) && - (((list_entry(resource_list[i]->waiters.link.prev, struct kds_link, link)->state & (KDS_LINK_EXCLUSIVE | KDS_LINK_TRIGGERED)) == KDS_LINK_TRIGGERED))) - { - link_state |= KDS_LINK_TRIGGERED; - rset->pending--; - } - - INIT_LIST_HEAD(&rset->resources[i].link); - rset->resources[i].parent = rset; - rset->resources[i].state = link_state; - - /* avoid double wait (hang) */ - if (!list_empty(&resource_list[i]->waiters.link)) - { - /* adding same rset again? 
*/ - if (list_entry(resource_list[i]->waiters.link.prev, struct kds_link, link)->parent == rset) - { - goto roll_back; - } - } - - list_add_tail(&rset->resources[i].link, &resource_list[i]->waiters.link); - } - - if (rset->pending == 0) - triggered = 1; - else - { - rset->cb = &sync_cb; - rset->callback_parameter = &rset->wake; - rset->callback_extra_parameter = NULL; - } - - spin_unlock_irqrestore(&kds_lock, lflags); - - if (!triggered) - { - long wait_res = 0; - long timeout = (jiffies_timeout == KDS_WAIT_BLOCKING) ? - MAX_SCHEDULE_TIMEOUT : jiffies_timeout; - - if (timeout) - { - wait_res = wait_event_interruptible_timeout(rset->wake, - rset->pending == 0, timeout); - } - - if ((wait_res == -ERESTARTSYS) || (wait_res == 0)) - { - /* use \a kds_resource_set_release to roll back */ - kds_resource_set_release(&rset); - return ERR_PTR(wait_res); - } - } - return rset; - -roll_back: - /* roll back */ - while (i-- > 0) - { - list_del(&rset->resources[i].link); - } - - spin_unlock_irqrestore(&kds_lock, lflags); - kfree(rset); - return ERR_PTR(-EINVAL); -} -EXPORT_SYMBOL(kds_waitall); - -static void trigger_new_rset_owner(struct kds_resource_set *rset, - struct list_head *triggered) -{ - if (0 == --rset->pending) { - /* new owner now triggered, track for callback later */ - kref_get(&rset->refcount); - list_add(&rset->callback_link, triggered); - } -} - -static void __kds_resource_set_release_common(struct kds_resource_set *rset) -{ - struct list_head triggered = LIST_HEAD_INIT(triggered); - struct kds_resource_set *it; - unsigned long lflags; - int i; - - spin_lock_irqsave(&kds_lock, lflags); - - for (i = 0; i < rset->num_resources; i++) - { - struct kds_resource *resource; - struct kds_link *it = NULL; - - /* fetch the previous entry on the linked list */ - it = list_entry(rset->resources[i].link.prev, struct kds_link, link); - /* unlink ourself */ - list_del(&rset->resources[i].link); - - /* any waiters? 
*/ - if (list_empty(&it->link)) - continue; - - /* were we the head of the list? (head if prev is a resource) */ - if (it->parent != KDS_RESOURCE) - { - if ((it->state & KDS_LINK_TRIGGERED) && !(it->state & KDS_LINK_EXCLUSIVE)) - { - /* - * previous was triggered and not exclusive, so we - * trigger non-exclusive until end-of-list or first - * exclusive - */ - - struct kds_link *it_waiting = it; - - list_for_each_entry(it, &it_waiting->link, link) - { - /* exclusive found, stop triggering */ - if (it->state & KDS_LINK_EXCLUSIVE) - break; - - it->state |= KDS_LINK_TRIGGERED; - /* a parent to update? */ - if (it->parent != KDS_RESOURCE) - trigger_new_rset_owner( - it->parent, - &triggered); - } - } - continue; - } - - /* we were the head, find the kds_resource */ - resource = container_of(it, struct kds_resource, waiters); - - /* we know there is someone waiting from the any-waiters test above */ - - /* find the head of the waiting list */ - it = list_first_entry(&resource->waiters.link, struct kds_link, link); - - /* new exclusive owner? */ - if (it->state & KDS_LINK_EXCLUSIVE) - { - /* link now triggered */ - it->state |= KDS_LINK_TRIGGERED; - /* a parent to update? */ - trigger_new_rset_owner(it->parent, &triggered); - } - /* exclusive releasing ? */ - else if (rset->resources[i].state & KDS_LINK_EXCLUSIVE) - { - /* trigger non-exclusive until end-of-list or first exclusive */ - list_for_each_entry(it, &resource->waiters.link, link) - { - /* exclusive found, stop triggering */ - if (it->state & KDS_LINK_EXCLUSIVE) - break; - - it->state |= KDS_LINK_TRIGGERED; - /* a parent to update? 
*/ - trigger_new_rset_owner(it->parent, &triggered); - } - } - } - - spin_unlock_irqrestore(&kds_lock, lflags); - - while (!list_empty(&triggered)) - { - it = list_first_entry(&triggered, struct kds_resource_set, callback_link); - list_del(&it->callback_link); - kds_callback_perform(it); - - /* Free the resource set if no callbacks pending */ - kref_put(&it->refcount, &__resource_set_release); - } -} - -void kds_resource_set_release(struct kds_resource_set **pprset) -{ - struct kds_resource_set *rset; - int queued; - - rset = cmpxchg(pprset, *pprset, NULL); - - if (!rset) - { - /* caught a race between a cancelation - * and a completion, nothing to do */ - return; - } - - __kds_resource_set_release_common(rset); - - /* - * Caller is responsible for guaranteeing that callback work is not - * pending (i.e. its running or completed) prior to calling release. - */ - queued = atomic_read(&rset->cb_queued); - BUG_ON(queued); - - kref_put(&rset->refcount, &__resource_set_release); -} -EXPORT_SYMBOL(kds_resource_set_release); - -void kds_resource_set_release_sync(struct kds_resource_set **pprset) -{ - struct kds_resource_set *rset; - - rset = cmpxchg(pprset, *pprset, NULL); - if (!rset) - { - /* caught a race between a cancelation - * and a completion, nothing to do */ - return; - } - - __kds_resource_set_release_common(rset); - - /* - * In the case of a kds async wait cancellation ensure the deferred - * call back does not get scheduled if a trigger fired at the same time - * to release the wait. 
- */ - cancel_work_sync(&rset->callback_work); - - kref_put(&rset->refcount, &__resource_set_release); -} -EXPORT_SYMBOL(kds_resource_set_release_sync); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("ARM Ltd."); -MODULE_VERSION("1.0"); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/kds/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/base/kds/sconscript deleted file mode 100755 index 7fc1bc4182e5e..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/kds/sconscript +++ /dev/null @@ -1,66 +0,0 @@ -# -# (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -import os -import re -Import('env') - -#Android uses sync_pt to accomplish KDS functionality. -#Midgard KDS is not used by Android -if env['os'] == 'android': - Return() - -# If KDS is built into the kernel already we skip building the module ourselves -linux_config_file = os.path.normpath(os.environ['KDIR']) + '/.config' -search_term = '^[\ ]*CONFIG_KDS[\ ]*=[\ ]*y' -build_kds = 1 -for line in open(linux_config_file, 'r'): - if re.search(search_term, line): - # KDS in kernel. Do not build KDS kernel module but - # still allow for building kds_test module. 
- build_kds = 0 - -if env['v'] != '1': - env['MAKECOMSTR'] = '[MAKE] ${SOURCE.dir}' - -src = [Glob('#kernel/drivers/base/kds/*.c'), Glob('#kernel/include/linux/*.h'), Glob('#kernel/drivers/base/kds/K*')] - -env.Append( CPPPATH = '#kernel/include' ) - -if Glob('tests/sconscript'): - SConscript( 'tests/sconscript' ) - -if env.GetOption('clean') : - # Clean KDS module - if build_kds or (int(env['unit']) == 1): - env.Execute(Action("make clean", '[CLEAN] kds')) - cmd = env.Command('$STATIC_LIB_PATH/kds.ko', src, []) - env.ProgTarget('kds', cmd) -else: - # Build KDS module - if build_kds: - makeAction=Action("cd ${SOURCE.dir} && make kds && cp kds.ko $STATIC_LIB_PATH/", '$MAKECOMSTR') - cmd = env.Command('$STATIC_LIB_PATH/kds.ko', src, [makeAction]) - env.ProgTarget('kds', cmd) - - # Build KDS test module - if int(env['unit']) == 1: - makeActionTest=Action("cd ${SOURCE.dir} && make kds_test && cp kds_test.ko $STATIC_LIB_PATH/", '$MAKECOMSTR') - cmdTest = env.Command('$STATIC_LIB_PATH/kds_test.ko', src, [makeActionTest]) - env.ProgTarget('kds', cmdTest) - if build_kds: - Depends(cmdTest, cmd) - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/base/sconscript deleted file mode 100755 index b57cc7fb0a41f..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/sconscript +++ /dev/null @@ -1,33 +0,0 @@ -# -# (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - -Import( 'env' ) - -if Glob('bus_logger/sconscript'): - if env['buslog'] == '1': - SConscript('bus_logger/sconscript') - -if Glob('dma_buf_lock/sconscript'): - SConscript('dma_buf_lock/sconscript') - -if Glob('kds/sconscript'): - SConscript('kds/sconscript') - -if Glob('ump/sconscript'): - SConscript('ump/sconscript') - -if Glob('dma_buf_test_exporter/sconscript'): - SConscript('dma_buf_test_exporter/sconscript') - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/Kbuild deleted file mode 100755 index 2bbdba27d76f1..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/Kbuild +++ /dev/null @@ -1,18 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -obj-y += src/ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/Kconfig b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/Kconfig deleted file mode 100755 index f7451e67a5ee8..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/Kconfig +++ /dev/null @@ -1,26 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - - -config UMP - tristate "Enable Unified Memory Provider (UMP) support" - default n - help - Enable this option to build support for the ARM UMP module. - UMP can be used by the Mali T6xx module to improve performance - by reducing the copying of data by sharing memory. - - To compile this driver as a module, choose M here: - this will generate one module, called ump. diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/docs/Doxyfile b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/docs/Doxyfile deleted file mode 100755 index fbec8eb40a02d..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/docs/Doxyfile +++ /dev/null @@ -1,125 +0,0 @@ -# -# (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -############################################################################## - -# This file contains per-module Doxygen configuration. Please do not add -# extra settings to this file without consulting all stakeholders, as they -# may cause override project-wide settings. -# -# Additionally, when defining aliases, macros, sections etc, use the module -# name as a prefix e.g. gles_my_alias. - -############################################################################## - -@INCLUDE = ../../bldsys/Doxyfile_common - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. 
- -INPUT += ../../kernel/include/linux/ump-common.h ../../kernel/include/linux/ump.h - -############################################################################## -# Everything below here is optional, and in most cases not required -############################################################################## - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user-defined paragraph with heading "Side Effects:". -# You can put \n's in the value part of an alias to insert newlines. - -ALIASES += - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if sectionname ... \endif. - -ENABLED_SECTIONS += - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx -# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 - -FILE_PATTERNS += - -# The EXCLUDE tag can be used to specify files and/or directories that should -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. - -EXCLUDE += - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. 
Note that the wildcards are matched -# against the file with absolute path, so to exclude all test directories -# for example use the pattern */test/* - -EXCLUDE_PATTERNS += - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test - -EXCLUDE_SYMBOLS += - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). - -EXAMPLE_PATH += ../../kernel/drivers/base/ump - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). - -IMAGE_PATH += - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. - -INCLUDE_PATH += - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. To prevent a macro definition from being -# undefined via #undef or recursively expanded use the := operator -# instead of the = operator. - -PREDEFINED += - -# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition. 
- -EXPAND_AS_DEFINED += - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). - -DOTFILE_DIRS += diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/docs/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/docs/sconscript deleted file mode 100755 index 521278d138bd4..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/docs/sconscript +++ /dev/null @@ -1,31 +0,0 @@ -# -# (C) COPYRIGHT 2010-2011, 2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -Import('env') - -doxygen_sources = [ - 'Doxyfile', - Glob('*.png'), - Glob('../*.h'), - Glob('../src/*.h') ] - -if env['doc'] == '1': - doxygen_target = env.Command('doxygen/html/index.html', doxygen_sources, - ['cd ${SOURCE.dir} && doxygen']) - env.Clean(doxygen_target, './doxygen') - - Alias('doxygen', doxygen_target) - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/example_kernel_api.c b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/example_kernel_api.c deleted file mode 100755 index 858dd8b52f045..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/example_kernel_api.c +++ /dev/null @@ -1,73 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#include - -#include -#include - -/* - * Example routine to display information about an UMP allocation - * The routine takes an secure_id which can come from a different kernel module - * or from a client application (i.e. an ioctl). - * It creates a ump handle from the secure id (which validates the secure id) - * and if successful dumps the physical memory information. - * It follows the API and pins the memory while "using" the physical memory. - * Finally it calls the release function to indicate it's finished with the handle. - * - * If the function can't look up the handle it fails with return value -1. - * If the testy succeeds then it return 0. - * */ - -static int display_ump_memory_information(ump_secure_id secure_id) -{ - const ump_dd_physical_block_64 * ump_blocks = NULL; - ump_dd_handle ump_mem; - uint64_t nr_blocks; - int i; - ump_alloc_flags flags; - - /* get a handle from the secure id */ - ump_mem = ump_dd_from_secure_id(secure_id); - - if (UMP_DD_INVALID_MEMORY_HANDLE == ump_mem) - { - /* invalid ID received */ - return -1; - } - - /* at this point we know we've added a reference to the ump allocation, so we must release it with ump_dd_release */ - - ump_dd_phys_blocks_get_64(ump_mem, &nr_blocks, &ump_blocks); - flags = ump_dd_allocation_flags_get(ump_mem); - - printf("UMP allocation with secure ID %u consists of %zd physical block(s):\n", secure_id, nr_blocks); - - for(i=0; i -#include -#include - -/* - * Example routine to exercise the user space UMP api. - * This routine initializes the UMP api and allocates some CPU+device X memory. - * No usage hints are given, so the driver will use the default cacheability policy. - * With the allocation it creates a duplicate handle and plays with the reference count. 
- * Then it simulates interacting with a device and contains pseudo code for the device. - * - * If any error is detected correct cleanup will be performed and -1 will be returned. - * If successful then 0 will be returned. - */ - -static int test_ump_user_api(void) -{ - /* This is the size we try to allocate*/ - const size_t alloc_size = 4096; - - ump_handle h = UMP_INVALID_MEMORY_HANDLE; - ump_handle h_copy = UMP_INVALID_MEMORY_HANDLE; - ump_handle h_clone = UMP_INVALID_MEMORY_HANDLE; - - void * mapping = NULL; - - ump_result ump_api_res; - int result = -1; - - ump_secure_id id; - - size_t size_returned; - - ump_api_res = ump_open(); - if (UMP_OK != ump_api_res) - { - /* failed to open an ump session */ - /* early out */ - return -1; - } - - h = ump_allocate_64(alloc_size, UMP_PROT_CPU_RD | UMP_PROT_CPU_WR | UMP_PROT_X_RD | UMP_PROT_X_WR); - /* the refcount is now 1 */ - if (UMP_INVALID_MEMORY_HANDLE == h) - { - /* allocation failure */ - goto cleanup; - } - - /* this is how we could share this allocation with another process */ - - /* in process A: */ - id = ump_secure_id_get(h); - /* still ref count 1 */ - /* send the id to process B */ - - /* in process B: */ - /* receive the id from A */ - h_clone = ump_from_secure_id(id); - /* the ref count of the allocation is now 2 (one from each handle to it) */ - /* do something ... */ - /* release our clone */ - ump_release(h_clone); /* safe to call even if ump_from_secure_id failed */ - h_clone = UMP_INVALID_MEMORY_HANDLE; - - - /* a simple save-for-future-use logic inside the driver would just copy the handle (but add a ref manually too!) 
*/ - /* - * void assign_memory_to_job(h) - * { - */ - h_copy = h; - ump_retain(h_copy); /* manual retain needed as we just assigned the handle, now 2 */ - /* - * } - * - * void job_completed(void) - * { - */ - ump_release(h_copy); /* normal handle release as if we got via an ump_allocate */ - h_copy = UMP_INVALID_MEMORY_HANDLE; - /* - * } - */ - - /* we're now back at ref count 1, and only h is a valid handle */ - /* enough handle duplication show-off, let's play with the contents instead */ - - mapping = ump_map(h, 0, alloc_size); - if (NULL == mapping) - { - /* mapping failure, either out of address space or some other error */ - goto cleanup; - } - - memset(mapping, 0, alloc_size); - - /* let's pretend we're going to start some hw device on this buffer and read the result afterwards */ - ump_cpu_msync_now(h, UMP_MSYNC_CLEAN, mapping, alloc_size); - /* - device cache invalidate - - memory barrier - - start device - - memory barrier - - wait for device - - memory barrier - - device cache clean - - memory barrier - */ - ump_cpu_msync_now(h, UMP_MSYNC_CLEAN_AND_INVALIDATE, mapping, alloc_size); - - /* we could now peek at the result produced by the hw device, which is now accessible via our mapping */ - - /* unmap the buffer when we're done with it */ - ump_unmap(h, mapping, alloc_size); - - result = 0; - -cleanup: - ump_release(h); - h = UMP_INVALID_MEMORY_HANDLE; - - ump_close(); - - return result; -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/sconscript deleted file mode 100755 index b4aa8b6853a52..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/sconscript +++ /dev/null @@ -1,21 +0,0 @@ -# -# (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved. 
-# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - -Import('env') - -if Glob('src/sconscript') and int(env['ump']) == 1: - SConscript( 'src/sconscript' ) - SConscript( 'docs/sconscript' ) - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/Kbuild deleted file mode 100755 index de6d30770d154..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/Kbuild +++ /dev/null @@ -1,50 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -# Paths required for build -UMP_PATH = $(src)/../.. -UMP_DEVICEDRV_PATH = $(src)/. 
- -# Set up defaults if not defined by the user -MALI_UNIT_TEST ?= 0 - -SRC :=\ - common/ump_kernel_core.c \ - common/ump_kernel_descriptor_mapping.c \ - linux/ump_kernel_linux.c \ - linux/ump_kernel_linux_mem.c - -UNIT_TEST_DEFINES= -ifeq ($(MALI_UNIT_TEST), 1) - MALI_DEBUG ?= 1 - - UNIT_TEST_DEFINES = -DMALI_UNIT_TEST=1 \ - -DMALI_DEBUG=$(MALI_DEBUG) -endif - -# Use our defines when compiling -ccflags-y += -I$(UMP_PATH) -I$(UMP_DEVICEDRV_PATH) $(UNIT_TEST_DEFINES) - - -# Tell the Linux build system from which .o file to create the kernel module -obj-$(CONFIG_UMP) += ump.o -ifeq ($(CONFIG_ION),y) -ccflags-y += -I$(srctree)/drivers/staging/android/ion -I$(srctree)/include/linux -obj-$(CONFIG_UMP) += imports/ion/ump_kernel_import_ion.o -endif - -# Tell the Linux build system to enable building of our .c files -ump-y := $(SRC:.c=.o) diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/Makefile b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/Makefile deleted file mode 100755 index 45428adbdf77b..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/Makefile +++ /dev/null @@ -1,81 +0,0 @@ -# -# (C) COPYRIGHT 2008-2014 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -ifneq ($(KBUILD_EXTMOD),) -include $(KBUILD_EXTMOD)/Makefile.common -else -include ./Makefile.common -endif - -# default to building for the host -ARCH ?= $(shell uname -m) - -# linux build system integration -RELATIVE_ROOT=../../../../.. 
-ROOT = $(CURDIR)/$(RELATIVE_ROOT) - -EXTRA_CFLAGS=-I$(CURDIR)/../../../../include - -ifeq ($(MALI_UNIT_TEST),1) - EXTRA_CFLAGS += -DMALI_UNIT_TEST=$(MALI_UNIT_TEST) -endif - -# Get any user defined KDIR- or maybe even a hardcoded KDIR --include KDIR_CONFIGURATION - -# Define host system directory -KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build - -CONFIG ?= $(ARCH) - -# default cpu to select -CPU ?= $(shell uname -m) - -# look up KDIR based om CPU selection -KDIR ?= $(KDIR-$(CPU)) - -ifeq ($(KDIR),) -$(error No KDIR found for platform $(CPU)) -endif - -# Validate selected config -ifneq ($(shell [ -d arch-$(CONFIG) ] && [ -f arch-$(CONFIG)/config.h ] && echo "OK"), OK) -$(warning Current directory is $(shell pwd)) -$(error No configuration found for config $(CONFIG). Check that arch-$(CONFIG)/config.h exists) -else -# Link arch to the selected arch-config directory -$(shell [ -L arch ] && rm arch) -$(shell ln -sf arch-$(CONFIG) arch) -$(shell touch arch/config.h) -endif - -EXTRA_SYMBOLS= - -ifeq ($(MALI_UNIT_TEST),1) - KBASE_PATH=$(ROOT)/kernel/drivers/gpu/arm/midgard - EXTRA_SYMBOLS+=$(KBASE_PATH)/tests/internal/src/kernel_assert_module/linux/Module.symvers -endif -KDS_PATH=$(ROOT)/kernel/drivers/base/kds -EXTRA_SYMBOLS+=$(KDS_PATH)/Module.symvers - -all: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="$(EXTRA_CFLAGS) $(SCONS_CFLAGS)" CONFIG_UMP=m KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules - -kernelrelease: - $(MAKE) -C $(KDIR) KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" kernelrelease - -clean: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/Makefile.common b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/Makefile.common deleted file mode 100755 index f29a4c1cffa5a..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/Makefile.common +++ /dev/null @@ -1,19 +0,0 @@ -# -# (C) COPYRIGHT 2008-2010, 2013 ARM Limited. All rights reserved. 
-# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -SRC = $(UMP_FILE_PREFIX)/common/ump_kernel_core.c \ - $(UMP_FILE_PREFIX)/common/ump_kernel_descriptor_mapping.c - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/arch-arm/config.h b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/arch-arm/config.h deleted file mode 100755 index 152d98f38af86..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/arch-arm/config.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2009, 2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#ifndef __ARCH_CONFIG_H__ -#define __ARCH_CONFIG_H__ - -#define ARCH_UMP_BACKEND_DEFAULT 1 -#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0x00000000 -#define ARCH_UMP_MEMORY_SIZE_DEFAULT 32UL * 1024UL * 1024UL - -#endif /* __ARCH_CONFIG_H__ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/arch-arm64/config.h b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/arch-arm64/config.h deleted file mode 100755 index cfb14ca548769..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/arch-arm64/config.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2009, 2013-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef __ARCH_CONFIG_H__ -#define __ARCH_CONFIG_H__ - -#define ARCH_UMP_BACKEND_DEFAULT 1 -#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0x00000000 -#define ARCH_UMP_MEMORY_SIZE_DEFAULT 32UL * 1024UL * 1024UL - -#endif /* __ARCH_CONFIG_H__ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_core.c b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_core.c deleted file mode 100755 index 07aa07739f9fe..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_core.c +++ /dev/null @@ -1,756 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2013 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* module headers */ -#include -#include - -/* local headers */ -#include -#include -#include -#include - -#define UMP_FLAGS_RANGE ((UMP_PROT_SHAREABLE<<1) - 1u) - -static umpp_device device; - -ump_result umpp_core_constructor(void) -{ - mutex_init(&device.secure_id_map_lock); - device.secure_id_map = umpp_descriptor_mapping_create(UMP_EXPECTED_IDS, UMP_MAX_IDS); - if (NULL != device.secure_id_map) - { - if (UMP_OK == umpp_device_initialize()) - { - return UMP_OK; - } - umpp_descriptor_mapping_destroy(device.secure_id_map); - } - mutex_destroy(&device.secure_id_map_lock); - - return UMP_ERROR; -} - -void umpp_core_destructor(void) -{ - umpp_device_terminate(); - umpp_descriptor_mapping_destroy(device.secure_id_map); - mutex_destroy(&device.secure_id_map_lock); -} - -umpp_session *umpp_core_session_start(void) -{ - umpp_session * session; - - session = kzalloc(sizeof(*session), GFP_KERNEL); - if (NULL != session) - { - mutex_init(&session->session_lock); - - INIT_LIST_HEAD(&session->memory_usage); - - /* try to create import client session, not a failure if they fail to initialize */ - umpp_import_handlers_init(session); - } - - return session; -} - -void umpp_core_session_end(umpp_session *session) -{ - umpp_session_memory_usage * usage, *_usage; - UMP_ASSERT(session); - - list_for_each_entry_safe(usage, _usage, &session->memory_usage, link) - { - printk(KERN_WARNING "UMP: Memory usage cleanup, releasing secure ID %d\n", ump_dd_secure_id_get(usage->mem)); - ump_dd_release(usage->mem); - kfree(usage); - 
- } - - /* we should now not hold any imported memory objects, - * detatch all import handlers */ - umpp_import_handlers_term(session); - - mutex_destroy(&session->session_lock); - kfree(session); -} - -ump_dd_handle ump_dd_allocate_64(uint64_t size, ump_alloc_flags flags, ump_dd_security_filter filter_func, ump_dd_final_release_callback final_release_func, void* callback_data) -{ - umpp_allocation * alloc; - int i; - - UMP_ASSERT(size); - - if (flags & (~UMP_FLAGS_RANGE)) - { - printk(KERN_WARNING "UMP: allocation flags out of allowed bits range\n"); - return UMP_DD_INVALID_MEMORY_HANDLE; - } - - if( ( flags & (UMP_PROT_CPU_RD | UMP_PROT_W_RD | UMP_PROT_X_RD | UMP_PROT_Y_RD | UMP_PROT_Z_RD ) ) == 0 || - ( flags & (UMP_PROT_CPU_WR | UMP_PROT_W_WR | UMP_PROT_X_WR | UMP_PROT_Y_WR | UMP_PROT_Z_WR )) == 0 ) - { - printk(KERN_WARNING "UMP: allocation flags should have at least one read and one write permission bit set\n"); - return UMP_DD_INVALID_MEMORY_HANDLE; - } - - /*check permission flags to be set if hit flags are set too*/ - for (i = UMP_DEVICE_CPU_SHIFT; i<=UMP_DEVICE_Z_SHIFT; i+=4) - { - if (flags & (UMP_HINT_DEVICE_RD<flags = flags; - alloc->filter_func = filter_func; - alloc->final_release_func = final_release_func; - alloc->callback_data = callback_data; - alloc->size = size; - - mutex_init(&alloc->map_list_lock); - INIT_LIST_HEAD(&alloc->map_list); - atomic_set(&alloc->refcount, 1); - -#ifdef CONFIG_KDS - kds_resource_init(&alloc->kds_res); -#endif - - if (!(alloc->flags & UMP_PROT_SHAREABLE)) - { - alloc->owner = get_current()->pid; - } - - if (0 != umpp_phys_commit(alloc)) - { - goto out2; - } - - /* all set up, allocate an ID for it */ - - mutex_lock(&device.secure_id_map_lock); - alloc->id = umpp_descriptor_mapping_allocate(device.secure_id_map, (void*)alloc); - mutex_unlock(&device.secure_id_map_lock); - - if ((int)alloc->id == 0) - { - /* failed to allocate a secure_id */ - goto out3; - } - - return alloc; - -out3: - umpp_phys_free(alloc); -out2: - 
kfree(alloc); -out1: - return UMP_DD_INVALID_MEMORY_HANDLE; -} - -uint64_t ump_dd_size_get_64(const ump_dd_handle mem) -{ - umpp_allocation * alloc; - - UMP_ASSERT(mem); - - alloc = (umpp_allocation*)mem; - - return alloc->size; -} - -/* - * UMP v1 API - */ -unsigned long ump_dd_size_get(ump_dd_handle mem) -{ - umpp_allocation * alloc; - - UMP_ASSERT(mem); - - alloc = (umpp_allocation*)mem; - - UMP_ASSERT(alloc->flags & UMP_CONSTRAINT_32BIT_ADDRESSABLE); - UMP_ASSERT(alloc->size <= UMP_UINT32_MAX); - - return (unsigned long)alloc->size; -} - -ump_secure_id ump_dd_secure_id_get(const ump_dd_handle mem) -{ - umpp_allocation * alloc; - - UMP_ASSERT(mem); - - alloc = (umpp_allocation*)mem; - - return alloc->id; -} - -#ifdef CONFIG_KDS -struct kds_resource * ump_dd_kds_resource_get(const ump_dd_handle mem) -{ - umpp_allocation * alloc; - - UMP_ASSERT(mem); - - alloc = (umpp_allocation*)mem; - - return &alloc->kds_res; -} -#endif - -ump_alloc_flags ump_dd_allocation_flags_get(const ump_dd_handle mem) -{ - const umpp_allocation * alloc; - - UMP_ASSERT(mem); - alloc = (const umpp_allocation *)mem; - - return alloc->flags; -} - -ump_dd_handle ump_dd_from_secure_id(ump_secure_id secure_id) -{ - umpp_allocation * alloc = UMP_DD_INVALID_MEMORY_HANDLE; - - mutex_lock(&device.secure_id_map_lock); - - if (0 == umpp_descriptor_mapping_lookup(device.secure_id_map, secure_id, (void**)&alloc)) - { - if (NULL != alloc->filter_func) - { - if (!alloc->filter_func(secure_id, alloc, alloc->callback_data)) - { - alloc = UMP_DD_INVALID_MEMORY_HANDLE; /* the filter denied access */ - } - } - - /* check permission to access it */ - if ((UMP_DD_INVALID_MEMORY_HANDLE != alloc) && !(alloc->flags & UMP_PROT_SHAREABLE)) - { - if (alloc->owner != get_current()->pid) - { - alloc = UMP_DD_INVALID_MEMORY_HANDLE; /*no rights for the current process*/ - } - } - - if (UMP_DD_INVALID_MEMORY_HANDLE != alloc) - { - if( ump_dd_retain(alloc) != UMP_DD_SUCCESS) - { - alloc = UMP_DD_INVALID_MEMORY_HANDLE; - } - 
} - } - mutex_unlock(&device.secure_id_map_lock); - - return alloc; -} - -/* - * UMP v1 API - */ -ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id) -{ - return ump_dd_from_secure_id(secure_id); -} - -int ump_dd_retain(ump_dd_handle mem) -{ - umpp_allocation * alloc; - - UMP_ASSERT(mem); - - alloc = (umpp_allocation*)mem; - - /* check for overflow */ - while(1) - { - int refcnt = atomic_read(&alloc->refcount); - if (refcnt + 1 > 0) - { - if(atomic_cmpxchg(&alloc->refcount, refcnt, refcnt + 1) == refcnt) - { - return 0; - } - } - else - { - return -EBUSY; - } - } -} - -/* - * UMP v1 API - */ -void ump_dd_reference_add(ump_dd_handle mem) -{ - ump_dd_retain(mem); -} - - -void ump_dd_release(ump_dd_handle mem) -{ - umpp_allocation * alloc; - uint32_t new_cnt; - - UMP_ASSERT(mem); - - alloc = (umpp_allocation*)mem; - - /* secure the id for lookup while releasing */ - mutex_lock(&device.secure_id_map_lock); - - /* do the actual release */ - new_cnt = atomic_sub_return(1, &alloc->refcount); - if (0 == new_cnt) - { - /* remove from the table as this was the last ref */ - umpp_descriptor_mapping_remove(device.secure_id_map, alloc->id); - } - - /* release the lock as early as possible */ - mutex_unlock(&device.secure_id_map_lock); - - if (0 != new_cnt) - { - /* exit if still have refs */ - return; - } - - UMP_ASSERT(list_empty(&alloc->map_list)); - -#ifdef CONFIG_KDS - if (kds_resource_term(&alloc->kds_res)) - { - printk(KERN_ERR "ump_dd_release: kds_resource_term failed," - "unable to release UMP allocation\n"); - return; - } -#endif - /* cleanup */ - if (NULL != alloc->final_release_func) - { - alloc->final_release_func(alloc, alloc->callback_data); - } - - if (0 == (alloc->management_flags & UMP_MGMT_EXTERNAL)) - { - umpp_phys_free(alloc); - } - else - { - kfree(alloc->block_array); - } - - mutex_destroy(&alloc->map_list_lock); - - kfree(alloc); -} - -/* - * UMP v1 API - */ -void ump_dd_reference_release(ump_dd_handle mem) -{ - ump_dd_release(mem); 
-} - -void ump_dd_phys_blocks_get_64(const ump_dd_handle mem, uint64_t * const pCount, const ump_dd_physical_block_64 ** const pArray) -{ - const umpp_allocation * alloc; - UMP_ASSERT(pCount); - UMP_ASSERT(pArray); - UMP_ASSERT(mem); - alloc = (const umpp_allocation *)mem; - *pCount = alloc->blocksCount; - *pArray = alloc->block_array; -} - -/* - * UMP v1 API - */ -ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle mem, ump_dd_physical_block * const blocks, unsigned long num_blocks) -{ - const umpp_allocation * alloc; - unsigned long i; - UMP_ASSERT(mem); - UMP_ASSERT(blocks); - UMP_ASSERT(num_blocks); - - alloc = (const umpp_allocation *)mem; - - UMP_ASSERT(alloc->flags & UMP_CONSTRAINT_32BIT_ADDRESSABLE); - - if((uint64_t)num_blocks != alloc->blocksCount) - { - return UMP_DD_INVALID; - } - - for( i = 0; i < num_blocks; i++) - { - UMP_ASSERT(alloc->block_array[i].addr <= UMP_UINT32_MAX); - UMP_ASSERT(alloc->block_array[i].size <= UMP_UINT32_MAX); - - blocks[i].addr = (unsigned long)alloc->block_array[i].addr; - blocks[i].size = (unsigned long)alloc->block_array[i].size; - } - - return UMP_DD_SUCCESS; -} -/* - * UMP v1 API - */ -ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle mem, unsigned long index, ump_dd_physical_block * const block) -{ - const umpp_allocation * alloc; - UMP_ASSERT(mem); - UMP_ASSERT(block); - alloc = (const umpp_allocation *)mem; - - UMP_ASSERT(alloc->flags & UMP_CONSTRAINT_32BIT_ADDRESSABLE); - - UMP_ASSERT(alloc->block_array[index].addr <= UMP_UINT32_MAX); - UMP_ASSERT(alloc->block_array[index].size <= UMP_UINT32_MAX); - - block->addr = (unsigned long)alloc->block_array[index].addr; - block->size = (unsigned long)alloc->block_array[index].size; - - return UMP_DD_SUCCESS; -} - -/* - * UMP v1 API - */ -unsigned long ump_dd_phys_block_count_get(ump_dd_handle mem) -{ - const umpp_allocation * alloc; - UMP_ASSERT(mem); - alloc = (const umpp_allocation *)mem; - - UMP_ASSERT(alloc->flags & UMP_CONSTRAINT_32BIT_ADDRESSABLE); - 
UMP_ASSERT(alloc->blocksCount <= UMP_UINT32_MAX); - - return (unsigned long)alloc->blocksCount; -} - -umpp_cpu_mapping * umpp_dd_find_enclosing_mapping(umpp_allocation * alloc, void *uaddr, size_t size) -{ - umpp_cpu_mapping *map; - - void *target_first = uaddr; - void *target_last = (void*)((uintptr_t)uaddr - 1 + size); - - if (target_last < target_first) /* wrapped */ - { - return NULL; - } - - mutex_lock(&alloc->map_list_lock); - list_for_each_entry(map, &alloc->map_list, link) - { - if ( map->vaddr_start <= target_first && - (void*)((uintptr_t)map->vaddr_start + (map->nr_pages << PAGE_SHIFT) - 1) >= target_last) - { - goto out; - } - } - map = NULL; -out: - mutex_unlock(&alloc->map_list_lock); - - return map; -} - -void umpp_dd_add_cpu_mapping(umpp_allocation * alloc, umpp_cpu_mapping * map) -{ - UMP_ASSERT(alloc); - UMP_ASSERT(map); - mutex_lock(&alloc->map_list_lock); - list_add(&map->link, &alloc->map_list); - mutex_unlock(&alloc->map_list_lock); -} - -void umpp_dd_remove_cpu_mapping(umpp_allocation * alloc, umpp_cpu_mapping * target) -{ - umpp_cpu_mapping * map; - - UMP_ASSERT(alloc); - UMP_ASSERT(target); - - mutex_lock(&alloc->map_list_lock); - list_for_each_entry(map, &alloc->map_list, link) - { - if (map == target) - { - list_del(&target->link); - kfree(target); - mutex_unlock(&alloc->map_list_lock); - return; - } - } - - /* not found, error */ - UMP_ASSERT(0); -} - -int umpp_dd_find_start_block(const umpp_allocation * alloc, uint64_t offset, uint64_t * const block_index, uint64_t * const block_internal_offset) -{ - uint64_t i; - - for (i = 0 ; i < alloc->blocksCount; i++) - { - if (offset < alloc->block_array[i].size) - { - /* found the block_array element containing this offset */ - *block_index = i; - *block_internal_offset = offset; - return 0; - } - offset -= alloc->block_array[i].size; - } - - return -ENXIO; -} - -void umpp_dd_cpu_msync_now(ump_dd_handle mem, ump_cpu_msync_op op, void * address, size_t size) -{ - umpp_allocation * alloc; - void 
*vaddr; - umpp_cpu_mapping * mapping; - uint64_t virt_page_off; /* offset of given address from beginning of the virtual mapping */ - uint64_t phys_page_off; /* offset of the virtual mapping from the beginning of the physical buffer */ - uint64_t page_count; /* number of pages to sync */ - uint64_t i; - uint64_t block_idx; - uint64_t block_offset; - uint64_t paddr; - - UMP_ASSERT((UMP_MSYNC_CLEAN == op) || (UMP_MSYNC_CLEAN_AND_INVALIDATE == op)); - - alloc = (umpp_allocation*)mem; - vaddr = (void*)(uintptr_t)address; - - if((alloc->flags & UMP_CONSTRAINT_UNCACHED) != 0) - { - /* mapping is not cached */ - return; - } - - mapping = umpp_dd_find_enclosing_mapping(alloc, vaddr, size); - if (NULL == mapping) - { - printk(KERN_WARNING "UMP: Illegal cache sync address %lx\n", (uintptr_t)vaddr); - return; /* invalid pointer or size causes out-of-bounds */ - } - - /* we already know that address + size don't wrap around as umpp_dd_find_enclosing_mapping didn't fail */ - page_count = ((((((uintptr_t)address + size - 1) & PAGE_MASK) - ((uintptr_t)address & PAGE_MASK))) >> PAGE_SHIFT) + 1; - virt_page_off = (vaddr - mapping->vaddr_start) >> PAGE_SHIFT; - phys_page_off = mapping->page_off; - - if (umpp_dd_find_start_block(alloc, (virt_page_off + phys_page_off) << PAGE_SHIFT, &block_idx, &block_offset)) - { - /* should not fail as a valid mapping was found, so the phys mem must exists */ - printk(KERN_WARNING "UMP: Unable to find physical start block with offset %llx\n", virt_page_off + phys_page_off); - UMP_ASSERT(0); - return; - } - - paddr = alloc->block_array[block_idx].addr + block_offset + (((uintptr_t)vaddr) & ((1u << PAGE_SHIFT)-1)); - - for (i = 0; i < page_count; i++) - { - size_t offset = ((uintptr_t)vaddr) & ((1u << PAGE_SHIFT)-1); - size_t sz = min((size_t)PAGE_SIZE - offset, size); - - /* check if we've overrrun the current block, if so move to the next block */ - if (paddr >= (alloc->block_array[block_idx].addr + alloc->block_array[block_idx].size)) - { - 
block_idx++; - UMP_ASSERT(block_idx < alloc->blocksCount); - paddr = alloc->block_array[block_idx].addr; - } - - if (UMP_MSYNC_CLEAN == op) - { - ump_sync_to_memory(paddr, vaddr, sz); - } - else /* (UMP_MSYNC_CLEAN_AND_INVALIDATE == op) already validated on entry */ - { - ump_sync_to_cpu(paddr, vaddr, sz); - } - - /* advance to next page */ - vaddr = (void*)((uintptr_t)vaddr + sz); - size -= sz; - paddr += sz; - } -} - -UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_create_from_phys_blocks_64(const ump_dd_physical_block_64 * blocks, uint64_t num_blocks, ump_alloc_flags flags, ump_dd_security_filter filter_func, ump_dd_final_release_callback final_release_func, void* callback_data) -{ - uint64_t size = 0; - uint64_t i; - umpp_allocation * alloc; - - UMP_ASSERT(blocks); - UMP_ASSERT(num_blocks); - - for (i = 0; i < num_blocks; i++) - { - size += blocks[i].size; - } - UMP_ASSERT(size); - - if (flags & (~UMP_FLAGS_RANGE)) - { - printk(KERN_WARNING "UMP: allocation flags out of allowed bits range\n"); - return UMP_DD_INVALID_MEMORY_HANDLE; - } - - if( ( flags & (UMP_PROT_CPU_RD | UMP_PROT_W_RD | UMP_PROT_X_RD | UMP_PROT_Y_RD | UMP_PROT_Z_RD - | UMP_PROT_CPU_WR | UMP_PROT_W_WR | UMP_PROT_X_WR | UMP_PROT_Y_WR | UMP_PROT_Z_WR )) == 0 ) - { - printk(KERN_WARNING "UMP: allocation flags should have at least one read or write permission bit set\n"); - return UMP_DD_INVALID_MEMORY_HANDLE; - } - - /*check permission flags to be set if hit flags are set too*/ - for (i = UMP_DEVICE_CPU_SHIFT; i<=UMP_DEVICE_Z_SHIFT; i+=4) - { - if (flags & (UMP_HINT_DEVICE_RD<block_array = kzalloc(sizeof(ump_dd_physical_block_64) * num_blocks,__GFP_HARDWALL | GFP_KERNEL); - if (NULL == alloc->block_array) - { - goto out2; - } - - memcpy(alloc->block_array, blocks, sizeof(ump_dd_physical_block_64) * num_blocks); - -#ifdef CONFIG_KDS - kds_resource_init(&alloc->kds_res); -#endif - alloc->size = size; - alloc->blocksCount = num_blocks; - alloc->flags = flags; - alloc->filter_func = filter_func; - 
alloc->final_release_func = final_release_func; - alloc->callback_data = callback_data; - - if (!(alloc->flags & UMP_PROT_SHAREABLE)) - { - alloc->owner = get_current()->pid; - } - - mutex_init(&alloc->map_list_lock); - INIT_LIST_HEAD(&alloc->map_list); - atomic_set(&alloc->refcount, 1); - - /* all set up, allocate an ID */ - - mutex_lock(&device.secure_id_map_lock); - alloc->id = umpp_descriptor_mapping_allocate(device.secure_id_map, (void*)alloc); - mutex_unlock(&device.secure_id_map_lock); - - if ((int)alloc->id == 0) - { - /* failed to allocate a secure_id */ - goto out3; - } - - alloc->management_flags |= UMP_MGMT_EXTERNAL; - - return alloc; - -out3: - kfree(alloc->block_array); -out2: - kfree(alloc); -out1: - return UMP_DD_INVALID_MEMORY_HANDLE; -} - - -/* - * UMP v1 API - */ -UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks) -{ - ump_dd_handle mem; - ump_dd_physical_block_64 *block_64_array; - ump_alloc_flags flags = UMP_V1_API_DEFAULT_ALLOCATION_FLAGS; - unsigned long i; - - UMP_ASSERT(blocks); - UMP_ASSERT(num_blocks); - - block_64_array = kzalloc(num_blocks * sizeof(*block_64_array), __GFP_HARDWALL | GFP_KERNEL); - - if(block_64_array == NULL) - { - return UMP_DD_INVALID_MEMORY_HANDLE; - } - - /* copy physical blocks */ - for( i = 0; i < num_blocks; i++) - { - block_64_array[i].addr = blocks[i].addr; - block_64_array[i].size = blocks[i].size; - } - - mem = ump_dd_create_from_phys_blocks_64(block_64_array, num_blocks, flags, NULL, NULL, NULL); - - kfree(block_64_array); - - return mem; - -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_core.h b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_core.h deleted file mode 100755 index 8cb424d2893a3..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_core.h +++ /dev/null @@ -1,228 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2013 ARM Limited. 
All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef _UMP_KERNEL_CORE_H_ -#define _UMP_KERNEL_CORE_H_ - - -#include -#include -#include -#include -#include -#include -#include - -#ifdef CONFIG_KDS -#include -#endif -#include -#include - -/* forward decl */ -struct umpp_session; - -/** - * UMP handle metadata. - * Tracks various data about a handle not of any use to user space - */ -typedef enum -{ - UMP_MGMT_EXTERNAL = (1ul << 0) /**< Handle created via the ump_dd_create_from_phys_blocks interface */ - /* (1ul << 31) not to be used */ -} umpp_management_flags; - -/** - * Structure tracking the single global UMP device. - * Holds global data like the ID map - */ -typedef struct umpp_device -{ - struct mutex secure_id_map_lock; /**< Lock protecting access to the map */ - umpp_descriptor_mapping * secure_id_map; /**< Map of all known secure IDs on the system */ -} umpp_device; - -/** - * Structure tracking all memory allocations of a UMP allocation. - * Tracks info about an mapping so we can verify cache maintenace - * operations and help in the unmap cleanup. 
- */ -typedef struct umpp_cpu_mapping -{ - struct list_head link; /**< link to list of mappings for an allocation */ - void *vaddr_start; /**< CPU VA start of the mapping */ - size_t nr_pages; /**< Size (in pages) of the mapping */ - uint64_t page_off; /**< Offset (in pages) from start of the allocation where the mapping starts */ - ump_dd_handle handle; /**< Which handle this mapping is linked to */ - struct umpp_session * session; /**< Which session created the mapping */ -} umpp_cpu_mapping; - -/** - * Structure tracking UMP allocation. - * Represent a memory allocation with its ID. - * Tracks all needed meta-data about an allocation. - * */ -typedef struct umpp_allocation -{ - ump_secure_id id; /**< Secure ID of the allocation */ - atomic_t refcount; /**< Usage count */ - - ump_alloc_flags flags; /**< Flags for all supported devices */ - uint32_t management_flags; /**< Managment flags tracking */ - - pid_t owner; /**< The process ID owning the memory if not sharable */ - - ump_dd_security_filter filter_func; /**< Hook to verify use, called during retains from new clients */ - ump_dd_final_release_callback final_release_func; /**< Hook called when the last reference is removed */ - void* callback_data; /**< Additional data given to release hook */ - - uint64_t size; /**< Size (in bytes) of the allocation */ - uint64_t blocksCount; /**< Number of physsical blocks the allocation is built up of */ - ump_dd_physical_block_64 * block_array; /**< Array, one entry per block, describing block start and length */ - - struct mutex map_list_lock; /**< Lock protecting the map_list */ - struct list_head map_list; /**< Tracks all CPU VA mappings of this allocation */ - -#ifdef CONFIG_KDS - struct kds_resource kds_res; /**< The KDS resource controlling access to this allocation */ -#endif - - void * backendData; /**< Physical memory backend meta-data */ -} umpp_allocation; - -/** - * Structure tracking use of UMP memory by a session. 
- * Tracks the use of an allocation by a session so session termination can clean up any outstanding references. - * Also protects agains non-matched release calls from user space. - */ -typedef struct umpp_session_memory_usage -{ - ump_secure_id id; /**< ID being used. For quick look-up */ - ump_dd_handle mem; /**< Handle being used. */ - - /** - * Track how many times has the process retained this handle in the kernel. - * This should usually just be 1(allocated or resolved) or 2(mapped), - * but could be more if someone is playing with the low-level API - * */ - atomic_t process_usage_count; - - struct list_head link; /**< link to other usage trackers for a session */ -} umpp_session_memory_usage; - -/** - * Structure representing a session/client. - * Tracks the UMP allocations being used by this client. - */ -typedef struct umpp_session -{ - struct mutex session_lock; /**< Lock for memory usage manipulation */ - struct list_head memory_usage; /**< list of memory currently being used by the this session */ - void* import_handler_data[UMPP_EXTERNAL_MEM_COUNT]; /**< Import modules per-session data pointer */ -} umpp_session; - -/** - * UMP core setup. - * Called by any OS specific startup function to initialize the common part. - * @return UMP_OK if core initialized correctly, any other value for errors - */ -ump_result umpp_core_constructor(void); - -/** - * UMP core teardown. - * Called by any OS specific unload function to clean up the common part. - */ -void umpp_core_destructor(void); - -/** - * UMP session start. - * Called by any OS specific session handler when a new session is detected - * @return Non-NULL if a matching core session could be set up. NULL on failure - */ -umpp_session *umpp_core_session_start(void); - -/** - * UMP session end. - * Called by any OS specific session handler when a session is ended/terminated. 
- * @param session The core session object returned by ump_core_session_start - */ -void umpp_core_session_end(umpp_session *session); - -/** - * Find a mapping object (if any) for this allocation. - * Called by any function needing to identify a mapping from a user virtual address. - * Verifies that the whole range to be within a mapping object. - * @param alloc The UMP allocation to find a matching mapping object of - * @param uaddr User mapping address to find the mapping object for - * @param size Length of the mapping - * @return NULL on error (no match found), pointer to mapping object if match found - */ -umpp_cpu_mapping * umpp_dd_find_enclosing_mapping(umpp_allocation * alloc, void* uaddr, size_t size); - -/** - * Register a new mapping of an allocation. - * Called by functions creating a new mapping of an allocation, typically OS specific handlers. - * @param alloc The allocation object which has been mapped - * @param map Info about the mapping - */ -void umpp_dd_add_cpu_mapping(umpp_allocation * alloc, umpp_cpu_mapping * map); - -/** - * Remove and free mapping object from an allocation. - * @param alloc The allocation object to remove the mapping info from - * @param target The mapping object to remove - */ -void umpp_dd_remove_cpu_mapping(umpp_allocation * alloc, umpp_cpu_mapping * target); - -/** - * Helper to find a block in the blockArray which holds a given byte offset. - * @param alloc The allocation object to find the block in - * @param offset Offset (in bytes) from allocation start to find the block of - * @param[out] block_index Pointer to the index of the block matching - * @param[out] block_internal_offset Offset within the returned block of the searched offset - * @return 0 if a matching block was found, any other value for error - */ -int umpp_dd_find_start_block(const umpp_allocation * alloc, uint64_t offset, uint64_t * const block_index, uint64_t * const block_internal_offset); - -/** - * Cache maintenance helper. 
- * Performs the requested cache operation on the given handle. - * @param mem Allocation handle - * @param op Cache maintenance operation to perform - * @param address User mapping at which to do the operation - * @param size Length (in bytes) of the range to do the operation on - */ -void umpp_dd_cpu_msync_now(ump_dd_handle mem, ump_cpu_msync_op op, void * address, size_t size); - -/** - * Import module session early init. - * Calls session_begin on all installed import modules. - * @param session The core session object to initialized the import handler for - * */ -void umpp_import_handlers_init(umpp_session * session); - -/** - * Import module session cleanup. - * Calls session_end on all import modules bound to the session. - * @param session The core session object to initialized the import handler for - */ -void umpp_import_handlers_term(umpp_session * session); - -#endif /* _UMP_KERNEL_CORE_H_ */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_descriptor_mapping.c b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_descriptor_mapping.c deleted file mode 100755 index c5b0d74400813..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_descriptor_mapping.c +++ /dev/null @@ -1,162 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - - -#include -#include - -#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1)) - -/** - * Allocate a descriptor table capable of holding 'count' mappings - * @param count Number of mappings in the table - * @return Pointer to a new table, NULL on error - */ -static umpp_descriptor_table * descriptor_table_alloc(unsigned int count); - -/** - * Free a descriptor table - * @param table The table to free - */ -static void descriptor_table_free(umpp_descriptor_table * table); - -umpp_descriptor_mapping * umpp_descriptor_mapping_create(unsigned int init_entries, unsigned int max_entries) -{ - umpp_descriptor_mapping * map = kzalloc(sizeof(umpp_descriptor_mapping), GFP_KERNEL); - - init_entries = MALI_PAD_INT(init_entries); - max_entries = MALI_PAD_INT(max_entries); - - if (NULL != map) - { - map->table = descriptor_table_alloc(init_entries); - if (NULL != map->table) - { - init_rwsem( &map->lock); - set_bit(0, map->table->usage); - map->max_nr_mappings_allowed = max_entries; - map->current_nr_mappings = init_entries; - return map; - - descriptor_table_free(map->table); - } - kfree(map); - } - return NULL; -} - -void umpp_descriptor_mapping_destroy(umpp_descriptor_mapping * map) -{ - UMP_ASSERT(NULL != map); - descriptor_table_free(map->table); - kfree(map); -} - -unsigned int umpp_descriptor_mapping_allocate(umpp_descriptor_mapping * map, void * target) -{ - int descriptor = 0; - UMP_ASSERT(NULL != map); - down_write( &map->lock); - descriptor = find_first_zero_bit(map->table->usage, map->current_nr_mappings); - if (descriptor == map->current_nr_mappings) - { - /* no free descriptor, try to expand the table */ - umpp_descriptor_table * new_table; - umpp_descriptor_table * old_table = map->table; - int nr_mappings_new = map->current_nr_mappings + BITS_PER_LONG; - - if (map->current_nr_mappings >= map->max_nr_mappings_allowed) - { - descriptor = 0; - goto unlock_and_exit; - } - - new_table = descriptor_table_alloc(nr_mappings_new); 
- if (NULL == new_table) - { - descriptor = 0; - goto unlock_and_exit; - } - - memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG); - memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*)); - - map->table = new_table; - map->current_nr_mappings = nr_mappings_new; - descriptor_table_free(old_table); - } - - /* we have found a valid descriptor, set the value and usage bit */ - set_bit(descriptor, map->table->usage); - map->table->mappings[descriptor] = target; - -unlock_and_exit: - up_write(&map->lock); - return descriptor; -} - -int umpp_descriptor_mapping_lookup(umpp_descriptor_mapping * map, unsigned int descriptor, void** const target) -{ - int result = -EINVAL; - UMP_ASSERT(map); - UMP_ASSERT(target); - down_read(&map->lock); - if ( (descriptor > 0) && (descriptor < map->current_nr_mappings) && test_bit(descriptor, map->table->usage) ) - { - *target = map->table->mappings[descriptor]; - result = 0; - } - /* keep target untouched if the descriptor was not found */ - up_read(&map->lock); - return result; -} - -void umpp_descriptor_mapping_remove(umpp_descriptor_mapping * map, unsigned int descriptor) -{ - UMP_ASSERT(map); - down_write(&map->lock); - if ( (descriptor > 0) && (descriptor < map->current_nr_mappings) && test_bit(descriptor, map->table->usage) ) - { - map->table->mappings[descriptor] = NULL; - clear_bit(descriptor, map->table->usage); - } - up_write(&map->lock); -} - -static umpp_descriptor_table * descriptor_table_alloc(unsigned int count) -{ - umpp_descriptor_table * table; - - table = kzalloc(sizeof(umpp_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count), __GFP_HARDWALL | GFP_KERNEL ); - - if (NULL != table) - { - table->usage = (unsigned long*)((u8*)table + sizeof(umpp_descriptor_table)); - table->mappings = (void**)((u8*)table + sizeof(umpp_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG)); - } 
- - return table; -} - -static void descriptor_table_free(umpp_descriptor_table * table) -{ - UMP_ASSERT(table); - kfree(table); -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_descriptor_mapping.h b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_descriptor_mapping.h deleted file mode 100755 index d06c1455371ae..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_descriptor_mapping.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/** - * @file ump_kernel_descriptor_mapping.h - */ - -#ifndef _UMP_KERNEL_DESCRIPTOR_MAPPING_H_ -#define _UMP_KERNEL_DESCRIPTOR_MAPPING_H_ - -#include -#include -/** - * The actual descriptor mapping table, never directly accessed by clients - */ -typedef struct umpp_descriptor_table -{ - /* keep as a unsigned long to rely on the OS's bitops support */ - unsigned long * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used(1) or not(0) */ - void** mappings; /**< Array of the pointers the descriptors map to */ -} umpp_descriptor_table; - -/** - * The descriptor mapping object - * Provides a separate namespace where we can map an integer to a pointer - */ -typedef struct umpp_descriptor_mapping -{ - struct rw_semaphore lock; /**< Lock protecting access to the mapping object */ - unsigned int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */ - unsigned int current_nr_mappings; /**< Current number of possible mappings */ - umpp_descriptor_table * table; /**< Pointer to the current mapping table */ -} umpp_descriptor_mapping; - -/** - * Create a descriptor mapping object. - * Create a descriptor mapping capable of holding init_entries growable to max_entries. - * ID 0 is reserved so the number of available entries will be max - 1. - * @param init_entries Number of entries to preallocate memory for - * @param max_entries Number of entries to max support - * @return Pointer to a descriptor mapping object, NULL on failure - */ -umpp_descriptor_mapping * umpp_descriptor_mapping_create(unsigned int init_entries, unsigned int max_entries); - -/** - * Destroy a descriptor mapping object - * @param[in] map The map to free - */ -void umpp_descriptor_mapping_destroy(umpp_descriptor_mapping * map); - -/** - * Allocate a new mapping entry (descriptor ID) - * Allocates a new entry in the map. 
- * @param[in] map The map to allocate a new entry in - * @param[in] target The value to map to - * @return The descriptor allocated, ID 0 on failure. - */ -unsigned int umpp_descriptor_mapping_allocate(umpp_descriptor_mapping * map, void * target); - -/** - * Get the value mapped to by a descriptor ID - * @param[in] map The map to lookup the descriptor id in - * @param[in] descriptor The descriptor ID to lookup - * @param[out] target Pointer to a pointer which will receive the stored value - * - * @return 0 on success lookup, -EINVAL on lookup failure. - */ -int umpp_descriptor_mapping_lookup(umpp_descriptor_mapping * map, unsigned int descriptor, void** const target); - -/** - * Free the descriptor ID - * For the descriptor to be reused it has to be freed - * @param[in] map The map to free the descriptor from - * @param descriptor The descriptor ID to free - */ -void umpp_descriptor_mapping_remove(umpp_descriptor_mapping * map, unsigned int descriptor); - -#endif /* _UMP_KERNEL_DESCRIPTOR_MAPPING_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_priv.h b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_priv.h deleted file mode 100755 index 38b6f1b197fb5..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/common/ump_kernel_priv.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#ifndef _UMP_KERNEL_PRIV_H_ -#define _UMP_KERNEL_PRIV_H_ - -#ifdef __KERNEL__ -#include -#include -#include -#include -#endif - - -#define UMP_EXPECTED_IDS 64 -#define UMP_MAX_IDS 32768 - -#ifdef __KERNEL__ -#define UMP_ASSERT(expr) \ - if (!(expr)) { \ - printk(KERN_ERR "UMP: Assertion failed! %s,%s,%s,line=%d\n",\ - #expr,__FILE__,__func__,__LINE__); \ - BUG(); \ - } - -static inline void ump_sync_to_memory(uint64_t paddr, void* vaddr, size_t sz) -{ -#ifdef CONFIG_ARM - __cpuc_flush_dcache_area(vaddr, sz); - outer_flush_range(paddr, paddr+sz); -#elif defined(CONFIG_ARM64) - /*TODO (MID64-46): There's no other suitable cache flush function for ARM64 */ - flush_cache_all(); -#elif defined(CONFIG_X86) - struct scatterlist scl = {0, }; - sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz, - paddr & (PAGE_SIZE -1 )); - dma_sync_sg_for_cpu(NULL, &scl, 1, DMA_TO_DEVICE); - mb(); /* for outer_sync (if needed) */ -#else -#error Implement cache maintenance for your architecture here -#endif -} - -static inline void ump_sync_to_cpu(uint64_t paddr, void* vaddr, size_t sz) -{ -#ifdef CONFIG_ARM - __cpuc_flush_dcache_area(vaddr, sz); - outer_flush_range(paddr, paddr+sz); -#elif defined(CONFIG_ARM64) - /* TODO (MID64-46): There's no other suitable cache flush function for ARM64 */ - flush_cache_all(); -#elif defined(CONFIG_X86) - struct scatterlist scl = {0, }; - sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz, - paddr & (PAGE_SIZE -1 )); - dma_sync_sg_for_cpu(NULL, &scl, 1, DMA_FROM_DEVICE); -#else -#error Implement cache maintenance for your architecture here -#endif -} -#endif /* __KERNEL__*/ -#endif /* _UMP_KERNEL_PRIV_H_ */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/imports/ion/Makefile b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/imports/ion/Makefile deleted file mode 100755 index ef74b273f7ad3..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/imports/ion/Makefile +++ /dev/null @@ -1,53 +0,0 @@ -# -# 
(C) COPYRIGHT 2011, 2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - - -# default to building for the host -ARCH ?= $(shell uname -m) - -# linux build system integration - -ifneq ($(KERNELRELEASE),) -# Inside the kernel build system - -EXTRA_CFLAGS += -I$(KBUILD_EXTMOD) -I$(KBUILD_EXTMOD)/../../../../.. -KBUILD_EXTRA_SYMBOLS += "$(KBUILD_EXTMOD)/../../Module.symvers" - -SRC += ump_kernel_import_ion.c - -MODULE:=ump_ion_import.ko - -obj-m := $(MODULE:.ko=.o) -$(MODULE:.ko=-y) := $(SRC:.c=.o) -$(MODULE:.ko=-objs) := $(SRC:.c=.o) - -else -# Outside the kernel build system -# -# - -ifeq ($(KDIR),) -$(error Must specify KDIR to point to the kernel to target)) -endif - -all: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) - -clean: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean - -endif - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/imports/ion/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/imports/ion/sconscript deleted file mode 100755 index 749bb1f541f0e..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/imports/ion/sconscript +++ /dev/null @@ -1,52 +0,0 @@ -# -# (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. 
-# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -import os -Import('env') - -# Clone the environment so changes don't affect other build files -env_ion = env.Clone() - -if env_ion['ump_ion'] != '1': - Return() - -if env_ion['v'] != '1': - env_ion['MAKECOMSTR'] = '[MAKE] ${SOURCE.dir}' - -# Source files required for UMP. -ion_src = [Glob('#kernel/drivers/base/ump/src/imports/ion/*.c')] - -# Note: cleaning via the Linux kernel build system does not yet work -if env_ion.GetOption('clean') : - makeAction=Action("cd ${SOURCE.dir} && make clean", '$MAKECOMSTR') -else: - makeAction=Action("cd ${SOURCE.dir} && make PLATFORM=${platform} && cp ump_ion_import.ko $STATIC_LIB_PATH/ump_ion_import.ko", '$MAKECOMSTR') -# The target is ump_import_ion.ko, built from the source in ion_src, via the action makeAction -# ump_import_ion.ko will be copied to $STATIC_LIB_PATH after being built by the standard Linux -# kernel build system, after which it can be installed to the directory specified if -# "libs_install" is set; this is done by LibTarget. -cmd = env_ion.Command('$STATIC_LIB_PATH/ump_ion_import.ko', ion_src, [makeAction]) - -# Until we fathom out how the invoke the Linux build system to clean, we can use Clean -# to remove generated files. 
- -patterns = ['*.mod.c', '*.o', '*.ko', '*.a', '.*.cmd', 'modules.order', '.tmp_versions', 'Module.symvers'] - -for p in patterns: - Clean(cmd, Glob('#kernel/drivers/base/ump/src/imports/ion/%s' % p)) - -env_ion.Depends('$STATIC_LIB_PATH/ump_ion_import.ko', '$STATIC_LIB_PATH/ump.ko') -env_ion.ProgTarget('ump', cmd) diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/imports/ion/ump_kernel_import_ion.c b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/imports/ion/ump_kernel_import_ion.c deleted file mode 100755 index 12b2e325b45ea..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/imports/ion/ump_kernel_import_ion.c +++ /dev/null @@ -1,204 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#include -#include -#include "ion.h" -#include -#include -#include - -struct ion_wrapping_info -{ - struct ion_client * ion_client; - struct ion_handle * ion_handle; - int num_phys_blocks; - struct scatterlist * sglist; -}; - -static struct ion_device * ion_device_get(void) -{ - /* < Customer to provide implementation > - * Return a pointer to the global ion_device on the system - */ - return NULL; -} - -static int import_ion_client_create(void** const custom_session_data) -{ - struct ion_client ** ion_client; - - ion_client = (struct ion_client**)custom_session_data; - - *ion_client = ion_client_create(ion_device_get(), "ump"); - - return PTR_RET(*ion_client); -} - - -static void import_ion_client_destroy(void* custom_session_data) -{ - struct ion_client * ion_client; - - ion_client = (struct ion_client*)custom_session_data; - BUG_ON(!ion_client); - - ion_client_destroy(ion_client); -} - - -static void import_ion_final_release_callback(const ump_dd_handle handle, void * info) -{ - struct ion_wrapping_info * ion_info; - - BUG_ON(!info); - - (void)handle; - ion_info = (struct ion_wrapping_info*)info; - - dma_unmap_sg(NULL, ion_info->sglist, ion_info->num_phys_blocks, DMA_BIDIRECTIONAL); - - ion_free(ion_info->ion_client, ion_info->ion_handle); - kfree(ion_info); - module_put(THIS_MODULE); -} - -static ump_dd_handle import_ion_import(void * custom_session_data, void * pfd, ump_alloc_flags flags) -{ - int fd; - ump_dd_handle ump_handle; - struct scatterlist * sg; - int num_dma_blocks; - ump_dd_physical_block_64 * phys_blocks; - unsigned long i; - struct sg_table * sgt; - - struct ion_wrapping_info * ion_info; - - BUG_ON(!custom_session_data); - BUG_ON(!pfd); - - ion_info = kzalloc(GFP_KERNEL, sizeof(*ion_info)); - if (NULL == ion_info) - { - return UMP_DD_INVALID_MEMORY_HANDLE; - } - - ion_info->ion_client = (struct ion_client*)custom_session_data; - - if (get_user(fd, (int*)pfd)) - { - goto out; - } - - ion_info->ion_handle = 
ion_import_dma_buf(ion_info->ion_client, fd); - - if (IS_ERR_OR_NULL(ion_info->ion_handle)) - { - goto out; - } - - sgt = ion_sg_table(ion_info->ion_client, ion_info->ion_handle); - if (IS_ERR_OR_NULL(sgt)) - { - goto ion_dma_map_failed; - } - - ion_info->sglist = sgt->sgl; - - sg = ion_info->sglist; - while (sg) - { - ion_info->num_phys_blocks++; - sg = sg_next(sg); - } - - num_dma_blocks = dma_map_sg(NULL, ion_info->sglist, ion_info->num_phys_blocks, DMA_BIDIRECTIONAL); - - if (0 == num_dma_blocks) - { - goto linux_dma_map_failed; - } - - phys_blocks = vmalloc(num_dma_blocks * sizeof(*phys_blocks)); - if (NULL == phys_blocks) - { - goto vmalloc_failed; - } - - for_each_sg(ion_info->sglist, sg, num_dma_blocks, i) - { - phys_blocks[i].addr = sg_phys(sg); - phys_blocks[i].size = sg_dma_len(sg); - } - - ump_handle = ump_dd_create_from_phys_blocks_64(phys_blocks, num_dma_blocks, flags, NULL, import_ion_final_release_callback, ion_info); - - vfree(phys_blocks); - - if (ump_handle != UMP_DD_INVALID_MEMORY_HANDLE) - { - /* - * As we have a final release callback installed - * we must keep the module locked until - * the callback has been triggered - * */ - __module_get(THIS_MODULE); - return ump_handle; - } - - /* failed*/ -vmalloc_failed: - dma_unmap_sg(NULL, ion_info->sglist, ion_info->num_phys_blocks, DMA_BIDIRECTIONAL); -linux_dma_map_failed: -ion_dma_map_failed: - ion_free(ion_info->ion_client, ion_info->ion_handle); -out: - kfree(ion_info); - return UMP_DD_INVALID_MEMORY_HANDLE; -} - -struct ump_import_handler import_handler_ion = -{ - .linux_module = THIS_MODULE, - .session_begin = import_ion_client_create, - .session_end = import_ion_client_destroy, - .import = import_ion_import -}; - -static int __init import_ion_initialize_module(void) -{ - /* register with UMP */ - return ump_import_module_register(UMP_EXTERNAL_MEM_TYPE_ION, &import_handler_ion); -} - -static void __exit import_ion_cleanup_module(void) -{ - /* unregister import handler */ - 
ump_import_module_unregister(UMP_EXTERNAL_MEM_TYPE_ION); -} - -/* Setup init and exit functions for this module */ -module_init(import_ion_initialize_module); -module_exit(import_ion_cleanup_module); - -/* And some module information */ -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("ARM Ltd."); -MODULE_VERSION("1.0"); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/imports/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/imports/sconscript deleted file mode 100755 index 8f506839d1d20..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/imports/sconscript +++ /dev/null @@ -1,25 +0,0 @@ -# -# (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -import os, sys -Import('env') - -import_modules = [ os.path.join( path, 'sconscript' ) for path in sorted(os.listdir( os.getcwd() )) ] - -for m in import_modules: - if os.path.exists(m): - SConscript( m, variant_dir=os.path.join( env['BUILD_DIR_PATH'], os.path.dirname(m) ), duplicate=0 ) - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/linux/ump_kernel_linux.c b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/linux/ump_kernel_linux.c deleted file mode 100755 index d6c3c53549076..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/linux/ump_kernel_linux.c +++ /dev/null @@ -1,831 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2014 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#include -#include - -#include /* copy_*_user */ -#include -#include /* kernel module definitions */ -#include /* file system operations */ -#include /* character device definitions */ -#include /* request_mem_region */ -#include /* class registration support */ - -#include - -#include "ump_kernel_linux_mem.h" -#include - - -struct ump_linux_device -{ - struct cdev cdev; - struct class * ump_class; -}; - -/* Name of the UMP device driver */ -static char ump_dev_name[] = "ump"; /* should be const, but the functions we call requires non-cost */ - -/* Module parameter to control log level */ -int ump_debug_level = 2; -module_param(ump_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */ -MODULE_PARM_DESC(ump_debug_level, "Higher number, more dmesg output"); - -/* By default the module uses any available major, but it's possible to set it at load time to a specific number */ -int ump_major = 0; -module_param(ump_major, int, S_IRUGO); /* r--r--r-- */ -MODULE_PARM_DESC(ump_major, "Device major number"); - -#define UMP_REV_STRING "1.0" - -char * ump_revision = UMP_REV_STRING; -module_param(ump_revision, charp, S_IRUGO); /* r--r--r-- */ -MODULE_PARM_DESC(ump_revision, "Revision info"); - -static int umpp_linux_open(struct inode *inode, struct file *filp); -static int umpp_linux_release(struct inode *inode, struct file *filp); -#ifdef HAVE_UNLOCKED_IOCTL -static long umpp_linux_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); -#else -static int umpp_linux_ioctl(struct 
inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -#endif - -/* This variable defines the file operations this UMP device driver offers */ -static struct file_operations ump_fops = -{ - .owner = THIS_MODULE, - .open = umpp_linux_open, - .release = umpp_linux_release, -#ifdef HAVE_UNLOCKED_IOCTL - .unlocked_ioctl = umpp_linux_ioctl, -#else - .ioctl = umpp_linux_ioctl, -#endif - .compat_ioctl = umpp_linux_ioctl, - .mmap = umpp_linux_mmap -}; - -/* import module handling */ -DEFINE_MUTEX(import_list_lock); -struct ump_import_handler * import_handlers[UMPP_EXTERNAL_MEM_COUNT]; - -/* The global variable containing the global device data */ -static struct ump_linux_device ump_linux_device; - -#define DBG_MSG(level, ...) do { \ -if ((level) <= ump_debug_level)\ -{\ -printk(KERN_DEBUG "UMP<" #level ">:\n" __VA_ARGS__);\ -} \ -} while (0) - -#define MSG_ERR(...) do{ \ -printk(KERN_ERR "UMP: ERR: %s\n %s()%4d\n", __FILE__, __func__ , __LINE__) ; \ -printk(KERN_ERR __VA_ARGS__); \ -printk(KERN_ERR "\n"); \ -} while(0) - -#define MSG(...) do{ \ -printk(KERN_INFO "UMP: " __VA_ARGS__);\ -} while (0) - -/* - * This function is called by Linux to initialize this module. - * All we do is initialize the UMP device driver. - */ -static int __init umpp_linux_initialize_module(void) -{ - ump_result err; - - err = umpp_core_constructor(); - if (UMP_OK != err) - { - MSG_ERR("UMP device driver init failed\n"); - return -ENOTTY; - } - - MSG("UMP device driver %s loaded\n", UMP_REV_STRING); - return 0; -} - - - -/* - * This function is called by Linux to unload/terminate/exit/cleanup this module. - * All we do is terminate the UMP device driver. - */ -static void __exit umpp_linux_cleanup_module(void) -{ - DBG_MSG(2, "Unloading UMP device driver\n"); - umpp_core_destructor(); - DBG_MSG(2, "Module unloaded\n"); -} - - - -/* - * Initialize the UMP device driver. 
- */ -ump_result umpp_device_initialize(void) -{ - int err; - dev_t dev = 0; - - if (0 == ump_major) - { - /* auto select a major */ - err = alloc_chrdev_region(&dev, 0, 1, ump_dev_name); - ump_major = MAJOR(dev); - } - else - { - /* use load time defined major number */ - dev = MKDEV(ump_major, 0); - err = register_chrdev_region(dev, 1, ump_dev_name); - } - - if (0 == err) - { - memset(&ump_linux_device, 0, sizeof(ump_linux_device)); - - /* initialize our char dev data */ - cdev_init(&ump_linux_device.cdev, &ump_fops); - ump_linux_device.cdev.owner = THIS_MODULE; - ump_linux_device.cdev.ops = &ump_fops; - - /* register char dev with the kernel */ - err = cdev_add(&ump_linux_device.cdev, dev, 1/*count*/); - if (0 == err) - { - - ump_linux_device.ump_class = class_create(THIS_MODULE, ump_dev_name); - if (IS_ERR(ump_linux_device.ump_class)) - { - err = PTR_ERR(ump_linux_device.ump_class); - } - else - { - struct device * mdev; - mdev = device_create(ump_linux_device.ump_class, NULL, dev, NULL, ump_dev_name); - if (!IS_ERR(mdev)) - { - return UMP_OK; - } - - err = PTR_ERR(mdev); - class_destroy(ump_linux_device.ump_class); - } - cdev_del(&ump_linux_device.cdev); - } - - unregister_chrdev_region(dev, 1); - } - - return UMP_ERROR; -} - - - -/* - * Terminate the UMP device driver - */ -void umpp_device_terminate(void) -{ - dev_t dev = MKDEV(ump_major, 0); - - device_destroy(ump_linux_device.ump_class, dev); - class_destroy(ump_linux_device.ump_class); - - /* unregister char device */ - cdev_del(&ump_linux_device.cdev); - - /* free major */ - unregister_chrdev_region(dev, 1); -} - - -static int umpp_linux_open(struct inode *inode, struct file *filp) -{ - umpp_session *session; - - session = umpp_core_session_start(); - if (NULL == session) - { - return -EFAULT; - } - - filp->private_data = session; - - return 0; -} - -static int umpp_linux_release(struct inode *inode, struct file *filp) -{ - umpp_session *session; - - session = filp->private_data; - - 
umpp_core_session_end(session); - - filp->private_data = NULL; - - return 0; -} - -/**************************/ -/*ioctl specific functions*/ -/**************************/ -static int do_ump_dd_allocate(umpp_session * session, ump_k_allocate * params) -{ - ump_dd_handle new_allocation; - new_allocation = ump_dd_allocate_64(params->size, params->alloc_flags, NULL, NULL, NULL); - - if (UMP_DD_INVALID_MEMORY_HANDLE != new_allocation) - { - umpp_session_memory_usage * tracker; - - tracker = kmalloc(sizeof(*tracker), GFP_KERNEL | __GFP_HARDWALL); - if (NULL != tracker) - { - /* update the return struct with the new ID */ - params->secure_id = ump_dd_secure_id_get(new_allocation); - - tracker->mem = new_allocation; - tracker->id = params->secure_id; - atomic_set(&tracker->process_usage_count, 1); - - /* link it into the session in-use list */ - mutex_lock(&session->session_lock); - list_add(&tracker->link, &session->memory_usage); - mutex_unlock(&session->session_lock); - - return 0; - } - ump_dd_release(new_allocation); - } - - printk(KERN_WARNING "UMP: Allocation FAILED\n"); - return -ENOMEM; -} - -static int do_ump_dd_retain(umpp_session * session, ump_k_retain * params) -{ - umpp_session_memory_usage * it; - - mutex_lock(&session->session_lock); - - /* try to find it on the session usage list */ - list_for_each_entry(it, &session->memory_usage, link) - { - if (it->id == params->secure_id) - { - /* found to already be in use */ - /* check for overflow */ - while(1) - { - int refcnt = atomic_read(&it->process_usage_count); - if (refcnt + 1 > 0) - { - /* add a process local ref */ - if(atomic_cmpxchg(&it->process_usage_count, refcnt, refcnt + 1) == refcnt) - { - mutex_unlock(&session->session_lock); - return 0; - } - } - else - { - /* maximum usage cap reached */ - mutex_unlock(&session->session_lock); - return -EBUSY; - } - } - } - } - /* try to look it up globally */ - - it = kmalloc(sizeof(*it), GFP_KERNEL); - - if (NULL != it) - { - it->mem = 
ump_dd_from_secure_id(params->secure_id); - if (UMP_DD_INVALID_MEMORY_HANDLE != it->mem) - { - /* found, add it to the session usage list */ - it->id = params->secure_id; - atomic_set(&it->process_usage_count, 1); - list_add(&it->link, &session->memory_usage); - } - else - { - /* not found */ - kfree(it); - it = NULL; - } - } - - mutex_unlock(&session->session_lock); - - return (NULL != it) ? 0 : -ENODEV; -} - - -static int do_ump_dd_release(umpp_session * session, ump_k_release * params) -{ - umpp_session_memory_usage * it; - int result = -ENODEV; - - mutex_lock(&session->session_lock); - - /* only do a release if found on the session list */ - list_for_each_entry(it, &session->memory_usage, link) - { - if (it->id == params->secure_id) - { - /* found, a valid call */ - result = 0; - - if (0 == atomic_sub_return(1, &it->process_usage_count)) - { - /* last ref in this process remove from the usage list and remove the underlying ref */ - list_del(&it->link); - ump_dd_release(it->mem); - kfree(it); - } - - break; - } - } - mutex_unlock(&session->session_lock); - - return result; -} - -static int do_ump_dd_sizequery(umpp_session * session, ump_k_sizequery * params) -{ - umpp_session_memory_usage * it; - int result = -ENODEV; - - mutex_lock(&session->session_lock); - - /* only valid if found on the session list */ - list_for_each_entry(it, &session->memory_usage, link) - { - if (it->id == params->secure_id) - { - /* found, a valid call */ - params->size = ump_dd_size_get_64(it->mem); - result = 0; - break; - } - - } - mutex_unlock(&session->session_lock); - - return result; -} - -static int do_ump_dd_allocation_flags_get(umpp_session * session, ump_k_allocation_flags * params) -{ - umpp_session_memory_usage * it; - int result = -ENODEV; - - mutex_lock(&session->session_lock); - - /* only valid if found on the session list */ - list_for_each_entry(it, &session->memory_usage, link) - { - if (it->id == params->secure_id) - { - /* found, a valid call */ - 
params->alloc_flags = ump_dd_allocation_flags_get(it->mem); - result = 0; - break; - } - - } - mutex_unlock(&session->session_lock); - - return result; -} - -static int do_ump_dd_msync_now(umpp_session * session, ump_k_msync * params) -{ - umpp_session_memory_usage * it; - int result = -ENODEV; - - mutex_lock(&session->session_lock); - - /* only valid if found on the session list */ - list_for_each_entry(it, &session->memory_usage, link) - { - if (it->id == params->secure_id) - { - /* found, do the cache op */ -#ifdef CONFIG_COMPAT - if (is_compat_task()) - { - umpp_dd_cpu_msync_now(it->mem, params->cache_operation, compat_ptr(params->mapped_ptr.compat_value), params->size); - result = 0; - } - else - { -#endif - umpp_dd_cpu_msync_now(it->mem, params->cache_operation, params->mapped_ptr.value, params->size); - result = 0; -#ifdef CONFIG_COMPAT - } -#endif - break; - } - } - mutex_unlock(&session->session_lock); - - return result; -} - - -void umpp_import_handlers_init(umpp_session * session) -{ - int i; - mutex_lock(&import_list_lock); - for ( i = 1; i < UMPP_EXTERNAL_MEM_COUNT; i++ ) - { - if (import_handlers[i]) - { - import_handlers[i]->session_begin(&session->import_handler_data[i]); - /* It is OK if session_begin returned an error. 
- * We won't do any import calls if so */ - } - } - mutex_unlock(&import_list_lock); -} - -void umpp_import_handlers_term(umpp_session * session) -{ - int i; - mutex_lock(&import_list_lock); - for ( i = 1; i < UMPP_EXTERNAL_MEM_COUNT; i++ ) - { - /* only call if session_begin succeeded */ - if (session->import_handler_data[i] != NULL) - { - /* if session_beging succeeded the handler - * should not have unregistered with us */ - BUG_ON(!import_handlers[i]); - import_handlers[i]->session_end(session->import_handler_data[i]); - session->import_handler_data[i] = NULL; - } - } - mutex_unlock(&import_list_lock); -} - -int ump_import_module_register(enum ump_external_memory_type type, struct ump_import_handler * handler) -{ - int res = -EEXIST; - - /* validate input */ - BUG_ON(type == 0 || type >= UMPP_EXTERNAL_MEM_COUNT); - BUG_ON(!handler); - BUG_ON(!handler->linux_module); - BUG_ON(!handler->session_begin); - BUG_ON(!handler->session_end); - BUG_ON(!handler->import); - - mutex_lock(&import_list_lock); - - if (!import_handlers[type]) - { - import_handlers[type] = handler; - res = 0; - } - - mutex_unlock(&import_list_lock); - - return res; -} - -void ump_import_module_unregister(enum ump_external_memory_type type) -{ - BUG_ON(type == 0 || type >= UMPP_EXTERNAL_MEM_COUNT); - - mutex_lock(&import_list_lock); - /* an error to call this if ump_import_module_register didn't succeed */ - BUG_ON(!import_handlers[type]); - import_handlers[type] = NULL; - mutex_unlock(&import_list_lock); -} - -static struct ump_import_handler * import_handler_get(unsigned int type_id) -{ - enum ump_external_memory_type type; - struct ump_import_handler * handler; - - /* validate and convert input */ - /* handle bad data here, not just BUG_ON */ - if (type_id == 0 || type_id >= UMPP_EXTERNAL_MEM_COUNT) - return NULL; - - type = (enum ump_external_memory_type)type_id; - - /* find the handler */ - mutex_lock(&import_list_lock); - - handler = import_handlers[type]; - - if (handler) - { - if 
(!try_module_get(handler->linux_module)) - { - handler = NULL; - } - } - - mutex_unlock(&import_list_lock); - - return handler; -} - -static void import_handler_put(struct ump_import_handler * handler) -{ - module_put(handler->linux_module); -} - -static int do_ump_dd_import(umpp_session * session, ump_k_import * params) -{ - ump_dd_handle new_allocation = UMP_DD_INVALID_MEMORY_HANDLE; - struct ump_import_handler * handler; - - handler = import_handler_get(params->type); - - if (handler) - { - /* try late binding if not already bound */ - if (!session->import_handler_data[params->type]) - { - handler->session_begin(&session->import_handler_data[params->type]); - } - - /* do we have a bound session? */ - if (session->import_handler_data[params->type]) - { - new_allocation = handler->import( session->import_handler_data[params->type], - params->phandle.value, - params->alloc_flags); - } - - /* done with the handler */ - import_handler_put(handler); - } - - /* did the import succeed? */ - if (UMP_DD_INVALID_MEMORY_HANDLE != new_allocation) - { - umpp_session_memory_usage * tracker; - - tracker = kmalloc(sizeof(*tracker), GFP_KERNEL | __GFP_HARDWALL); - if (NULL != tracker) - { - /* update the return struct with the new ID */ - params->secure_id = ump_dd_secure_id_get(new_allocation); - - tracker->mem = new_allocation; - tracker->id = params->secure_id; - atomic_set(&tracker->process_usage_count, 1); - - /* link it into the session in-use list */ - mutex_lock(&session->session_lock); - list_add(&tracker->link, &session->memory_usage); - mutex_unlock(&session->session_lock); - - return 0; - } - ump_dd_release(new_allocation); - } - - return -ENOMEM; - -} - -#ifdef HAVE_UNLOCKED_IOCTL -static long umpp_linux_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -#else -static int umpp_linux_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) -#endif -{ - int ret; - uint64_t msg[(UMP_CALL_MAX_SIZE+7)>>3]; /* alignment fixup */ - 
uint32_t size = _IOC_SIZE(cmd); - struct umpp_session *session = filp->private_data; - -#ifndef HAVE_UNLOCKED_IOCTL - (void)inode; /* unused arg */ -#endif - - /* - * extract the type and number bitfields, and don't decode - * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() - */ - if (_IOC_TYPE(cmd) != UMP_IOC_MAGIC) - { - return -ENOTTY; - - } - if (_IOC_NR(cmd) > UMP_IOC_MAXNR) - { - return -ENOTTY; - } - - switch(cmd) - { - case UMP_FUNC_ALLOCATE: - if (size != sizeof(ump_k_allocate)) - { - return -ENOTTY; - } - if (copy_from_user(&msg, (void __user *)arg, size)) - { - return -EFAULT; - } - ret = do_ump_dd_allocate(session, (ump_k_allocate *)&msg); - if (ret) - { - return ret; - } - if (copy_to_user((void *)arg, &msg, size)) - { - return -EFAULT; - } - return 0; - case UMP_FUNC_SIZEQUERY: - if (size != sizeof(ump_k_sizequery)) - { - return -ENOTTY; - } - if (copy_from_user(&msg, (void __user *)arg, size)) - { - return -EFAULT; - } - ret = do_ump_dd_sizequery(session,(ump_k_sizequery*) &msg); - if (ret) - { - return ret; - } - if (copy_to_user((void *)arg, &msg, size)) - { - return -EFAULT; - } - return 0; - case UMP_FUNC_MSYNC: - if (size != sizeof(ump_k_msync)) - { - return -ENOTTY; - } - if (copy_from_user(&msg, (void __user *)arg, size)) - { - return -EFAULT; - } - ret = do_ump_dd_msync_now(session,(ump_k_msync*) &msg); - if (ret) - { - return ret; - } - if (copy_to_user((void *)arg, &msg, size)) - { - return -EFAULT; - } - return 0; - case UMP_FUNC_IMPORT: - if (size != sizeof(ump_k_import)) - { - return -ENOTTY; - } - if (copy_from_user(&msg, (void __user*)arg, size)) - { - return -EFAULT; - } - ret = do_ump_dd_import(session, (ump_k_import*) &msg); - if (ret) - { - return ret; - } - if (copy_to_user((void *)arg, &msg, size)) - { - return -EFAULT; - } - return 0; - /* used only by v1 API */ - case UMP_FUNC_ALLOCATION_FLAGS_GET: - if (size != sizeof(ump_k_allocation_flags)) - { - return -ENOTTY; - } - if (copy_from_user(&msg, (void __user 
*)arg, size)) - { - return -EFAULT; - } - ret = do_ump_dd_allocation_flags_get(session,(ump_k_allocation_flags*) &msg); - if (ret) - { - return ret; - } - if (copy_to_user((void *)arg, &msg, size)) - { - return -EFAULT; - } - return 0; - case UMP_FUNC_RETAIN: - if (size != sizeof(ump_k_retain)) - { - return -ENOTTY; - } - if (copy_from_user(&msg, (void __user *)arg, size)) - { - return -EFAULT; - } - ret = do_ump_dd_retain(session,(ump_k_retain*) &msg); - if (ret) - { - return ret; - } - return 0; - case UMP_FUNC_RELEASE: - if (size != sizeof(ump_k_release)) - { - return -ENOTTY; - } - if (copy_from_user(&msg, (void __user *)arg, size)) - { - return -EFAULT; - } - ret = do_ump_dd_release(session,(ump_k_release*) &msg); - if (ret) - { - return ret; - } - return 0; - default: - /* not ours */ - return -ENOTTY; - } - /*redundant below*/ - return -ENOTTY; -} - - -/* Export UMP kernel space API functions */ -EXPORT_SYMBOL(ump_dd_allocate_64); -EXPORT_SYMBOL(ump_dd_allocation_flags_get); -EXPORT_SYMBOL(ump_dd_secure_id_get); -EXPORT_SYMBOL(ump_dd_from_secure_id); -EXPORT_SYMBOL(ump_dd_phys_blocks_get_64); -EXPORT_SYMBOL(ump_dd_size_get_64); -EXPORT_SYMBOL(ump_dd_retain); -EXPORT_SYMBOL(ump_dd_release); -EXPORT_SYMBOL(ump_dd_create_from_phys_blocks_64); -#ifdef CONFIG_KDS -EXPORT_SYMBOL(ump_dd_kds_resource_get); -#endif - -/* import API */ -EXPORT_SYMBOL(ump_import_module_register); -EXPORT_SYMBOL(ump_import_module_unregister); - - - -/* V1 API */ -EXPORT_SYMBOL(ump_dd_handle_create_from_secure_id); -EXPORT_SYMBOL(ump_dd_phys_block_count_get); -EXPORT_SYMBOL(ump_dd_phys_block_get); -EXPORT_SYMBOL(ump_dd_phys_blocks_get); -EXPORT_SYMBOL(ump_dd_size_get); -EXPORT_SYMBOL(ump_dd_reference_add); -EXPORT_SYMBOL(ump_dd_reference_release); -EXPORT_SYMBOL(ump_dd_handle_create_from_phys_blocks); - - -/* Setup init and exit functions for this module */ -module_init(umpp_linux_initialize_module); -module_exit(umpp_linux_cleanup_module); - -/* And some module informatio */ 
-MODULE_LICENSE("GPL"); -MODULE_AUTHOR("ARM Ltd."); -MODULE_VERSION(UMP_REV_STRING); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/linux/ump_kernel_linux_mem.c b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/linux/ump_kernel_linux_mem.c deleted file mode 100755 index 38db91e407ab8..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/linux/ump_kernel_linux_mem.c +++ /dev/null @@ -1,250 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#include -#include - -#include -#include /* kernel module definitions */ -#include /* file system operations */ -#include /* character device definitions */ -#include /* request_mem_region */ -#include /* memory mananger definitions */ -#include -#include /*kmap*/ - -#include /* is_compat_task */ - -#include -#include -#include - -static void umpp_vm_close(struct vm_area_struct *vma) -{ - umpp_cpu_mapping * mapping; - umpp_session * session; - ump_dd_handle handle; - - mapping = (umpp_cpu_mapping*)vma->vm_private_data; - UMP_ASSERT(mapping); - - session = mapping->session; - handle = mapping->handle; - - umpp_dd_remove_cpu_mapping(mapping->handle, mapping); /* will free the mapping object */ - ump_dd_release(handle); -} - - -static const struct vm_operations_struct umpp_vm_ops = { - .close = umpp_vm_close -}; - -int umpp_phys_commit(umpp_allocation * alloc) -{ - uint64_t i; - - /* round up to a page boundary */ - alloc->size = (alloc->size + PAGE_SIZE - 1) & ~((uint64_t)PAGE_SIZE-1) ; - /* calculate number of 
pages */ - alloc->blocksCount = alloc->size >> PAGE_SHIFT; - - if( (sizeof(ump_dd_physical_block_64) * alloc->blocksCount) > ((size_t)-1)) - { - printk(KERN_WARNING "UMP: umpp_phys_commit - trying to allocate more than possible\n"); - return -ENOMEM; - } - - alloc->block_array = kmalloc(sizeof(ump_dd_physical_block_64) * alloc->blocksCount, __GFP_HARDWALL | GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); - if (NULL == alloc->block_array) - { - return -ENOMEM; - } - - for (i = 0; i < alloc->blocksCount; i++) - { - void * mp; - struct page * page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY | __GFP_NOWARN | __GFP_COLD); - if (NULL == page) - { - break; - } - - alloc->block_array[i].addr = page_to_pfn(page) << PAGE_SHIFT; - alloc->block_array[i].size = PAGE_SIZE; - - mp = kmap(page); - if (NULL == mp) - { - __free_page(page); - break; - } - - memset(mp, 0x00, PAGE_SIZE); /* instead of __GFP_ZERO, so we can do cache maintenance */ - ump_sync_to_memory(PFN_PHYS(page_to_pfn(page)), mp, PAGE_SIZE); - kunmap(page); - } - - if (i == alloc->blocksCount) - { - return 0; - } - else - { - uint64_t j; - for (j = 0; j < i; j++) - { - struct page * page; - page = pfn_to_page(alloc->block_array[j].addr >> PAGE_SHIFT); - __free_page(page); - } - - kfree(alloc->block_array); - - return -ENOMEM; - } -} - -void umpp_phys_free(umpp_allocation * alloc) -{ - uint64_t i; - - for (i = 0; i < alloc->blocksCount; i++) - { - __free_page(pfn_to_page(alloc->block_array[i].addr >> PAGE_SHIFT)); - } - - kfree(alloc->block_array); -} - -int umpp_linux_mmap(struct file * filp, struct vm_area_struct * vma) -{ - ump_secure_id id; - ump_dd_handle h; - size_t offset; - int err = -EINVAL; - size_t length = vma->vm_end - vma->vm_start; - - umpp_cpu_mapping * map = NULL; - umpp_session *session = filp->private_data; - - if ( 0 == length ) - { - return -EINVAL; - } - - map = kzalloc(sizeof(*map), GFP_KERNEL); - if (NULL == map) - { - WARN_ON(1); - err = -ENOMEM; - goto out; - } - - /* unpack our arg */ -#if 
defined CONFIG_64BIT && CONFIG_64BIT - if (is_compat_task()) - { -#endif - id = vma->vm_pgoff >> UMP_LINUX_OFFSET_BITS_32; - offset = vma->vm_pgoff & UMP_LINUX_OFFSET_MASK_32; -#if defined CONFIG_64BIT && CONFIG_64BIT - } - else - { - id = vma->vm_pgoff >> UMP_LINUX_OFFSET_BITS_64; - offset = vma->vm_pgoff & UMP_LINUX_OFFSET_MASK_64; - } -#endif - - h = ump_dd_from_secure_id(id); - if (UMP_DD_INVALID_MEMORY_HANDLE != h) - { - uint64_t i; - uint64_t block_idx; - uint64_t block_offset; - uint64_t paddr; - umpp_allocation * alloc; - uint64_t last_byte; - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) - vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_IO | VM_MIXEDMAP | VM_DONTDUMP; -#else - vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO | VM_MIXEDMAP; -#endif - vma->vm_ops = &umpp_vm_ops; - vma->vm_private_data = map; - - alloc = (umpp_allocation*)h; - - if( (alloc->flags & UMP_CONSTRAINT_UNCACHED) != 0) - { - /* cache disabled flag set, disable caching for cpu mappings */ - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); - } - - last_byte = length + (offset << PAGE_SHIFT) - 1; - if (last_byte >= alloc->size || last_byte < (offset << PAGE_SHIFT)) - { - goto err_out; - } - - if (umpp_dd_find_start_block(alloc, offset << PAGE_SHIFT, &block_idx, &block_offset)) - { - goto err_out; - } - - paddr = alloc->block_array[block_idx].addr + block_offset; - - for (i = 0; i < (length >> PAGE_SHIFT); i++) - { - /* check if we've overrrun the current block, if so move to the next block */ - if (paddr >= (alloc->block_array[block_idx].addr + alloc->block_array[block_idx].size)) - { - block_idx++; - UMP_ASSERT(block_idx < alloc->blocksCount); - paddr = alloc->block_array[block_idx].addr; - } - - err = vm_insert_mixed(vma, vma->vm_start + (i << PAGE_SHIFT), paddr >> PAGE_SHIFT); - paddr += PAGE_SIZE; - } - - map->vaddr_start = (void*)vma->vm_start; - map->nr_pages = length >> PAGE_SHIFT; - map->page_off = offset; - map->handle = h; - map->session = 
session; - - umpp_dd_add_cpu_mapping(h, map); - - return 0; - - err_out: - - ump_dd_release(h); - } - - kfree(map); - -out: - - return err; -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/linux/ump_kernel_linux_mem.h b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/linux/ump_kernel_linux_mem.h deleted file mode 100755 index 4fbf3b2a9119c..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/linux/ump_kernel_linux_mem.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2011, 2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef _UMP_KERNEL_LINUX_MEM_H_ -#define _UMP_KERNEL_LINUX_MEM_H_ - - -int umpp_linux_mmap(struct file * filp, struct vm_area_struct * vma); - -#endif /* _UMP_KERNEL_LINUX_MEM_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/sconscript deleted file mode 100755 index 9cec770fe26a4..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/sconscript +++ /dev/null @@ -1,64 +0,0 @@ -# -# (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. 
-# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -import os -import re -Import('env') - -# Clone the environment so changes don't affect other build files -env_ump = env.Clone() - -if env_ump['v'] != '1': - env_ump['MAKECOMSTR'] = '[MAKE] ${SOURCE.dir}' - -# Source files required for UMP. -ump_src = [Glob('#kernel/drivers/base/ump/src/linux/*.c'), Glob('#kernel/drivers/base/ump/src/common/*.c'), Glob('#kernel/drivers/base/ump/src/imports/*/*.c')] - -env_ump.Append( CPPPATH='#kernel/drivers/base/ump/src/' ) - -if env_ump.GetOption('clean') : - env_ump.Execute(Action("make clean", '[clean] ump')) - cmd = env_ump.Command('$STATIC_LIB_PATH/ump.ko', ump_src, []) -else: - if env['unit'] == '1': - makeAction=Action("cd ${SOURCE.dir}/.. && make MALI_UNIT_TEST=${unit} PLATFORM=${platform} %s && cp ump.ko $STATIC_LIB_PATH/ump.ko" % env.kernel_get_config_defines(), '$MAKECOMSTR') - else: - # The target is ump.ko, built from the source in ump_src, via the action makeAction - # ump.ko will be copied to $STATIC_LIB_PATH after being built by the standard Linux - # kernel build system, after which it can be installed to the directory specified if - # "libs_install" is set; this is done by LibTarget. - makeAction=Action("cd ${SOURCE.dir}/.. && make PLATFORM=${platform} %s && cp ump.ko $STATIC_LIB_PATH/ump.ko" % env.kernel_get_config_defines(), '$MAKECOMSTR') - cmd = env_ump.Command('$STATIC_LIB_PATH/ump.ko', ump_src, [makeAction]) - -# Add a dependency on kds.ko only when the build is not Android -# Android uses sync_pt instead of Midgard KDS to achieve KDS functionality -# Only necessary when KDS is not built into the kernel. 
-# -if env['os'] != 'android': - linux_config_file = os.path.normpath(os.environ['KDIR']) + '/.config' - search_term = '^[\ ]*CONFIG_KDS[\ ]*=[\ ]*y' - kds_in_kernel = 0 - for line in open(linux_config_file, 'r'): - if re.search(search_term, line): - # KDS in kernel. - kds_in_kernel = 1 - if not kds_in_kernel: - env.Depends('$STATIC_LIB_PATH/ump.ko', '$STATIC_LIB_PATH/kds.ko') - -env_ump.ProgTarget('ump', cmd) - -SConscript( 'imports/sconscript' ) - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/ump_arch.h b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/ump_arch.h deleted file mode 100755 index 2303d56505a77..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/src/ump_arch.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2011, 2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef _UMP_ARCH_H_ -#define _UMP_ARCH_H_ - -#include - -/** - * Device specific setup. - * Called by the UMP core code to to host OS/device specific setup. - * Typical use case is device node creation for talking to user space. - * @return UMP_OK on success, any other value on failure - */ -extern ump_result umpp_device_initialize(void); - -/** - * Device specific teardown. - * Undo any things done by ump_device_initialize. 
- */ -extern void umpp_device_terminate(void); - -extern int umpp_phys_commit(umpp_allocation * alloc); -extern void umpp_phys_free(umpp_allocation * alloc); - -#endif /* _UMP_ARCH_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/ump_ref_drv.h b/drivers/gpu/arm/t83x/kernel/drivers/base/ump/ump_ref_drv.h deleted file mode 100755 index 9a265fe12587c..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/base/ump/ump_ref_drv.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file ump_ref_drv.h - * - * This file contains the link to user space part of the UMP API for usage by MALI 400 gralloc. - * - */ - -#ifndef _UMP_REF_DRV_H_ -#define _UMP_REF_DRV_H_ - -#include - - -#endif /* _UMP_REF_DRV_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/Kbuild deleted file mode 100755 index 19c7e9a2659d2..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/Kbuild +++ /dev/null @@ -1,17 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. 
-# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -obj-$(CONFIG_MALI_MIDGARD) += midgard/ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/Kconfig b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/Kconfig deleted file mode 100755 index 1f30eb541d652..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/Kconfig +++ /dev/null @@ -1,19 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -menu "ARM GPU Configuration" -source "drivers/gpu/arm/midgard/Kconfig" -endmenu diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/Kbuild deleted file mode 100755 index 899b9ef3a797f..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/Kbuild +++ /dev/null @@ -1,234 +0,0 @@ -# -# (C) COPYRIGHT 2012,2014 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - - -# Driver version string which is returned to userspace via an ioctl -MALI_RELEASE_NAME ?= "r7p0-02rel0" - -# Paths required for build -KBASE_PATH = $(src) -KBASE_PLATFORM_PATH = $(KBASE_PATH)/platform_dummy -UMP_PATH = $(src)/../../../base - -ifeq ($(CONFIG_MALI_ERROR_INJECTION),y) -MALI_ERROR_INJECT_ON = 1 -endif - -# Set up defaults if not defined by build system -MALI_CUSTOMER_RELEASE ?= 1 -MALI_UNIT_TEST ?= 0 -MALI_KERNEL_TEST_API ?= 0 -MALI_ERROR_INJECT_ON ?= 0 -MALI_MOCK_TEST ?= 0 -MALI_COVERAGE ?= 0 -MALI_INSTRUMENTATION_LEVEL ?= 0 -# This workaround is for what seems to be a compiler bug we observed in -# GCC 4.7 on AOSP 4.3. The bug caused an intermittent failure compiling -# the "_Pragma" syntax, where an error message is returned: -# -# "internal compiler error: unspellable token PRAGMA" -# -# This regression has thus far only been seen on the GCC 4.7 compiler bundled -# with AOSP 4.3.0. So this makefile, intended for in-tree kernel builds -# which are not known to be used with AOSP, is hardcoded to disable the -# workaround, i.e. set the define to 0. 
-MALI_GCC_WORKAROUND_MIDCOM_4598 ?= 0 - -# Set up our defines, which will be passed to gcc -DEFINES = \ - -DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \ - -DMALI_KERNEL_TEST_API=$(MALI_KERNEL_TEST_API) \ - -DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \ - -DMALI_ERROR_INJECT_ON=$(MALI_ERROR_INJECT_ON) \ - -DMALI_MOCK_TEST=$(MALI_MOCK_TEST) \ - -DMALI_COVERAGE=$(MALI_COVERAGE) \ - -DMALI_INSTRUMENTATION_LEVEL=$(MALI_INSTRUMENTATION_LEVEL) \ - -DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\" \ - -DMALI_GCC_WORKAROUND_MIDCOM_4598=$(MALI_GCC_WORKAROUND_MIDCOM_4598) - -ifeq ($(KBUILD_EXTMOD),) -# in-tree -DEFINES +=-DMALI_KBASE_THIRDPARTY_PATH=../../$(src)/platform/$(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME) -else -# out-of-tree -DEFINES +=-DMALI_KBASE_THIRDPARTY_PATH=$(src)/platform/$(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME) -endif - -DEFINES += -I$(srctree)/drivers/staging/android - -# Use our defines when compiling -ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux -subdir-ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(OSK_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux - -SRC := \ - mali_kbase_device.c \ - mali_kbase_cache_policy.c \ - mali_kbase_mem.c \ - mali_kbase_mmu.c \ - mali_kbase_ipa.c \ - mali_kbase_jd.c \ - mali_kbase_jd_debugfs.c \ - mali_kbase_jm.c \ - mali_kbase_gpuprops.c \ - mali_kbase_js.c \ - mali_kbase_js_ctx_attr.c \ - mali_kbase_event.c \ - mali_kbase_context.c \ - mali_kbase_pm.c \ - mali_kbase_config.c \ - mali_kbase_security.c \ - mali_kbase_instr.c \ - mali_kbase_vinstr.c \ - mali_kbase_softjobs.c \ - mali_kbase_10969_workaround.c \ - mali_kbase_hw.c \ - mali_kbase_utility.c \ - mali_kbase_debug.c \ - mali_kbase_trace_timeline.c \ - mali_kbase_gpu_memory_debugfs.c \ - mali_kbase_mem_linux.c \ - mali_kbase_core_linux.c \ - mali_kbase_sync.c \ - mali_kbase_sync_user.c \ - mali_kbase_replay.c \ - mali_kbase_mem_profile_debugfs.c \ - mali_kbase_mmu_mode_lpae.c \ - 
mali_kbase_disjoint_events.c \ - mali_kbase_gator_api.c \ - mali_kbase_debug_mem_view.c \ - mali_kbase_debug_job_fault.c \ - mali_kbase_smc.c \ - mali_kbase_mem_pool.c \ - mali_kbase_mem_pool_debugfs.c - -ifeq ($(CONFIG_MALI_MIPE_ENABLED),y) - SRC += mali_kbase_tlstream.c - ifeq ($(MALI_UNIT_TEST),1) - SRC += mali_kbase_tlstream_test.c - endif -endif - -# Job Scheduler Policy: Completely Fair Scheduler -SRC += mali_kbase_js_policy_cfs.c - -ccflags-y += -I$(KBASE_PATH) - -ifeq ($(CONFIG_MALI_PLATFORM_FAKE),y) - SRC += mali_kbase_platform_fake.c - - ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS),y) - SRC += platform/vexpress/mali_kbase_config_vexpress.c \ - platform/vexpress/mali_kbase_cpu_vexpress.c - ccflags-y += -I$(src)/platform/vexpress - endif - - ifeq ($(CONFIG_MALI_PLATFORM_RTSM_VE),y) - SRC += platform/rtsm_ve/mali_kbase_config_vexpress.c - ccflags-y += -I$(src)/platform/rtsm_ve - endif - - ifeq ($(CONFIG_MALI_PLATFORM_JUNO),y) - SRC += platform/juno/mali_kbase_config_vexpress.c - ccflags-y += -I$(src)/platform/juno - endif - - ifeq ($(CONFIG_MALI_PLATFORM_JUNO_SOC),y) - SRC += platform/juno_soc/mali_kbase_config_juno_soc.c - ccflags-y += -I$(src)/platform/juno_soc - endif - - ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS_1XV7_A57),y) - SRC += platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c - ccflags-y += -I$(src)/platform/vexpress_1xv7_a57 - endif - - ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS_6XVIRTEX7_10MHZ),y) - SRC += platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c \ - platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c - ccflags-y += -I$(src)/platform/vexpress_6xvirtex7_10mhz - endif - - ifeq ($(CONFIG_MALI_PLATFORM_A7_KIPLING),y) - SRC += platform/a7_kipling/mali_kbase_config_a7_kipling.c \ - platform/a7_kipling/mali_kbase_cpu_a7_kipling.c - ccflags-y += -I$(src)/platform/a7_kipling - endif - - ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY),y) - # remove begin and end quotes from the Kconfig string type - platform_name := $(shell echo 
$(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME)) - MALI_PLATFORM_THIRDPARTY_DIR := platform/$(platform_name) - ccflags-y += -I$(src)/$(MALI_PLATFORM_THIRDPARTY_DIR) - ifeq ($(CONFIG_MALI_MIDGARD),m) - include $(src)/platform/$(platform_name)/Kbuild - else ifeq ($(CONFIG_MALI_MIDGARD),y) - obj-$(CONFIG_MALI_MIDGARD) += platform/ - endif - endif -endif # CONFIG_MALI_PLATFORM_FAKE=y - -ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY),y) -# remove begin and end quotes from the Kconfig string type -platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME)) -MALI_PLATFORM_THIRDPARTY_DIR := platform/$(platform_name) -ccflags-y += -I$(src)/$(MALI_PLATFORM_THIRDPARTY_DIR) -ifeq ($(CONFIG_MALI_MIDGARD),m) -include $(src)/platform/$(platform_name)/Kbuild -else ifeq ($(CONFIG_MALI_MIDGARD),y) -obj-$(CONFIG_MALI_MIDGARD) += platform/ -endif -endif - -ifeq ($(CONFIG_MALI_PLATFORM_DEVICETREE),y) - SRC += platform/devicetree/mali_kbase_runtime_pm.c - SRC += platform/devicetree/mali_kbase_config_devicetree.c - SRC += platform/devicetree/mali_clock.c - SRC += platform/devicetree/mpgpu.c - SRC += platform/devicetree/meson_main2.c - SRC += platform/devicetree/platform_gx.c - SRC += platform/devicetree/scaling.c - ccflags-y += -I$(src)/platform/devicetree -endif - -# Tell the Linux build system from which .o file to create the kernel module -obj-$(CONFIG_MALI_MIDGARD) += mali_kbase.o - -# Tell the Linux build system to enable building of our .c files -mali_kbase-y := $(SRC:.c=.o) - -ifneq ($(wildcard $(src)/internal/Kbuild),) -ifeq ($(MALI_CUSTOMER_RELEASE),0) -# This include may set MALI_BACKEND_PATH and CONFIG_MALI_BACKEND_REAL -include $(src)/internal/Kbuild -mali_kbase-y += $(INTERNAL:.c=.o) -endif -endif - -MALI_BACKEND_PATH ?= backend -CONFIG_MALI_BACKEND ?= gpu -CONFIG_MALI_BACKEND_REAL ?= $(CONFIG_MALI_BACKEND) - -ifeq ($(MALI_MOCK_TEST),1) -ifeq ($(CONFIG_MALI_BACKEND_REAL),gpu) -# Test functionality -mali_kbase-y += tests/internal/src/mock/mali_kbase_pm_driver_mock.o -endif 
-endif - -include $(src)/$(MALI_BACKEND_PATH)/$(CONFIG_MALI_BACKEND_REAL)/Kbuild -mali_kbase-y += $(BACKEND:.c=.o) - -ccflags-y += -I$(src)/$(MALI_BACKEND_PATH)/$(CONFIG_MALI_BACKEND_REAL) -subdir-ccflags-y += -I$(src)/$(MALI_BACKEND_PATH)/$(CONFIG_MALI_BACKEND_REAL) diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/Kconfig b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/Kconfig deleted file mode 100755 index 1543043800417..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/Kconfig +++ /dev/null @@ -1,209 +0,0 @@ -# -# (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -menuconfig MALI_MIDGARD - tristate "Mali Midgard series support" - default n - help - Enable this option to build support for a ARM Mali Midgard GPU. - - To compile this driver as a module, choose M here: - this will generate a single module, called mali_kbase. - -choice - prompt "Streamline support" - depends on MALI_MIDGARD - default MALI_TIMELINE_DISABLED - help - Select streamline support configuration. - -config MALI_TIMELINE_DISABLED - bool "Streamline support disabled" - help - Disable support for ARM Streamline Performance Analyzer. - - Timeline support will not be included in - kernel code. - Debug stream will not be generated. - -config MALI_GATOR_SUPPORT - bool "Streamline support via Gator" - help - Adds diagnostic support for use with the ARM Streamline Performance Analyzer. 
- You will need the Gator device driver already loaded before loading this driver when enabling - Streamline debug support. - -config MALI_MIPE_ENABLED - bool "Streamline support via MIPE" - help - Adds diagnostic support for use with the ARM Streamline Performance Analyzer. - - Stream will be transmitted directly to Mali GPU library. - Compatible version of the library is required to read debug stream generated by kernel. - -endchoice - -config MALI_MIDGARD_DVFS - bool "Enable DVFS" - depends on MALI_MIDGARD - default n - help - Choose this option to enable DVFS in the Mali Midgard DDK. - -config MALI_MIDGARD_RT_PM - bool "Enable Runtime power management" - depends on MALI_MIDGARD - depends on PM_RUNTIME - default n - help - Choose this option to enable runtime power management in the Mali Midgard DDK. - -config MALI_MIDGARD_ENABLE_TRACE - bool "Enable kbase tracing" - depends on MALI_MIDGARD - default n - help - Enables tracing in kbase. Trace log available through - the "mali_trace" debugfs file, when the CONFIG_DEBUG_FS is enabled - -config MALI_MIDGARD_DEBUG_SYS - bool "Enable sysfs for the Mali Midgard DDK " - depends on MALI_MIDGARD && SYSFS - default n - help - Enables sysfs for the Mali Midgard DDK. Set/Monitor the Mali Midgard DDK - -config MALI_DEVFREQ - bool "devfreq support for Mali" - depends on MALI_MIDGARD && PM_DEVFREQ - help - Support devfreq for Mali. - - Using the devfreq framework and, by default, the simpleondemand - governor, the frequency of Mali will be dynamically selected from the - available OPPs. - - -# MALI_EXPERT configuration options - -menuconfig MALI_EXPERT - depends on MALI_MIDGARD - bool "Enable Expert Settings" - default n - help - Enabling this option and modifying the default settings may produce a driver with performance or - other limitations. 
- -config MALI_DEBUG_SHADER_SPLIT_FS - bool "Allow mapping of shader cores via sysfs" - depends on MALI_MIDGARD && MALI_MIDGARD_DEBUG_SYS && MALI_EXPERT - default n - help - Select this option to provide a sysfs entry for runtime configuration of shader - core affinity masks. - -config MALI_PLATFORM_FAKE - bool "Enable fake platform device support" - depends on MALI_MIDGARD && MALI_EXPERT - default n - help - When you start to work with the Mali Midgard series device driver the platform-specific code of - the Linux kernel for your platform may not be complete. In this situation the kernel device driver - supports creating the platform device outside of the Linux platform-specific code. - Enable this option if would like to use a platform device configuration from within the device driver. - -choice - prompt "Platform configuration" - depends on MALI_MIDGARD && MALI_EXPERT - default MALI_PLATFORM_VEXPRESS - help - Select the SOC platform that contains a Mali Midgard GPU - -config MALI_PLATFORM_VEXPRESS - depends on ARCH_VEXPRESS && (ARCH_VEXPRESS_CA9X4 || ARCH_VEXPRESS_CA15X4) - bool "Versatile Express" -config MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ - depends on ARCH_VEXPRESS && (ARCH_VEXPRESS_CA9X4 || ARCH_VEXPRESS_CA15X4) - bool "Versatile Express w/Virtex7 @ 40Mhz" -config MALI_PLATFORM_GOLDFISH - depends on ARCH_GOLDFISH - bool "Android Goldfish virtual CPU" -config MALI_PLATFORM_PBX - depends on ARCH_REALVIEW && REALVIEW_EB_A9MP && MACH_REALVIEW_PBX - bool "Realview PBX-A9" -config MALI_PLATFORM_THIRDPARTY - bool "Third Party Platform" -endchoice - -config MALI_PLATFORM_THIRDPARTY_NAME - depends on MALI_MIDGARD && MALI_PLATFORM_THIRDPARTY && MALI_EXPERT - string "Third party platform name" - help - Enter the name of a third party platform that is supported. The third part configuration - file must be in midgard/config/tpip/mali_kbase_config_xxx.c where xxx is the name - specified here. 
- -config MALI_DEBUG - bool "Debug build" - depends on MALI_MIDGARD && MALI_EXPERT - default n - help - Select this option for increased checking and reporting of errors. - -config MALI_NO_MALI - bool "No Mali" - depends on MALI_MIDGARD && MALI_EXPERT - default n - help - This can be used to test the driver in a simulated environment - whereby the hardware is not physically present. If the hardware is physically - present it will not be used. This can be used to test the majority of the - driver without needing actual hardware or for software benchmarking. - All calls to the simulated hardware will complete immediately as if the hardware - completed the task. - -config MALI_ERROR_INJECT - bool "Error injection" - depends on MALI_MIDGARD && MALI_EXPERT && MALI_NO_MALI - default n - help - Enables insertion of errors to test module failure and recovery mechanisms. - -config MALI_TRACE_TIMELINE - bool "Timeline tracing" - depends on MALI_MIDGARD && MALI_EXPERT - default n - help - Enables timeline tracing through the kernel tracepoint system. - -config MALI_SYSTEM_TRACE - bool "Enable system event tracing support" - depends on MALI_MIDGARD && MALI_EXPERT - default n - help - Choose this option to enable system trace events for each - kbase event. This is typically used for debugging but has - minimal overhead when not in use. Enable only if you know what - you are doing. - -config MALI_GPU_TRACEPOINTS - bool "Enable GPU tracepoints" - depends on MALI_MIDGARD && ANDROID - select GPU_TRACEPOINTS - help - Enables GPU tracepoints using Android trace event definitions. - -source "drivers/gpu/arm/midgard/platform/Kconfig" diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/Makefile b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/Makefile deleted file mode 100755 index d4d5de4cd5121..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/Makefile +++ /dev/null @@ -1,46 +0,0 @@ -# -# (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. 
-# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -KDIR ?= /lib/modules/$(shell uname -r)/build - -BUSLOG_PATH_RELATIVE = $(CURDIR)/../../../.. -UMP_PATH_RELATIVE = $(CURDIR)/../../../base/ump -KBASE_PATH_RELATIVE = $(CURDIR) -KDS_PATH_RELATIVE = $(CURDIR)/../../../.. -EXTRA_SYMBOLS = $(UMP_PATH_RELATIVE)/src/Module.symvers - -ifeq ($(MALI_UNIT_TEST), 1) - EXTRA_SYMBOLS += $(KBASE_PATH_RELATIVE)/tests/internal/src/kernel_assert_module/linux/Module.symvers -endif - -ifneq ($(wildcard $(CURDIR)/internal/Makefile.in),) -include $(CURDIR)/internal/Makefile.in -endif - -ifeq ($(MALI_BUS_LOG), 1) -#Add bus logger symbols -EXTRA_SYMBOLS += $(BUSLOG_PATH_RELATIVE)/drivers/base/bus_logger/Module.symvers -endif - -# GPL driver supports KDS -EXTRA_SYMBOLS += $(KDS_PATH_RELATIVE)/drivers/base/kds/Module.symvers - -# we get the symbols from modules using KBUILD_EXTRA_SYMBOLS to prevent warnings about unknown functions -all: - $(MAKE) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../include -I$(CURDIR)/../../../../tests/include $(SCONS_CFLAGS)" $(SCONS_CONFIGS) KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules - -clean: - $(MAKE) -C $(KDIR) M=$(CURDIR) clean diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/Makefile.kbase b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/Makefile.kbase deleted file mode 100755 index 2bef9c25eaeb8..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/Makefile.kbase +++ /dev/null @@ -1,17 +0,0 @@ -# -# (C) COPYRIGHT 2010 ARM Limited. All rights reserved. 
-# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - -EXTRA_CFLAGS += -I$(ROOT) -I$(KBASE_PATH) -I$(OSK_PATH)/src/linux/include -I$(KBASE_PATH)/platform_$(PLATFORM) - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/Kbuild deleted file mode 100755 index cadd7d5fc3c5e..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/Kbuild +++ /dev/null @@ -1,59 +0,0 @@ -# -# (C) COPYRIGHT 2014 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - -BACKEND += \ - backend/gpu/mali_kbase_cache_policy_backend.c \ - backend/gpu/mali_kbase_device_hw.c \ - backend/gpu/mali_kbase_gpu.c \ - backend/gpu/mali_kbase_gpuprops_backend.c \ - backend/gpu/mali_kbase_debug_job_fault_backend.c \ - backend/gpu/mali_kbase_irq_linux.c \ - backend/gpu/mali_kbase_instr_backend.c \ - backend/gpu/mali_kbase_jm_as.c \ - backend/gpu/mali_kbase_jm_hw.c \ - backend/gpu/mali_kbase_jm_rb.c \ - backend/gpu/mali_kbase_js_affinity.c \ - backend/gpu/mali_kbase_js_backend.c \ - backend/gpu/mali_kbase_mmu_hw_direct.c \ - backend/gpu/mali_kbase_pm_backend.c \ - backend/gpu/mali_kbase_pm_driver.c \ - backend/gpu/mali_kbase_pm_metrics.c \ - backend/gpu/mali_kbase_pm_ca.c \ - backend/gpu/mali_kbase_pm_ca_fixed.c \ - backend/gpu/mali_kbase_pm_always_on.c \ - backend/gpu/mali_kbase_pm_coarse_demand.c \ - backend/gpu/mali_kbase_pm_demand.c \ - backend/gpu/mali_kbase_pm_policy.c \ - backend/gpu/mali_kbase_time.c - -ifeq ($(MALI_CUSTOMER_RELEASE),0) -BACKEND += \ - backend/gpu/mali_kbase_pm_ca_random.c \ - backend/gpu/mali_kbase_pm_ca_demand.c \ - backend/gpu/mali_kbase_pm_demand_always_powered.c \ - backend/gpu/mali_kbase_pm_fast_start.c -endif - -ifeq ($(CONFIG_MALI_DEVFREQ),y) -BACKEND += backend/gpu/mali_kbase_devfreq.c -endif - -ifeq ($(CONFIG_MALI_NO_MALI),y) - # Dummy model - BACKEND += backend/gpu/mali_kbase_model_dummy.c - BACKEND += backend/gpu/mali_kbase_model_linux.c - # HW error simulation - BACKEND += backend/gpu/mali_kbase_model_error_generator.c -endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h deleted file mode 100755 index c8ae87eb84a28..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/* - * Backend specific configuration - */ - -#ifndef _KBASE_BACKEND_CONFIG_H_ -#define _KBASE_BACKEND_CONFIG_H_ - -/* Enable GPU reset API */ -#define KBASE_GPU_RESET_EN 1 - -#endif /* _KBASE_BACKEND_CONFIG_H_ */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c deleted file mode 100755 index 92a14fa1bae12..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c +++ /dev/null @@ -1,22 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#include "backend/gpu/mali_kbase_cache_policy_backend.h" -#include -#include - - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h deleted file mode 100755 index 42069fc88a1ff..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -#ifndef _KBASE_CACHE_POLICY_BACKEND_H_ -#define _KBASE_CACHE_POLICY_BACKEND_H_ - -#include "mali_kbase.h" -#include "mali_base_kernel.h" - - -#endif /* _KBASE_CACHE_POLICY_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c deleted file mode 100755 index 7851ea6466c78..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c +++ /dev/null @@ -1,157 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#include -#include -#include "mali_kbase_debug_job_fault.h" - -#ifdef CONFIG_DEBUG_FS - -/*GPU_CONTROL_REG(r)*/ -static int gpu_control_reg_snapshot[] = { - GPU_ID, - SHADER_READY_LO, - SHADER_READY_HI, - TILER_READY_LO, - TILER_READY_HI, - L2_READY_LO, - L2_READY_HI -}; - -/* JOB_CONTROL_REG(r) */ -static int job_control_reg_snapshot[] = { - JOB_IRQ_MASK, - JOB_IRQ_STATUS -}; - -/* JOB_SLOT_REG(n,r) */ -static int job_slot_reg_snapshot[] = { - JS_HEAD_LO, - JS_HEAD_HI, - JS_TAIL_LO, - JS_TAIL_HI, - JS_AFFINITY_LO, - JS_AFFINITY_HI, - JS_CONFIG, - JS_STATUS, - JS_HEAD_NEXT_LO, - JS_HEAD_NEXT_HI, - JS_AFFINITY_NEXT_LO, - JS_AFFINITY_NEXT_HI, - JS_CONFIG_NEXT -}; - -/*MMU_REG(r)*/ -static int mmu_reg_snapshot[] = { - MMU_IRQ_MASK, - MMU_IRQ_STATUS -}; - -/* MMU_AS_REG(n,r) */ -static int as_reg_snapshot[] = { - AS_TRANSTAB_LO, - AS_TRANSTAB_HI, - AS_MEMATTR_LO, - AS_MEMATTR_HI, - AS_FAULTSTATUS, - AS_FAULTADDRESS_LO, - AS_FAULTADDRESS_HI, - AS_STATUS -}; - -bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx, - int reg_range) -{ - int i, j; - int offset = 0; - int slot_number; - int as_number; - - if (kctx->reg_dump == NULL) - return false; - - slot_number = kctx->kbdev->gpu_props.num_job_slots; - as_number = kctx->kbdev->gpu_props.num_address_spaces; - - /* get the GPU control registers*/ - for (i = 0; i < sizeof(gpu_control_reg_snapshot)/4; i++) { - kctx->reg_dump[offset] = - GPU_CONTROL_REG(gpu_control_reg_snapshot[i]); - offset += 2; - } - - /* get the Job control registers*/ - for (i = 0; i < sizeof(job_control_reg_snapshot)/4; i++) { - kctx->reg_dump[offset] = - JOB_CONTROL_REG(job_control_reg_snapshot[i]); - offset += 2; - } - - /* get the Job Slot registers*/ - for (j = 0; j < slot_number; j++) { - for (i = 0; i < 
sizeof(job_slot_reg_snapshot)/4; i++) { - kctx->reg_dump[offset] = - JOB_SLOT_REG(j, job_slot_reg_snapshot[i]); - offset += 2; - } - } - - /* get the MMU registers*/ - for (i = 0; i < sizeof(mmu_reg_snapshot)/4; i++) { - kctx->reg_dump[offset] = MMU_REG(mmu_reg_snapshot[i]); - offset += 2; - } - - /* get the Address space registers*/ - for (j = 0; j < as_number; j++) { - for (i = 0; i < sizeof(as_reg_snapshot)/4; i++) { - kctx->reg_dump[offset] = - MMU_AS_REG(j, as_reg_snapshot[i]); - offset += 2; - } - } - - WARN_ON(offset >= (reg_range*2/4)); - - /* set the termination flag*/ - kctx->reg_dump[offset] = REGISTER_DUMP_TERMINATION_FLAG; - kctx->reg_dump[offset + 1] = REGISTER_DUMP_TERMINATION_FLAG; - - dev_dbg(kctx->kbdev->dev, "kbase_job_fault_reg_snapshot_init:%d\n", - offset); - - return true; -} - -bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx) -{ - int offset = 0; - - if (kctx->reg_dump == NULL) - return false; - - while (kctx->reg_dump[offset] != REGISTER_DUMP_TERMINATION_FLAG) { - kctx->reg_dump[offset+1] = - kbase_reg_read(kctx->kbdev, - kctx->reg_dump[offset], NULL); - offset += 2; - } - return true; -} - - -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c deleted file mode 100755 index db97637c9bc81..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c +++ /dev/null @@ -1,389 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#include -#include -#include - -#include -#include -#ifdef CONFIG_DEVFREQ_THERMAL -#include -#endif - -#include -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) -#include -#else /* Linux >= 3.13 */ -/* In 3.13 the OPP include header file, types, and functions were all - * renamed. Use the old filename for the include, and define the new names to - * the old, when an old kernel is detected. - */ -#include -#define dev_pm_opp opp -#define dev_pm_opp_get_voltage opp_get_voltage -#define dev_pm_opp_get_opp_count opp_get_opp_count -#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil -#endif /* Linux >= 3.13 */ - -#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) -static struct devfreq_simple_ondemand_data ondemand_data = { - .upthreshold = 90, - .downdifferential = 5, -}; - -#define DEV_FREQ_GOV_DATA (&ondemand_data) - -static ssize_t upthreshold_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", ondemand_data.upthreshold); -} - -static ssize_t upthreshold_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - unsigned int t; - int ret; - - ret = sscanf(buf, "%u", &t); - if (ret != 1) - return -EINVAL; - - if (t >= 100 || t == 0) { - dev_err(dev, "invalid input:%s\n", buf); - return -EINVAL; - } - ondemand_data.upthreshold = t; - return count; - -} -static DEVICE_ATTR_RW(upthreshold); - -static ssize_t downdifferential_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", ondemand_data.downdifferential); -} - -static ssize_t downdifferential_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - unsigned int t; - int ret; - - ret = sscanf(buf, "%u", &t); - if (ret != 1) - return 
-EINVAL; - - if (t >= 100 || t == 0) { - dev_err(dev, "invalid input:%s\n", buf); - return -EINVAL; - } - ondemand_data.downdifferential = t; - return count; - -} -static DEVICE_ATTR_RW(downdifferential); -#else -#define DEV_FREQ_GOV_DATA (NULL) -#endif /* CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND */ - -static ssize_t utilisation_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct kbase_device *kbdev = dev_get_drvdata(dev->parent); - unsigned long total_time = 0, busy_time = 0; - unsigned long utilise; - - kbase_pm_get_dvfs_utilisation(kbdev, &total_time, &busy_time); - - if (total_time == 0) { - utilise = 0; - } else { - /* round up */ - utilise = (busy_time * 100 + (total_time >> 1)) / total_time; - } - return sprintf(buf, "%ld\n", utilise); -} -static DEVICE_ATTR_RO(utilisation); - -static struct device_attribute *kbase_dev_attr[] = { -#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) - &dev_attr_downdifferential, - &dev_attr_upthreshold, -#endif - &dev_attr_utilisation -}; - -static int kbase_dev_add_attr(struct device *dev) -{ - int i, ret; - for (i = 0; i < ARRAY_SIZE(kbase_dev_attr); i++) { - if (!kbase_dev_attr[i]) - continue; - ret = device_create_file(dev, kbase_dev_attr[i]); - if (ret) - return ret; - } - return ret; -} - -static int -kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags) -{ - struct kbase_device *kbdev = dev_get_drvdata(dev); - struct dev_pm_opp *opp; - unsigned long freq = 0; - unsigned long voltage; - int err; - - freq = *target_freq; - - rcu_read_lock(); - opp = devfreq_recommended_opp(dev, &freq, flags); - voltage = dev_pm_opp_get_voltage(opp); - rcu_read_unlock(); - if (IS_ERR_OR_NULL(opp)) { - dev_err(dev, "Failed to get opp (%ld)\n", PTR_ERR(opp)); - return PTR_ERR(opp); - } - - /* - * Only update if there is a change of frequency - */ - if (kbdev->current_freq == freq) { - *target_freq = freq; - return 0; - } - -#ifdef CONFIG_REGULATOR - if (kbdev->regulator && 
kbdev->current_voltage != voltage - && kbdev->current_freq < freq) { - err = regulator_set_voltage(kbdev->regulator, voltage, voltage); - if (err) { - dev_err(dev, "Failed to increase voltage (%d)\n", err); - return err; - } - } -#endif - - err = clk_set_rate(kbdev->clock, freq); - if (err) { - dev_err(dev, "Failed to set clock %lu (target %lu)\n", - freq, *target_freq); - return err; - } - -#ifdef CONFIG_REGULATOR - if (kbdev->regulator && kbdev->current_voltage != voltage - && kbdev->current_freq > freq) { - err = regulator_set_voltage(kbdev->regulator, voltage, voltage); - if (err) { - dev_err(dev, "Failed to decrease voltage (%d)\n", err); - return err; - } - } -#endif - - *target_freq = freq; - kbdev->current_voltage = voltage; - kbdev->current_freq = freq; - - kbase_pm_reset_dvfs_utilisation(kbdev); - - return err; -} - -static int -kbase_devfreq_cur_freq(struct device *dev, unsigned long *freq) -{ - struct kbase_device *kbdev = dev_get_drvdata(dev); - - *freq = kbdev->current_freq; - - return 0; -} - -static int -kbase_devfreq_status(struct device *dev, struct devfreq_dev_status *stat) -{ - struct kbase_device *kbdev = dev_get_drvdata(dev); - - stat->current_frequency = kbdev->current_freq; - - kbase_pm_get_dvfs_utilisation(kbdev, - &stat->total_time, &stat->busy_time); - - stat->private_data = NULL; - -#ifdef CONFIG_DEVFREQ_THERMAL - memcpy(&kbdev->devfreq_cooling->last_status, stat, sizeof(*stat)); -#endif - - return 0; -} - -static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev, - struct devfreq_dev_profile *dp) -{ - int count; - int i = 0; - unsigned long freq = 0; - struct dev_pm_opp *opp; - - rcu_read_lock(); - count = dev_pm_opp_get_opp_count(kbdev->dev); - if (count < 0) { - rcu_read_unlock(); - return count; - } - rcu_read_unlock(); - - dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]), - GFP_KERNEL); - if (!dp->freq_table) - return -ENOMEM; - - rcu_read_lock(); - for (i = 0; i < count; i++, freq++) { - opp = 
dev_pm_opp_find_freq_ceil(kbdev->dev, &freq); - if (IS_ERR(opp)) - break; - - dp->freq_table[i] = freq; - } - rcu_read_unlock(); - - if (count != i) - dev_warn(kbdev->dev, "Unable to enumerate all OPPs (%d!=%d\n", - count, i); - - dp->max_state = i; - - return 0; -} - -static void kbase_devfreq_term_freq_table(struct kbase_device *kbdev) -{ - struct devfreq_dev_profile *dp = kbdev->devfreq->profile; - - kfree(dp->freq_table); -} - -static void kbase_devfreq_exit(struct device *dev) -{ - struct kbase_device *kbdev = dev_get_drvdata(dev); - - kbase_devfreq_term_freq_table(kbdev); -} - -int kbase_devfreq_init(struct kbase_device *kbdev) -{ -#ifdef CONFIG_DEVFREQ_THERMAL - struct devfreq_cooling_ops *callbacks = POWER_MODEL_CALLBACKS; -#endif - struct devfreq_dev_profile *dp; - int err; - - dev_dbg(kbdev->dev, "Init Mali devfreq\n"); - - if (!kbdev->clock) - return -ENODEV; - - kbdev->current_freq = clk_get_rate(kbdev->clock); - - dp = &kbdev->devfreq_profile; - - dp->initial_freq = kbdev->current_freq; - dp->polling_ms = 100; - dp->target = kbase_devfreq_target; - dp->get_dev_status = kbase_devfreq_status; - dp->get_cur_freq = kbase_devfreq_cur_freq; - dp->exit = kbase_devfreq_exit; - - if (kbase_devfreq_init_freq_table(kbdev, dp)) - return -EFAULT; - - kbdev->devfreq = devfreq_add_device(kbdev->dev, dp, - "simple_ondemand", DEV_FREQ_GOV_DATA); - if (IS_ERR(kbdev->devfreq)) { - kbase_devfreq_term_freq_table(kbdev); - return PTR_ERR(kbdev->devfreq); - } - - kbase_dev_add_attr(&kbdev->devfreq->dev); - err = devfreq_register_opp_notifier(kbdev->dev, kbdev->devfreq); - if (err) { - dev_err(kbdev->dev, - "Failed to register OPP notifier (%d)\n", err); - goto opp_notifier_failed; - } - -#ifdef CONFIG_DEVFREQ_THERMAL - if (callbacks) { - - kbdev->devfreq_cooling = of_devfreq_cooling_register_power( - kbdev->dev->of_node, - kbdev->devfreq, - callbacks); - if (IS_ERR_OR_NULL(kbdev->devfreq_cooling)) { - err = PTR_ERR(kbdev->devfreq_cooling); - dev_err(kbdev->dev, - "Failed to 
register cooling device (%d)\n", - err); - goto cooling_failed; - } - } -#endif - - return 0; - -#ifdef CONFIG_DEVFREQ_THERMAL -cooling_failed: - devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq); -#endif /* CONFIG_DEVFREQ_THERMAL */ -opp_notifier_failed: - err = devfreq_remove_device(kbdev->devfreq); - if (err) - dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err); - else - kbdev->devfreq = NULL; - - return err; -} - -void kbase_devfreq_term(struct kbase_device *kbdev) -{ - int err; - - dev_dbg(kbdev->dev, "Term Mali devfreq\n"); - -#ifdef CONFIG_DEVFREQ_THERMAL - devfreq_cooling_unregister(kbdev->devfreq_cooling); -#endif - - devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq); - - err = devfreq_remove_device(kbdev->devfreq); - if (err) - dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err); - else - kbdev->devfreq = NULL; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h deleted file mode 100755 index c0bf8b15b3bcd..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#ifndef _BASE_DEVFREQ_H_ -#define _BASE_DEVFREQ_H_ - -int kbase_devfreq_init(struct kbase_device *kbdev); -void kbase_devfreq_term(struct kbase_device *kbdev); - -#endif /* _BASE_DEVFREQ_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c deleted file mode 100755 index 83d5ec9f7a93e..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c +++ /dev/null @@ -1,116 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - -/* - * - */ -#include -#include -#include - -#include - -#if !defined(CONFIG_MALI_NO_MALI) -void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value, - struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered); - KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID); - KBASE_DEBUG_ASSERT(kbdev->dev != NULL); - dev_dbg(kbdev->dev, "w: reg %04x val %08x", offset, value); - writel(value, kbdev->reg + offset); - if (kctx && kctx->jctx.tb) - kbase_device_trace_register_access(kctx, REG_WRITE, offset, - value); -} - -KBASE_EXPORT_TEST_API(kbase_reg_write); - -u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset, - struct kbase_context *kctx) -{ - u32 val; - KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered); - KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID); - KBASE_DEBUG_ASSERT(kbdev->dev != NULL); - val = readl(kbdev->reg + offset); - dev_dbg(kbdev->dev, "r: reg %04x val %08x", offset, val); - if (kctx && kctx->jctx.tb) - kbase_device_trace_register_access(kctx, REG_READ, offset, val); - return val; -} - -KBASE_EXPORT_TEST_API(kbase_reg_read); -#endif /* !defined(CONFIG_MALI_NO_MALI) */ - -/** - * kbase_report_gpu_fault - Report a GPU fault. - * @kbdev: Kbase device pointer - * @multiple: Zero if only GPU_FAULT was raised, non-zero if MULTIPLE_GPU_FAULTS - * was also set - * - * This function is called from the interrupt handler when a GPU fault occurs. - * It reports the details of the fault using dev_warn(). 
- */ -static void kbase_report_gpu_fault(struct kbase_device *kbdev, int multiple) -{ - u32 status; - u64 address; - - status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL); - address = (u64) kbase_reg_read(kbdev, - GPU_CONTROL_REG(GPU_FAULTADDRESS_HI), NULL) << 32; - address |= kbase_reg_read(kbdev, - GPU_CONTROL_REG(GPU_FAULTADDRESS_LO), NULL); - - dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx", - status & 0xFF, - kbase_exception_name(kbdev, status), - address); - if (multiple) - dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n"); -} - -void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val) -{ - KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, NULL, 0u, val); - if (val & GPU_FAULT) - kbase_report_gpu_fault(kbdev, val & MULTIPLE_GPU_FAULTS); - - if (val & RESET_COMPLETED) - kbase_pm_reset_done(kbdev); - - if (val & PRFCNT_SAMPLE_COMPLETED) - kbase_instr_hwcnt_sample_done(kbdev); - - if (val & CLEAN_CACHES_COMPLETED) - kbase_clean_caches_done(kbdev); - - KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, val); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val, NULL); - - /* kbase_pm_check_transitions must be called after the IRQ has been - * cleared. This is because it might trigger further power transitions - * and we don't want to miss the interrupt raised to notify us that - * these further transitions have finished. 
- */ - if (val & POWER_CHANGED_ALL) - kbase_pm_power_changed(kbdev); - - KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, NULL, 0u, val); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h deleted file mode 100755 index 5b20445932fb6..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Backend-specific HW access device APIs - */ - -#ifndef _KBASE_DEVICE_INTERNAL_H_ -#define _KBASE_DEVICE_INTERNAL_H_ - -/** - * kbase_reg_write - write to GPU register - * @kbdev: Kbase device pointer - * @offset: Offset of register - * @value: Value to write - * @kctx: Kbase context pointer. May be NULL - * - * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false). If - * @kctx is not NULL then the caller must ensure it is scheduled (@kctx->as_nr - * != KBASEP_AS_NR_INVALID). - */ -void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value, - struct kbase_context *kctx); - -/** - * kbase_reg_read - read from GPU register - * @kbdev: Kbase device pointer - * @offset: Offset of register - * @kctx: Kbase context pointer. May be NULL - * - * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false). 
If - * @kctx is not NULL then the caller must ensure it is scheduled (@kctx->as_nr - * != KBASEP_AS_NR_INVALID). - * - * Return: Value in desired register - */ -u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset, - struct kbase_context *kctx); - - -/** - * kbase_gpu_interrupt - GPU interrupt handler - * @kbdev: Kbase device pointer - * @val: The value of the GPU IRQ status register which triggered the call - * - * This function is called from the interrupt handler when a GPU irq is to be - * handled. - */ -void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val); - -#endif /* _KBASE_DEVICE_INTERNAL_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c deleted file mode 100755 index 72a98d0f79525..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c +++ /dev/null @@ -1,124 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - -/* - * Register-based HW access backend APIs - */ -#include -#include -#include -#include -#include -#include -#include - -int kbase_backend_early_init(struct kbase_device *kbdev) -{ - int err; - - err = kbasep_platform_device_init(kbdev); - if (err) - return err; - - /* Ensure we can access the GPU registers */ - kbase_pm_register_access_enable(kbdev); - - /* Find out GPU properties based on the GPU feature registers */ - kbase_gpuprops_set(kbdev); - - /* We're done accessing the GPU registers for now. */ - kbase_pm_register_access_disable(kbdev); - - err = kbase_hwaccess_pm_init(kbdev); - if (err) - goto fail_pm; - - err = kbase_install_interrupts(kbdev); - if (err) - goto fail_interrupts; - - return 0; - -fail_interrupts: - kbase_hwaccess_pm_term(kbdev); -fail_pm: - kbasep_platform_device_term(kbdev); - - return err; -} - -void kbase_backend_early_term(struct kbase_device *kbdev) -{ - kbase_release_interrupts(kbdev); - kbase_hwaccess_pm_term(kbdev); - kbasep_platform_device_term(kbdev); -} - -int kbase_backend_late_init(struct kbase_device *kbdev) -{ - int err; - - err = kbase_hwaccess_pm_powerup(kbdev, PM_HW_ISSUES_DETECT); - if (err) - return err; - - err = kbase_backend_timer_init(kbdev); - if (err) - goto fail_timer; - -/* Currently disabled on the prototype */ -#ifdef CONFIG_MALI_DEBUG -#ifndef CONFIG_MALI_NO_MALI - if (kbasep_common_test_interrupt_handlers(kbdev) != 0) { - dev_err(kbdev->dev, "Interrupt assigment check failed.\n"); - err = -EINVAL; - goto fail_interrupt_test; - } -#endif /* !CONFIG_MALI_NO_MALI */ -#endif /* CONFIG_MALI_DEBUG */ - - err = kbase_job_slot_init(kbdev); - if (err) - goto fail_job_slot; - - init_waitqueue_head(&kbdev->hwaccess.backend.reset_wait); - - return 0; - -fail_job_slot: -/* Currently disabled on the prototype */ -#ifdef CONFIG_MALI_DEBUG -#ifndef CONFIG_MALI_NO_MALI -fail_interrupt_test: -#endif /* !CONFIG_MALI_NO_MALI */ -#endif /* CONFIG_MALI_DEBUG */ - kbase_backend_timer_term(kbdev); -fail_timer: 
- kbase_hwaccess_pm_halt(kbdev); - - return err; -} - -void kbase_backend_late_term(struct kbase_device *kbdev) -{ - kbase_job_slot_halt(kbdev); - kbase_job_slot_term(kbdev); - kbase_backend_timer_term(kbdev); - kbase_hwaccess_pm_halt(kbdev); -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c deleted file mode 100755 index 591c013c53491..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c +++ /dev/null @@ -1,85 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/* - * Base kernel property query backend APIs - */ - -#include -#include -#include -#include - -void kbase_backend_gpuprops_get(struct kbase_device *kbdev, - struct kbase_gpuprops_regdump *regdump) -{ - int i; - - /* Fill regdump with the content of the relevant registers */ - regdump->gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID), NULL); - - regdump->l2_features = kbase_reg_read(kbdev, - GPU_CONTROL_REG(L2_FEATURES), NULL); - regdump->suspend_size = kbase_reg_read(kbdev, - GPU_CONTROL_REG(SUSPEND_SIZE), NULL); - regdump->tiler_features = kbase_reg_read(kbdev, - GPU_CONTROL_REG(TILER_FEATURES), NULL); - regdump->mem_features = kbase_reg_read(kbdev, - GPU_CONTROL_REG(MEM_FEATURES), NULL); - regdump->mmu_features = kbase_reg_read(kbdev, - GPU_CONTROL_REG(MMU_FEATURES), NULL); - regdump->as_present = kbase_reg_read(kbdev, - GPU_CONTROL_REG(AS_PRESENT), NULL); - regdump->js_present = kbase_reg_read(kbdev, - GPU_CONTROL_REG(JS_PRESENT), NULL); - - for (i = 0; i < GPU_MAX_JOB_SLOTS; i++) - regdump->js_features[i] = kbase_reg_read(kbdev, - GPU_CONTROL_REG(JS_FEATURES_REG(i)), NULL); - - for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++) - regdump->texture_features[i] = kbase_reg_read(kbdev, - GPU_CONTROL_REG(TEXTURE_FEATURES_REG(i)), NULL); - - regdump->thread_max_threads = kbase_reg_read(kbdev, - GPU_CONTROL_REG(THREAD_MAX_THREADS), NULL); - regdump->thread_max_workgroup_size = kbase_reg_read(kbdev, - GPU_CONTROL_REG(THREAD_MAX_WORKGROUP_SIZE), - NULL); - regdump->thread_max_barrier_size = kbase_reg_read(kbdev, - GPU_CONTROL_REG(THREAD_MAX_BARRIER_SIZE), NULL); - regdump->thread_features = kbase_reg_read(kbdev, - GPU_CONTROL_REG(THREAD_FEATURES), NULL); - - regdump->shader_present_lo = kbase_reg_read(kbdev, - GPU_CONTROL_REG(SHADER_PRESENT_LO), NULL); - regdump->shader_present_hi = kbase_reg_read(kbdev, - GPU_CONTROL_REG(SHADER_PRESENT_HI), NULL); - - regdump->tiler_present_lo = kbase_reg_read(kbdev, - 
GPU_CONTROL_REG(TILER_PRESENT_LO), NULL); - regdump->tiler_present_hi = kbase_reg_read(kbdev, - GPU_CONTROL_REG(TILER_PRESENT_HI), NULL); - - regdump->l2_present_lo = kbase_reg_read(kbdev, - GPU_CONTROL_REG(L2_PRESENT_LO), NULL); - regdump->l2_present_hi = kbase_reg_read(kbdev, - GPU_CONTROL_REG(L2_PRESENT_HI), NULL); -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c deleted file mode 100755 index 2c987071a77ca..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c +++ /dev/null @@ -1,536 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * GPU backend instrumentation APIs. 
- */ - -#include -#include -#include -#include -#include - -/** - * kbasep_instr_hwcnt_cacheclean - Issue Cache Clean & Invalidate command to - * hardware - * - * @kbdev: Kbase device - */ -static void kbasep_instr_hwcnt_cacheclean(struct kbase_device *kbdev) -{ - unsigned long flags; - unsigned long pm_flags; - u32 irq_mask; - - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - /* Wait for any reset to complete */ - while (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) { - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - wait_event(kbdev->hwcnt.backend.cache_clean_wait, - kbdev->hwcnt.backend.state != - KBASE_INSTR_STATE_RESETTING); - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - } - KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state == - KBASE_INSTR_STATE_REQUEST_CLEAN); - - /* Enable interrupt */ - spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags); - irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), - irq_mask | CLEAN_CACHES_COMPLETED, NULL); - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags); - - /* clean&invalidate the caches so we're sure the mmu tables for the dump - * buffer is valid */ - KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), - GPU_COMMAND_CLEAN_INV_CACHES, NULL); - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANING; - - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); -} - -int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev, - struct kbase_context *kctx, - struct kbase_uk_hwcnt_setup *setup) -{ - unsigned long flags, pm_flags; - int err = -EINVAL; - struct kbasep_js_device_data *js_devdata; - u32 irq_mask; - int ret; - u64 shader_cores_needed; - - KBASE_DEBUG_ASSERT(NULL == kbdev->hwcnt.suspended_kctx); - - shader_cores_needed = kbase_pm_get_present_cores(kbdev, - KBASE_PM_CORE_SHADER); - - js_devdata = &kbdev->js_data; - - /* alignment 
failure */ - if ((setup->dump_buffer == 0ULL) || (setup->dump_buffer & (2048 - 1))) - goto out_err; - - /* Override core availability policy to ensure all cores are available - */ - kbase_pm_ca_instr_enable(kbdev); - - /* Request the cores early on synchronously - we'll release them on any - * errors (e.g. instrumentation already active) */ - kbase_pm_request_cores_sync(kbdev, true, shader_cores_needed); - - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - - if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) { - /* GPU is being reset */ - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - wait_event(kbdev->hwcnt.backend.wait, - kbdev->hwcnt.backend.triggered != 0); - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - } - - if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) { - /* Instrumentation is already enabled */ - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - goto out_unrequest_cores; - } - - /* Enable interrupt */ - spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags); - irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask | - PRFCNT_SAMPLE_COMPLETED, NULL); - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags); - - /* In use, this context is the owner */ - kbdev->hwcnt.kctx = kctx; - /* Remember the dump address so we can reprogram it later */ - kbdev->hwcnt.addr = setup->dump_buffer; - /* Remember all the settings for suspend/resume */ - if (&kbdev->hwcnt.suspended_state != setup) - memcpy(&kbdev->hwcnt.suspended_state, setup, - sizeof(kbdev->hwcnt.suspended_state)); - - /* Request the clean */ - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN; - kbdev->hwcnt.backend.triggered = 0; - /* Clean&invalidate the caches so we're sure the mmu tables for the dump - * buffer is valid */ - ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq, - &kbdev->hwcnt.backend.cache_clean_work); - KBASE_DEBUG_ASSERT(ret); - - 
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - - /* Wait for cacheclean to complete */ - wait_event(kbdev->hwcnt.backend.wait, - kbdev->hwcnt.backend.triggered != 0); - - KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state == - KBASE_INSTR_STATE_IDLE); - - kbase_pm_request_l2_caches(kbdev); - - /* Configure */ - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), - (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) - | PRFCNT_CONFIG_MODE_OFF, kctx); - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), - setup->dump_buffer & 0xFFFFFFFF, kctx); - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), - setup->dump_buffer >> 32, kctx); - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN), - setup->jm_bm, kctx); - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN), - setup->shader_bm, kctx); - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN), - setup->mmu_l2_bm, kctx); - /* Due to PRLAM-8186 we need to disable the Tiler before we enable the - * HW counter dump. */ - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186)) - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0, - kctx); - else - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), - setup->tiler_bm, kctx); - - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), - (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | - PRFCNT_CONFIG_MODE_MANUAL, kctx); - - /* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump - */ - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186)) - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), - setup->tiler_bm, kctx); - - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - - if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) { - /* GPU is being reset */ - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - wait_event(kbdev->hwcnt.backend.wait, - kbdev->hwcnt.backend.triggered != 0); - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - } - - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE; - kbdev->hwcnt.backend.triggered = 1; - 
wake_up(&kbdev->hwcnt.backend.wait); - - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - - err = 0; - - dev_dbg(kbdev->dev, "HW counters dumping set-up for context %p", kctx); - return err; - out_unrequest_cores: - kbase_pm_unrequest_cores(kbdev, true, shader_cores_needed); - out_err: - return err; -} - -int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx) -{ - unsigned long flags, pm_flags; - int err = -EINVAL; - u32 irq_mask; - struct kbase_device *kbdev = kctx->kbdev; - - while (1) { - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - - if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DISABLED) { - /* Instrumentation is not enabled */ - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - goto out; - } - - if (kbdev->hwcnt.kctx != kctx) { - /* Instrumentation has been setup for another context */ - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - goto out; - } - - if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE) - break; - - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - - /* Ongoing dump/setup - wait for its completion */ - wait_event(kbdev->hwcnt.backend.wait, - kbdev->hwcnt.backend.triggered != 0); - } - - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED; - kbdev->hwcnt.backend.triggered = 0; - - /* Disable interrupt */ - spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags); - irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), - irq_mask & ~PRFCNT_SAMPLE_COMPLETED, NULL); - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags); - - /* Disable the counters */ - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0, kctx); - - kbdev->hwcnt.kctx = NULL; - kbdev->hwcnt.addr = 0ULL; - - kbase_pm_ca_instr_disable(kbdev); - - kbase_pm_unrequest_cores(kbdev, true, - kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER)); - - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - - kbase_pm_release_l2_caches(kbdev); - - 
dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p", - kctx); - - err = 0; - - out: - return err; -} - -int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx) -{ - unsigned long flags; - int err = -EINVAL; - struct kbase_device *kbdev = kctx->kbdev; - - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - - if (kbdev->hwcnt.kctx != kctx) { - /* The instrumentation has been setup for another context */ - goto unlock; - } - - if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_IDLE) { - /* HW counters are disabled or another dump is ongoing, or we're - * resetting */ - goto unlock; - } - - kbdev->hwcnt.backend.triggered = 0; - - /* Mark that we're dumping - the PF handler can signal that we faulted - */ - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DUMPING; - - /* Reconfigure the dump address */ - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), - kbdev->hwcnt.addr & 0xFFFFFFFF, NULL); - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), - kbdev->hwcnt.addr >> 32, NULL); - - /* Start dumping */ - KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_SAMPLE, NULL, NULL, - kbdev->hwcnt.addr, 0); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), - GPU_COMMAND_PRFCNT_SAMPLE, kctx); - - dev_dbg(kbdev->dev, "HW counters dumping done for context %p", kctx); - - err = 0; - - unlock: - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - return err; -} -KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_request_dump); - -bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx, - bool * const success) -{ - unsigned long flags; - bool complete = false; - struct kbase_device *kbdev = kctx->kbdev; - - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - - if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE) { - *success = true; - complete = true; - } else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) { - *success = false; - complete = true; - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE; - } - - spin_unlock_irqrestore(&kbdev->hwcnt.lock, 
flags); - - return complete; -} -KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump_complete); - -void kbasep_cache_clean_worker(struct work_struct *data) -{ - struct kbase_device *kbdev; - unsigned long flags; - - kbdev = container_of(data, struct kbase_device, - hwcnt.backend.cache_clean_work); - - mutex_lock(&kbdev->cacheclean_lock); - kbasep_instr_hwcnt_cacheclean(kbdev); - - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - /* Wait for our condition, and any reset to complete */ - while (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING || - kbdev->hwcnt.backend.state == - KBASE_INSTR_STATE_CLEANING) { - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - wait_event(kbdev->hwcnt.backend.cache_clean_wait, - (kbdev->hwcnt.backend.state != - KBASE_INSTR_STATE_RESETTING && - kbdev->hwcnt.backend.state != - KBASE_INSTR_STATE_CLEANING)); - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - } - KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state == - KBASE_INSTR_STATE_CLEANED); - - /* All finished and idle */ - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE; - kbdev->hwcnt.backend.triggered = 1; - wake_up(&kbdev->hwcnt.backend.wait); - - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - mutex_unlock(&kbdev->cacheclean_lock); -} - -void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev) -{ - unsigned long flags; - - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - - if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) { - kbdev->hwcnt.backend.triggered = 1; - wake_up(&kbdev->hwcnt.backend.wait); - } else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING) { - int ret; - /* Always clean and invalidate the cache after a successful dump - */ - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN; - ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq, - &kbdev->hwcnt.backend.cache_clean_work); - KBASE_DEBUG_ASSERT(ret); - } - /* NOTE: In the state KBASE_INSTR_STATE_RESETTING, We're in a reset, - * and the instrumentation state hasn't been 
restored yet - - * kbasep_reset_timeout_worker() will do the rest of the work */ - - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); -} - -void kbase_clean_caches_done(struct kbase_device *kbdev) -{ - u32 irq_mask; - - if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) { - unsigned long flags; - unsigned long pm_flags; - - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - /* Disable interrupt */ - spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags); - irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), - NULL); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), - irq_mask & ~CLEAN_CACHES_COMPLETED, NULL); - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags); - - /* Wakeup... */ - if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_CLEANING) { - /* Only wake if we weren't resetting */ - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANED; - wake_up(&kbdev->hwcnt.backend.cache_clean_wait); - } - /* NOTE: In the state KBASE_INSTR_STATE_RESETTING, We're in a - * reset, and the instrumentation state hasn't been restored yet - * - kbasep_reset_timeout_worker() will do the rest of the work - */ - - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - } -} - -int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx) -{ - struct kbase_device *kbdev = kctx->kbdev; - unsigned long flags; - int err; - - /* Wait for dump & cacheclean to complete */ - wait_event(kbdev->hwcnt.backend.wait, - kbdev->hwcnt.backend.triggered != 0); - - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - - if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) { - /* GPU is being reset */ - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - wait_event(kbdev->hwcnt.backend.wait, - kbdev->hwcnt.backend.triggered != 0); - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - } - - if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) { - err = -EINVAL; - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE; - } else { - /* Dump done */ - 
KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state == - KBASE_INSTR_STATE_IDLE); - err = 0; - } - - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - - return err; -} - -int kbase_instr_hwcnt_clear(struct kbase_context *kctx) -{ - unsigned long flags; - int err = -EINVAL; - struct kbase_device *kbdev = kctx->kbdev; - - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - - if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) { - /* GPU is being reset */ - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - wait_event(kbdev->hwcnt.backend.wait, - kbdev->hwcnt.backend.triggered != 0); - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - } - - /* Check it's the context previously set up and we're not already - * dumping */ - if (kbdev->hwcnt.kctx != kctx || kbdev->hwcnt.backend.state != - KBASE_INSTR_STATE_IDLE) - goto out; - - /* Clear the counters */ - KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_CLEAR, NULL, NULL, 0u, 0); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), - GPU_COMMAND_PRFCNT_CLEAR, kctx); - - err = 0; - -out: - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - return err; -} -KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_clear); - -int kbase_instr_backend_init(struct kbase_device *kbdev) -{ - int ret = 0; - - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED; - - init_waitqueue_head(&kbdev->hwcnt.backend.wait); - init_waitqueue_head(&kbdev->hwcnt.backend.cache_clean_wait); - INIT_WORK(&kbdev->hwcnt.backend.cache_clean_work, - kbasep_cache_clean_worker); - kbdev->hwcnt.backend.triggered = 0; - - kbdev->hwcnt.backend.cache_clean_wq = - alloc_workqueue("Mali cache cleaning workqueue", 0, 1); - if (NULL == kbdev->hwcnt.backend.cache_clean_wq) - ret = -EINVAL; - - return ret; -} - -void kbase_instr_backend_term(struct kbase_device *kbdev) -{ - destroy_workqueue(kbdev->hwcnt.backend.cache_clean_wq); -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h 
b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h deleted file mode 100755 index 23bd80a5a1507..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/* - * Backend-specific instrumentation definitions - */ - -#ifndef _KBASE_INSTR_DEFS_H_ -#define _KBASE_INSTR_DEFS_H_ - -/* - * Instrumentation State Machine States - */ -enum kbase_instr_state { - /* State where instrumentation is not active */ - KBASE_INSTR_STATE_DISABLED = 0, - /* State machine is active and ready for a command. */ - KBASE_INSTR_STATE_IDLE, - /* Hardware is currently dumping a frame. */ - KBASE_INSTR_STATE_DUMPING, - /* We've requested a clean to occur on a workqueue */ - KBASE_INSTR_STATE_REQUEST_CLEAN, - /* Hardware is currently cleaning and invalidating caches. */ - KBASE_INSTR_STATE_CLEANING, - /* Cache clean completed, and either a) a dump is complete, or - * b) instrumentation can now be setup. */ - KBASE_INSTR_STATE_CLEANED, - /* kbasep_reset_timeout_worker() has started (but not compelted) a - * reset. This generally indicates the current action should be aborted, - * and kbasep_reset_timeout_worker() will handle the cleanup */ - KBASE_INSTR_STATE_RESETTING, - /* An error has occured during DUMPING (page fault). 
*/ - KBASE_INSTR_STATE_FAULT -}; - -/* Structure used for instrumentation and HW counters dumping */ -struct kbase_instr_backend { - wait_queue_head_t wait; - int triggered; - - enum kbase_instr_state state; - wait_queue_head_t cache_clean_wait; - struct workqueue_struct *cache_clean_wq; - struct work_struct cache_clean_work; -}; - -#endif /* _KBASE_INSTR_DEFS_H_ */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h deleted file mode 100755 index e96aeae786e1e..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/* - * Backend-specific HW access instrumentation APIs - */ - -#ifndef _KBASE_INSTR_INTERNAL_H_ -#define _KBASE_INSTR_INTERNAL_H_ - -/** - * kbasep_cache_clean_worker() - Workqueue for handling cache cleaning - * @data: a &struct work_struct - */ -void kbasep_cache_clean_worker(struct work_struct *data); - -/** - * kbase_clean_caches_done() - Cache clean interrupt received - * @kbdev: Kbase device - */ -void kbase_clean_caches_done(struct kbase_device *kbdev); - -/** - * kbase_instr_hwcnt_sample_done() - Dump complete interrupt received - * @kbdev: Kbase device - */ -void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev); - -#endif /* _KBASE_INSTR_INTERNAL_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h deleted file mode 100755 index 8781561e73d00..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/* - * Backend specific IRQ APIs - */ - -#ifndef _KBASE_IRQ_INTERNAL_H_ -#define _KBASE_IRQ_INTERNAL_H_ - -int kbase_install_interrupts(struct kbase_device *kbdev); - -void kbase_release_interrupts(struct kbase_device *kbdev); - -/** - * kbase_synchronize_irqs - Ensure that all IRQ handlers have completed - * execution - * @kbdev: The kbase device - */ -void kbase_synchronize_irqs(struct kbase_device *kbdev); - -int kbasep_common_test_interrupt_handlers( - struct kbase_device * const kbdev); - -#endif /* _KBASE_IRQ_INTERNAL_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c deleted file mode 100755 index 49c72f90aac6f..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c +++ /dev/null @@ -1,471 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#include -#include -#include - -#include - -#if !defined(CONFIG_MALI_NO_MALI) - -/* GPU IRQ Tags */ -#define JOB_IRQ_TAG 0 -#define MMU_IRQ_TAG 1 -#define GPU_IRQ_TAG 2 - - -static void *kbase_tag(void *ptr, u32 tag) -{ - return (void *)(((uintptr_t) ptr) | tag); -} - -static void *kbase_untag(void *ptr) -{ - return (void *)(((uintptr_t) ptr) & ~3); -} - - - - -static irqreturn_t kbase_job_irq_handler(int irq, void *data) -{ - unsigned long flags; - struct kbase_device *kbdev = kbase_untag(data); - u32 val; - - spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags); - - if (!kbdev->pm.backend.gpu_powered) { - /* GPU is turned off - IRQ is not for us */ - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, - flags); - return IRQ_NONE; - } - - val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL); - -#ifdef CONFIG_MALI_DEBUG - if (!kbdev->pm.backend.driver_ready_for_irqs) - dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n", - __func__, irq, val); -#endif /* CONFIG_MALI_DEBUG */ - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags); - - if (!val) - return IRQ_NONE; - - dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val); - - kbase_job_done(kbdev, val); - - return IRQ_HANDLED; -} - -KBASE_EXPORT_TEST_API(kbase_job_irq_handler); - -static irqreturn_t kbase_mmu_irq_handler(int irq, void *data) -{ - unsigned long flags; - struct kbase_device *kbdev = kbase_untag(data); - u32 val; - - spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags); - - if (!kbdev->pm.backend.gpu_powered) { - /* GPU is turned off - IRQ is not for us */ - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, - flags); - return IRQ_NONE; - } - - atomic_inc(&kbdev->faults_pending); - - val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL); - -#ifdef CONFIG_MALI_DEBUG - if (!kbdev->pm.backend.driver_ready_for_irqs) - dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is 
ready\n", - __func__, irq, val); -#endif /* CONFIG_MALI_DEBUG */ - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags); - - if (!val) { - atomic_dec(&kbdev->faults_pending); - return IRQ_NONE; - } - - dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val); - - kbase_mmu_interrupt(kbdev, val); - - atomic_dec(&kbdev->faults_pending); - - return IRQ_HANDLED; -} - -static irqreturn_t kbase_gpu_irq_handler(int irq, void *data) -{ - unsigned long flags; - struct kbase_device *kbdev = kbase_untag(data); - u32 val; - - spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags); - - if (!kbdev->pm.backend.gpu_powered) { - /* GPU is turned off - IRQ is not for us */ - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, - flags); - return IRQ_NONE; - } - - val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS), NULL); - -#ifdef CONFIG_MALI_DEBUG - if (!kbdev->pm.backend.driver_ready_for_irqs) - dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n", - __func__, irq, val); -#endif /* CONFIG_MALI_DEBUG */ - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags); - - if (!val) - return IRQ_NONE; - - dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val); - - kbase_gpu_interrupt(kbdev, val); - - return IRQ_HANDLED; -} -static irq_handler_t kbase_handler_table[] = { - [JOB_IRQ_TAG] = kbase_job_irq_handler, - [MMU_IRQ_TAG] = kbase_mmu_irq_handler, - [GPU_IRQ_TAG] = kbase_gpu_irq_handler, -}; - - -#ifdef CONFIG_MALI_DEBUG -#define JOB_IRQ_HANDLER JOB_IRQ_TAG -#define MMU_IRQ_HANDLER MMU_IRQ_TAG -#define GPU_IRQ_HANDLER GPU_IRQ_TAG - -/** - * kbase_set_custom_irq_handler - Set a custom IRQ handler - * @kbdev: Device for which the handler is to be registered - * @custom_handler: Handler to be registered - * @irq_type: Interrupt type - * - * Registers given interrupt handler for requested interrupt type - * In the case where irq handler is not specified, the default handler shall be - * 
registered - * - * Return: 0 case success, error code otherwise - */ -int kbase_set_custom_irq_handler(struct kbase_device *kbdev, - irq_handler_t custom_handler, - int irq_type) -{ - int result = 0; - irq_handler_t requested_irq_handler = NULL; - - KBASE_DEBUG_ASSERT((JOB_IRQ_HANDLER <= irq_type) && - (GPU_IRQ_HANDLER >= irq_type)); - - /* Release previous handler */ - if (kbdev->irqs[irq_type].irq) - free_irq(kbdev->irqs[irq_type].irq, kbase_tag(kbdev, irq_type)); - - requested_irq_handler = (NULL != custom_handler) ? custom_handler : - kbase_handler_table[irq_type]; - - if (0 != request_irq(kbdev->irqs[irq_type].irq, - requested_irq_handler, - kbdev->irqs[irq_type].flags | IRQF_SHARED, - dev_name(kbdev->dev), kbase_tag(kbdev, irq_type))) { - result = -EINVAL; - dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n", - kbdev->irqs[irq_type].irq, irq_type); -#ifdef CONFIG_SPARSE_IRQ - dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n"); -#endif /* CONFIG_SPARSE_IRQ */ - } - - return result; -} - -KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler); - -/* test correct interrupt assigment and reception by cpu */ -struct kbasep_irq_test { - struct hrtimer timer; - wait_queue_head_t wait; - int triggered; - u32 timeout; -}; - -static struct kbasep_irq_test kbasep_irq_test_data; - -#define IRQ_TEST_TIMEOUT 500 - -static irqreturn_t kbase_job_irq_test_handler(int irq, void *data) -{ - unsigned long flags; - struct kbase_device *kbdev = kbase_untag(data); - u32 val; - - spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags); - - if (!kbdev->pm.backend.gpu_powered) { - /* GPU is turned off - IRQ is not for us */ - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, - flags); - return IRQ_NONE; - } - - val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL); - - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags); - - if (!val) - return IRQ_NONE; - - 
dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val); - - kbasep_irq_test_data.triggered = 1; - wake_up(&kbasep_irq_test_data.wait); - - kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val, NULL); - - return IRQ_HANDLED; -} - -static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data) -{ - unsigned long flags; - struct kbase_device *kbdev = kbase_untag(data); - u32 val; - - spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags); - - if (!kbdev->pm.backend.gpu_powered) { - /* GPU is turned off - IRQ is not for us */ - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, - flags); - return IRQ_NONE; - } - - val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL); - - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags); - - if (!val) - return IRQ_NONE; - - dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val); - - kbasep_irq_test_data.triggered = 1; - wake_up(&kbasep_irq_test_data.wait); - - kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), val, NULL); - - return IRQ_HANDLED; -} - -static enum hrtimer_restart kbasep_test_interrupt_timeout(struct hrtimer *timer) -{ - struct kbasep_irq_test *test_data = container_of(timer, - struct kbasep_irq_test, timer); - - test_data->timeout = 1; - test_data->triggered = 1; - wake_up(&test_data->wait); - return HRTIMER_NORESTART; -} - -static int kbasep_common_test_interrupt( - struct kbase_device * const kbdev, u32 tag) -{ - int err = 0; - irq_handler_t test_handler; - - u32 old_mask_val; - u16 mask_offset; - u16 rawstat_offset; - - switch (tag) { - case JOB_IRQ_TAG: - test_handler = kbase_job_irq_test_handler; - rawstat_offset = JOB_CONTROL_REG(JOB_IRQ_RAWSTAT); - mask_offset = JOB_CONTROL_REG(JOB_IRQ_MASK); - break; - case MMU_IRQ_TAG: - test_handler = kbase_mmu_irq_test_handler; - rawstat_offset = MMU_REG(MMU_IRQ_RAWSTAT); - mask_offset = MMU_REG(MMU_IRQ_MASK); - break; - case GPU_IRQ_TAG: - /* already tested by pm_driver - bail out */ - default: - 
return 0; - } - - /* store old mask */ - old_mask_val = kbase_reg_read(kbdev, mask_offset, NULL); - /* mask interrupts */ - kbase_reg_write(kbdev, mask_offset, 0x0, NULL); - - if (kbdev->irqs[tag].irq) { - /* release original handler and install test handler */ - if (kbase_set_custom_irq_handler(kbdev, test_handler, tag) != 0) { - err = -EINVAL; - } else { - kbasep_irq_test_data.timeout = 0; - hrtimer_init(&kbasep_irq_test_data.timer, - CLOCK_MONOTONIC, HRTIMER_MODE_REL); - kbasep_irq_test_data.timer.function = - kbasep_test_interrupt_timeout; - - /* trigger interrupt */ - kbase_reg_write(kbdev, mask_offset, 0x1, NULL); - kbase_reg_write(kbdev, rawstat_offset, 0x1, NULL); - - hrtimer_start(&kbasep_irq_test_data.timer, - HR_TIMER_DELAY_MSEC(IRQ_TEST_TIMEOUT), - HRTIMER_MODE_REL); - - wait_event(kbasep_irq_test_data.wait, - kbasep_irq_test_data.triggered != 0); - - if (kbasep_irq_test_data.timeout != 0) { - dev_err(kbdev->dev, "Interrupt %d (index %d) didn't reach CPU.\n", - kbdev->irqs[tag].irq, tag); - err = -EINVAL; - } else { - dev_dbg(kbdev->dev, "Interrupt %d (index %d) reached CPU.\n", - kbdev->irqs[tag].irq, tag); - } - - hrtimer_cancel(&kbasep_irq_test_data.timer); - kbasep_irq_test_data.triggered = 0; - - /* mask interrupts */ - kbase_reg_write(kbdev, mask_offset, 0x0, NULL); - - /* release test handler */ - free_irq(kbdev->irqs[tag].irq, kbase_tag(kbdev, tag)); - } - - /* restore original interrupt */ - if (request_irq(kbdev->irqs[tag].irq, kbase_handler_table[tag], - kbdev->irqs[tag].flags | IRQF_SHARED, - dev_name(kbdev->dev), kbase_tag(kbdev, tag))) { - dev_err(kbdev->dev, "Can't restore original interrupt %d (index %d)\n", - kbdev->irqs[tag].irq, tag); - err = -EINVAL; - } - } - /* restore old mask */ - kbase_reg_write(kbdev, mask_offset, old_mask_val, NULL); - - return err; -} - -int kbasep_common_test_interrupt_handlers( - struct kbase_device * const kbdev) -{ - int err; - - init_waitqueue_head(&kbasep_irq_test_data.wait); - 
kbasep_irq_test_data.triggered = 0; - - /* A suspend won't happen during startup/insmod */ - kbase_pm_context_active(kbdev); - - err = kbasep_common_test_interrupt(kbdev, JOB_IRQ_TAG); - if (err) { - dev_err(kbdev->dev, "Interrupt JOB_IRQ didn't reach CPU. Check interrupt assignments.\n"); - goto out; - } - - err = kbasep_common_test_interrupt(kbdev, MMU_IRQ_TAG); - if (err) { - dev_err(kbdev->dev, "Interrupt MMU_IRQ didn't reach CPU. Check interrupt assignments.\n"); - goto out; - } - - dev_dbg(kbdev->dev, "Interrupts are correctly assigned.\n"); - - out: - kbase_pm_context_idle(kbdev); - - return err; -} -#endif /* CONFIG_MALI_DEBUG */ - -int kbase_install_interrupts(struct kbase_device *kbdev) -{ - u32 nr = ARRAY_SIZE(kbase_handler_table); - int err; - u32 i; - - for (i = 0; i < nr; i++) { - err = request_irq(kbdev->irqs[i].irq, kbase_handler_table[i], - kbdev->irqs[i].flags | IRQF_SHARED, - dev_name(kbdev->dev), - kbase_tag(kbdev, i)); - if (err) { - dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n", - kbdev->irqs[i].irq, i); -#ifdef CONFIG_SPARSE_IRQ - dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n"); -#endif /* CONFIG_SPARSE_IRQ */ - goto release; - } - } - - return 0; - - release: - while (i-- > 0) - free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i)); - - return err; -} - -void kbase_release_interrupts(struct kbase_device *kbdev) -{ - u32 nr = ARRAY_SIZE(kbase_handler_table); - u32 i; - - for (i = 0; i < nr; i++) { - if (kbdev->irqs[i].irq) - free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i)); - } -} - -void kbase_synchronize_irqs(struct kbase_device *kbdev) -{ - u32 nr = ARRAY_SIZE(kbase_handler_table); - u32 i; - - for (i = 0; i < nr; i++) { - if (kbdev->irqs[i].irq) - synchronize_irq(kbdev->irqs[i].irq); - } -} - -#endif /* !defined(CONFIG_MALI_NO_MALI) */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c 
b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c deleted file mode 100755 index f2167887229bc..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c +++ /dev/null @@ -1,385 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -/* - * Register backend context / address space management - */ - -#include -#include - -/** - * assign_and_activate_kctx_addr_space - Assign an AS to a context - * @kbdev: Kbase device - * @kctx: Kbase context - * @current_as: Address Space to assign - * - * Assign an Address Space (AS) to a context, and add the context to the Policy. - * - * This includes - * setting up the global runpool_irq structure and the context on the AS, - * Activating the MMU on the AS, - * Allowing jobs to be submitted on the AS. 
- * - * Context: - * kbasep_js_kctx_info.jsctx_mutex held, - * kbasep_js_device_data.runpool_mutex held, - * AS transaction mutex held, - * Runpool IRQ lock held - */ -static void assign_and_activate_kctx_addr_space(struct kbase_device *kbdev, - struct kbase_context *kctx, - struct kbase_as *current_as) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - struct kbasep_js_per_as_data *js_per_as_data; - int as_nr = current_as->number; - - lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex); - lockdep_assert_held(&js_devdata->runpool_mutex); - lockdep_assert_held(¤t_as->transaction_mutex); - lockdep_assert_held(&js_devdata->runpool_irq.lock); - - js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr]; - - /* Attribute handling */ - kbasep_js_ctx_attr_runpool_retain_ctx(kbdev, kctx); - - /* Assign addr space */ - kctx->as_nr = as_nr; - - /* If the GPU is currently powered, activate this address space on the - * MMU */ - if (kbdev->pm.backend.gpu_powered) - kbase_mmu_update(kctx); - /* If the GPU was not powered then the MMU will be reprogrammed on the - * next pm_context_active() */ - - /* Allow it to run jobs */ - kbasep_js_set_submit_allowed(js_devdata, kctx); - - /* Book-keeping */ - js_per_as_data->kctx = kctx; - js_per_as_data->as_busy_refcount = 0; - - kbase_js_runpool_inc_context_count(kbdev, kctx); -} - -/** - * release_addr_space - Release an address space - * @kbdev: Kbase device - * @kctx_as_nr: Address space of context to release - * @kctx: Context being released - * - * Context: kbasep_js_device_data.runpool_mutex must be held - * - * Release an address space, making it available for being picked again. 
- */ -static void release_addr_space(struct kbase_device *kbdev, int kctx_as_nr, - struct kbase_context *kctx) -{ - struct kbasep_js_device_data *js_devdata; - u16 as_bit = (1u << kctx_as_nr); - - js_devdata = &kbdev->js_data; - lockdep_assert_held(&js_devdata->runpool_mutex); - - /* The address space must not already be free */ - KBASE_DEBUG_ASSERT(!(js_devdata->as_free & as_bit)); - - js_devdata->as_free |= as_bit; - - kbase_js_runpool_dec_context_count(kbdev, kctx); -} - -bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - int i; - - if (kbdev->hwaccess.active_kctx == kctx) { - /* Context is already active */ - return true; - } - - for (i = 0; i < kbdev->nr_hw_address_spaces; i++) { - struct kbasep_js_per_as_data *js_per_as_data = - &kbdev->js_data.runpool_irq.per_as_data[i]; - - if (js_per_as_data->kctx == kctx) { - /* Context already has ASID - mark as active */ - return true; - } - } - - /* Context does not have address space assigned */ - return false; -} - -void kbase_backend_release_ctx_irq(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_per_as_data *js_per_as_data; - int as_nr = kctx->as_nr; - - if (as_nr == KBASEP_AS_NR_INVALID) { - WARN(1, "Attempting to release context without ASID\n"); - return; - } - - lockdep_assert_held(&kbdev->as[as_nr].transaction_mutex); - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - js_per_as_data = &kbdev->js_data.runpool_irq.per_as_data[kctx->as_nr]; - if (js_per_as_data->as_busy_refcount != 0) { - WARN(1, "Attempting to release active ASID\n"); - return; - } - - /* Release context from address space */ - js_per_as_data->kctx = NULL; - - kbasep_js_clear_submit_allowed(&kbdev->js_data, kctx); - /* If the GPU is currently powered, de-activate this address space on - * the MMU */ - if (kbdev->pm.backend.gpu_powered) - kbase_mmu_disable(kctx); - /* If the GPU was not powered then the MMU will be reprogrammed on the - * next 
pm_context_active() */ - - release_addr_space(kbdev, as_nr, kctx); - kctx->as_nr = KBASEP_AS_NR_INVALID; -} - -void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ -} - -void kbase_backend_release_free_address_space(struct kbase_device *kbdev, - int as_nr) -{ - struct kbasep_js_device_data *js_devdata; - - js_devdata = &kbdev->js_data; - - lockdep_assert_held(&js_devdata->runpool_mutex); - - js_devdata->as_free |= (1 << as_nr); -} - -/** - * check_is_runpool_full - check whether the runpool is full for a specified - * context - * @kbdev: Kbase device - * @kctx: Kbase context - * - * If kctx == NULL, then this makes the least restrictive check on the - * runpool. A specific context that is supplied immediately after could fail - * the check, even under the same conditions. - * - * Therefore, once a context is obtained you \b must re-check it with this - * function, since the return value could change to false. - * - * Context: - * In all cases, the caller must hold kbasep_js_device_data.runpool_mutex. - * When kctx != NULL the caller must hold the - * kbasep_js_kctx_info.ctx.jsctx_mutex. - * When kctx == NULL, then the caller need not hold any jsctx_mutex locks (but - * it doesn't do any harm to do so). 
- * - * Return: true if the runpool is full - */ -static bool check_is_runpool_full(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_device_data *js_devdata; - bool is_runpool_full; - - js_devdata = &kbdev->js_data; - lockdep_assert_held(&js_devdata->runpool_mutex); - - /* Regardless of whether a context is submitting or not, can't have more - * than there are HW address spaces */ - is_runpool_full = (bool) (js_devdata->nr_all_contexts_running >= - kbdev->nr_hw_address_spaces); - - if (kctx != NULL && (kctx->jctx.sched_info.ctx.flags & - KBASE_CTX_FLAG_SUBMIT_DISABLED) == 0) { - lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex); - /* Contexts that submit might use less of the address spaces - * available, due to HW workarounds. In which case, the runpool - * is also full when the number of submitting contexts exceeds - * the number of submittable address spaces. - * - * Both checks must be made: can have nr_user_address_spaces == - * nr_hw_address spaces, and at the same time can have - * nr_user_contexts_running < nr_all_contexts_running. 
*/ - is_runpool_full |= (bool) - (js_devdata->nr_user_contexts_running >= - kbdev->nr_user_address_spaces); - } - - return is_runpool_full; -} - -int kbase_backend_find_free_address_space(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_device_data *js_devdata; - struct kbasep_js_kctx_info *js_kctx_info; - unsigned long flags; - int i; - - js_devdata = &kbdev->js_data; - js_kctx_info = &kctx->jctx.sched_info; - - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - mutex_lock(&js_devdata->runpool_mutex); - - /* First try to find a free address space */ - if (check_is_runpool_full(kbdev, kctx)) - i = -1; - else - i = ffs(js_devdata->as_free) - 1; - - if (i >= 0 && i < kbdev->nr_hw_address_spaces) { - js_devdata->as_free &= ~(1 << i); - - mutex_unlock(&js_devdata->runpool_mutex); - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - - return i; - } - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - /* No address space currently free, see if we can release one */ - for (i = 0; i < kbdev->nr_hw_address_spaces; i++) { - struct kbasep_js_per_as_data *js_per_as_data; - struct kbasep_js_kctx_info *as_js_kctx_info; - struct kbase_context *as_kctx; - - js_per_as_data = &kbdev->js_data.runpool_irq.per_as_data[i]; - as_kctx = js_per_as_data->kctx; - as_js_kctx_info = &as_kctx->jctx.sched_info; - - /* Don't release privileged or active contexts, or contexts with - * jobs running */ - if (as_kctx && !(as_kctx->jctx.sched_info.ctx.flags & - KBASE_CTX_FLAG_PRIVILEGED) && - js_per_as_data->as_busy_refcount == 0) { - if (!kbasep_js_runpool_retain_ctx_nolock(kbdev, - as_kctx)) { - WARN(1, "Failed to retain active context\n"); - - spin_unlock_irqrestore( - &js_devdata->runpool_irq.lock, - flags); - mutex_unlock(&js_devdata->runpool_mutex); - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - - return KBASEP_AS_NR_INVALID; - } - - kbasep_js_clear_submit_allowed(js_devdata, as_kctx); - - /* Drop and retake locks to take the jsctx_mutex on the - * context 
we're about to release without violating lock - * ordering - */ - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, - flags); - mutex_unlock(&js_devdata->runpool_mutex); - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - - - /* Release context from address space */ - mutex_lock(&as_js_kctx_info->ctx.jsctx_mutex); - mutex_lock(&js_devdata->runpool_mutex); - - kbasep_js_runpool_release_ctx_nolock(kbdev, as_kctx); - - if (!as_js_kctx_info->ctx.is_scheduled) { - kbasep_js_runpool_requeue_or_kill_ctx(kbdev, - as_kctx, - true); - - js_devdata->as_free &= ~(1 << i); - - mutex_unlock(&js_devdata->runpool_mutex); - mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex); - - return i; - } - - /* Context was retained while locks were dropped, - * continue looking for free AS */ - - mutex_unlock(&js_devdata->runpool_mutex); - mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex); - - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - mutex_lock(&js_devdata->runpool_mutex); - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - } - } - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - mutex_unlock(&js_devdata->runpool_mutex); - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - - return KBASEP_AS_NR_INVALID; -} - -bool kbase_backend_use_ctx(struct kbase_device *kbdev, - struct kbase_context *kctx, - int as_nr) -{ - struct kbasep_js_device_data *js_devdata; - struct kbasep_js_kctx_info *js_kctx_info; - struct kbase_as *new_address_space = NULL; - - js_devdata = &kbdev->js_data; - js_kctx_info = &kctx->jctx.sched_info; - - if (kbdev->hwaccess.active_kctx == kctx || - kctx->as_nr != KBASEP_AS_NR_INVALID || - as_nr == KBASEP_AS_NR_INVALID) { - WARN(1, "Invalid parameters to use_ctx()\n"); - return false; - } - - new_address_space = &kbdev->as[as_nr]; - - lockdep_assert_held(&js_devdata->runpool_mutex); - lockdep_assert_held(&new_address_space->transaction_mutex); - lockdep_assert_held(&js_devdata->runpool_irq.lock); - - assign_and_activate_kctx_addr_space(kbdev, kctx, 
new_address_space); - - if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_PRIVILEGED) != 0) { - /* We need to retain it to keep the corresponding address space - */ - kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx); - } - - return true; -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h deleted file mode 100755 index 57c64f7db93fd..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - -/* - * Register-based HW access backend specific definitions - */ - -#ifndef _KBASE_HWACCESS_GPU_DEFS_H_ -#define _KBASE_HWACCESS_GPU_DEFS_H_ - -/* SLOT_RB_SIZE must be < 256 */ -#define SLOT_RB_SIZE 2 -#define SLOT_RB_MASK (SLOT_RB_SIZE - 1) - -/** - * struct rb_entry - Ringbuffer entry - * @katom: Atom associated with this entry - */ -struct rb_entry { - struct kbase_jd_atom *katom; -}; - -/** - * struct slot_rb - Slot ringbuffer - * @entries: Ringbuffer entries - * @last_context: The last context to submit a job on this slot - * @read_idx: Current read index of buffer - * @write_idx: Current write index of buffer - * @job_chain_flag: Flag used to implement jobchain disambiguation - */ -struct slot_rb { - struct rb_entry entries[SLOT_RB_SIZE]; - - struct kbase_context *last_context; - - u8 read_idx; - u8 write_idx; - - u8 job_chain_flag; -}; - -/** - * struct kbase_backend_data - GPU backend specific data for HW access layer - * @slot_rb: Slot ringbuffers - * @rmu_workaround_flag: When PRLAM-8987 is present, this flag determines - * whether slots 0/1 or slot 2 are currently being - * pulled from - * @scheduling_timer: The timer tick used for rescheduling jobs - * @timer_running: Is the timer running? The runpool_mutex must be - * held whilst modifying this. 
- * @reset_gpu: Set to a KBASE_RESET_xxx value (see comments) - * @reset_workq: Work queue for performing the reset - * @reset_work: Work item for performing the reset - * @reset_wait: Wait event signalled when the reset is complete - * @reset_timer: Timeout for soft-stops before the reset - * - * The kbasep_js_device_data::runpool_irq::lock (a spinlock) must be held when - * accessing this structure - */ -struct kbase_backend_data { - struct slot_rb slot_rb[BASE_JM_MAX_NR_SLOTS]; - - bool rmu_workaround_flag; - - struct hrtimer scheduling_timer; - - bool timer_running; - - atomic_t reset_gpu; - -/* The GPU reset isn't pending */ -#define KBASE_RESET_GPU_NOT_PENDING 0 -/* kbase_prepare_to_reset_gpu has been called */ -#define KBASE_RESET_GPU_PREPARED 1 -/* kbase_reset_gpu has been called - the reset will now definitely happen - * within the timeout period */ -#define KBASE_RESET_GPU_COMMITTED 2 -/* The GPU reset process is currently occuring (timeout has expired or - * kbasep_try_reset_gpu_early was called) */ -#define KBASE_RESET_GPU_HAPPENING 3 - - struct workqueue_struct *reset_workq; - struct work_struct reset_work; - wait_queue_head_t reset_wait; - struct hrtimer reset_timer; -}; - -/** - * struct kbase_jd_atom_backend - GPU backend specific katom data - */ -struct kbase_jd_atom_backend { -}; - -/** - * struct kbase_context_backend - GPU backend specific context data - */ -struct kbase_context_backend { -}; - -#endif /* _KBASE_HWACCESS_GPU_DEFS_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c deleted file mode 100755 index a4b0c26cddecd..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c +++ /dev/null @@ -1,1561 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Base kernel job manager APIs - */ - -#include -#include -#include -#if defined(CONFIG_MALI_GATOR_SUPPORT) -#include -#endif -#if defined(CONFIG_MALI_MIPE_ENABLED) -#include -#endif -#include -#include -#include -#include -#include -#include -#include - -#define beenthere(kctx, f, a...) \ - dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a) - -#ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS -u64 mali_js0_affinity_mask = 0xFFFFFFFFFFFFFFFFULL; -u64 mali_js1_affinity_mask = 0xFFFFFFFFFFFFFFFFULL; -u64 mali_js2_affinity_mask = 0xFFFFFFFFFFFFFFFFULL; -#endif - -#if KBASE_GPU_RESET_EN -static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev); -static void kbasep_reset_timeout_worker(struct work_struct *data); -static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer); -#endif /* KBASE_GPU_RESET_EN */ - -static inline int kbasep_jm_is_js_free(struct kbase_device *kbdev, int js, - struct kbase_context *kctx) -{ - return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), kctx); -} - -void kbase_job_hw_submit(struct kbase_device *kbdev, - struct kbase_jd_atom *katom, - int js) -{ - struct kbase_context *kctx; - u32 cfg; - u64 jc_head = katom->jc; - - KBASE_DEBUG_ASSERT(kbdev); - KBASE_DEBUG_ASSERT(katom); - - kctx = katom->kctx; - - /* Command register must be available */ - KBASE_DEBUG_ASSERT(kbasep_jm_is_js_free(kbdev, js, kctx)); - /* Affinity is not violating */ - kbase_js_debug_log_current_affinities(kbdev); - KBASE_DEBUG_ASSERT(!kbase_js_affinity_would_violate(kbdev, js, 
- katom->affinity)); - - kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), - jc_head & 0xFFFFFFFF, kctx); - kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), - jc_head >> 32, kctx); - -#ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS - { - u64 mask; - u32 value; - - if (0 == js) - mask = mali_js0_affinity_mask; - else if (1 == js) - mask = mali_js1_affinity_mask; - else - mask = mali_js2_affinity_mask; - - value = katom->affinity & (mask & 0xFFFFFFFF); - - kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_LO), - value, kctx); - - value = (katom->affinity >> 32) & ((mask>>32) & 0xFFFFFFFF); - kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_HI), - value, kctx); - } -#else - kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_LO), - katom->affinity & 0xFFFFFFFF, kctx); - kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_HI), - katom->affinity >> 32, kctx); -#endif - - /* start MMU, medium priority, cache clean/flush on end, clean/flush on - * start */ - cfg = kctx->as_nr; - -#ifndef CONFIG_MALI_COH_GPU - cfg |= JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE; - cfg |= JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE; -#endif - - cfg |= JS_CONFIG_START_MMU; - cfg |= JS_CONFIG_THREAD_PRI(8); - - if (kbase_hw_has_feature(kbdev, - BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) { - if (!kbdev->hwaccess.backend.slot_rb[js].job_chain_flag) { - cfg |= JS_CONFIG_JOB_CHAIN_FLAG; - katom->atom_flags |= KBASE_KATOM_FLAGS_JOBCHAIN; - kbdev->hwaccess.backend.slot_rb[js].job_chain_flag = - true; - } else { - katom->atom_flags &= ~KBASE_KATOM_FLAGS_JOBCHAIN; - kbdev->hwaccess.backend.slot_rb[js].job_chain_flag = - false; - } - } - - kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_CONFIG_NEXT), cfg, kctx); - - - /* Write an approximate start timestamp. - * It's approximate because there might be a job in the HEAD register. - * In such cases, we'll try to make a better approximation in the IRQ - * handler (up to the KBASE_JS_IRQ_THROTTLE_TIME_US). 
*/ - katom->start_timestamp = ktime_get(); - - /* GO ! */ - dev_dbg(kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx, affinity=0x%llx", - katom, kctx, js, jc_head, katom->affinity); - - KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_SUBMIT, kctx, katom, jc_head, js, - (u32) katom->affinity); - -#if defined(CONFIG_MALI_GATOR_SUPPORT) - kbase_trace_mali_job_slots_event( - GATOR_MAKE_EVENT(GATOR_JOB_SLOT_START, js), - kctx, kbase_jd_atom_id(kctx, katom)); -#endif -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_tl_attrib_atom_config(katom, jc_head, - katom->affinity, cfg); - kbase_tlstream_tl_ret_atom_as(katom, &kbdev->as[kctx->as_nr]); - kbase_tlstream_tl_ret_atom_lpu( - katom, - &kbdev->gpu_props.props.raw_props.js_features[js]); -#endif -#ifdef CONFIG_GPU_TRACEPOINTS - if (kbase_backend_nr_atoms_submitted(kbdev, js) == 1) { - /* If this is the only job on the slot, trace it as starting */ - char js_string[16]; - - trace_gpu_sched_switch( - kbasep_make_job_slot_string(js, js_string), - ktime_to_ns(katom->start_timestamp), - (u32)katom->kctx, 0, katom->work_id); - kbdev->hwaccess.backend.slot_rb[js].last_context = katom->kctx; - } -#endif - kbase_timeline_job_slot_submit(kbdev, kctx, katom, js); - - kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), - JS_COMMAND_START, katom->kctx); -} - -/** - * kbasep_job_slot_update_head_start_timestamp - Update timestamp - * @kbdev: kbase device - * @js: job slot - * @end_timestamp: timestamp - * - * Update the start_timestamp of the job currently in the HEAD, based on the - * fact that we got an IRQ for the previous set of completed jobs. 
- * - * The estimate also takes into account the %KBASE_JS_IRQ_THROTTLE_TIME_US and - * the time the job was submitted, to work out the best estimate (which might - * still result in an over-estimate to the calculated time spent) - */ -static void kbasep_job_slot_update_head_start_timestamp( - struct kbase_device *kbdev, - int js, - ktime_t end_timestamp) -{ - if (kbase_backend_nr_atoms_on_slot(kbdev, js) > 0) { - struct kbase_jd_atom *katom; - ktime_t new_timestamp; - ktime_t timestamp_diff; - /* The atom in the HEAD */ - katom = kbase_gpu_inspect(kbdev, js, 0); - - KBASE_DEBUG_ASSERT(katom != NULL); - - /* Account for any IRQ Throttle time - makes an overestimate of - * the time spent by the job */ - new_timestamp = ktime_sub_ns(end_timestamp, - KBASE_JS_IRQ_THROTTLE_TIME_US * 1000); - timestamp_diff = ktime_sub(new_timestamp, - katom->start_timestamp); - if (ktime_to_ns(timestamp_diff) >= 0) { - /* Only update the timestamp if it's a better estimate - * than what's currently stored. This is because our - * estimate that accounts for the throttle time may be - * too much of an overestimate */ - katom->start_timestamp = new_timestamp; - } - } -} - -void kbase_job_done(struct kbase_device *kbdev, u32 done) -{ - unsigned long flags; - int i; - u32 count = 0; - ktime_t end_timestamp = ktime_get(); - struct kbasep_js_device_data *js_devdata; - - KBASE_DEBUG_ASSERT(kbdev); - js_devdata = &kbdev->js_data; - - KBASE_TRACE_ADD(kbdev, JM_IRQ, NULL, NULL, 0, done); - - memset(&kbdev->slot_submit_count_irq[0], 0, - sizeof(kbdev->slot_submit_count_irq)); - - /* write irq throttle register, this will prevent irqs from occurring - * until the given number of gpu clock cycles have passed */ - { - int irq_throttle_cycles = - atomic_read(&kbdev->irq_throttle_cycles); - - kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_THROTTLE), - irq_throttle_cycles, NULL); - } - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - while (done) { - u32 failed = done >> 16; - - /* treat 
failed slots as finished slots */ - u32 finished = (done & 0xFFFF) | failed; - - /* Note: This is inherently unfair, as we always check - * for lower numbered interrupts before the higher - * numbered ones.*/ - i = ffs(finished) - 1; - KBASE_DEBUG_ASSERT(i >= 0); - - do { - int nr_done; - u32 active; - u32 completion_code = BASE_JD_EVENT_DONE;/* assume OK */ - u64 job_tail = 0; - - if (failed & (1u << i)) { - /* read out the job slot status code if the job - * slot reported failure */ - completion_code = kbase_reg_read(kbdev, - JOB_SLOT_REG(i, JS_STATUS), NULL); - - switch (completion_code) { - case BASE_JD_EVENT_STOPPED: -#if defined(CONFIG_MALI_GATOR_SUPPORT) - kbase_trace_mali_job_slots_event( - GATOR_MAKE_EVENT( - GATOR_JOB_SLOT_SOFT_STOPPED, i), - NULL, 0); -#endif -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_aux_job_softstop(i); -#endif - /* Soft-stopped job - read the value of - * JS_TAIL so that the job chain can - * be resumed */ - job_tail = (u64)kbase_reg_read(kbdev, - JOB_SLOT_REG(i, JS_TAIL_LO), - NULL) | - ((u64)kbase_reg_read(kbdev, - JOB_SLOT_REG(i, JS_TAIL_HI), - NULL) << 32); - break; - case BASE_JD_EVENT_NOT_STARTED: - /* PRLAM-10673 can cause a TERMINATED - * job to come back as NOT_STARTED, but - * the error interrupt helps us detect - * it */ - completion_code = - BASE_JD_EVENT_TERMINATED; - /* fall through */ - default: - dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)", - i, completion_code, - kbase_exception_name - (kbdev, - completion_code)); - } - - kbase_gpu_irq_evict(kbdev, i); - } - - kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), - done & ((1 << i) | (1 << (i + 16))), - NULL); - active = kbase_reg_read(kbdev, - JOB_CONTROL_REG(JOB_IRQ_JS_STATE), - NULL); - - if (((active >> i) & 1) == 0 && - (((done >> (i + 16)) & 1) == 0)) { - /* There is a potential race we must work - * around: - * - * 1. A job slot has a job in both current and - * next registers - * 2. 
The job in current completes - * successfully, the IRQ handler reads - * RAWSTAT and calls this function with the - * relevant bit set in "done" - * 3. The job in the next registers becomes the - * current job on the GPU - * 4. Sometime before the JOB_IRQ_CLEAR line - * above the job on the GPU _fails_ - * 5. The IRQ_CLEAR clears the done bit but not - * the failed bit. This atomically sets - * JOB_IRQ_JS_STATE. However since both jobs - * have now completed the relevant bits for - * the slot are set to 0. - * - * If we now did nothing then we'd incorrectly - * assume that _both_ jobs had completed - * successfully (since we haven't yet observed - * the fail bit being set in RAWSTAT). - * - * So at this point if there are no active jobs - * left we check to see if RAWSTAT has a failure - * bit set for the job slot. If it does we know - * that there has been a new failure that we - * didn't previously know about, so we make sure - * that we record this in active (but we wait - * for the next loop to deal with it). - * - * If we were handling a job failure (i.e. done - * has the relevant high bit set) then we know - * that the value read back from - * JOB_IRQ_JS_STATE is the correct number of - * remaining jobs because the failed job will - * have prevented any futher jobs from starting - * execution. 
- */ - u32 rawstat = kbase_reg_read(kbdev, - JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL); - - if ((rawstat >> (i + 16)) & 1) { - /* There is a failed job that we've - * missed - add it back to active */ - active |= (1u << i); - } - } - - dev_dbg(kbdev->dev, "Job ended with status 0x%08X\n", - completion_code); - - nr_done = kbase_backend_nr_atoms_submitted(kbdev, i); - nr_done -= (active >> i) & 1; - nr_done -= (active >> (i + 16)) & 1; - - if (nr_done <= 0) { - dev_warn(kbdev->dev, "Spurious interrupt on slot %d", - i); - - goto spurious; - } - - count += nr_done; - - while (nr_done) { - if (nr_done == 1) { - kbase_gpu_complete_hw(kbdev, i, - completion_code, - job_tail, - &end_timestamp); - kbase_jm_try_kick_all(kbdev); - } else { - /* More than one job has completed. - * Since this is not the last job being - * reported this time it must have - * passed. This is because the hardware - * will not allow further jobs in a job - * slot to complete until the failed job - * is cleared from the IRQ status. 
- */ - kbase_gpu_complete_hw(kbdev, i, - BASE_JD_EVENT_DONE, - 0, - &end_timestamp); - } - nr_done--; - } - spurious: - done = kbase_reg_read(kbdev, - JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL); - - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10883)) { - /* Workaround for missing interrupt caused by - * PRLAM-10883 */ - if (((active >> i) & 1) && (0 == - kbase_reg_read(kbdev, - JOB_SLOT_REG(i, - JS_STATUS), NULL))) { - /* Force job slot to be processed again - */ - done |= (1u << i); - } - } - - failed = done >> 16; - finished = (done & 0xFFFF) | failed; - } while (finished & (1 << i)); - - kbasep_job_slot_update_head_start_timestamp(kbdev, i, - end_timestamp); - } - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); -#if KBASE_GPU_RESET_EN - if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) == - KBASE_RESET_GPU_COMMITTED) { - /* If we're trying to reset the GPU then we might be able to do - * it early (without waiting for a timeout) because some jobs - * have completed - */ - kbasep_try_reset_gpu_early(kbdev); - } -#endif /* KBASE_GPU_RESET_EN */ - KBASE_TRACE_ADD(kbdev, JM_IRQ_END, NULL, NULL, 0, count); -} -KBASE_EXPORT_TEST_API(kbase_job_done); - -static bool kbasep_soft_stop_allowed(struct kbase_device *kbdev, - u16 core_reqs) -{ - bool soft_stops_allowed = true; - - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) { - if ((core_reqs & BASE_JD_REQ_T) != 0) - soft_stops_allowed = false; - } - return soft_stops_allowed; -} - -static bool kbasep_hard_stop_allowed(struct kbase_device *kbdev, - u16 core_reqs) -{ - bool hard_stops_allowed = true; - - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8394)) { - if ((core_reqs & BASE_JD_REQ_T) != 0) - hard_stops_allowed = false; - } - return hard_stops_allowed; -} - -void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev, - int js, - u32 action, - u16 core_reqs, - struct kbase_jd_atom *target_katom) -{ - struct kbase_context *kctx = target_katom->kctx; -#if KBASE_TRACE_ENABLE - u32 
status_reg_before; - u64 job_in_head_before; - u32 status_reg_after; - - KBASE_DEBUG_ASSERT(!(action & (~JS_COMMAND_MASK))); - - /* Check the head pointer */ - job_in_head_before = ((u64) kbase_reg_read(kbdev, - JOB_SLOT_REG(js, JS_HEAD_LO), NULL)) - | (((u64) kbase_reg_read(kbdev, - JOB_SLOT_REG(js, JS_HEAD_HI), NULL)) - << 32); - status_reg_before = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS), - NULL); -#endif - - if (action == JS_COMMAND_SOFT_STOP) { - bool soft_stop_allowed = kbasep_soft_stop_allowed(kbdev, - core_reqs); - - if (!soft_stop_allowed) { -#ifdef CONFIG_MALI_DEBUG - dev_dbg(kbdev->dev, "Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X", - (unsigned int)core_reqs); -#endif /* CONFIG_MALI_DEBUG */ - return; - } - - /* We are about to issue a soft stop, so mark the atom as having - * been soft stopped */ - target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED; - } - - if (action == JS_COMMAND_HARD_STOP) { - bool hard_stop_allowed = kbasep_hard_stop_allowed(kbdev, - core_reqs); - - if (!hard_stop_allowed) { - /* Jobs can be hard-stopped for the following reasons: - * * CFS decides the job has been running too long (and - * soft-stop has not occurred). In this case the GPU - * will be reset by CFS if the job remains on the - * GPU. - * - * * The context is destroyed, kbase_jd_zap_context - * will attempt to hard-stop the job. However it also - * has a watchdog which will cause the GPU to be - * reset if the job remains on the GPU. - * - * * An (unhandled) MMU fault occurred. As long as - * BASE_HW_ISSUE_8245 is defined then the GPU will be - * reset. - * - * All three cases result in the GPU being reset if the - * hard-stop fails, so it is safe to just return and - * ignore the hard-stop request. - */ - dev_warn(kbdev->dev, "Attempt made to hard-stop a job that cannot be hard-stopped. 
core_reqs = 0x%X", - (unsigned int)core_reqs); - return; - } - target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_HARD_STOPPED; - } - - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316) && - action == JS_COMMAND_SOFT_STOP) { - int i; - - for (i = 0; i < kbase_backend_nr_atoms_submitted(kbdev, js); - i++) { - struct kbase_jd_atom *katom; - - katom = kbase_gpu_inspect(kbdev, js, i); - - KBASE_DEBUG_ASSERT(katom); - - /* For HW_ISSUE_8316, only 'bad' jobs attacking the - * system can cause this issue: normally, all memory - * should be allocated in multiples of 4 pages, and - * growable memory should be changed size in multiples - * of 4 pages. - * - * Whilst such 'bad' jobs can be cleared by a GPU reset, - * the locking up of a uTLB entry caused by the bad job - * could also stall other ASs, meaning that other ASs' - * jobs don't complete in the 'grace' period before the - * reset. We don't want to lose other ASs' jobs when - * they would normally complete fine, so we must 'poke' - * the MMU regularly to help other ASs complete */ - kbase_as_poking_timer_retain_atom(kbdev, katom->kctx, - katom); - } - } - - if (kbase_hw_has_feature(kbdev, - BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) { - if (action == JS_COMMAND_SOFT_STOP) - action = (target_katom->atom_flags & - KBASE_KATOM_FLAGS_JOBCHAIN) ? - JS_COMMAND_SOFT_STOP_1 : - JS_COMMAND_SOFT_STOP_0; - else - action = (target_katom->atom_flags & - KBASE_KATOM_FLAGS_JOBCHAIN) ? 
- JS_COMMAND_HARD_STOP_1 : - JS_COMMAND_HARD_STOP_0; - } - - kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND), action, kctx); - -#if KBASE_TRACE_ENABLE - status_reg_after = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS), - NULL); - if (status_reg_after == BASE_JD_EVENT_ACTIVE) { - struct kbase_jd_atom *head; - struct kbase_context *head_kctx; - - head = kbase_gpu_inspect(kbdev, js, 0); - head_kctx = head->kctx; - - if (status_reg_before == BASE_JD_EVENT_ACTIVE) - KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, head_kctx, - head, job_in_head_before, js); - else - KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL, - 0, js); - - switch (action) { - case JS_COMMAND_SOFT_STOP: - KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, head_kctx, - head, head->jc, js); - break; - case JS_COMMAND_SOFT_STOP_0: - KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, head_kctx, - head, head->jc, js); - break; - case JS_COMMAND_SOFT_STOP_1: - KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, head_kctx, - head, head->jc, js); - break; - case JS_COMMAND_HARD_STOP: - KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, head_kctx, - head, head->jc, js); - break; - case JS_COMMAND_HARD_STOP_0: - KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, head_kctx, - head, head->jc, js); - break; - case JS_COMMAND_HARD_STOP_1: - KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, head_kctx, - head, head->jc, js); - break; - default: - BUG(); - break; - } - } else { - if (status_reg_before == BASE_JD_EVENT_ACTIVE) - KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL, - job_in_head_before, js); - else - KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL, - 0, js); - - switch (action) { - case JS_COMMAND_SOFT_STOP: - KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, NULL, NULL, 0, - js); - break; - case JS_COMMAND_SOFT_STOP_0: - KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, NULL, NULL, - 0, js); - break; - case JS_COMMAND_SOFT_STOP_1: - KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, NULL, NULL, - 0, js); - break; - case JS_COMMAND_HARD_STOP: - 
KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, NULL, NULL, 0, - js); - break; - case JS_COMMAND_HARD_STOP_0: - KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, NULL, NULL, - 0, js); - break; - case JS_COMMAND_HARD_STOP_1: - KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, NULL, NULL, - 0, js); - break; - default: - BUG(); - break; - } - } -#endif -} - -void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx) -{ - unsigned long flags; - struct kbase_device *kbdev; - struct kbasep_js_device_data *js_devdata; - int i; - - KBASE_DEBUG_ASSERT(kctx != NULL); - kbdev = kctx->kbdev; - KBASE_DEBUG_ASSERT(kbdev != NULL); - js_devdata = &kbdev->js_data; - - /* Cancel any remaining running jobs for this kctx */ - mutex_lock(&kctx->jctx.lock); - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - /* Invalidate all jobs in context, to prevent re-submitting */ - for (i = 0; i < BASE_JD_ATOM_COUNT; i++) { - if (!work_pending(&kctx->jctx.atoms[i].work)) - kctx->jctx.atoms[i].event_code = - BASE_JD_EVENT_JOB_CANCELLED; - } - - for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) - kbase_job_slot_hardstop(kctx, i, NULL); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - mutex_unlock(&kctx->jctx.lock); -} - -void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx, - struct kbase_jd_atom *target_katom) -{ - struct kbase_device *kbdev; - struct kbasep_js_device_data *js_devdata; - int js = target_katom->slot_nr; - int priority = target_katom->sched_priority; - int i; - - KBASE_DEBUG_ASSERT(kctx != NULL); - kbdev = kctx->kbdev; - KBASE_DEBUG_ASSERT(kbdev != NULL); - js_devdata = &kbdev->js_data; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) { - struct kbase_jd_atom *katom; - - katom = kbase_gpu_inspect(kbdev, js, i); - if (!katom) - continue; - - if (katom->kctx != kctx) - continue; - - if (katom->sched_priority > priority) - kbase_job_slot_softstop(kbdev, js, katom); - 
} -} - -struct zap_reset_data { - /* The stages are: - * 1. The timer has never been called - * 2. The zap has timed out, all slots are soft-stopped - the GPU reset - * will happen. The GPU has been reset when - * kbdev->hwaccess.backend.reset_waitq is signalled - * - * (-1 - The timer has been cancelled) - */ - int stage; - struct kbase_device *kbdev; - struct hrtimer timer; - spinlock_t lock; /* protects updates to stage member */ -}; - -static enum hrtimer_restart zap_timeout_callback(struct hrtimer *timer) -{ - struct zap_reset_data *reset_data = container_of(timer, - struct zap_reset_data, timer); - struct kbase_device *kbdev = reset_data->kbdev; - unsigned long flags; - - spin_lock_irqsave(&reset_data->lock, flags); - - if (reset_data->stage == -1) - goto out; - -#if KBASE_GPU_RESET_EN - if (kbase_prepare_to_reset_gpu(kbdev)) { - dev_err(kbdev->dev, "Issueing GPU soft-reset because jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n", - ZAP_TIMEOUT); - kbase_reset_gpu(kbdev); - } -#endif /* KBASE_GPU_RESET_EN */ - reset_data->stage = 2; - - out: - spin_unlock_irqrestore(&reset_data->lock, flags); - - return HRTIMER_NORESTART; -} - -void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx) -{ - struct kbase_device *kbdev = kctx->kbdev; - struct zap_reset_data reset_data; - unsigned long flags; - - hrtimer_init_on_stack(&reset_data.timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - reset_data.timer.function = zap_timeout_callback; - - spin_lock_init(&reset_data.lock); - - reset_data.kbdev = kbdev; - reset_data.stage = 1; - - hrtimer_start(&reset_data.timer, HR_TIMER_DELAY_MSEC(ZAP_TIMEOUT), - HRTIMER_MODE_REL); - - /* Wait for all jobs to finish, and for the context to be not-scheduled - * (due to kbase_job_zap_context(), we also guarentee it's not in the JS - * policy queue either */ - wait_event(kctx->jctx.zero_jobs_wait, kctx->jctx.job_nr == 0); - wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait, - 
kctx->jctx.sched_info.ctx.is_scheduled == false); - - spin_lock_irqsave(&reset_data.lock, flags); - if (reset_data.stage == 1) { - /* The timer hasn't run yet - so cancel it */ - reset_data.stage = -1; - } - spin_unlock_irqrestore(&reset_data.lock, flags); - - hrtimer_cancel(&reset_data.timer); - - if (reset_data.stage == 2) { - /* The reset has already started. - * Wait for the reset to complete - */ - wait_event(kbdev->hwaccess.backend.reset_wait, - atomic_read(&kbdev->hwaccess.backend.reset_gpu) - == KBASE_RESET_GPU_NOT_PENDING); - } - destroy_hrtimer_on_stack(&reset_data.timer); - - dev_dbg(kbdev->dev, "Zap: Finished Context %p", kctx); - - /* Ensure that the signallers of the waitqs have finished */ - mutex_lock(&kctx->jctx.lock); - mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex); - mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex); - mutex_unlock(&kctx->jctx.lock); -} - -int kbase_job_slot_init(struct kbase_device *kbdev) -{ -#if KBASE_GPU_RESET_EN - kbdev->hwaccess.backend.reset_workq = alloc_workqueue( - "Mali reset workqueue", 0, 1); - if (NULL == kbdev->hwaccess.backend.reset_workq) - return -EINVAL; - - KBASE_DEBUG_ASSERT(0 == - object_is_on_stack(&kbdev->hwaccess.backend.reset_work)); - INIT_WORK(&kbdev->hwaccess.backend.reset_work, - kbasep_reset_timeout_worker); - - hrtimer_init(&kbdev->hwaccess.backend.reset_timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - kbdev->hwaccess.backend.reset_timer.function = - kbasep_reset_timer_callback; -#endif - - return 0; -} -KBASE_EXPORT_TEST_API(kbase_job_slot_init); - -void kbase_job_slot_halt(struct kbase_device *kbdev) -{ - CSTD_UNUSED(kbdev); -} - -void kbase_job_slot_term(struct kbase_device *kbdev) -{ -#if KBASE_GPU_RESET_EN - destroy_workqueue(kbdev->hwaccess.backend.reset_workq); -#endif -} -KBASE_EXPORT_TEST_API(kbase_job_slot_term); - -#if KBASE_GPU_RESET_EN -/** - * kbasep_check_for_afbc_on_slot() - Check whether AFBC is in use on this slot - * @kbdev: kbase device pointer - * @kctx: context to check 
against - * @js: slot to check - * @target_katom: An atom to check, or NULL if all atoms from @kctx on - * slot @js should be checked - * - * This checks are based upon parameters that would normally be passed to - * kbase_job_slot_hardstop(). - * - * In the event of @target_katom being NULL, this will check the last jobs that - * are likely to be running on the slot to see if a) they belong to kctx, and - * so would be stopped, and b) whether they have AFBC - * - * In that case, It's guaranteed that a job currently executing on the HW with - * AFBC will be detected. However, this is a conservative check because it also - * detects jobs that have just completed too. - * - * Return: true when hard-stop _might_ stop an afbc atom, else false. - */ -static bool kbasep_check_for_afbc_on_slot(struct kbase_device *kbdev, - struct kbase_context *kctx, int js, - struct kbase_jd_atom *target_katom) -{ - bool ret = false; - int i; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - /* When we have an atom the decision can be made straight away. */ - if (target_katom) - return !!(target_katom->core_req & BASE_JD_REQ_FS_AFBC); - - /* Otherwise, we must chweck the hardware to see if it has atoms from - * this context with AFBC. 
*/ - for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) { - struct kbase_jd_atom *katom; - - katom = kbase_gpu_inspect(kbdev, js, i); - if (!katom) - continue; - - /* Ignore atoms from other contexts, they won't be stopped when - * we use this for checking if we should hard-stop them */ - if (katom->kctx != kctx) - continue; - - /* An atom on this slot and this context: check for AFBC */ - if (katom->core_req & BASE_JD_REQ_FS_AFBC) { - ret = true; - break; - } - } - - return ret; -} -#endif /* KBASE_GPU_RESET_EN */ - -/** - * kbase_job_slot_softstop_swflags - Soft-stop a job with flags - * @kbdev: The kbase device - * @js: The job slot to soft-stop - * @target_katom: The job that should be soft-stopped (or NULL for any job) - * @sw_flags: Flags to pass in about the soft-stop - * - * Context: - * The job slot lock must be held when calling this function. - * The job slot must not already be in the process of being soft-stopped. - * - * Soft-stop the specified job slot, with extra information about the stop - * - * Where possible any job in the next register is evicted before the soft-stop. - */ -void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js, - struct kbase_jd_atom *target_katom, u32 sw_flags) -{ - KBASE_DEBUG_ASSERT(!(sw_flags & JS_COMMAND_MASK)); - kbase_backend_soft_hard_stop_slot(kbdev, NULL, js, target_katom, - JS_COMMAND_SOFT_STOP | sw_flags); -} - -/** - * kbase_job_slot_softstop - Soft-stop the specified job slot - * @kbdev: The kbase device - * @js: The job slot to soft-stop - * @target_katom: The job that should be soft-stopped (or NULL for any job) - * Context: - * The job slot lock must be held when calling this function. - * The job slot must not already be in the process of being soft-stopped. - * - * Where possible any job in the next register is evicted before the soft-stop. 
- */ -void kbase_job_slot_softstop(struct kbase_device *kbdev, int js, - struct kbase_jd_atom *target_katom) -{ - kbase_job_slot_softstop_swflags(kbdev, js, target_katom, 0u); -} - -/** - * kbase_job_slot_hardstop - Hard-stop the specified job slot - * @kctx: The kbase context that contains the job(s) that should - * be hard-stopped - * @js: The job slot to hard-stop - * @target_katom: The job that should be hard-stopped (or NULL for all - * jobs from the context) - * Context: - * The job slot lock must be held when calling this function. - */ -void kbase_job_slot_hardstop(struct kbase_context *kctx, int js, - struct kbase_jd_atom *target_katom) -{ - struct kbase_device *kbdev = kctx->kbdev; - bool stopped; -#if KBASE_GPU_RESET_EN - /* We make the check for AFBC before evicting/stopping atoms. Note - * that no other thread can modify the slots whilst we have the - * runpool_irq lock. */ - int needs_workaround_for_afbc = - kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3542) - && kbasep_check_for_afbc_on_slot(kbdev, kctx, js, - target_katom); -#endif - - stopped = kbase_backend_soft_hard_stop_slot(kbdev, kctx, js, - target_katom, - JS_COMMAND_HARD_STOP); -#if KBASE_GPU_RESET_EN - if (stopped && (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_8401) || - kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_9510) || - needs_workaround_for_afbc)) { - /* MIDBASE-2916 if a fragment job with AFBC encoding is - * hardstopped, ensure to do a soft reset also in order to - * clear the GPU status. - * Workaround for HW issue 8401 has an issue,so after - * hard-stopping just reset the GPU. 
This will ensure that the - * jobs leave the GPU.*/ - if (kbase_prepare_to_reset_gpu_locked(kbdev)) { - dev_err(kbdev->dev, "Issueing GPU soft-reset after hard stopping due to hardware issue"); - kbase_reset_gpu_locked(kbdev); - } - } -#endif -} - -/** - * kbase_job_check_enter_disjoint - potentiall enter disjoint mode - * @kbdev: kbase device - * @action: the event which has occurred - * @core_reqs: core requirements of the atom - * @target_katom: the atom which is being affected - * - * For a certain soft/hard-stop action, work out whether to enter disjoint - * state. - * - * This does not register multiple disjoint events if the atom has already - * started a disjoint period - * - * @core_reqs can be supplied as 0 if the atom had not started on the hardware - * (and so a 'real' soft/hard-stop was not required, but it still interrupted - * flow, perhaps on another context) - * - * kbase_job_check_leave_disjoint() should be used to end the disjoint - * state when the soft/hard-stop action is complete - */ -void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action, - u16 core_reqs, struct kbase_jd_atom *target_katom) -{ - u32 hw_action = action & JS_COMMAND_MASK; - - /* For hard-stop, don't enter if hard-stop not allowed */ - if (hw_action == JS_COMMAND_HARD_STOP && - !kbasep_hard_stop_allowed(kbdev, core_reqs)) - return; - - /* For soft-stop, don't enter if soft-stop not allowed, or isn't - * causing disjoint */ - if (hw_action == JS_COMMAND_SOFT_STOP && - !(kbasep_soft_stop_allowed(kbdev, core_reqs) && - (action & JS_COMMAND_SW_CAUSES_DISJOINT))) - return; - - /* Nothing to do if already logged disjoint state on this atom */ - if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT) - return; - - target_katom->atom_flags |= KBASE_KATOM_FLAG_IN_DISJOINT; - kbase_disjoint_state_up(kbdev); -} - -/** - * kbase_job_check_enter_disjoint - potentially leave disjoint state - * @kbdev: kbase device - * @target_katom: atom which is finishing - * - * 
Work out whether to leave disjoint state when finishing an atom that was - * originated by kbase_job_check_enter_disjoint(). - */ -void kbase_job_check_leave_disjoint(struct kbase_device *kbdev, - struct kbase_jd_atom *target_katom) -{ - if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT) { - target_katom->atom_flags &= ~KBASE_KATOM_FLAG_IN_DISJOINT; - kbase_disjoint_state_down(kbdev); - } -} - - -#if KBASE_GPU_RESET_EN -static void kbase_debug_dump_registers(struct kbase_device *kbdev) -{ - int i; - - dev_err(kbdev->dev, "Register state:"); - dev_err(kbdev->dev, " GPU_IRQ_RAWSTAT=0x%08x GPU_STATUS=0x%08x", - kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL), - kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS), NULL)); - dev_err(kbdev->dev, " JOB_IRQ_RAWSTAT=0x%08x JOB_IRQ_JS_STATE=0x%08x JOB_IRQ_THROTTLE=0x%08x", - kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL), - kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_JS_STATE), NULL), - kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_THROTTLE), NULL)); - for (i = 0; i < 3; i++) { - dev_err(kbdev->dev, " JS%d_STATUS=0x%08x JS%d_HEAD_LO=0x%08x", - i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_STATUS), - NULL), - i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_HEAD_LO), - NULL)); - } - dev_err(kbdev->dev, " MMU_IRQ_RAWSTAT=0x%08x GPU_FAULTSTATUS=0x%08x", - kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_RAWSTAT), NULL), - kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL)); - dev_err(kbdev->dev, " GPU_IRQ_MASK=0x%08x JOB_IRQ_MASK=0x%08x MMU_IRQ_MASK=0x%08x", - kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL), - kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), NULL), - kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL)); - dev_err(kbdev->dev, " PWR_OVERRIDE0=0x%08x PWR_OVERRIDE1=0x%08x", - kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE0), NULL), - kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE1), NULL)); - dev_err(kbdev->dev, " SHADER_CONFIG=0x%08x L2_MMU_CONFIG=0x%08x", - 
kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_CONFIG), NULL), - kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), NULL)); -} - -static void kbasep_save_hwcnt_setup(struct kbase_device *kbdev, - struct kbase_context *kctx, - struct kbase_uk_hwcnt_setup *hwcnt_setup) -{ - hwcnt_setup->dump_buffer = - kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), kctx) & - 0xffffffff; - hwcnt_setup->dump_buffer |= (u64) - kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), kctx) << - 32; - hwcnt_setup->jm_bm = - kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN), kctx); - hwcnt_setup->shader_bm = - kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN), kctx); - hwcnt_setup->tiler_bm = - kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), kctx); - hwcnt_setup->mmu_l2_bm = - kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN), kctx); -} - -static void kbasep_reset_timeout_worker(struct work_struct *data) -{ - unsigned long flags; - struct kbase_device *kbdev; - int i; - ktime_t end_timestamp = ktime_get(); - struct kbasep_js_device_data *js_devdata; - struct kbase_uk_hwcnt_setup hwcnt_setup = { {0} }; - enum kbase_instr_state bckp_state; - bool try_schedule = false; - bool restore_hwc = false; - - u32 mmu_irq_mask; - - KBASE_DEBUG_ASSERT(data); - - kbdev = container_of(data, struct kbase_device, - hwaccess.backend.reset_work); - - KBASE_DEBUG_ASSERT(kbdev); - js_devdata = &kbdev->js_data; - - KBASE_TRACE_ADD(kbdev, JM_BEGIN_RESET_WORKER, NULL, NULL, 0u, 0); - - /* Make sure the timer has completed - this cannot be done from - * interrupt context, so this cannot be done within - * kbasep_try_reset_gpu_early. */ - hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer); - - if (kbase_pm_context_active_handle_suspend(kbdev, - KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) { - /* This would re-activate the GPU. 
Since it's already idle, - * there's no need to reset it */ - atomic_set(&kbdev->hwaccess.backend.reset_gpu, - KBASE_RESET_GPU_NOT_PENDING); - kbase_disjoint_state_down(kbdev); - wake_up(&kbdev->hwaccess.backend.reset_wait); - return; - } - - mutex_lock(&kbdev->pm.lock); - /* We hold the pm lock, so there ought to be a current policy */ - KBASE_DEBUG_ASSERT(kbdev->pm.backend.pm_current_policy); - - /* All slot have been soft-stopped and we've waited - * SOFT_STOP_RESET_TIMEOUT for the slots to clear, at this point we - * assume that anything that is still left on the GPU is stuck there and - * we'll kill it when we reset the GPU */ - - dev_err(kbdev->dev, "Resetting GPU (allowing up to %d ms)", - RESET_TIMEOUT); - - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - - if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_RESETTING) { - /* the same interrupt handler preempted itself */ - /* GPU is being reset */ - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - wait_event(kbdev->hwcnt.backend.wait, - kbdev->hwcnt.backend.triggered != 0); - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - } - /* Save the HW counters setup */ - if (kbdev->hwcnt.kctx != NULL) { - struct kbase_context *kctx = kbdev->hwcnt.kctx; - - if (kctx->jctx.sched_info.ctx.is_scheduled) { - kbasep_save_hwcnt_setup(kbdev, kctx, &hwcnt_setup); - - restore_hwc = true; - } - } - - /* Output the state of some interesting registers to help in the - * debugging of GPU resets */ - kbase_debug_dump_registers(kbdev); - - bckp_state = kbdev->hwcnt.backend.state; - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_RESETTING; - kbdev->hwcnt.backend.triggered = 0; - - mmu_irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL); - /* Disable IRQ to avoid IRQ handlers to kick in after releasing the - * spinlock; this also clears any outstanding interrupts */ - kbase_pm_disable_interrupts(kbdev); - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - - /* Ensure that any IRQ handlers have finished - * Must be 
done without any locks IRQ handlers will take */ - kbase_synchronize_irqs(kbdev); - - /* Reset the GPU */ - kbase_pm_init_hw(kbdev, 0); - - /* Re-enabled IRQs */ - kbase_pm_enable_interrupts_mmu_mask(kbdev, mmu_irq_mask); - - /* Complete any jobs that were still on the GPU */ - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - kbase_backend_reset(kbdev, &end_timestamp); - kbase_pm_metrics_update(kbdev, NULL); - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - mutex_unlock(&kbdev->pm.lock); - - mutex_lock(&js_devdata->runpool_mutex); - - /* Reprogram the GPU's MMU */ - for (i = 0; i < kbdev->nr_hw_address_spaces; i++) { - struct kbase_as *as = &kbdev->as[i]; - - mutex_lock(&as->transaction_mutex); - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - if (js_devdata->runpool_irq.per_as_data[i].kctx) - kbase_mmu_update( - js_devdata->runpool_irq.per_as_data[i].kctx); - else - kbase_mmu_disable_as(kbdev, i); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - mutex_unlock(&as->transaction_mutex); - } - - atomic_set(&kbdev->hwaccess.backend.reset_gpu, - KBASE_RESET_GPU_NOT_PENDING); - - kbase_disjoint_state_down(kbdev); - - wake_up(&kbdev->hwaccess.backend.reset_wait); - dev_err(kbdev->dev, "Reset complete"); - - if (js_devdata->nr_contexts_pullable > 0 && !kbdev->poweroff_pending) - try_schedule = true; - - mutex_unlock(&js_devdata->runpool_mutex); - - spin_lock_irqsave(&kbdev->hwcnt.lock, flags); - /* Restore the HW counters setup */ - if (restore_hwc) { - struct kbase_context *kctx = kbdev->hwcnt.kctx; - - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), - (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | - PRFCNT_CONFIG_MODE_OFF, kctx); - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), - hwcnt_setup.dump_buffer & 0xFFFFFFFF, kctx); - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), - hwcnt_setup.dump_buffer >> 32, kctx); - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN), - hwcnt_setup.jm_bm, kctx); 
- kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN), - hwcnt_setup.shader_bm, kctx); - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN), - hwcnt_setup.mmu_l2_bm, kctx); - - /* Due to PRLAM-8186 we need to disable the Tiler before we - * enable the HW counter dump. */ - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186)) - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), - 0, kctx); - else - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), - hwcnt_setup.tiler_bm, kctx); - - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), - (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | - PRFCNT_CONFIG_MODE_MANUAL, kctx); - - /* If HW has PRLAM-8186 we can now re-enable the tiler HW - * counters dump */ - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186)) - kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), - hwcnt_setup.tiler_bm, kctx); - } - kbdev->hwcnt.backend.state = bckp_state; - switch (kbdev->hwcnt.backend.state) { - /* Cases for waking kbasep_cache_clean_worker worker */ - case KBASE_INSTR_STATE_CLEANED: - /* Cache-clean IRQ occurred, but we reset: - * Wakeup incase the waiter saw RESETTING */ - case KBASE_INSTR_STATE_REQUEST_CLEAN: - /* After a clean was requested, but before the regs were - * written: - * Wakeup incase the waiter saw RESETTING */ - wake_up(&kbdev->hwcnt.backend.cache_clean_wait); - break; - case KBASE_INSTR_STATE_CLEANING: - /* Either: - * 1) We've not got the Cache-clean IRQ yet: it was lost, or: - * 2) We got it whilst resetting: it was voluntarily lost - * - * So, move to the next state and wakeup: */ - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANED; - wake_up(&kbdev->hwcnt.backend.cache_clean_wait); - break; - - /* Cases for waking anyone else */ - case KBASE_INSTR_STATE_DUMPING: - /* If dumping, abort the dump, because we may've lost the IRQ */ - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE; - kbdev->hwcnt.backend.triggered = 1; - wake_up(&kbdev->hwcnt.backend.wait); - break; - case 
KBASE_INSTR_STATE_DISABLED: - case KBASE_INSTR_STATE_IDLE: - case KBASE_INSTR_STATE_FAULT: - /* Every other reason: wakeup in that state */ - kbdev->hwcnt.backend.triggered = 1; - wake_up(&kbdev->hwcnt.backend.wait); - break; - - /* Unhandled cases */ - case KBASE_INSTR_STATE_RESETTING: - default: - BUG(); - break; - } - spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); - /* Note: counter dumping may now resume */ - - mutex_lock(&kbdev->pm.lock); - - /* Find out what cores are required now */ - kbase_pm_update_cores_state(kbdev); - - /* Synchronously request and wait for those cores, because if - * instrumentation is enabled it would need them immediately. */ - kbase_pm_check_transitions_sync(kbdev); - - mutex_unlock(&kbdev->pm.lock); - - /* Try submitting some jobs to restart processing */ - if (try_schedule) { - KBASE_TRACE_ADD(kbdev, JM_SUBMIT_AFTER_RESET, NULL, NULL, 0u, - 0); - kbase_js_sched_all(kbdev); - } - - kbase_pm_context_idle(kbdev); - KBASE_TRACE_ADD(kbdev, JM_END_RESET_WORKER, NULL, NULL, 0u, 0); -} - -static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer) -{ - struct kbase_device *kbdev = container_of(timer, struct kbase_device, - hwaccess.backend.reset_timer); - - KBASE_DEBUG_ASSERT(kbdev); - - /* Reset still pending? 
*/ - if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu, - KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) == - KBASE_RESET_GPU_COMMITTED) - queue_work(kbdev->hwaccess.backend.reset_workq, - &kbdev->hwaccess.backend.reset_work); - - return HRTIMER_NORESTART; -} - -/* - * If all jobs are evicted from the GPU then we can reset the GPU - * immediately instead of waiting for the timeout to elapse - */ - -static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev) -{ - int i; - int pending_jobs = 0; - - KBASE_DEBUG_ASSERT(kbdev); - - /* Count the number of jobs */ - for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) - pending_jobs += kbase_backend_nr_atoms_submitted(kbdev, i); - - if (pending_jobs > 0) { - /* There are still jobs on the GPU - wait */ - return; - } - - /* To prevent getting incorrect registers when dumping failed job, - * skip early reset. - */ - if (kbdev->job_fault_debug != false) - return; - - /* Check that the reset has been committed to (i.e. kbase_reset_gpu has - * been called), and that no other thread beat this thread to starting - * the reset */ - if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu, - KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) != - KBASE_RESET_GPU_COMMITTED) { - /* Reset has already occurred */ - return; - } - - queue_work(kbdev->hwaccess.backend.reset_workq, - &kbdev->hwaccess.backend.reset_work); -} - -static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev) -{ - unsigned long flags; - struct kbasep_js_device_data *js_devdata; - - js_devdata = &kbdev->js_data; - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - kbasep_try_reset_gpu_early_locked(kbdev); - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); -} - -/** - * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU - * @kbdev: kbase device - * - * This function just soft-stops all the slots to ensure that as many jobs as - * possible are saved. 
- * - * Return: - * The function returns a boolean which should be interpreted as follows: - * true - Prepared for reset, kbase_reset_gpu should be called. - * false - Another thread is performing a reset, kbase_reset_gpu should - * not be called. - */ -bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev) -{ - int i; - - KBASE_DEBUG_ASSERT(kbdev); - - if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu, - KBASE_RESET_GPU_NOT_PENDING, - KBASE_RESET_GPU_PREPARED) != - KBASE_RESET_GPU_NOT_PENDING) { - /* Some other thread is already resetting the GPU */ - return false; - } - - kbase_disjoint_state_up(kbdev); - - for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) - kbase_job_slot_softstop(kbdev, i, NULL); - - return true; -} - -bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev) -{ - unsigned long flags; - bool ret; - struct kbasep_js_device_data *js_devdata; - - js_devdata = &kbdev->js_data; - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - ret = kbase_prepare_to_reset_gpu_locked(kbdev); - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - return ret; -} -KBASE_EXPORT_TEST_API(kbase_prepare_to_reset_gpu); - -/* - * This function should be called after kbase_prepare_to_reset_gpu if it - * returns true. It should never be called without a corresponding call to - * kbase_prepare_to_reset_gpu. - * - * After this function is called (or not called if kbase_prepare_to_reset_gpu - * returned false), the caller should wait for - * kbdev->hwaccess.backend.reset_waitq to be signalled to know when the reset - * has completed. 
- */ -void kbase_reset_gpu(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev); - - /* Note this is an assert/atomic_set because it is a software issue for - * a race to be occuring here */ - KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) == - KBASE_RESET_GPU_PREPARED); - atomic_set(&kbdev->hwaccess.backend.reset_gpu, - KBASE_RESET_GPU_COMMITTED); - - dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (upto %d ms) for all jobs to complete soft-stop\n", - kbdev->reset_timeout_ms); - - hrtimer_start(&kbdev->hwaccess.backend.reset_timer, - HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms), - HRTIMER_MODE_REL); - - /* Try resetting early */ - kbasep_try_reset_gpu_early(kbdev); -} -KBASE_EXPORT_TEST_API(kbase_reset_gpu); - -void kbase_reset_gpu_locked(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev); - - /* Note this is an assert/atomic_set because it is a software issue for - * a race to be occuring here */ - KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) == - KBASE_RESET_GPU_PREPARED); - atomic_set(&kbdev->hwaccess.backend.reset_gpu, - KBASE_RESET_GPU_COMMITTED); - - dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (upto %d ms) for all jobs to complete soft-stop\n", - kbdev->reset_timeout_ms); - hrtimer_start(&kbdev->hwaccess.backend.reset_timer, - HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms), - HRTIMER_MODE_REL); - - /* Try resetting early */ - kbasep_try_reset_gpu_early_locked(kbdev); -} -#endif /* KBASE_GPU_RESET_EN */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h deleted file mode 100755 index eb068d40283b2..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h +++ /dev/null @@ -1,155 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Job Manager backend-specific low-level APIs. - */ - -#ifndef _KBASE_JM_HWACCESS_H_ -#define _KBASE_JM_HWACCESS_H_ - -#include -#include -#include - -#include - -/** - * kbase_job_submit_nolock() - Submit a job to a certain job-slot - * @kbdev: Device pointer - * @katom: Atom to submit - * @js: Job slot to submit on - * - * The caller must check kbasep_jm_is_submit_slots_free() != false before - * calling this. - * - * The following locking conditions are made on the caller: - * - it must hold the kbasep_js_device_data::runpoool_irq::lock - */ -void kbase_job_submit_nolock(struct kbase_device *kbdev, - struct kbase_jd_atom *katom, int js); - -/** - * kbase_job_done_slot() - Complete the head job on a particular job-slot - * @kbdev: Device pointer - * @s: Job slot - * @completion_code: Completion code of job reported by GPU - * @job_tail: Job tail address reported by GPU - * @end_timestamp: Timestamp of job completion - */ -void kbase_job_done_slot(struct kbase_device *kbdev, int s, u32 completion_code, - u64 job_tail, ktime_t *end_timestamp); - -#ifdef CONFIG_GPU_TRACEPOINTS -static inline char *kbasep_make_job_slot_string(int js, char *js_string) -{ - sprintf(js_string, "job_slot_%i", js); - return js_string; -} -#endif - -/** - * kbase_job_hw_submit() - Submit a job to the GPU - * @kbdev: Device pointer - * @katom: Atom to submit - * @js: Job slot to submit on - * - * The caller must check kbasep_jm_is_submit_slots_free() != false before - * calling this. 
- * - * The following locking conditions are made on the caller: - * - it must hold the kbasep_js_device_data::runpoool_irq::lock - */ -void kbase_job_hw_submit(struct kbase_device *kbdev, - struct kbase_jd_atom *katom, - int js); - -/** - * kbasep_job_slot_soft_or_hard_stop_do_action() - Perform a soft or hard stop - * on the specified atom - * @kbdev: Device pointer - * @js: Job slot to stop on - * @action: The action to perform, either JSn_COMMAND_HARD_STOP or - * JSn_COMMAND_SOFT_STOP - * @core_reqs: Core requirements of atom to stop - * @target_katom: Atom to stop - * - * The following locking conditions are made on the caller: - * - it must hold the kbasep_js_device_data::runpool_irq::lock - */ -void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev, - int js, - u32 action, - u16 core_reqs, - struct kbase_jd_atom *target_katom); - -/** - * kbase_backend_soft_hard_stop_slot() - Soft or hard stop jobs on a given job - * slot belonging to a given context. - * @kbdev: Device pointer - * @kctx: Context pointer. May be NULL - * @katom: Specific atom to stop. May be NULL - * @js: Job slot to hard stop - * @action: The action to perform, either JSn_COMMAND_HARD_STOP or - * JSn_COMMAND_SOFT_STOP - * - * If no context is provided then all jobs on the slot will be soft or hard - * stopped. - * - * If a katom is provided then only that specific atom will be stopped. In this - * case the kctx parameter is ignored. - * - * Jobs that are on the slot but are not yet on the GPU will be unpulled and - * returned to the job scheduler. 
- * - * Return: true if an atom was stopped, false otherwise - */ -bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev, - struct kbase_context *kctx, - int js, - struct kbase_jd_atom *katom, - u32 action); - -/** - * kbase_job_slot_init - Initialise job slot framework - * @kbdev: Device pointer - * - * Called on driver initialisation - * - * Return: 0 on success - */ -int kbase_job_slot_init(struct kbase_device *kbdev); - -/** - * kbase_job_slot_halt - Halt the job slot framework - * @kbdev: Device pointer - * - * Should prevent any further job slot processing - */ -void kbase_job_slot_halt(struct kbase_device *kbdev); - -/** - * kbase_job_slot_term - Terminate job slot framework - * @kbdev: Device pointer - * - * Called on driver termination - */ -void kbase_job_slot_term(struct kbase_device *kbdev); - -#endif /* _KBASE_JM_HWACCESS_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c deleted file mode 100755 index 8601718106673..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c +++ /dev/null @@ -1,1490 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -/* - * Register-based HW access backend specific APIs - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Return whether the specified ringbuffer is empty. 
HW access lock must be - * held */ -#define SLOT_RB_EMPTY(rb) (rb->write_idx == rb->read_idx) -/* Return number of atoms currently in the specified ringbuffer. HW access lock - * must be held */ -#define SLOT_RB_ENTRIES(rb) (int)(s8)(rb->write_idx - rb->read_idx) - -static void kbase_gpu_release_atom(struct kbase_device *kbdev, - struct kbase_jd_atom *katom, - ktime_t *end_timestamp); - -/** - * kbase_gpu_enqueue_atom - Enqueue an atom in the HW access ringbuffer - * @kbdev: Device pointer - * @katom: Atom to enqueue - * - * Context: Caller must hold the HW access lock - */ -static void kbase_gpu_enqueue_atom(struct kbase_device *kbdev, - struct kbase_jd_atom *katom) -{ - struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[katom->slot_nr]; - - WARN_ON(SLOT_RB_ENTRIES(rb) >= SLOT_RB_SIZE); - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - rb->entries[rb->write_idx & SLOT_RB_MASK].katom = katom; - rb->write_idx++; - - katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED; -} - -/** - * kbase_gpu_dequeue_atom - Remove an atom from the HW access ringbuffer, once - * it has been completed - * @kbdev: Device pointer - * @js: Job slot to remove atom from - * @end_timestamp: Pointer to timestamp of atom completion. May be NULL, in - * which case current time will be used. 
- * - * Context: Caller must hold the HW access lock - * - * Return: Atom removed from ringbuffer - */ -static struct kbase_jd_atom *kbase_gpu_dequeue_atom(struct kbase_device *kbdev, - int js, - ktime_t *end_timestamp) -{ - struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js]; - struct kbase_jd_atom *katom; - - if (SLOT_RB_EMPTY(rb)) { - WARN(1, "GPU ringbuffer unexpectedly empty\n"); - return NULL; - } - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - katom = rb->entries[rb->read_idx & SLOT_RB_MASK].katom; - - kbase_gpu_release_atom(kbdev, katom, end_timestamp); - - rb->read_idx++; - - katom->gpu_rb_state = KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB; - - kbase_js_debug_log_current_affinities(kbdev); - - return katom; -} - -struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js, - int idx) -{ - struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js]; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - if ((SLOT_RB_ENTRIES(rb) - 1) < idx) - return NULL; /* idx out of range */ - - return rb->entries[(rb->read_idx + idx) & SLOT_RB_MASK].katom; -} - -struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev, - int js) -{ - return kbase_gpu_inspect(kbdev, js, 0); -} - -struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev, - int js) -{ - struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js]; - - if (SLOT_RB_EMPTY(rb)) - return NULL; - - return rb->entries[(rb->write_idx - 1) & SLOT_RB_MASK].katom; -} - -/** - * kbase_gpu_atoms_submitted - Inspect whether a slot has any atoms currently - * on the GPU - * @kbdev: Device pointer - * @js: Job slot to inspect - * - * Return: true if there are atoms on the GPU for slot js, - * false otherwise - */ -static bool kbase_gpu_atoms_submitted(struct kbase_device *kbdev, int js) -{ - int i; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - for (i = 0; i < SLOT_RB_SIZE; i++) { - struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 
i); - - if (!katom) - return false; - if (katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED || - katom->gpu_rb_state == KBASE_ATOM_GPU_RB_READY) - return true; - } - - return false; -} - -/** - * kbase_gpu_atoms_submitted_any() - Inspect whether there are any atoms - * currently on the GPU - * @kbdev: Device pointer - * - * Return: true if there are any atoms on the GPU, false otherwise - */ -static bool kbase_gpu_atoms_submitted_any(struct kbase_device *kbdev) -{ - int js; - int i; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { - for (i = 0; i < SLOT_RB_SIZE; i++) { - struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i); - - if (katom && katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED) - return true; - } - } - return false; -} - -int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js) -{ - int nr = 0; - int i; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - for (i = 0; i < SLOT_RB_SIZE; i++) { - struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i); - - if (katom && (katom->gpu_rb_state == - KBASE_ATOM_GPU_RB_SUBMITTED)) - nr++; - } - - return nr; -} - -int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js) -{ - int nr = 0; - int i; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - for (i = 0; i < SLOT_RB_SIZE; i++) { - if (kbase_gpu_inspect(kbdev, js, i)) - nr++; - } - - return nr; -} - -static int kbase_gpu_nr_atoms_on_slot_min(struct kbase_device *kbdev, int js, - enum kbase_atom_gpu_rb_state min_rb_state) -{ - int nr = 0; - int i; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - for (i = 0; i < SLOT_RB_SIZE; i++) { - struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i); - - if (katom && (katom->gpu_rb_state >= min_rb_state)) - nr++; - } - - return nr; -} - -int kbase_backend_slot_free(struct kbase_device *kbdev, int js) -{ - if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) != - 
KBASE_RESET_GPU_NOT_PENDING) { - /* The GPU is being reset - so prevent submission */ - return 0; - } - - return SLOT_RB_SIZE - kbase_backend_nr_atoms_on_slot(kbdev, js); -} - - -static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev, - struct kbase_jd_atom *katom); - -static bool kbasep_js_job_check_ref_cores(struct kbase_device *kbdev, - int js, - struct kbase_jd_atom *katom) -{ - /* The most recently checked affinity. Having this at this scope allows - * us to guarantee that we've checked the affinity in this function - * call. - */ - u64 recently_chosen_affinity = 0; - bool chosen_affinity = false; - bool retry; - - do { - retry = false; - - /* NOTE: The following uses a number of FALLTHROUGHs to optimize - * the calls to this function. Ending of the function is - * indicated by BREAK OUT */ - switch (katom->coreref_state) { - /* State when job is first attempted to be run */ - case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED: - KBASE_DEBUG_ASSERT(katom->affinity == 0); - - /* Compute affinity */ - if (false == kbase_js_choose_affinity( - &recently_chosen_affinity, kbdev, katom, - js)) { - /* No cores are currently available */ - /* *** BREAK OUT: No state transition *** */ - break; - } - - chosen_affinity = true; - - /* Request the cores */ - kbase_pm_request_cores(kbdev, - katom->core_req & BASE_JD_REQ_T, - recently_chosen_affinity); - - katom->affinity = recently_chosen_affinity; - - /* Proceed to next state */ - katom->coreref_state = - KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES; - - /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ - - case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES: - { - enum kbase_pm_cores_ready cores_ready; - - KBASE_DEBUG_ASSERT(katom->affinity != 0 || - (katom->core_req & BASE_JD_REQ_T)); - - cores_ready = kbase_pm_register_inuse_cores( - kbdev, - katom->core_req & BASE_JD_REQ_T, - katom->affinity); - if (cores_ready == KBASE_NEW_AFFINITY) { - /* Affinity no longer valid - return to - * previous 
state */ - kbasep_js_job_check_deref_cores(kbdev, - katom); - KBASE_TRACE_ADD_SLOT_INFO(kbdev, - JS_CORE_REF_REGISTER_INUSE_FAILED, - katom->kctx, katom, - katom->jc, js, - (u32) katom->affinity); - /* *** BREAK OUT: Return to previous - * state, retry *** */ - retry = true; - break; - } - if (cores_ready == KBASE_CORES_NOT_READY) { - /* Stay in this state and return, to - * retry at this state later */ - KBASE_TRACE_ADD_SLOT_INFO(kbdev, - JS_CORE_REF_REGISTER_INUSE_FAILED, - katom->kctx, katom, - katom->jc, js, - (u32) katom->affinity); - /* *** BREAK OUT: No state transition - * *** */ - break; - } - /* Proceed to next state */ - katom->coreref_state = - KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY; - } - - /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ - - case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY: - KBASE_DEBUG_ASSERT(katom->affinity != 0 || - (katom->core_req & BASE_JD_REQ_T)); - - /* Optimize out choosing the affinity twice in the same - * function call */ - if (chosen_affinity == false) { - /* See if the affinity changed since a previous - * call. 
*/ - if (false == kbase_js_choose_affinity( - &recently_chosen_affinity, - kbdev, katom, js)) { - /* No cores are currently available */ - kbasep_js_job_check_deref_cores(kbdev, - katom); - KBASE_TRACE_ADD_SLOT_INFO(kbdev, - JS_CORE_REF_REQUEST_ON_RECHECK_FAILED, - katom->kctx, katom, - katom->jc, js, - (u32) recently_chosen_affinity); - /* *** BREAK OUT: Transition to lower - * state *** */ - break; - } - chosen_affinity = true; - } - - /* Now see if this requires a different set of cores */ - if (recently_chosen_affinity != katom->affinity) { - enum kbase_pm_cores_ready cores_ready; - - kbase_pm_request_cores(kbdev, - katom->core_req & BASE_JD_REQ_T, - recently_chosen_affinity); - - /* Register new cores whilst we still hold the - * old ones, to minimize power transitions */ - cores_ready = - kbase_pm_register_inuse_cores(kbdev, - katom->core_req & BASE_JD_REQ_T, - recently_chosen_affinity); - kbasep_js_job_check_deref_cores(kbdev, katom); - - /* Fixup the state that was reduced by - * deref_cores: */ - katom->coreref_state = - KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY; - katom->affinity = recently_chosen_affinity; - if (cores_ready == KBASE_NEW_AFFINITY) { - /* Affinity no longer valid - return to - * previous state */ - katom->coreref_state = - KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES; - - kbasep_js_job_check_deref_cores(kbdev, - katom); - - KBASE_TRACE_ADD_SLOT_INFO(kbdev, - JS_CORE_REF_REGISTER_INUSE_FAILED, - katom->kctx, katom, - katom->jc, js, - (u32) katom->affinity); - /* *** BREAK OUT: Return to previous - * state, retry *** */ - retry = true; - break; - } - /* Now might be waiting for powerup again, with - * a new affinity */ - if (cores_ready == KBASE_CORES_NOT_READY) { - /* Return to previous state */ - katom->coreref_state = - KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES; - KBASE_TRACE_ADD_SLOT_INFO(kbdev, - JS_CORE_REF_REGISTER_ON_RECHECK_FAILED, - katom->kctx, katom, - katom->jc, js, - (u32) katom->affinity); - /* *** BREAK 
OUT: Transition to lower - * state *** */ - break; - } - } - /* Proceed to next state */ - katom->coreref_state = - KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS; - - /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ - case KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS: - KBASE_DEBUG_ASSERT(katom->affinity != 0 || - (katom->core_req & BASE_JD_REQ_T)); - KBASE_DEBUG_ASSERT(katom->affinity == - recently_chosen_affinity); - - /* Note: this is where the caller must've taken the - * runpool_irq.lock */ - - /* Check for affinity violations - if there are any, - * then we just ask the caller to requeue and try again - * later */ - if (kbase_js_affinity_would_violate(kbdev, js, - katom->affinity) != false) { - /* Return to previous state */ - katom->coreref_state = - KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY; - /* *** BREAK OUT: Transition to lower state *** - */ - KBASE_TRACE_ADD_SLOT_INFO(kbdev, - JS_CORE_REF_AFFINITY_WOULD_VIOLATE, - katom->kctx, katom, katom->jc, js, - (u32) katom->affinity); - break; - } - - /* No affinity violations would result, so the cores are - * ready */ - katom->coreref_state = KBASE_ATOM_COREREF_STATE_READY; - /* *** BREAK OUT: Cores Ready *** */ - break; - - default: - KBASE_DEBUG_ASSERT_MSG(false, - "Unhandled kbase_atom_coreref_state %d", - katom->coreref_state); - break; - } - } while (retry != false); - - return (katom->coreref_state == KBASE_ATOM_COREREF_STATE_READY); -} - -static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev, - struct kbase_jd_atom *katom) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(katom != NULL); - - switch (katom->coreref_state) { - case KBASE_ATOM_COREREF_STATE_READY: - /* State where atom was submitted to the HW - just proceed to - * power-down */ - KBASE_DEBUG_ASSERT(katom->affinity != 0 || - (katom->core_req & BASE_JD_REQ_T)); - - /* *** FALLTHROUGH *** */ - - case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY: - /* State where cores were registered */ - 
KBASE_DEBUG_ASSERT(katom->affinity != 0 || - (katom->core_req & BASE_JD_REQ_T)); - kbase_pm_release_cores(kbdev, katom->core_req & BASE_JD_REQ_T, - katom->affinity); - - break; - - case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES: - /* State where cores were requested, but not registered */ - KBASE_DEBUG_ASSERT(katom->affinity != 0 || - (katom->core_req & BASE_JD_REQ_T)); - kbase_pm_unrequest_cores(kbdev, katom->core_req & BASE_JD_REQ_T, - katom->affinity); - break; - - case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED: - /* Initial state - nothing required */ - KBASE_DEBUG_ASSERT(katom->affinity == 0); - break; - - default: - KBASE_DEBUG_ASSERT_MSG(false, - "Unhandled coreref_state: %d", - katom->coreref_state); - break; - } - - katom->affinity = 0; - katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED; -} - -static void kbasep_js_job_check_deref_cores_nokatom(struct kbase_device *kbdev, - base_jd_core_req core_req, u64 affinity, - enum kbase_atom_coreref_state coreref_state) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - switch (coreref_state) { - case KBASE_ATOM_COREREF_STATE_READY: - /* State where atom was submitted to the HW - just proceed to - * power-down */ - KBASE_DEBUG_ASSERT(affinity != 0 || - (core_req & BASE_JD_REQ_T)); - - /* *** FALLTHROUGH *** */ - - case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY: - /* State where cores were registered */ - KBASE_DEBUG_ASSERT(affinity != 0 || - (core_req & BASE_JD_REQ_T)); - kbase_pm_release_cores(kbdev, core_req & BASE_JD_REQ_T, - affinity); - - break; - - case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES: - /* State where cores were requested, but not registered */ - KBASE_DEBUG_ASSERT(affinity != 0 || - (core_req & BASE_JD_REQ_T)); - kbase_pm_unrequest_cores(kbdev, core_req & BASE_JD_REQ_T, - affinity); - break; - - case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED: - /* Initial state - nothing required */ - KBASE_DEBUG_ASSERT(affinity == 0); - break; - - default: - 
KBASE_DEBUG_ASSERT_MSG(false, - "Unhandled coreref_state: %d", - coreref_state); - break; - } -} - -static void kbase_gpu_release_atom(struct kbase_device *kbdev, - struct kbase_jd_atom *katom, - ktime_t *end_timestamp) -{ - switch (katom->gpu_rb_state) { - case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB: - /* Should be impossible */ - WARN(1, "Attempting to release atom not in ringbuffer\n"); - break; - - case KBASE_ATOM_GPU_RB_SUBMITTED: - /* Inform power management at start/finish of atom so it can - * update its GPU utilisation metrics. Mark atom as not - * submitted beforehand. */ - katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY; - kbase_pm_metrics_update(kbdev, end_timestamp); - - if (katom->core_req & BASE_JD_REQ_PERMON) - kbase_pm_release_gpu_cycle_counter(kbdev); - /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */ - - case KBASE_ATOM_GPU_RB_READY: - /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */ - - case KBASE_ATOM_GPU_RB_WAITING_SECURE_MODE: - /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */ - - case KBASE_ATOM_GPU_RB_WAITING_AFFINITY: - kbase_js_affinity_release_slot_cores(kbdev, katom->slot_nr, - katom->affinity); - /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */ - - case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE: - break; - - case KBASE_ATOM_GPU_RB_WAITING_BLOCKED: - /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */ - - case KBASE_ATOM_GPU_RB_RETURN_TO_JS: - break; - } - - katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED; -} - -static void kbase_gpu_mark_atom_for_return(struct kbase_device *kbdev, - struct kbase_jd_atom *katom) -{ - kbase_gpu_release_atom(kbdev, katom, NULL); - katom->gpu_rb_state = KBASE_ATOM_GPU_RB_RETURN_TO_JS; -} - -static inline bool kbase_gpu_rmu_workaround(struct kbase_device *kbdev, int js) -{ - struct kbase_backend_data *backend = &kbdev->hwaccess.backend; - bool slot_busy[3]; - - if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) - return true; - slot_busy[0] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 0, - 
KBASE_ATOM_GPU_RB_WAITING_AFFINITY); - slot_busy[1] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 1, - KBASE_ATOM_GPU_RB_WAITING_AFFINITY); - slot_busy[2] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 2, - KBASE_ATOM_GPU_RB_WAITING_AFFINITY); - - if ((js == 2 && !(slot_busy[0] || slot_busy[1])) || - (js != 2 && !slot_busy[2])) - return true; - - /* Don't submit slot 2 atom while GPU has jobs on slots 0/1 */ - if (js == 2 && (kbase_gpu_atoms_submitted(kbdev, 0) || - kbase_gpu_atoms_submitted(kbdev, 1) || - backend->rmu_workaround_flag)) - return false; - - /* Don't submit slot 0/1 atom while GPU has jobs on slot 2 */ - if (js != 2 && (kbase_gpu_atoms_submitted(kbdev, 2) || - !backend->rmu_workaround_flag)) - return false; - - backend->rmu_workaround_flag = !backend->rmu_workaround_flag; - - return true; -} - -static bool kbase_gpu_in_secure_mode(struct kbase_device *kbdev) -{ - return kbdev->secure_mode; -} - -static int kbase_gpu_secure_mode_enable(struct kbase_device *kbdev) -{ - int err = -EINVAL; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - WARN_ONCE(!kbdev->secure_ops, - "Cannot enable secure mode: secure callbacks not specified.\n"); - - if (kbdev->secure_ops) { - /* Switch GPU to secure mode */ - err = kbdev->secure_ops->secure_mode_enable(kbdev); - - if (err) - dev_warn(kbdev->dev, "Failed to enable secure mode: %d\n", err); - else - kbdev->secure_mode = true; - } - - return err; -} - -static int kbase_gpu_secure_mode_disable(struct kbase_device *kbdev) -{ - int err = -EINVAL; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - WARN_ONCE(!kbdev->secure_ops, - "Cannot disable secure mode: secure callbacks not specified.\n"); - - if (kbdev->secure_ops) { - /* Switch GPU to non-secure mode */ - err = kbdev->secure_ops->secure_mode_disable(kbdev); - - if (err) - dev_warn(kbdev->dev, "Failed to disable secure mode: %d\n", err); - else - kbdev->secure_mode = false; - } - - return err; -} - -void kbase_gpu_slot_update(struct kbase_device *kbdev) -{ 
- int js; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { - struct kbase_jd_atom *katom[2]; - int idx; - - katom[0] = kbase_gpu_inspect(kbdev, js, 0); - katom[1] = kbase_gpu_inspect(kbdev, js, 1); - WARN_ON(katom[1] && !katom[0]); - - for (idx = 0; idx < SLOT_RB_SIZE; idx++) { - bool cores_ready; - - if (!katom[idx]) - continue; - - switch (katom[idx]->gpu_rb_state) { - case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB: - /* Should be impossible */ - WARN(1, "Attempting to update atom not in ringbuffer\n"); - break; - - case KBASE_ATOM_GPU_RB_WAITING_BLOCKED: - if (katom[idx]->atom_flags & - KBASE_KATOM_FLAG_X_DEP_BLOCKED) - break; - - katom[idx]->gpu_rb_state = - KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE; - - /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ - case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE: - cores_ready = - kbasep_js_job_check_ref_cores(kbdev, js, - katom[idx]); - - if (katom[idx]->event_code == - BASE_JD_EVENT_PM_EVENT) { - katom[idx]->gpu_rb_state = - KBASE_ATOM_GPU_RB_RETURN_TO_JS; - break; - } - - if (!cores_ready) - break; - - kbase_js_affinity_retain_slot_cores(kbdev, js, - katom[idx]->affinity); - katom[idx]->gpu_rb_state = - KBASE_ATOM_GPU_RB_WAITING_AFFINITY; - - /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ - - case KBASE_ATOM_GPU_RB_WAITING_AFFINITY: - if (!kbase_gpu_rmu_workaround(kbdev, js)) - break; - - katom[idx]->gpu_rb_state = - KBASE_ATOM_GPU_RB_WAITING_SECURE_MODE; - - /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ - - case KBASE_ATOM_GPU_RB_WAITING_SECURE_MODE: - if (kbase_gpu_in_secure_mode(kbdev) != kbase_jd_katom_is_secure(katom[idx])) { - int err = 0; - - /* Not in correct mode, take action */ - if (kbase_gpu_atoms_submitted_any(kbdev)) { - /* - * We are not in the correct - * GPU mode for this job, and - * we can't switch now because - * there are jobs already - * running. 
- */ - break; - } - - /* No jobs running, so we can switch GPU mode right now */ - if (kbase_jd_katom_is_secure(katom[idx])) { - err = kbase_gpu_secure_mode_enable(kbdev); - } else { - err = kbase_gpu_secure_mode_disable(kbdev); - } - - if (err) { - /* Failed to switch secure mode, fail atom */ - katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID; - kbase_gpu_mark_atom_for_return(kbdev, katom[idx]); - /* Only return if head atom or previous atom - * already removed - as atoms must be returned - * in order */ - if (idx == 0 || katom[0]->gpu_rb_state == - KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) { - kbase_gpu_dequeue_atom(kbdev, js, NULL); - kbase_jm_return_atom_to_js(kbdev, katom[idx]); - } - break; - } - } - - /* Secure mode sanity checks */ - KBASE_DEBUG_ASSERT_MSG( - kbase_jd_katom_is_secure(katom[idx]) == kbase_gpu_in_secure_mode(kbdev), - "Secure mode of atom (%d) doesn't match secure mode of GPU (%d)", - kbase_jd_katom_is_secure(katom[idx]), kbase_gpu_in_secure_mode(kbdev)); - KBASE_DEBUG_ASSERT_MSG( - (kbase_jd_katom_is_secure(katom[idx]) && js == 0) || - !kbase_jd_katom_is_secure(katom[idx]), - "Secure atom on JS%d not supported", js); - - katom[idx]->gpu_rb_state = - KBASE_ATOM_GPU_RB_READY; - - /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ - - case KBASE_ATOM_GPU_RB_READY: - /* Only submit if head atom or previous atom - * already submitted */ - if (idx == 1 && - (katom[0]->gpu_rb_state != - KBASE_ATOM_GPU_RB_SUBMITTED && - katom[0]->gpu_rb_state != - KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB)) - break; - - /* Check if this job needs the cycle counter - * enabled before submission */ - if (katom[idx]->core_req & BASE_JD_REQ_PERMON) - kbase_pm_request_gpu_cycle_counter_l2_is_on( - kbdev); - - kbase_job_hw_submit(kbdev, katom[idx], js); - katom[idx]->gpu_rb_state = - KBASE_ATOM_GPU_RB_SUBMITTED; - - /* Inform power management at start/finish of - * atom so it can update its GPU utilisation - * metrics. 
*/ - kbase_pm_metrics_update(kbdev, - &katom[idx]->start_timestamp); - - /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ - - case KBASE_ATOM_GPU_RB_SUBMITTED: - /* Atom submitted to HW, nothing else to do */ - break; - - case KBASE_ATOM_GPU_RB_RETURN_TO_JS: - /* Only return if head atom or previous atom - * already removed - as atoms must be returned - * in order */ - if (idx == 0 || katom[0]->gpu_rb_state == - KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) { - kbase_gpu_dequeue_atom(kbdev, js, NULL); - kbase_jm_return_atom_to_js(kbdev, - katom[idx]); - } - break; - } - } - } - - /* Warn if PRLAM-8987 affinity restrictions are violated */ - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) - WARN_ON((kbase_gpu_atoms_submitted(kbdev, 0) || - kbase_gpu_atoms_submitted(kbdev, 1)) && - kbase_gpu_atoms_submitted(kbdev, 2)); -} - - -void kbase_backend_run_atom(struct kbase_device *kbdev, - struct kbase_jd_atom *katom) -{ - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - kbase_gpu_enqueue_atom(kbdev, katom); - kbase_gpu_slot_update(kbdev); -} - -bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js) -{ - struct kbase_jd_atom *katom; - struct kbase_jd_atom *next_katom; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - katom = kbase_gpu_inspect(kbdev, js, 0); - next_katom = kbase_gpu_inspect(kbdev, js, 1); - - if (next_katom && katom->kctx == next_katom->kctx && - next_katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED && - (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), NULL) - != 0 || - kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), NULL) - != 0)) { - kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), - JS_COMMAND_NOP, NULL); - next_katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY; - return true; - } - - return false; -} - -void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js, - u32 completion_code, - u64 job_tail, - ktime_t *end_timestamp) -{ - struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0); - struct 
kbase_context *kctx = katom->kctx; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6787) && - completion_code != BASE_JD_EVENT_DONE && - !(completion_code & BASE_JD_SW_EVENT)) { - katom->need_cache_flush_cores_retained = katom->affinity; - kbase_pm_request_cores(kbdev, false, katom->affinity); - } else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10676)) { - if (kbdev->gpu_props.num_core_groups > 1 && - !(katom->affinity & - kbdev->gpu_props.props.coherency_info.group[0].core_mask - ) && - (katom->affinity & - kbdev->gpu_props.props.coherency_info.group[1].core_mask - )) { - dev_info(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n"); - katom->need_cache_flush_cores_retained = - katom->affinity; - kbase_pm_request_cores(kbdev, false, - katom->affinity); - } - } - - katom = kbase_gpu_dequeue_atom(kbdev, js, end_timestamp); - - kbase_timeline_job_slot_done(kbdev, katom->kctx, katom, js, 0); - - if (completion_code == BASE_JD_EVENT_STOPPED) { - struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js, - 0); - - /* - * Dequeue next atom from ringbuffers on same slot if required. - * This atom will already have been removed from the NEXT - * registers by kbase_gpu_soft_hard_stop_slot(), to ensure that - * the atoms on this slot are returned in the correct order. - */ - if (next_katom && katom->kctx == next_katom->kctx) { - kbase_gpu_dequeue_atom(kbdev, js, end_timestamp); - kbase_jm_return_atom_to_js(kbdev, next_katom); - } - } else if (completion_code != BASE_JD_EVENT_DONE) { - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - int i; - -#if KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR != 0 - KBASE_TRACE_DUMP(kbdev); -#endif - kbasep_js_clear_submit_allowed(js_devdata, katom->kctx); - - /* - * Remove all atoms on the same context from ringbuffers. This - * will not remove atoms that are already on the GPU, as these - * are guaranteed not to have fail dependencies on the failed - * atom. 
- */ - for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) { - struct kbase_jd_atom *katom_idx0 = - kbase_gpu_inspect(kbdev, i, 0); - struct kbase_jd_atom *katom_idx1 = - kbase_gpu_inspect(kbdev, i, 1); - - if (katom_idx0 && katom_idx0->kctx == katom->kctx && - katom_idx0->gpu_rb_state != - KBASE_ATOM_GPU_RB_SUBMITTED) { - /* Dequeue katom_idx0 from ringbuffer */ - kbase_gpu_dequeue_atom(kbdev, i, end_timestamp); - - if (katom_idx1 && - katom_idx1->kctx == katom->kctx && - katom_idx0->gpu_rb_state != - KBASE_ATOM_GPU_RB_SUBMITTED) { - /* Dequeue katom_idx1 from ringbuffer */ - kbase_gpu_dequeue_atom(kbdev, i, - end_timestamp); - - katom_idx1->event_code = - BASE_JD_EVENT_STOPPED; - kbase_jm_return_atom_to_js(kbdev, - katom_idx1); - } - katom_idx0->event_code = BASE_JD_EVENT_STOPPED; - kbase_jm_return_atom_to_js(kbdev, katom_idx0); - - } else if (katom_idx1 && - katom_idx1->kctx == katom->kctx && - katom_idx1->gpu_rb_state != - KBASE_ATOM_GPU_RB_SUBMITTED) { - /* Can not dequeue this atom yet - will be - * dequeued when atom at idx0 completes */ - katom_idx1->event_code = BASE_JD_EVENT_STOPPED; - kbase_gpu_mark_atom_for_return(kbdev, - katom_idx1); - } - } - } - - KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_JOB_DONE, kctx, katom, katom->jc, - js, completion_code); - - if (job_tail != 0 && job_tail != katom->jc) { - bool was_updated = (job_tail != katom->jc); - - /* Some of the job has been executed, so we update the job chain - * address to where we should resume from */ - katom->jc = job_tail; - if (was_updated) - KBASE_TRACE_ADD_SLOT(kbdev, JM_UPDATE_HEAD, katom->kctx, - katom, job_tail, js); - } - - /* Only update the event code for jobs that weren't cancelled */ - if (katom->event_code != BASE_JD_EVENT_JOB_CANCELLED) - katom->event_code = (base_jd_event_code)completion_code; - - kbase_device_trace_register_access(kctx, REG_WRITE, - JOB_CONTROL_REG(JOB_IRQ_CLEAR), - 1 << js); - - /* Complete the job, and start new ones - * - * Also defer remaining work onto the 
workqueue: - * - Re-queue Soft-stopped jobs - * - For any other jobs, queue the job back into the dependency system - * - Schedule out the parent context if necessary, and schedule a new - * one in. - */ -#ifdef CONFIG_GPU_TRACEPOINTS - { - /* The atom in the HEAD */ - struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js, - 0); - - if (next_katom && next_katom->gpu_rb_state == - KBASE_ATOM_GPU_RB_SUBMITTED) { - char js_string[16]; - - trace_gpu_sched_switch(kbasep_make_job_slot_string(js, - js_string), - ktime_to_ns(*end_timestamp), - (u32)next_katom->kctx, 0, - next_katom->work_id); - kbdev->hwaccess.backend.slot_rb[js].last_context = - next_katom->kctx; - } else { - char js_string[16]; - - trace_gpu_sched_switch(kbasep_make_job_slot_string(js, - js_string), - ktime_to_ns(ktime_get()), 0, 0, - 0); - kbdev->hwaccess.backend.slot_rb[js].last_context = 0; - } - } -#endif - - if (completion_code == BASE_JD_EVENT_STOPPED) - kbase_jm_return_atom_to_js(kbdev, katom); - else - kbase_jm_complete(kbdev, katom, end_timestamp); - - /* Job completion may have unblocked other atoms. 
Try to update all job - * slots */ - kbase_gpu_slot_update(kbdev); -} - -void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp) -{ - int js; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { - int idx; - - for (idx = 0; idx < 2; idx++) { - struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, - js, 0); - - if (katom) { - enum kbase_atom_gpu_rb_state gpu_rb_state = - katom->gpu_rb_state; - - kbase_gpu_release_atom(kbdev, katom, NULL); - kbase_gpu_dequeue_atom(kbdev, js, NULL); - - if (gpu_rb_state == - KBASE_ATOM_GPU_RB_SUBMITTED) { - katom->event_code = - BASE_JD_EVENT_JOB_CANCELLED; - kbase_jm_complete(kbdev, katom, - end_timestamp); - } else { - katom->event_code = - BASE_JD_EVENT_STOPPED; - kbase_jm_return_atom_to_js(kbdev, - katom); - } - } - } - } -} - -static inline void kbase_gpu_stop_atom(struct kbase_device *kbdev, - int js, - struct kbase_jd_atom *katom, - u32 action) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - u32 hw_action = action & JS_COMMAND_MASK; - - kbase_job_check_enter_disjoint(kbdev, action, katom->core_req, katom); - kbasep_job_slot_soft_or_hard_stop_do_action(kbdev, js, hw_action, - katom->core_req, katom); - kbasep_js_clear_submit_allowed(js_devdata, katom->kctx); -} - -static inline void kbase_gpu_remove_atom(struct kbase_device *kbdev, - struct kbase_jd_atom *katom, - u32 action, - bool disjoint) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - - katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT; - kbase_gpu_mark_atom_for_return(kbdev, katom); - kbasep_js_clear_submit_allowed(js_devdata, katom->kctx); - - if (disjoint) - kbase_job_check_enter_disjoint(kbdev, action, katom->core_req, - katom); -} - -static int should_stop_x_dep_slot(struct kbase_jd_atom *katom) -{ - if (katom->x_post_dep) { - struct kbase_jd_atom *dep_atom = katom->x_post_dep; - - if (dep_atom->gpu_rb_state != - 
KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB && - dep_atom->gpu_rb_state != - KBASE_ATOM_GPU_RB_RETURN_TO_JS) - return dep_atom->slot_nr; - } - return -1; -} - -bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev, - struct kbase_context *kctx, - int js, - struct kbase_jd_atom *katom, - u32 action) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - - struct kbase_jd_atom *katom_idx0; - struct kbase_jd_atom *katom_idx1; - - bool katom_idx0_valid, katom_idx1_valid; - - bool ret = false; - - int stop_x_dep_idx0 = -1, stop_x_dep_idx1 = -1; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - katom_idx0 = kbase_gpu_inspect(kbdev, js, 0); - katom_idx1 = kbase_gpu_inspect(kbdev, js, 1); - - if (katom) { - katom_idx0_valid = (katom_idx0 == katom); - /* If idx0 is to be removed and idx1 is on the same context, - * then idx1 must also be removed otherwise the atoms might be - * returned out of order */ - if (katom_idx1) - katom_idx1_valid = (katom_idx1 == katom) || - (katom_idx0_valid && - (katom_idx0->kctx == - katom_idx1->kctx)); - else - katom_idx1_valid = false; - } else { - katom_idx0_valid = (katom_idx0 && - (!kctx || katom_idx0->kctx == kctx)); - katom_idx1_valid = (katom_idx1 && - (!kctx || katom_idx1->kctx == kctx)); - } - - if (katom_idx0_valid) - stop_x_dep_idx0 = should_stop_x_dep_slot(katom_idx0); - if (katom_idx1_valid) - stop_x_dep_idx1 = should_stop_x_dep_slot(katom_idx1); - - if (katom_idx0_valid) { - if (katom_idx0->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) { - /* Simple case - just dequeue and return */ - kbase_gpu_dequeue_atom(kbdev, js, NULL); - if (katom_idx1_valid) { - kbase_gpu_dequeue_atom(kbdev, js, NULL); - katom_idx1->event_code = - BASE_JD_EVENT_REMOVED_FROM_NEXT; - kbase_jm_return_atom_to_js(kbdev, katom_idx1); - kbasep_js_clear_submit_allowed(js_devdata, - katom_idx1->kctx); - } - - katom_idx0->event_code = - BASE_JD_EVENT_REMOVED_FROM_NEXT; - kbase_jm_return_atom_to_js(kbdev, katom_idx0); - 
kbasep_js_clear_submit_allowed(js_devdata, - katom_idx0->kctx); - } else { - /* katom_idx0 is on GPU */ - if (katom_idx1 && katom_idx1->gpu_rb_state == - KBASE_ATOM_GPU_RB_SUBMITTED) { - /* katom_idx0 and katom_idx1 are on GPU */ - - if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, - JS_COMMAND_NEXT), NULL) == 0) { - /* idx0 has already completed - stop - * idx1 if needed*/ - if (katom_idx1_valid) { - kbase_gpu_stop_atom(kbdev, js, - katom_idx1, - action); - ret = true; - } - } else { - /* idx1 is in NEXT registers - attempt - * to remove */ - kbase_reg_write(kbdev, - JOB_SLOT_REG(js, - JS_COMMAND_NEXT), - JS_COMMAND_NOP, NULL); - - if (kbase_reg_read(kbdev, - JOB_SLOT_REG(js, - JS_HEAD_NEXT_LO), NULL) - != 0 || - kbase_reg_read(kbdev, - JOB_SLOT_REG(js, - JS_HEAD_NEXT_HI), NULL) - != 0) { - /* idx1 removed successfully, - * will be handled in IRQ */ - kbase_gpu_remove_atom(kbdev, - katom_idx1, - action, true); - stop_x_dep_idx1 = - should_stop_x_dep_slot(katom_idx1); - - /* stop idx0 if still on GPU */ - kbase_gpu_stop_atom(kbdev, js, - katom_idx0, - action); - ret = true; - } else if (katom_idx1_valid) { - /* idx0 has already completed, - * stop idx1 if needed */ - kbase_gpu_stop_atom(kbdev, js, - katom_idx1, - action); - ret = true; - } - } - } else if (katom_idx1_valid) { - /* idx1 not on GPU but must be dequeued*/ - - /* idx1 will be handled in IRQ */ - kbase_gpu_remove_atom(kbdev, katom_idx1, action, - false); - /* stop idx0 */ - /* This will be repeated for anything removed - * from the next registers, since their normal - * flow was also interrupted, and this function - * might not enter disjoint state e.g. 
if we - * don't actually do a hard stop on the head - * atom */ - kbase_gpu_stop_atom(kbdev, js, katom_idx0, - action); - ret = true; - } else { - /* no atom in idx1 */ - /* just stop idx0 */ - kbase_gpu_stop_atom(kbdev, js, katom_idx0, - action); - ret = true; - } - } - } else if (katom_idx1_valid) { - if (katom_idx1->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) { - /* Mark for return */ - /* idx1 will be returned once idx0 completes */ - kbase_gpu_remove_atom(kbdev, katom_idx1, action, - false); - } else { - /* idx1 is on GPU */ - if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, - JS_COMMAND_NEXT), NULL) == 0) { - /* idx0 has already completed - stop idx1 */ - kbase_gpu_stop_atom(kbdev, js, katom_idx1, - action); - ret = true; - } else { - /* idx1 is in NEXT registers - attempt to - * remove */ - kbase_reg_write(kbdev, JOB_SLOT_REG(js, - JS_COMMAND_NEXT), - JS_COMMAND_NOP, NULL); - - if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, - JS_HEAD_NEXT_LO), NULL) != 0 || - kbase_reg_read(kbdev, JOB_SLOT_REG(js, - JS_HEAD_NEXT_HI), NULL) != 0) { - /* idx1 removed successfully, will be - * handled in IRQ once idx0 completes */ - kbase_gpu_remove_atom(kbdev, katom_idx1, - action, - false); - } else { - /* idx0 has already completed - stop - * idx1 */ - kbase_gpu_stop_atom(kbdev, js, - katom_idx1, - action); - ret = true; - } - } - } - } - - - if (stop_x_dep_idx0 != -1) - kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx0, - NULL, action); - - if (stop_x_dep_idx1 != -1) - kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx1, - NULL, action); - - return ret; -} - -void kbase_gpu_cacheclean(struct kbase_device *kbdev, - struct kbase_jd_atom *katom) -{ - /* Limit the number of loops to avoid a hang if the interrupt is missed - */ - u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS; - - mutex_lock(&kbdev->cacheclean_lock); - - /* use GPU_COMMAND completion solution */ - /* clean & invalidate the caches */ - KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 
0); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), - GPU_COMMAND_CLEAN_INV_CACHES, NULL); - - /* wait for cache flush to complete before continuing */ - while (--max_loops && - (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) & - CLEAN_CACHES_COMPLETED) == 0) - ; - - /* clear the CLEAN_CACHES_COMPLETED irq */ - KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, - CLEAN_CACHES_COMPLETED); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), - CLEAN_CACHES_COMPLETED, NULL); - KBASE_DEBUG_ASSERT_MSG(kbdev->hwcnt.backend.state != - KBASE_INSTR_STATE_CLEANING, - "Instrumentation code was cleaning caches, but Job Management code cleared their IRQ - Instrumentation code will now hang."); - - mutex_unlock(&kbdev->cacheclean_lock); - - kbase_pm_unrequest_cores(kbdev, false, - katom->need_cache_flush_cores_retained); -} - -void kbase_backend_complete_wq(struct kbase_device *kbdev, - struct kbase_jd_atom *katom) -{ - /* - * If cache flush required due to HW workaround then perform the flush - * now - */ - if (katom->need_cache_flush_cores_retained) { - kbase_gpu_cacheclean(kbdev, katom); - katom->need_cache_flush_cores_retained = 0; - } - - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10969) && - (katom->core_req & BASE_JD_REQ_FS) && - katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT && - (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) && - !(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)) { - dev_dbg(kbdev->dev, "Soft-stopped fragment shader job got a TILE_RANGE_FAULT. Possible HW issue, trying SW workaround\n"); - if (kbasep_10969_workaround_clamp_coordinates(katom)) { - /* The job had a TILE_RANGE_FAULT after was soft-stopped - * Due to an HW issue we try to execute the job again. 
- */ - dev_dbg(kbdev->dev, - "Clamping has been executed, try to rerun the job\n" - ); - katom->event_code = BASE_JD_EVENT_STOPPED; - katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN; - } - } - - /* Clear the coreref_state now - while check_deref_cores() may not have - * been called yet, the caller will have taken a copy of this field. If - * this is not done, then if the atom is re-scheduled (following a soft - * stop) then the core reference would not be retaken. */ - katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED; - katom->affinity = 0; -} - -void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev, - base_jd_core_req core_req, u64 affinity, - enum kbase_atom_coreref_state coreref_state) -{ - kbasep_js_job_check_deref_cores_nokatom(kbdev, core_req, affinity, - coreref_state); - - if (!kbdev->pm.active_count) { - mutex_lock(&kbdev->js_data.runpool_mutex); - mutex_lock(&kbdev->pm.lock); - kbase_pm_update_active(kbdev); - mutex_unlock(&kbdev->pm.lock); - mutex_unlock(&kbdev->js_data.runpool_mutex); - } -} - -void kbase_gpu_dump_slots(struct kbase_device *kbdev) -{ - struct kbasep_js_device_data *js_devdata; - unsigned long flags; - int js; - - js_devdata = &kbdev->js_data; - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - dev_info(kbdev->dev, "kbase_gpu_dump_slots:\n"); - - for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { - int idx; - - for (idx = 0; idx < SLOT_RB_SIZE; idx++) { - struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, - js, - idx); - - if (katom) - dev_info(kbdev->dev, - " js%d idx%d : katom=%p gpu_rb_state=%d\n", - js, idx, katom, katom->gpu_rb_state); - else - dev_info(kbdev->dev, " js%d idx%d : empty\n", - js, idx); - } - } - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); -} - - - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h deleted file mode 
100755 index 102d94be93d1e..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -/* - * Register-based HW access backend specific APIs - */ - -#ifndef _KBASE_HWACCESS_GPU_H_ -#define _KBASE_HWACCESS_GPU_H_ - -#include - -/** - * kbase_gpu_irq_evict - Evict an atom from a NEXT slot - * - * @kbdev: Device pointer - * @js: Job slot to evict from - * - * Evict the atom in the NEXT slot for the specified job slot. This function is - * called from the job complete IRQ handler when the previous job has failed. - * - * Return: true if job evicted from NEXT registers, false otherwise - */ -bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js); - -/** - * kbase_gpu_complete_hw - Complete an atom on job slot js - * - * @kbdev: Device pointer - * @js: Job slot that has completed - * @completion_code: Event code from job that has completed - * @job_tail: The tail address from the hardware if the job has partially - * completed - * @end_timestamp: Time of completion - */ -void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js, - u32 completion_code, - u64 job_tail, - ktime_t *end_timestamp); - -/** - * kbase_gpu_inspect - Inspect the contents of the HW access ringbuffer - * - * @kbdev: Device pointer - * @js: Job slot to inspect - * @idx: Index into ringbuffer. 
0 is the job currently running on - * the slot, 1 is the job waiting, all other values are invalid. - * Return: The atom at that position in the ringbuffer - * or NULL if no atom present - */ -struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js, - int idx); - -/** - * kbase_gpu_slot_update - Update state based on slot ringbuffers - * - * @kbdev: Device pointer - * - * Inspect the jobs in the slot ringbuffers and update state. - * - * This will cause jobs to be submitted to hardware if they are unblocked - */ -void kbase_gpu_slot_update(struct kbase_device *kbdev); - -/** - * kbase_gpu_dump_slots - Print the contents of the slot ringbuffers - * - * @kbdev: Device pointer - */ -void kbase_gpu_dump_slots(struct kbase_device *kbdev); - -#endif /* _KBASE_HWACCESS_GPU_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c deleted file mode 100755 index 89b8085e28b29..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c +++ /dev/null @@ -1,295 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/* - * Base kernel affinity manager APIs - */ - -#include -#include "mali_kbase_js_affinity.h" - -#include - - -bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev, - int js) -{ - /* - * Here are the reasons for using job slot 2: - * - BASE_HW_ISSUE_8987 (which is entirely used for that purpose) - * - In absence of the above, then: - * - Atoms with BASE_JD_REQ_COHERENT_GROUP - * - But, only when there aren't contexts with - * KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES, because the atoms that run on - * all cores on slot 1 could be blocked by those using a coherent group - * on slot 2 - * - And, only when you actually have 2 or more coregroups - if you - * only have 1 coregroup, then having jobs for slot 2 implies they'd - * also be for slot 1, meaning you'll get interference from them. Jobs - * able to run on slot 2 could also block jobs that can only run on - * slot 1 (tiler jobs) - */ - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) - return true; - - if (js != 2) - return true; - - /* Only deal with js==2 now: */ - if (kbdev->gpu_props.num_core_groups > 1) { - /* Only use slot 2 in the 2+ coregroup case */ - if (kbasep_js_ctx_attr_is_attr_on_runpool(kbdev, - KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES) == - false) { - /* ...But only when we *don't* have atoms that run on - * all cores */ - - /* No specific check for BASE_JD_REQ_COHERENT_GROUP - * atoms - the policy will sort that out */ - return true; - } - } - - /* Above checks failed mean we shouldn't use slot 2 */ - return false; -} - -/* - * As long as it has been decided to have a deeper modification of - * what job scheduler, power manager and affinity manager will - * implement, this function is just an intermediate step that - * assumes: - * - all working cores will be powered on when this is called. - * - largest current configuration is 2 core groups. 
- * - It has been decided not to have hardcoded values so the low - * and high cores in a core split will be evently distributed. - * - Odd combinations of core requirements have been filtered out - * and do not get to this function (e.g. CS+T+NSS is not - * supported here). - * - This function is frequently called and can be optimized, - * (see notes in loops), but as the functionallity will likely - * be modified, optimization has not been addressed. -*/ -bool kbase_js_choose_affinity(u64 * const affinity, - struct kbase_device *kbdev, - struct kbase_jd_atom *katom, int js) -{ - base_jd_core_req core_req = katom->core_req; - unsigned int num_core_groups = kbdev->gpu_props.num_core_groups; - u64 core_availability_mask; - unsigned long flags; - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - core_availability_mask = kbase_pm_ca_get_core_mask(kbdev); - - /* - * If no cores are currently available (core availability policy is - * transitioning) then fail. - */ - if (0 == core_availability_mask) { - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - *affinity = 0; - return false; - } - - KBASE_DEBUG_ASSERT(js >= 0); - - if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) == - BASE_JD_REQ_T) { - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - /* Tiler only job, bit 0 needed to enable tiler but no shader - * cores required */ - *affinity = 1; - return true; - } - - if (1 == kbdev->gpu_props.num_cores) { - /* trivial case only one core, nothing to do */ - *affinity = core_availability_mask; - } else { - if ((core_req & (BASE_JD_REQ_COHERENT_GROUP | - BASE_JD_REQ_SPECIFIC_COHERENT_GROUP))) { - if (js == 0 || num_core_groups == 1) { - /* js[0] and single-core-group systems just get - * the first core group */ - *affinity = - kbdev->gpu_props.props.coherency_info.group[0].core_mask - & core_availability_mask; - } else { - /* js[1], js[2] use core groups 0, 1 for - * dual-core-group systems */ - u32 core_group_idx = 
((u32) js) - 1; - - KBASE_DEBUG_ASSERT(core_group_idx < - num_core_groups); - *affinity = - kbdev->gpu_props.props.coherency_info.group[core_group_idx].core_mask - & core_availability_mask; - - /* If the job is specifically targeting core - * group 1 and the core availability policy is - * keeping that core group off, then fail */ - if (*affinity == 0 && core_group_idx == 1 && - kbdev->pm.backend.cg1_disabled - == true) - katom->event_code = - BASE_JD_EVENT_PM_EVENT; - } - } else { - /* All cores are available when no core split is - * required */ - *affinity = core_availability_mask; - } - } - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - /* - * If no cores are currently available in the desired core group(s) - * (core availability policy is transitioning) then fail. - */ - if (*affinity == 0) - return false; - - /* Enable core 0 if tiler required */ - if (core_req & BASE_JD_REQ_T) - *affinity = *affinity | 1; - - return true; -} - -static inline bool kbase_js_affinity_is_violating( - struct kbase_device *kbdev, - u64 *affinities) -{ - /* This implementation checks whether the two slots involved in Generic - * thread creation have intersecting affinity. This is due to micro- - * architectural issues where a job in slot A targetting cores used by - * slot B could prevent the job in slot B from making progress until the - * job in slot A has completed. 
- */ - u64 affinity_set_left; - u64 affinity_set_right; - u64 intersection; - - KBASE_DEBUG_ASSERT(affinities != NULL); - - affinity_set_left = affinities[1]; - - affinity_set_right = affinities[2]; - - /* A violation occurs when any bit in the left_set is also in the - * right_set */ - intersection = affinity_set_left & affinity_set_right; - - return (bool) (intersection != (u64) 0u); -} - -bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js, - u64 affinity) -{ - struct kbasep_js_device_data *js_devdata; - u64 new_affinities[BASE_JM_MAX_NR_SLOTS]; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS); - js_devdata = &kbdev->js_data; - - memcpy(new_affinities, js_devdata->runpool_irq.slot_affinities, - sizeof(js_devdata->runpool_irq.slot_affinities)); - - new_affinities[js] |= affinity; - - return kbase_js_affinity_is_violating(kbdev, new_affinities); -} - -void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js, - u64 affinity) -{ - struct kbasep_js_device_data *js_devdata; - u64 cores; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS); - js_devdata = &kbdev->js_data; - - KBASE_DEBUG_ASSERT(kbase_js_affinity_would_violate(kbdev, js, affinity) - == false); - - cores = affinity; - while (cores) { - int bitnum = fls64(cores) - 1; - u64 bit = 1ULL << bitnum; - s8 cnt; - - cnt = - ++(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum]); - - if (cnt == 1) - js_devdata->runpool_irq.slot_affinities[js] |= bit; - - cores &= ~bit; - } -} - -void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js, - u64 affinity) -{ - struct kbasep_js_device_data *js_devdata; - u64 cores; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS); - js_devdata = &kbdev->js_data; - - cores = affinity; - while (cores) { - int bitnum = fls64(cores) - 1; - u64 bit = 1ULL << bitnum; - s8 cnt; - - KBASE_DEBUG_ASSERT( - 
js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum] > 0); - - cnt = - --(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum]); - - if (0 == cnt) - js_devdata->runpool_irq.slot_affinities[js] &= ~bit; - - cores &= ~bit; - } -} - -#if KBASE_TRACE_ENABLE -void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev) -{ - struct kbasep_js_device_data *js_devdata; - int slot_nr; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - js_devdata = &kbdev->js_data; - - for (slot_nr = 0; slot_nr < 3; ++slot_nr) - KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_AFFINITY_CURRENT, NULL, - NULL, 0u, slot_nr, - (u32) js_devdata->runpool_irq.slot_affinities[slot_nr]); -} -#endif /* KBASE_TRACE_ENABLE */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h deleted file mode 100755 index 3026e6a583034..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h +++ /dev/null @@ -1,138 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Affinity Manager internal APIs. 
- */ - -#ifndef _KBASE_JS_AFFINITY_H_ -#define _KBASE_JS_AFFINITY_H_ - -#ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS -/* Import the external affinity mask variables */ -extern u64 mali_js0_affinity_mask; -extern u64 mali_js1_affinity_mask; -extern u64 mali_js2_affinity_mask; -#endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */ - - -/** - * kbase_js_can_run_job_on_slot_no_lock - Decide whether it is possible to - * submit a job to a particular job slot in the current status - * - * @kbdev: The kbase device structure of the device - * @js: Job slot number to check for allowance - * - * Will check if submitting to the given job slot is allowed in the current - * status. For example using job slot 2 while in soft-stoppable state and only - * having 1 coregroup is not allowed by the policy. This function should be - * called prior to submitting a job to a slot to make sure policy rules are not - * violated. - * - * The following locking conditions are made on the caller - * - it must hold kbasep_js_device_data.runpool_irq.lock - */ -bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev, - int js); - -/** - * kbase_js_choose_affinity - Compute affinity for a given job. - * - * @affinity: Affinity bitmap computed - * @kbdev: The kbase device structure of the device - * @katom: Job chain of which affinity is going to be found - * @js: Slot the job chain is being submitted - * - * Currently assumes an all-on/all-off power management policy. - * Also assumes there is at least one core with tiler available. - * - * Returns true if a valid affinity was chosen, false if - * no cores were available. - */ -bool kbase_js_choose_affinity(u64 * const affinity, - struct kbase_device *kbdev, - struct kbase_jd_atom *katom, - int js); - -/** - * kbase_js_affinity_would_violate - Determine whether a proposed affinity on - * job slot @js would cause a violation of affinity restrictions. 
- * - * @kbdev: Kbase device structure - * @js: The job slot to test - * @affinity: The affinity mask to test - * - * The following locks must be held by the caller - * - kbasep_js_device_data.runpool_irq.lock - * - * Return: true if the affinity would violate the restrictions - */ -bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js, - u64 affinity); - -/** - * kbase_js_affinity_retain_slot_cores - Affinity tracking: retain cores used by - * a slot - * - * @kbdev: Kbase device structure - * @js: The job slot retaining the cores - * @affinity: The cores to retain - * - * The following locks must be held by the caller - * - kbasep_js_device_data.runpool_irq.lock - */ -void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js, - u64 affinity); - -/** - * kbase_js_affinity_release_slot_cores - Affinity tracking: release cores used - * by a slot - * - * @kbdev: Kbase device structure - * @js: Job slot - * @affinity: Bit mask of core to be released - * - * Cores must be released as soon as a job is dequeued from a slot's 'submit - * slots', and before another job is submitted to those slots. Otherwise, the - * refcount could exceed the maximum number submittable to a slot, - * %BASE_JM_SUBMIT_SLOTS. 
- * - * The following locks must be held by the caller - * - kbasep_js_device_data.runpool_irq.lock - */ -void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js, - u64 affinity); - -/** - * kbase_js_debug_log_current_affinities - log the current affinities - * - * @kbdev: Kbase device structure - * - * Output to the Trace log the current tracked affinities on all slots - */ -#if KBASE_TRACE_ENABLE -void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev); -#else /* KBASE_TRACE_ENABLE */ -static inline void -kbase_js_debug_log_current_affinities(struct kbase_device *kbdev) -{ -} -#endif /* KBASE_TRACE_ENABLE */ - -#endif /* _KBASE_JS_AFFINITY_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c deleted file mode 100755 index 04bfa51903970..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c +++ /dev/null @@ -1,316 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -/* - * Register-based HW access backend specific job scheduler APIs - */ - -#include -#include -#include -#include - -/* - * Define for when dumping is enabled. - * This should not be based on the instrumentation level as whether dumping is - * enabled for a particular level is down to the integrator. However this is - * being used for now as otherwise the cinstr headers would be needed. 
- */ -#define CINSTR_DUMPING_ENABLED (2 == MALI_INSTRUMENTATION_LEVEL) - -/* - * Hold the runpool_mutex for this - */ -static inline bool timer_callback_should_run(struct kbase_device *kbdev) -{ - s8 nr_running_ctxs; - - lockdep_assert_held(&kbdev->js_data.runpool_mutex); - - /* nr_contexts_pullable is updated with the runpool_mutex. However, the - * locking in the caller gives us a barrier that ensures - * nr_contexts_pullable is up-to-date for reading */ - nr_running_ctxs = atomic_read(&kbdev->js_data.nr_contexts_runnable); - -#ifdef CONFIG_MALI_DEBUG - if (kbdev->js_data.softstop_always) { - /* Debug support for allowing soft-stop on a single context */ - return true; - } -#endif /* CONFIG_MALI_DEBUG */ - - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9435)) { - /* Timeouts would have to be 4x longer (due to micro- - * architectural design) to support OpenCL conformance tests, so - * only run the timer when there's: - * - 2 or more CL contexts - * - 1 or more GLES contexts - * - * NOTE: We will treat a context that has both Compute and Non- - * Compute jobs will be treated as an OpenCL context (hence, we - * don't check KBASEP_JS_CTX_ATTR_NON_COMPUTE). 
- */ - { - s8 nr_compute_ctxs = - kbasep_js_ctx_attr_count_on_runpool(kbdev, - KBASEP_JS_CTX_ATTR_COMPUTE); - s8 nr_noncompute_ctxs = nr_running_ctxs - - nr_compute_ctxs; - - return (bool) (nr_compute_ctxs >= 2 || - nr_noncompute_ctxs > 0); - } - } else { - /* Run the timer callback whenever you have at least 1 context - */ - return (bool) (nr_running_ctxs > 0); - } -} - -static enum hrtimer_restart timer_callback(struct hrtimer *timer) -{ - unsigned long flags; - struct kbase_device *kbdev; - struct kbasep_js_device_data *js_devdata; - struct kbase_backend_data *backend; - int s; - bool reset_needed = false; - - KBASE_DEBUG_ASSERT(timer != NULL); - - backend = container_of(timer, struct kbase_backend_data, - scheduling_timer); - kbdev = container_of(backend, struct kbase_device, hwaccess.backend); - js_devdata = &kbdev->js_data; - - /* Loop through the slots */ - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - for (s = 0; s < kbdev->gpu_props.num_job_slots; s++) { - struct kbase_jd_atom *atom = NULL; - - if (kbase_backend_nr_atoms_on_slot(kbdev, s) > 0) { - atom = kbase_gpu_inspect(kbdev, s, 0); - KBASE_DEBUG_ASSERT(atom != NULL); - } - - if (atom != NULL) { - /* The current version of the model doesn't support - * Soft-Stop */ - if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_5736)) { - u32 ticks = atom->sched_info.cfs.ticks++; - -#if !CINSTR_DUMPING_ENABLED - u32 soft_stop_ticks, hard_stop_ticks, - gpu_reset_ticks; - if (atom->core_req & BASE_JD_REQ_ONLY_COMPUTE) { - soft_stop_ticks = - js_devdata->soft_stop_ticks_cl; - hard_stop_ticks = - js_devdata->hard_stop_ticks_cl; - gpu_reset_ticks = - js_devdata->gpu_reset_ticks_cl; - } else { - soft_stop_ticks = - js_devdata->soft_stop_ticks; - hard_stop_ticks = - js_devdata->hard_stop_ticks_ss; - gpu_reset_ticks = - js_devdata->gpu_reset_ticks_ss; - } - - /* Job is Soft-Stoppable */ - if (ticks == soft_stop_ticks) { - int disjoint_threshold = - KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD; - u32 
softstop_flags = 0u; - /* Job has been scheduled for at least - * js_devdata->soft_stop_ticks ticks. - * Soft stop the slot so we can run - * other jobs. - */ - dev_dbg(kbdev->dev, "Soft-stop"); -#if !KBASE_DISABLE_SCHEDULING_SOFT_STOPS - /* nr_user_contexts_running is updated - * with the runpool_mutex, but we can't - * take that here. - * - * However, if it's about to be - * increased then the new context can't - * run any jobs until they take the - * runpool_irq lock, so it's OK to - * observe the older value. - * - * Similarly, if it's about to be - * decreased, the last job from another - * context has already finished, so it's - * not too bad that we observe the older - * value and register a disjoint event - * when we try soft-stopping */ - if (js_devdata->nr_user_contexts_running - >= disjoint_threshold) - softstop_flags |= - JS_COMMAND_SW_CAUSES_DISJOINT; - - kbase_job_slot_softstop_swflags(kbdev, - s, atom, softstop_flags); -#endif - } else if (ticks == hard_stop_ticks) { - /* Job has been scheduled for at least - * js_devdata->hard_stop_ticks_ss ticks. - * It should have been soft-stopped by - * now. Hard stop the slot. - */ -#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS - int ms = - js_devdata->scheduling_period_ns - / 1000000u; - dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", - (unsigned long)ticks, - (unsigned long)ms); - kbase_job_slot_hardstop(atom->kctx, s, - atom); -#endif - } else if (ticks == gpu_reset_ticks) { - /* Job has been scheduled for at least - * js_devdata->gpu_reset_ticks_ss ticks. - * It should have left the GPU by now. - * Signal that the GPU needs to be - * reset. - */ - reset_needed = true; - } -#else /* !CINSTR_DUMPING_ENABLED */ - /* NOTE: During CINSTR_DUMPING_ENABLED, we use - * the alternate timeouts, which makes the hard- - * stop and GPU reset timeout much longer. We - * also ensure that we don't soft-stop at all. 
- */ - if (ticks == js_devdata->soft_stop_ticks) { - /* Job has been scheduled for at least - * js_devdata->soft_stop_ticks. We do - * not soft-stop during - * CINSTR_DUMPING_ENABLED, however. - */ - dev_dbg(kbdev->dev, "Soft-stop"); - } else if (ticks == - js_devdata->hard_stop_ticks_dumping) { - /* Job has been scheduled for at least - * js_devdata->hard_stop_ticks_dumping - * ticks. Hard stop the slot. - */ -#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS - int ms = - js_devdata->scheduling_period_ns - / 1000000u; - dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", - (unsigned long)ticks, - (unsigned long)ms); - kbase_job_slot_hardstop(atom->kctx, s, - atom); -#endif - } else if (ticks == - js_devdata->gpu_reset_ticks_dumping) { - /* Job has been scheduled for at least - * js_devdata->gpu_reset_ticks_dumping - * ticks. It should have left the GPU by - * now. Signal that the GPU needs to be - * reset. - */ - reset_needed = true; - } -#endif /* !CINSTR_DUMPING_ENABLED */ - } - } - } -#if KBASE_GPU_RESET_EN - if (reset_needed) { - dev_err(kbdev->dev, "JS: Job has been on the GPU for too long (JS_RESET_TICKS_SS/DUMPING timeout hit). 
Issueing GPU soft-reset to resolve."); - - if (kbase_prepare_to_reset_gpu_locked(kbdev)) - kbase_reset_gpu_locked(kbdev); - } -#endif /* KBASE_GPU_RESET_EN */ - /* the timer is re-issued if there is contexts in the run-pool */ - - if (backend->timer_running) - hrtimer_start(&backend->scheduling_timer, - HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns), - HRTIMER_MODE_REL); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - return HRTIMER_NORESTART; -} - -void kbase_backend_ctx_count_changed(struct kbase_device *kbdev) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - struct kbase_backend_data *backend = &kbdev->hwaccess.backend; - unsigned long flags; - - lockdep_assert_held(&js_devdata->runpool_mutex); - - if (!timer_callback_should_run(kbdev)) { - /* Take spinlock to force synchronisation with timer */ - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - backend->timer_running = false; - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - /* From now on, return value of timer_callback_should_run() will - * also cause the timer to not requeue itself. 
Its return value - * cannot change, because it depends on variables updated with - * the runpool_mutex held, which the caller of this must also - * hold */ - hrtimer_cancel(&backend->scheduling_timer); - } - - if (timer_callback_should_run(kbdev) && !backend->timer_running) { - /* Take spinlock to force synchronisation with timer */ - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - backend->timer_running = true; - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - hrtimer_start(&backend->scheduling_timer, - HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns), - HRTIMER_MODE_REL); - - KBASE_TRACE_ADD(kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u, - 0u); - } -} - -int kbase_backend_timer_init(struct kbase_device *kbdev) -{ - struct kbase_backend_data *backend = &kbdev->hwaccess.backend; - - hrtimer_init(&backend->scheduling_timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - backend->scheduling_timer.function = timer_callback; - - backend->timer_running = false; - - return 0; -} - -void kbase_backend_timer_term(struct kbase_device *kbdev) -{ - struct kbase_backend_data *backend = &kbdev->hwaccess.backend; - - hrtimer_cancel(&backend->scheduling_timer); -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h deleted file mode 100755 index 3c101e4320d8c..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -/* - * Register-based HW access backend specific job scheduler APIs - */ - -#ifndef _KBASE_JS_BACKEND_H_ -#define _KBASE_JS_BACKEND_H_ - -/** - * kbase_backend_timer_init() - Initialise the JS scheduling timer - * @kbdev: Device pointer - * - * This function should be called at driver initialisation - * - * Return: 0 on success - */ -int kbase_backend_timer_init(struct kbase_device *kbdev); - -/** - * kbase_backend_timer_term() - Terminate the JS scheduling timer - * @kbdev: Device pointer - * - * This function should be called at driver termination - */ -void kbase_backend_timer_term(struct kbase_device *kbdev); - -#endif /* _KBASE_JS_BACKEND_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c deleted file mode 100755 index 1b613a1967c9f..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c +++ /dev/null @@ -1,316 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#include - -#include -#include -#include -#if defined(CONFIG_MALI_MIPE_ENABLED) -#include -#endif -#include -#include - -static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn, - u32 num_pages) -{ - u64 region; - - /* can't lock a zero sized range */ - KBASE_DEBUG_ASSERT(num_pages); - - region = pfn << PAGE_SHIFT; - /* - * fls returns (given the ASSERT above): - * 1 .. 32 - * - * 10 + fls(num_pages) - * results in the range (11 .. 42) - */ - - /* gracefully handle num_pages being zero */ - if (0 == num_pages) { - region |= 11; - } else { - u8 region_width; - - region_width = 10 + fls(num_pages); - if (num_pages != (1ul << (region_width - 11))) { - /* not pow2, so must go up to the next pow2 */ - region_width += 1; - } - KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE); - KBASE_DEBUG_ASSERT(region_width >= KBASE_LOCK_REGION_MIN_SIZE); - region |= region_width; - } - - return region; -} - -static int wait_ready(struct kbase_device *kbdev, - unsigned int as_nr, struct kbase_context *kctx) -{ - unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS; - u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx); - - /* Wait for the MMU status to indicate there is no active command, in - * case one is pending. Do not log remaining register accesses. */ - while (--max_loops && (val & AS_STATUS_AS_ACTIVE)) - val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), NULL); - - if (max_loops == 0) { - dev_err(kbdev->dev, "AS_ACTIVE bit stuck\n"); - return -1; - } - - /* If waiting in loop was performed, log last read value. 
*/ - if (KBASE_AS_INACTIVE_MAX_LOOPS - 1 > max_loops) - kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx); - - return 0; -} - -static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd, - struct kbase_context *kctx) -{ - int status; - - /* write AS_COMMAND when MMU is ready to accept another command */ - status = wait_ready(kbdev, as_nr, kctx); - if (status == 0) - kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd, - kctx); - - return status; -} - -void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat) -{ - const int num_as = 16; - const int busfault_shift = MMU_PAGE_FAULT_FLAGS; - const int pf_shift = 0; - const unsigned long as_bit_mask = (1UL << num_as) - 1; - unsigned long flags; - u32 new_mask; - u32 tmp; - - /* bus faults */ - u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask; - /* page faults (note: Ignore ASes with both pf and bf) */ - u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits; - - KBASE_DEBUG_ASSERT(NULL != kbdev); - - /* remember current mask */ - spin_lock_irqsave(&kbdev->mmu_mask_change, flags); - new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL); - /* mask interrupts for now */ - kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL); - spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags); - - while (bf_bits | pf_bits) { - struct kbase_as *as; - int as_no; - struct kbase_context *kctx; - - /* - * the while logic ensures we have a bit set, no need to check - * for not-found here - */ - as_no = ffs(bf_bits | pf_bits) - 1; - as = &kbdev->as[as_no]; - - /* - * Refcount the kctx ASAP - it shouldn't disappear anyway, since - * Bus/Page faults _should_ only occur whilst jobs are running, - * and a job causing the Bus/Page fault shouldn't complete until - * the MMU is updated - */ - kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no); - - /* find faulting address */ - as->fault_addr = kbase_reg_read(kbdev, - MMU_AS_REG(as_no, - AS_FAULTADDRESS_HI), - kctx); - as->fault_addr <<= 32; 
- as->fault_addr |= kbase_reg_read(kbdev, - MMU_AS_REG(as_no, - AS_FAULTADDRESS_LO), - kctx); - - /* record the fault status */ - as->fault_status = kbase_reg_read(kbdev, - MMU_AS_REG(as_no, - AS_FAULTSTATUS), - kctx); - - /* find the fault type */ - as->fault_type = (bf_bits & (1 << as_no)) ? - KBASE_MMU_FAULT_TYPE_BUS : - KBASE_MMU_FAULT_TYPE_PAGE; - - - if (kbase_as_has_bus_fault(as)) { - /* Mark bus fault as handled. - * Note that a bus fault is processed first in case - * where both a bus fault and page fault occur. - */ - bf_bits &= ~(1UL << as_no); - - /* remove the queued BF (and PF) from the mask */ - new_mask &= ~(MMU_BUS_ERROR(as_no) | - MMU_PAGE_FAULT(as_no)); - } else { - /* Mark page fault as handled */ - pf_bits &= ~(1UL << as_no); - - /* remove the queued PF from the mask */ - new_mask &= ~MMU_PAGE_FAULT(as_no); - } - - /* Process the interrupt for this address space */ - spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags); - kbase_mmu_interrupt_process(kbdev, kctx, as); - spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, - flags); - } - - /* reenable interrupts */ - spin_lock_irqsave(&kbdev->mmu_mask_change, flags); - tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL); - new_mask |= tmp; - kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask, NULL); - spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags); -} - -void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as, - struct kbase_context *kctx) -{ - struct kbase_mmu_setup *current_setup = &as->current_setup; -#if defined(CONFIG_MALI_MIPE_ENABLED) || \ - (defined(MALI_INCLUDE_TMIX) && \ - defined(CONFIG_MALI_COH_PAGES) && \ - defined(CONFIG_MALI_GPU_MMU_AARCH64)) - u32 transcfg = 0; -#endif - - - kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO), - current_setup->transtab & 0xFFFFFFFFUL, kctx); - kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI), - (current_setup->transtab >> 32) & 0xFFFFFFFFUL, kctx); - - kbase_reg_write(kbdev, 
MMU_AS_REG(as->number, AS_MEMATTR_LO), - current_setup->memattr & 0xFFFFFFFFUL, kctx); - kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI), - (current_setup->memattr >> 32) & 0xFFFFFFFFUL, kctx); - -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_tl_attrib_as_config(as, - current_setup->transtab, - current_setup->memattr, - transcfg); -#endif - - write_cmd(kbdev, as->number, AS_COMMAND_UPDATE, kctx); -} - -int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as, - struct kbase_context *kctx, u64 vpfn, u32 nr, u32 op, - unsigned int handling_irq) -{ - int ret; - - if (op == AS_COMMAND_UNLOCK) { - /* Unlock doesn't require a lock first */ - ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx); - } else { - u64 lock_addr = lock_region(kbdev, vpfn, nr); - - /* Lock the region that needs to be updated */ - kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO), - lock_addr & 0xFFFFFFFFUL, kctx); - kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI), - (lock_addr >> 32) & 0xFFFFFFFFUL, kctx); - write_cmd(kbdev, as->number, AS_COMMAND_LOCK, kctx); - - /* Run the MMU operation */ - write_cmd(kbdev, as->number, op, kctx); - - /* Wait for the flush to complete */ - ret = wait_ready(kbdev, as->number, kctx); - - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) { - /* Issue an UNLOCK command to ensure that valid page - tables are re-read by the GPU after an update. - Note that, the FLUSH command should perform all the - actions necessary, however the bus logs show that if - multiple page faults occur within an 8 page region - the MMU does not always re-read the updated page - table entries for later faults or is only partially - read, it subsequently raises the page fault IRQ for - the same addresses, the unlock ensures that the MMU - cache is flushed, so updates can be re-read. As the - region is now unlocked we need to issue 2 UNLOCK - commands in order to flush the MMU/uTLB, - see PRLAM-8812. 
- */ - write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx); - write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx); - } - } - - return ret; -} - -void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as, - struct kbase_context *kctx, enum kbase_mmu_fault_type type) -{ - u32 pf_bf_mask; - - /* Clear the page (and bus fault IRQ as well in case one occurred) */ - pf_bf_mask = MMU_PAGE_FAULT(as->number); - if (type == KBASE_MMU_FAULT_TYPE_BUS || - type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED) - pf_bf_mask |= MMU_BUS_ERROR(as->number); - - kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask, kctx); -} - -void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as, - struct kbase_context *kctx, enum kbase_mmu_fault_type type) -{ - unsigned long flags; - u32 irq_mask; - - /* Enable the page fault IRQ (and bus fault IRQ as well in case one - * occurred) */ - spin_lock_irqsave(&kbdev->mmu_mask_change, flags); - - irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx) | - MMU_PAGE_FAULT(as->number); - - if (type == KBASE_MMU_FAULT_TYPE_BUS || - type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED) - irq_mask |= MMU_BUS_ERROR(as->number); - - kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask, kctx); - - spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.h deleted file mode 100755 index c02253c6acc30..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/* - * Interface file for the direct implementation for MMU hardware access - * - * Direct MMU hardware interface - * - * This module provides the interface(s) that are required by the direct - * register access implementation of the MMU hardware interface - */ - -#ifndef _MALI_KBASE_MMU_HW_DIRECT_H_ -#define _MALI_KBASE_MMU_HW_DIRECT_H_ - -#include - -/** - * kbase_mmu_interrupt - Process an MMU interrupt. - * - * Process the MMU interrupt that was reported by the &kbase_device. - * - * @kbdev: kbase context to clear the fault from. - * @irq_stat: Value of the MMU_IRQ_STATUS register - */ -void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat); - -#endif /* _MALI_KBASE_MMU_HW_DIRECT_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c deleted file mode 100755 index 0614348e935ac..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * "Always on" power management policy - */ - -#include -#include - -static u64 always_on_get_core_mask(struct kbase_device *kbdev) -{ - return kbdev->gpu_props.props.raw_props.shader_present; -} - -static bool always_on_get_core_active(struct kbase_device *kbdev) -{ - return true; -} - -static void always_on_init(struct kbase_device *kbdev) -{ - CSTD_UNUSED(kbdev); -} - -static void always_on_term(struct kbase_device *kbdev) -{ - CSTD_UNUSED(kbdev); -} - -/* - * The struct kbase_pm_policy structure for the demand power policy. - * - * This is the static structure that defines the demand power policy's callback - * and name. - */ -const struct kbase_pm_policy kbase_pm_always_on_policy_ops = { - "always_on", /* name */ - always_on_init, /* init */ - always_on_term, /* term */ - always_on_get_core_mask, /* get_core_mask */ - always_on_get_core_active, /* get_core_active */ - 0u, /* flags */ - KBASE_PM_POLICY_ID_ALWAYS_ON, /* id */ -}; - -KBASE_EXPORT_TEST_API(kbase_pm_always_on_policy_ops); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h deleted file mode 100755 index f9d244b01bc23..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h +++ /dev/null @@ -1,77 +0,0 @@ - -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * "Always on" power management policy - */ - -#ifndef MALI_KBASE_PM_ALWAYS_ON_H -#define MALI_KBASE_PM_ALWAYS_ON_H - -/** - * DOC: - * The "Always on" power management policy has the following - * characteristics: - * - * - When KBase indicates that the GPU will be powered up, but we don't yet - * know which Job Chains are to be run: - * All Shader Cores are powered up, regardless of whether or not they will - * be needed later. - * - * - When KBase indicates that a set of Shader Cores are needed to submit the - * currently queued Job Chains: - * All Shader Cores are kept powered, regardless of whether or not they will - * be needed - * - * - When KBase indicates that the GPU need not be powered: - * The Shader Cores are kept powered, regardless of whether or not they will - * be needed. The GPU itself is also kept powered, even though it is not - * needed. - * - * This policy is automatically overridden during system suspend: the desired - * core state is ignored, and the cores are forced off regardless of what the - * policy requests. After resuming from suspend, new changes to the desired - * core state made by the policy are honored. - * - * Note: - * - * - KBase indicates the GPU will be powered up when it has a User Process that - * has just started to submit Job Chains. - * - * - KBase indicates the GPU need not be powered when all the Job Chains from - * User Processes have finished, and it is waiting for a User Process to - * submit some more Job Chains. - */ - -/** - * struct kbasep_pm_policy_always_on - Private struct for policy instance data - * @dummy: unused dummy variable - * - * This contains data that is private to the particular power policy that is - * active. 
- */ -struct kbasep_pm_policy_always_on { - int dummy; -}; - -extern const struct kbase_pm_policy kbase_pm_always_on_policy_ops; - -#endif /* MALI_KBASE_PM_ALWAYS_ON_H */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c deleted file mode 100755 index 9ff7baadec7a3..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c +++ /dev/null @@ -1,375 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - -/* - * GPU backend implementation of base kernel power management APIs - */ - -#include -#include -#include -#ifdef CONFIG_MALI_PLATFORM_DEVICETREE -#include -#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */ - -#include -#include -#include - -void kbase_pm_register_access_enable(struct kbase_device *kbdev) -{ - struct kbase_pm_callback_conf *callbacks; - -#ifdef CONFIG_MALI_PLATFORM_DEVICETREE - pm_runtime_enable(kbdev->dev); -#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */ - callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS; - - if (callbacks) - callbacks->power_on_callback(kbdev); - - kbdev->pm.backend.gpu_powered = true; -} - -void kbase_pm_register_access_disable(struct kbase_device *kbdev) -{ - struct kbase_pm_callback_conf *callbacks; - - callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS; - - if (callbacks) - callbacks->power_off_callback(kbdev); - - kbdev->pm.backend.gpu_powered = false; -#ifdef CONFIG_MALI_PLATFORM_DEVICETREE - pm_runtime_disable(kbdev->dev); -#endif -} - -int kbase_hwaccess_pm_init(struct kbase_device *kbdev) -{ - int ret = 0; - struct kbase_pm_callback_conf *callbacks; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - mutex_init(&kbdev->pm.lock); - - kbdev->pm.backend.gpu_powered = false; - kbdev->pm.suspending = false; -#ifdef CONFIG_MALI_DEBUG - kbdev->pm.backend.driver_ready_for_irqs = false; -#endif /* CONFIG_MALI_DEBUG */ - kbdev->pm.backend.gpu_in_desired_state = true; - init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait); - - callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS; - if (callbacks) { - kbdev->pm.backend.callback_power_on = - callbacks->power_on_callback; - kbdev->pm.backend.callback_power_off = - callbacks->power_off_callback; - kbdev->pm.backend.callback_power_suspend = - callbacks->power_suspend_callback; - kbdev->pm.backend.callback_power_resume = - callbacks->power_resume_callback; - kbdev->pm.callback_power_runtime_init = - 
callbacks->power_runtime_init_callback; - kbdev->pm.callback_power_runtime_term = - callbacks->power_runtime_term_callback; - kbdev->pm.backend.callback_power_runtime_on = - callbacks->power_runtime_on_callback; - kbdev->pm.backend.callback_power_runtime_off = - callbacks->power_runtime_off_callback; - } else { - kbdev->pm.backend.callback_power_on = NULL; - kbdev->pm.backend.callback_power_off = NULL; - kbdev->pm.backend.callback_power_suspend = NULL; - kbdev->pm.backend.callback_power_resume = NULL; - kbdev->pm.callback_power_runtime_init = NULL; - kbdev->pm.callback_power_runtime_term = NULL; - kbdev->pm.backend.callback_power_runtime_on = NULL; - kbdev->pm.backend.callback_power_runtime_off = NULL; - } - - /* Initialise the metrics subsystem */ - ret = kbasep_pm_metrics_init(kbdev); - if (ret) - return ret; - - init_waitqueue_head(&kbdev->pm.backend.l2_powered_wait); - kbdev->pm.backend.l2_powered = 0; - - init_waitqueue_head(&kbdev->pm.backend.reset_done_wait); - kbdev->pm.backend.reset_done = false; - - init_waitqueue_head(&kbdev->pm.zero_active_count_wait); - kbdev->pm.active_count = 0; - - spin_lock_init(&kbdev->pm.power_change_lock); - spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock); - spin_lock_init(&kbdev->pm.backend.gpu_powered_lock); - - if (kbase_pm_ca_init(kbdev) != 0) - goto workq_fail; - - if (kbase_pm_policy_init(kbdev) != 0) - goto pm_policy_fail; - - return 0; - -pm_policy_fail: - kbase_pm_ca_term(kbdev); -workq_fail: - kbasep_pm_metrics_term(kbdev); - return -EINVAL; -} - -void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume) -{ - lockdep_assert_held(&kbdev->pm.lock); - - /* Turn clocks and interrupts on - no-op if we haven't done a previous - * kbase_pm_clock_off() */ - kbase_pm_clock_on(kbdev, is_resume); - - /* Update core status as required by the policy */ - KBASE_TIMELINE_PM_CHECKTRANS(kbdev, - SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START); - kbase_pm_update_cores_state(kbdev); - 
KBASE_TIMELINE_PM_CHECKTRANS(kbdev, - SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END); - - /* NOTE: We don't wait to reach the desired state, since running atoms - * will wait for that state to be reached anyway */ -} - -bool kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend) -{ - unsigned long flags; - bool cores_are_available; - - lockdep_assert_held(&kbdev->pm.lock); - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - /* Force all cores off */ - kbdev->pm.backend.desired_shader_state = 0; - - /* Force all cores to be unavailable, in the situation where - * transitions are in progress for some cores but not others, - * and kbase_pm_check_transitions_nolock can not immediately - * power off the cores */ - kbdev->shader_available_bitmap = 0; - kbdev->tiler_available_bitmap = 0; - kbdev->l2_available_bitmap = 0; - - KBASE_TIMELINE_PM_CHECKTRANS(kbdev, - SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START); - cores_are_available = kbase_pm_check_transitions_nolock(kbdev); - KBASE_TIMELINE_PM_CHECKTRANS(kbdev, - SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END); - /* Don't need 'cores_are_available', because we don't return anything */ - CSTD_UNUSED(cores_are_available); - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - /* NOTE: We won't wait to reach the core's desired state, even if we're - * powering off the GPU itself too. 
It's safe to cut the power whilst - * they're transitioning to off, because the cores should be idle and - * all cache flushes should already have occurred */ - - /* Consume any change-state events */ - kbase_timeline_pm_check_handle_event(kbdev, - KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED); - /* Disable interrupts and turn the clock off */ - return kbase_pm_clock_off(kbdev, is_suspend); -} - -int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev, - unsigned int flags) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - unsigned long irq_flags; - int ret; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - mutex_lock(&js_devdata->runpool_mutex); - mutex_lock(&kbdev->pm.lock); - - /* A suspend won't happen during startup/insmod */ - KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev)); - - /* Power up the GPU, don't enable IRQs as we are not ready to receive - * them. */ - ret = kbase_pm_init_hw(kbdev, flags); - if (ret) { - mutex_unlock(&kbdev->pm.lock); - mutex_unlock(&js_devdata->runpool_mutex); - return ret; - } - - kbasep_pm_read_present_cores(kbdev); - - kbdev->pm.debug_core_mask = - kbdev->gpu_props.props.raw_props.shader_present; - - /* Pretend the GPU is active to prevent a power policy turning the GPU - * cores off */ - kbdev->pm.active_count = 1; - - spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock, - irq_flags); - /* Ensure cycle counter is off */ - kbdev->pm.backend.gpu_cycle_counter_requests = 0; - spin_unlock_irqrestore( - &kbdev->pm.backend.gpu_cycle_counter_requests_lock, - irq_flags); - - /* We are ready to receive IRQ's now as power policy is set up, so - * enable them now. 
*/ -#ifdef CONFIG_MALI_DEBUG - spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, irq_flags); - kbdev->pm.backend.driver_ready_for_irqs = true; - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, irq_flags); -#endif - kbase_pm_enable_interrupts(kbdev); - - /* Turn on the GPU and any cores needed by the policy */ - kbase_pm_do_poweron(kbdev, false); - mutex_unlock(&kbdev->pm.lock); - mutex_unlock(&js_devdata->runpool_mutex); - - /* Idle the GPU and/or cores, if the policy wants it to */ - kbase_pm_context_idle(kbdev); - - return 0; -} - -void kbase_hwaccess_pm_halt(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - mutex_lock(&kbdev->pm.lock); - kbase_pm_cancel_deferred_poweroff(kbdev); - if (!kbase_pm_do_poweroff(kbdev, false)) { - /* Page/bus faults are pending, must drop pm.lock to process. - * Interrupts are disabled so no more faults should be - * generated at this point */ - mutex_unlock(&kbdev->pm.lock); - kbase_flush_mmu_wqs(kbdev); - mutex_lock(&kbdev->pm.lock); - WARN_ON(!kbase_pm_do_poweroff(kbdev, false)); - } - mutex_unlock(&kbdev->pm.lock); -} - -KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt); - -void kbase_hwaccess_pm_term(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0); - KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0); - - /* Free any resources the policy allocated */ - kbase_pm_policy_term(kbdev); - kbase_pm_ca_term(kbdev); - - /* Shut down the metrics subsystem */ - kbasep_pm_metrics_term(kbdev); -} - -void kbase_pm_power_changed(struct kbase_device *kbdev) -{ - bool cores_are_available; - unsigned long flags; - - KBASE_TIMELINE_PM_CHECKTRANS(kbdev, - SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START); - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - cores_are_available = kbase_pm_check_transitions_nolock(kbdev); - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - KBASE_TIMELINE_PM_CHECKTRANS(kbdev, - 
SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END); - - if (cores_are_available) { - /* Log timelining information that a change in state has - * completed */ - kbase_timeline_pm_handle_event(kbdev, - KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED); - - spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags); - kbase_gpu_slot_update(kbdev); - spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags); - } -} - -void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 new_core_mask) -{ - kbdev->pm.debug_core_mask = new_core_mask; - - kbase_pm_update_cores_state_nolock(kbdev); -} - -void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev) -{ - kbase_pm_update_active(kbdev); -} - -void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev) -{ - kbase_pm_update_active(kbdev); -} - -void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - - /* Force power off the GPU and all cores (regardless of policy), only - * after the PM active count reaches zero (otherwise, we risk turning it - * off prematurely) */ - mutex_lock(&js_devdata->runpool_mutex); - mutex_lock(&kbdev->pm.lock); - kbase_pm_cancel_deferred_poweroff(kbdev); - if (!kbase_pm_do_poweroff(kbdev, true)) { - /* Page/bus faults are pending, must drop pm.lock to process. 
- * Interrupts are disabled so no more faults should be - * generated at this point */ - mutex_unlock(&kbdev->pm.lock); - kbase_flush_mmu_wqs(kbdev); - mutex_lock(&kbdev->pm.lock); - WARN_ON(!kbase_pm_do_poweroff(kbdev, false)); - } - - mutex_unlock(&kbdev->pm.lock); - mutex_unlock(&js_devdata->runpool_mutex); -} - -void kbase_hwaccess_pm_resume(struct kbase_device *kbdev) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - - mutex_lock(&js_devdata->runpool_mutex); - mutex_lock(&kbdev->pm.lock); - kbdev->pm.suspending = false; - kbase_pm_do_poweron(kbdev, true); - mutex_unlock(&kbdev->pm.lock); - mutex_unlock(&js_devdata->runpool_mutex); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c deleted file mode 100755 index 60b4758d92bb1..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c +++ /dev/null @@ -1,183 +0,0 @@ -/* - * - * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/* - * Base kernel core availability APIs - */ - -#include -#include -#include - -static const struct kbase_pm_ca_policy *const policy_list[] = { - &kbase_pm_ca_fixed_policy_ops, -#if !MALI_CUSTOMER_RELEASE - &kbase_pm_ca_demand_policy_ops, - &kbase_pm_ca_random_policy_ops, -#endif -}; - -/** - * POLICY_COUNT - The number of policies available in the system. - * - * This is derived from the number of functions listed in policy_list. 
- */ -#define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list)) - -int kbase_pm_ca_init(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - kbdev->pm.backend.ca_current_policy = policy_list[0]; - - kbdev->pm.backend.ca_current_policy->init(kbdev); - - return 0; -} - -void kbase_pm_ca_term(struct kbase_device *kbdev) -{ - kbdev->pm.backend.ca_current_policy->term(kbdev); -} - -int kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **list) -{ - if (!list) - return POLICY_COUNT; - - *list = policy_list; - - return POLICY_COUNT; -} - -KBASE_EXPORT_TEST_API(kbase_pm_ca_list_policies); - -const struct kbase_pm_ca_policy -*kbase_pm_ca_get_policy(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - return kbdev->pm.backend.ca_current_policy; -} - -KBASE_EXPORT_TEST_API(kbase_pm_ca_get_policy); - -void kbase_pm_ca_set_policy(struct kbase_device *kbdev, - const struct kbase_pm_ca_policy *new_policy) -{ - const struct kbase_pm_ca_policy *old_policy; - unsigned long flags; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(new_policy != NULL); - - KBASE_TRACE_ADD(kbdev, PM_CA_SET_POLICY, NULL, NULL, 0u, - new_policy->id); - - /* During a policy change we pretend the GPU is active */ - /* A suspend won't happen here, because we're in a syscall from a - * userspace thread */ - kbase_pm_context_active(kbdev); - - mutex_lock(&kbdev->pm.lock); - - /* Remove the policy to prevent IRQ handlers from working on it */ - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - old_policy = kbdev->pm.backend.ca_current_policy; - kbdev->pm.backend.ca_current_policy = NULL; - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - if (old_policy->term) - old_policy->term(kbdev); - - if (new_policy->init) - new_policy->init(kbdev); - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - kbdev->pm.backend.ca_current_policy = new_policy; - - /* If any core power state changes were previously attempted, but - * 
couldn't be made because the policy was changing (current_policy was - * NULL), then re-try them here. */ - kbase_pm_update_cores_state_nolock(kbdev); - - kbdev->pm.backend.ca_current_policy->update_core_status(kbdev, - kbdev->shader_ready_bitmap, - kbdev->shader_transitioning_bitmap); - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - mutex_unlock(&kbdev->pm.lock); - - /* Now the policy change is finished, we release our fake context active - * reference */ - kbase_pm_context_idle(kbdev); -} - -KBASE_EXPORT_TEST_API(kbase_pm_ca_set_policy); - -u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev) -{ - lockdep_assert_held(&kbdev->pm.power_change_lock); - - /* All cores must be enabled when instrumentation is in use */ - if (kbdev->pm.backend.instr_enabled) - return kbdev->gpu_props.props.raw_props.shader_present & - kbdev->pm.debug_core_mask; - - if (kbdev->pm.backend.ca_current_policy == NULL) - return kbdev->gpu_props.props.raw_props.shader_present & - kbdev->pm.debug_core_mask; - - return kbdev->pm.backend.ca_current_policy->get_core_mask(kbdev) & - kbdev->pm.debug_core_mask; -} - -KBASE_EXPORT_TEST_API(kbase_pm_ca_get_core_mask); - -void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready, - u64 cores_transitioning) -{ - lockdep_assert_held(&kbdev->pm.power_change_lock); - - if (kbdev->pm.backend.ca_current_policy != NULL) - kbdev->pm.backend.ca_current_policy->update_core_status(kbdev, - cores_ready, - cores_transitioning); -} - -void kbase_pm_ca_instr_enable(struct kbase_device *kbdev) -{ - unsigned long flags; - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - kbdev->pm.backend.instr_enabled = true; - - kbase_pm_update_cores_state_nolock(kbdev); - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); -} - -void kbase_pm_ca_instr_disable(struct kbase_device *kbdev) -{ - unsigned long flags; - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - kbdev->pm.backend.instr_enabled = false; - - 
kbase_pm_update_cores_state_nolock(kbdev); - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h deleted file mode 100755 index ee9e751f2d79b..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h +++ /dev/null @@ -1,92 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/* - * Base kernel core availability APIs - */ - -#ifndef _KBASE_PM_CA_H_ -#define _KBASE_PM_CA_H_ - -/** - * kbase_pm_ca_init - Initialize core availability framework - * - * Must be called before calling any other core availability function - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * - * Return: 0 if the core availability framework was successfully initialized, - * -errno otherwise - */ -int kbase_pm_ca_init(struct kbase_device *kbdev); - -/** - * kbase_pm_ca_term - Terminate core availability framework - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_ca_term(struct kbase_device *kbdev); - -/** - * kbase_pm_ca_get_core_mask - Get currently available shaders core mask - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * - * Returns a mask of the currently available shader cores. 
- * Calls into the core availability policy - * - * Return: The bit mask of available cores - */ -u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev); - -/** - * kbase_pm_ca_update_core_status - Update core status - * - * @kbdev: The kbase device structure for the device (must be - * a valid pointer) - * @cores_ready: The bit mask of cores ready for job submission - * @cores_transitioning: The bit mask of cores that are transitioning power - * state - * - * Update core availability policy with current core power status - * - * Calls into the core availability policy - */ -void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready, - u64 cores_transitioning); - -/** - * kbase_pm_ca_instr_enable - Enable override for instrumentation - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * - * This overrides the output of the core availability policy, ensuring that all - * cores are available - */ -void kbase_pm_ca_instr_enable(struct kbase_device *kbdev); - -/** - * kbase_pm_ca_instr_disable - Disable override for instrumentation - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * - * This disables any previously enabled override, and resumes normal policy - * functionality - */ -void kbase_pm_ca_instr_disable(struct kbase_device *kbdev); - -#endif /* _KBASE_PM_CA_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_demand.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_demand.c deleted file mode 100644 index 14a75079f1b96..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_demand.c +++ /dev/null @@ -1,241 +0,0 @@ -/* - * This confidential and proprietary software may be used only as - * authorised by a licensing agreement from ARM Limited - * (C) COPYRIGHT 2013 ARM Limited - * ALL RIGHTS RESERVED - * The entire notice above must be reproduced 
on all authorised - * copies and copies may only be made to the extent permitted - * by a licensing agreement from ARM Limited. - */ - -/* - * A core availability policy implementing demand core rotation. - * - * This policy periodically selects a new core mask at demand. This new mask is - * applied in two stages. It initially powers off all undesired cores, by - * removing them from the core availability mask. Once it is confirmed that - * these cores are powered off, then the desired cores are powered on. - */ - -#include -#include -#include -#include -#include -#include - -#ifndef MALI_CA_TIMER -static struct workqueue_struct *mali_ca_wq = NULL; -#endif -static int num_shader_cores_to_be_enabled = 0; - -int mali_perf_set_num_pp_cores(int cores) -{ -#ifndef MALI_CA_TIMER - struct kbase_device *kbdev = (struct kbase_device *)priv; - struct kbasep_pm_ca_policy_demand *data = - &kbdev->pm.backend.ca_policy_data.demand; -#endif - - num_shader_cores_to_be_enabled = cores -1; - printk("pp num=%d\n", num_shader_cores_to_be_enabled); - if (num_shader_cores_to_be_enabled < 1) - num_shader_cores_to_be_enabled = 1; - -#ifndef MALI_CA_TIMER - queue_work(mali_ca_wq, &data->wq_work); -#endif - - return 0; -} - -#ifdef MALI_CA_TIMER -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) -/* random32 was renamed to prandom_u32 in 3.8 */ -#define prandom_u32 random32 -#endif - -void demand_timer_callback(unsigned long cb) -{ - struct kbase_device *kbdev = (struct kbase_device *)cb; - struct kbasep_pm_ca_policy_demand *data = - &kbdev->pm.backend.ca_policy_data.demand; - unsigned long flags; - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - /* Select new core mask, ensuring that core group 0 is not powered off - */ -#if 0 - do { - data->cores_desired = prandom_u32() & - kbdev->gpu_props.props.raw_props.shader_present; - } while (!(data->cores_desired & - kbdev->gpu_props.props.coherency_info.group[0].core_mask)); -#endif - data->cores_desired = 
num_shader_cores_to_be_enabled; - - if (!(data->cores_desired & - kbdev->gpu_props.props.coherency_info.group[0].core_mask)) - dev_info(kbdev->dev, "error, ca demand policy : new core mask=%llX\n", - data->cores_desired); - - /* Disable any cores that are now unwanted */ - data->cores_enabled &= data->cores_desired; - - kbdev->pm.backend.ca_in_transition = true; - - /* If there are no cores to be powered off then power on desired cores - */ - if (!(data->cores_used & ~data->cores_desired)) { - data->cores_enabled = data->cores_desired; - kbdev->pm.backend.ca_in_transition = false; - } - - data->core_change_timer.expires = jiffies + HZ; - - add_timer(&data->core_change_timer); - - kbase_pm_update_cores_state_nolock(kbdev); - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - dev_info(kbdev->dev, "ca demand policy : new core mask=%llX %llX\n", - data->cores_desired, data->cores_enabled); -} - -KBASE_EXPORT_TEST_API(demand_timer_callback); -#else -static void do_ca_scaling(struct work_struct *work) -{ - //unsigned long flags; - struct kbase_device *kbdev = = container_of(work, - struct kbasep_pm_ca_policy_demand, wq_work); - struct kbasep_pm_ca_policy_random *data = - &kbdev->pm.backend.ca_policy_data.random; - unsigned long flags; - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - /* Select new core mask, ensuring that core group 0 is not powered off - */ - data->cores_desired = num_shader_cores_to_be_enabled; - - /* Disable any cores that are now unwanted */ - data->cores_enabled &= data->cores_desired; - - kbdev->pm.backend.ca_in_transition = true; - - /* If there are no cores to be powered off then power on desired cores - */ - if (!(data->cores_used & ~data->cores_desired)) { - data->cores_enabled = data->cores_desired; - kbdev->pm.backend.ca_in_transition = false; - } - - kbase_pm_update_cores_state_nolock(kbdev); - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - dev_info(kbdev->dev, "Random policy : new core 
mask=%llX %llX\n", - data->cores_desired, data->cores_enabled); - -} -#endif - -static void demand_init(struct kbase_device *kbdev) -{ - struct kbasep_pm_ca_policy_demand *data = - &kbdev->pm.backend.ca_policy_data.demand; - - data->cores_enabled = kbdev->gpu_props.props.raw_props.shader_present; - data->cores_desired = kbdev->gpu_props.props.raw_props.shader_present; - data->cores_used = 0; - kbdev->pm.backend.ca_in_transition = false; - -#ifdef MALI_CA_TIMER - init_timer(&data->core_change_timer); - - data->core_change_timer.function = demand_timer_callback; - data->core_change_timer.data = (unsigned long) kbdev; - data->core_change_timer.expires = jiffies + 5 * HZ; - - add_timer(&data->core_change_timer); -#else -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) - mali_ca_wq = alloc_workqueue("gpu_ca_wq", WQ_HIGHPRI | WQ_UNBOUND, 0); -#else - mali_ca_wq = create_workqueue("gpu_ca_wq"); -#endif - INIT_WORK(&data->wq_work, do_ca_scaling); - if (mali_ca_wq == NULL) printk("Unable to create gpu scaling workqueue\n"); -#endif -} - -static void demand_term(struct kbase_device *kbdev) -{ - struct kbasep_pm_ca_policy_demand *data = - &kbdev->pm.backend.ca_policy_data.demand; - -#ifdef MALI_CA_TIMER - del_timer(&data->core_change_timer); -#else - if (mali_ca_wq) { - flush_workqueue(mali_ca_wq); - destroy_workqueue(mali_ca_wq); - mali_ca_wq = NULL; - } -#endif -} - -static u64 demand_get_core_mask(struct kbase_device *kbdev) -{ - return kbdev->pm.backend.ca_policy_data.demand.cores_enabled; -} - -static void demand_update_core_status(struct kbase_device *kbdev, - u64 cores_ready, - u64 cores_transitioning) -{ - struct kbasep_pm_ca_policy_demand *data = - &kbdev->pm.backend.ca_policy_data.demand; - - lockdep_assert_held(&kbdev->pm.power_change_lock); - - data->cores_used = cores_ready | cores_transitioning; - - /* If in desired state then clear transition flag */ - if (data->cores_enabled == data->cores_desired) - kbdev->pm.backend.ca_in_transition = false; - - /* If all 
undesired cores are now off then power on desired cores. - * The direct comparison against cores_enabled limits potential - * recursion to one level */ - if (!(data->cores_used & ~data->cores_desired) && - data->cores_enabled != data->cores_desired) { - data->cores_enabled = data->cores_desired; - - kbase_pm_update_cores_state_nolock(kbdev); - - kbdev->pm.backend.ca_in_transition = false; - } -} - -/* - * The struct kbase_pm_ca_policy structure for the demand core availability - * policy. - * - * This is the static structure that defines the demand core availability power - * policy's callback and name. - */ -const struct kbase_pm_ca_policy kbase_pm_ca_demand_policy_ops = { - "demand", /* name */ - demand_init, /* init */ - demand_term, /* term */ - demand_get_core_mask, /* get_core_mask */ - demand_update_core_status, /* update_core_status */ - 0u, /* flags */ - KBASE_PM_CA_POLICY_ID_DEMAND, /* id */ -}; - -KBASE_EXPORT_TEST_API(kbase_pm_ca_demand_policy_ops); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_demand.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_demand.h deleted file mode 100644 index c1a4c9635b3c5..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_demand.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * This confidential and proprietary software may be used only as - * authorised by a licensing agreement from ARM Limited - * (C) COPYRIGHT 2013 ARM Limited - * ALL RIGHTS RESERVED - * The entire notice above must be reproduced on all authorised - * copies and copies may only be made to the extent permitted - * by a licensing agreement from ARM Limited. 
- */ - -/* - * A core availability policy implementing demand core rotation - */ - -#ifndef MALI_KBASE_PM_CA_DEMAND_H -#define MALI_KBASE_PM_CA_DEMAND_H -#include -/** - * struct kbasep_pm_ca_policy_demand - Private structure for demand ca policy - * - * This contains data that is private to the demand core availability - * policy. - * - * @cores_desired: Cores that the policy wants to be available - * @cores_enabled: Cores that the policy is currently returning as available - * @cores_used: Cores currently powered or transitioning - * @core_change_timer: Timer for changing desired core mask - */ -#define MALI_CA_TIMER 1 -struct kbasep_pm_ca_policy_demand { - u64 cores_desired; - u64 cores_enabled; - u64 cores_used; -#ifdef MALI_CA_TIMER - struct timer_list core_change_timer; -#else - struct work_struct wq_work; -#endif -}; - -extern const struct kbase_pm_ca_policy kbase_pm_ca_demand_policy_ops; -extern int mali_perf_set_num_pp_cores(int cores); - -#endif /* MALI_KBASE_PM_CA_DEMAND_H */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.c deleted file mode 100755 index 864612d31f9b3..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.c +++ /dev/null @@ -1,65 +0,0 @@ -/* - * - * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/* - * A power policy implementing fixed core availability - */ - -#include -#include - -static void fixed_init(struct kbase_device *kbdev) -{ - kbdev->pm.backend.ca_in_transition = false; -} - -static void fixed_term(struct kbase_device *kbdev) -{ - CSTD_UNUSED(kbdev); -} - -static u64 fixed_get_core_mask(struct kbase_device *kbdev) -{ - return kbdev->gpu_props.props.raw_props.shader_present; -} - -static void fixed_update_core_status(struct kbase_device *kbdev, - u64 cores_ready, - u64 cores_transitioning) -{ - CSTD_UNUSED(kbdev); - CSTD_UNUSED(cores_ready); - CSTD_UNUSED(cores_transitioning); -} - -/* - * The struct kbase_pm_policy structure for the fixed power policy. - * - * This is the static structure that defines the fixed power policy's callback - * and name. - */ -const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops = { - "fixed", /* name */ - fixed_init, /* init */ - fixed_term, /* term */ - fixed_get_core_mask, /* get_core_mask */ - fixed_update_core_status, /* update_core_status */ - 0u, /* flags */ - KBASE_PM_CA_POLICY_ID_FIXED, /* id */ -}; - -KBASE_EXPORT_TEST_API(kbase_pm_ca_fixed_policy_ops); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.h deleted file mode 100755 index a763155cb703d..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * - * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/* - * A power policy implementing fixed core availability - */ - -#ifndef MALI_KBASE_PM_CA_FIXED_H -#define MALI_KBASE_PM_CA_FIXED_H - -/** - * struct kbasep_pm_ca_policy_fixed - Private structure for policy instance data - * - * @dummy: Dummy member - no state is needed - * - * This contains data that is private to the particular power policy that is - * active. - */ -struct kbasep_pm_ca_policy_fixed { - int dummy; -}; - -extern const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops; - -#endif /* MALI_KBASE_PM_CA_FIXED_H */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_random.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_random.c deleted file mode 100644 index 34922ef3dd556..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_random.c +++ /dev/null @@ -1,151 +0,0 @@ -/* - * This confidential and proprietary software may be used only as - * authorised by a licensing agreement from ARM Limited - * (C) COPYRIGHT 2013 ARM Limited - * ALL RIGHTS RESERVED - * The entire notice above must be reproduced on all authorised - * copies and copies may only be made to the extent permitted - * by a licensing agreement from ARM Limited. - */ - -/* - * A core availability policy implementing random core rotation. - * - * This policy periodically selects a new core mask at random. This new mask is - * applied in two stages. It initially powers off all undesired cores, by - * removing them from the core availability mask. Once it is confirmed that - * these cores are powered off, then the desired cores are powered on. 
- */ - -#include -#include -#include -#include -#include - -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) -/* random32 was renamed to prandom_u32 in 3.8 */ -#define prandom_u32 random32 -#endif - -void random_timer_callback(unsigned long cb) -{ - struct kbase_device *kbdev = (struct kbase_device *)cb; - struct kbasep_pm_ca_policy_random *data = - &kbdev->pm.backend.ca_policy_data.random; - unsigned long flags; - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - /* Select new core mask, ensuring that core group 0 is not powered off - */ - do { - data->cores_desired = prandom_u32() & - kbdev->gpu_props.props.raw_props.shader_present; - } while (!(data->cores_desired & - kbdev->gpu_props.props.coherency_info.group[0].core_mask)); - - /* Disable any cores that are now unwanted */ - data->cores_enabled &= data->cores_desired; - - kbdev->pm.backend.ca_in_transition = true; - - /* If there are no cores to be powered off then power on desired cores - */ - if (!(data->cores_used & ~data->cores_desired)) { - data->cores_enabled = data->cores_desired; - kbdev->pm.backend.ca_in_transition = false; - } - - data->core_change_timer.expires = jiffies + 5 * HZ; - - add_timer(&data->core_change_timer); - - kbase_pm_update_cores_state_nolock(kbdev); - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - dev_info(kbdev->dev, "Random policy : new core mask=%llX %llX\n", - data->cores_desired, data->cores_enabled); -} - -KBASE_EXPORT_TEST_API(random_timer_callback); - -static void random_init(struct kbase_device *kbdev) -{ - struct kbasep_pm_ca_policy_random *data = - &kbdev->pm.backend.ca_policy_data.random; - - data->cores_enabled = kbdev->gpu_props.props.raw_props.shader_present; - data->cores_desired = kbdev->gpu_props.props.raw_props.shader_present; - data->cores_used = 0; - kbdev->pm.backend.ca_in_transition = false; - - init_timer(&data->core_change_timer); - - data->core_change_timer.function = random_timer_callback; - data->core_change_timer.data = 
(unsigned long) kbdev; - data->core_change_timer.expires = jiffies + 5 * HZ; - - add_timer(&data->core_change_timer); -} - -static void random_term(struct kbase_device *kbdev) -{ - struct kbasep_pm_ca_policy_random *data = - &kbdev->pm.backend.ca_policy_data.random; - - del_timer(&data->core_change_timer); -} - -static u64 random_get_core_mask(struct kbase_device *kbdev) -{ - return kbdev->pm.backend.ca_policy_data.random.cores_enabled; -} - -static void random_update_core_status(struct kbase_device *kbdev, - u64 cores_ready, - u64 cores_transitioning) -{ - struct kbasep_pm_ca_policy_random *data = - &kbdev->pm.backend.ca_policy_data.random; - - lockdep_assert_held(&kbdev->pm.power_change_lock); - - data->cores_used = cores_ready | cores_transitioning; - - /* If in desired state then clear transition flag */ - if (data->cores_enabled == data->cores_desired) - kbdev->pm.backend.ca_in_transition = false; - - /* If all undesired cores are now off then power on desired cores. - * The direct comparison against cores_enabled limits potential - * recursion to one level */ - if (!(data->cores_used & ~data->cores_desired) && - data->cores_enabled != data->cores_desired) { - data->cores_enabled = data->cores_desired; - - kbase_pm_update_cores_state_nolock(kbdev); - - kbdev->pm.backend.ca_in_transition = false; - } -} - -/* - * The struct kbase_pm_ca_policy structure for the random core availability - * policy. - * - * This is the static structure that defines the random core availability power - * policy's callback and name. 
- */ -const struct kbase_pm_ca_policy kbase_pm_ca_random_policy_ops = { - "random", /* name */ - random_init, /* init */ - random_term, /* term */ - random_get_core_mask, /* get_core_mask */ - random_update_core_status, /* update_core_status */ - 0u, /* flags */ - KBASE_PM_CA_POLICY_ID_RANDOM, /* id */ -}; - -KBASE_EXPORT_TEST_API(kbase_pm_ca_random_policy_ops); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_random.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_random.h deleted file mode 100644 index 150560686215b..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_random.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * This confidential and proprietary software may be used only as - * authorised by a licensing agreement from ARM Limited - * (C) COPYRIGHT 2013 ARM Limited - * ALL RIGHTS RESERVED - * The entire notice above must be reproduced on all authorised - * copies and copies may only be made to the extent permitted - * by a licensing agreement from ARM Limited. - */ - -/* - * A core availability policy implementing random core rotation - */ - -#ifndef MALI_KBASE_PM_CA_RANDOM_H -#define MALI_KBASE_PM_CA_RANDOM_H - -/** - * struct kbasep_pm_ca_policy_random - Private structure for random ca policy - * - * This contains data that is private to the random core availability - * policy. 
- * - * @cores_desired: Cores that the policy wants to be available - * @cores_enabled: Cores that the policy is currently returning as available - * @cores_used: Cores currently powered or transitioning - * @core_change_timer: Timer for changing desired core mask - */ -struct kbasep_pm_ca_policy_random { - u64 cores_desired; - u64 cores_enabled; - u64 cores_used; - struct timer_list core_change_timer; -}; - -extern const struct kbase_pm_ca_policy kbase_pm_ca_random_policy_ops; - -#endif /* MALI_KBASE_PM_CA_RANDOM_H */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c deleted file mode 100755 index 487391168e25a..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/* - * "Coarse Demand" power management policy - */ - -#include -#include - -static u64 coarse_demand_get_core_mask(struct kbase_device *kbdev) -{ - if (kbdev->pm.active_count == 0) - return 0; - - return kbdev->gpu_props.props.raw_props.shader_present; -} - -static bool coarse_demand_get_core_active(struct kbase_device *kbdev) -{ - if (0 == kbdev->pm.active_count && !(kbdev->shader_needed_bitmap | - kbdev->shader_inuse_bitmap)) - return false; - - return true; -} - -static void coarse_demand_init(struct kbase_device *kbdev) -{ - CSTD_UNUSED(kbdev); -} - -static void coarse_demand_term(struct kbase_device *kbdev) -{ - CSTD_UNUSED(kbdev); -} - -/* The struct kbase_pm_policy structure for the demand power policy. - * - * This is the static structure that defines the demand power policy's callback - * and name. - */ -const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops = { - "coarse_demand", /* name */ - coarse_demand_init, /* init */ - coarse_demand_term, /* term */ - coarse_demand_get_core_mask, /* get_core_mask */ - coarse_demand_get_core_active, /* get_core_active */ - 0u, /* flags */ - KBASE_PM_POLICY_ID_COARSE_DEMAND, /* id */ -}; - -KBASE_EXPORT_TEST_API(kbase_pm_coarse_demand_policy_ops); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h deleted file mode 100755 index 749d305eee9a9..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * "Coarse Demand" power management policy - */ - -#ifndef MALI_KBASE_PM_COARSE_DEMAND_H -#define MALI_KBASE_PM_COARSE_DEMAND_H - -/** - * DOC: - * The "Coarse" demand power management policy has the following - * characteristics: - * - When KBase indicates that the GPU will be powered up, but we don't yet - * know which Job Chains are to be run: - * - All Shader Cores are powered up, regardless of whether or not they will - * be needed later. - * - When KBase indicates that a set of Shader Cores are needed to submit the - * currently queued Job Chains: - * - All Shader Cores are kept powered, regardless of whether or not they will - * be needed - * - When KBase indicates that the GPU need not be powered: - * - The Shader Cores are powered off, and the GPU itself is powered off too. - * - * @note: - * - KBase indicates the GPU will be powered up when it has a User Process that - * has just started to submit Job Chains. - * - KBase indicates the GPU need not be powered when all the Job Chains from - * User Processes have finished, and it is waiting for a User Process to - * submit some more Job Chains. - */ - -/** - * struct kbasep_pm_policy_coarse_demand - Private structure for coarse demand - * policy - * - * This contains data that is private to the coarse demand power policy. 
- * - * @dummy: Dummy member - no state needed - */ -struct kbasep_pm_policy_coarse_demand { - int dummy; -}; - -extern const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops; - -#endif /* MALI_KBASE_PM_COARSE_DEMAND_H */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h deleted file mode 100755 index 893c271ed1e01..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h +++ /dev/null @@ -1,481 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/* - * Backend-specific Power Manager definitions - */ - -#ifndef _KBASE_PM_HWACCESS_DEFS_H_ -#define _KBASE_PM_HWACCESS_DEFS_H_ - -#include "mali_kbase_pm_ca_fixed.h" -#if !MALI_CUSTOMER_RELEASE -#include "mali_kbase_pm_ca_random.h" -#include "mali_kbase_pm_ca_demand.h" -#endif - -#include "mali_kbase_pm_always_on.h" -#include "mali_kbase_pm_coarse_demand.h" -#include "mali_kbase_pm_demand.h" -#if !MALI_CUSTOMER_RELEASE -#include "mali_kbase_pm_demand_always_powered.h" -#include "mali_kbase_pm_fast_start.h" -#endif - -/* Forward definition - see mali_kbase.h */ -struct kbase_device; -struct kbase_jd_atom; - -/** - * enum kbase_pm_core_type - The types of core in a GPU. 
- * - * These enumerated values are used in calls to - * - kbase_pm_get_present_cores() - * - kbase_pm_get_active_cores() - * - kbase_pm_get_trans_cores() - * - kbase_pm_get_ready_cores(). - * - * They specify which type of core should be acted on. These values are set in - * a manner that allows core_type_to_reg() function to be simpler and more - * efficient. - * - * @KBASE_PM_CORE_L2: The L2 cache - * @KBASE_PM_CORE_SHADER: Shader cores - * @KBASE_PM_CORE_TILER: Tiler cores - */ -enum kbase_pm_core_type { - KBASE_PM_CORE_L2 = L2_PRESENT_LO, - KBASE_PM_CORE_SHADER = SHADER_PRESENT_LO, - KBASE_PM_CORE_TILER = TILER_PRESENT_LO -}; - -/** - * struct kbasep_pm_metrics_data - Metrics data collected for use by the power - * management framework. - * - * @time_period_start: time at which busy/idle measurements started - * @time_busy: number of ns the GPU was busy executing jobs since the - * @time_period_start timestamp. - * @time_idle: number of ns since time_period_start the GPU was not executing - * jobs since the @time_period_start timestamp. - * @prev_busy: busy time in ns of previous time period. - * Updated when metrics are reset. - * @prev_idle: idle time in ns of previous time period - * Updated when metrics are reset. - * @gpu_active: true when the GPU is executing jobs. false when - * not. Updated when the job scheduler informs us a job in submitted - * or removed from a GPU slot. - * @busy_cl: number of ns the GPU was busy executing CL jobs. Note that - * if two CL jobs were active for 400ns, this value would be updated - * with 800. - * @busy_gl: number of ns the GPU was busy executing GL jobs. Note that - * if two GL jobs were active for 400ns, this value would be updated - * with 800. - * @active_cl_ctx: number of CL jobs active on the GPU. Array is per-device. - * @active_gl_ctx: number of GL jobs active on the GPU. Array is per-slot. As - * GL jobs never run on slot 2 this slot is not recorded. 
- * @lock: spinlock protecting the kbasep_pm_metrics_data structure - * @timer: timer to regularly make DVFS decisions based on the power - * management metrics. - * @timer_active: boolean indicating @timer is running - * @platform_data: pointer to data controlled by platform specific code - * @kbdev: pointer to kbase device for which metrics are collected - * - */ -struct kbasep_pm_metrics_data { - ktime_t time_period_start; - u32 time_busy; - u32 time_idle; - u32 prev_busy; - u32 prev_idle; - bool gpu_active; - u32 busy_cl[2]; - u32 busy_gl; - u32 active_cl_ctx[2]; - u32 active_gl_ctx[2]; /* GL jobs can only run on 2 of the 3 job slots */ - spinlock_t lock; - -#ifdef CONFIG_MALI_MIDGARD_DVFS - struct hrtimer timer; - bool timer_active; -#endif - - void *platform_data; - struct kbase_device *kbdev; -}; - -union kbase_pm_policy_data { - struct kbasep_pm_policy_always_on always_on; - struct kbasep_pm_policy_coarse_demand coarse_demand; - struct kbasep_pm_policy_demand demand; -#if !MALI_CUSTOMER_RELEASE - struct kbasep_pm_policy_demand_always_powered demand_always_powered; - struct kbasep_pm_policy_fast_start fast_start; -#endif -}; - -union kbase_pm_ca_policy_data { - struct kbasep_pm_ca_policy_fixed fixed; -#if !MALI_CUSTOMER_RELEASE - struct kbasep_pm_ca_policy_random random; - struct kbasep_pm_ca_policy_demand demand; -#endif -}; - -/** - * struct kbase_pm_backend_data - Data stored per device for power management. - * - * This structure contains data for the power management framework. There is one - * instance of this structure per device in the system. - * - * @ca_current_policy: The policy that is currently actively controlling core - * availability. - * @pm_current_policy: The policy that is currently actively controlling the - * power state. - * @ca_policy_data: Private data for current CA policy - * @pm_policy_data: Private data for current PM policy - * @ca_in_transition: Flag indicating when core availability policy is - * transitioning cores. 
The core availability policy must - * set this when a change in core availability is occurring. - * power_change_lock must be held when accessing this. - * @reset_done: Flag when a reset is complete - * @reset_done_wait: Wait queue to wait for changes to @reset_done - * @l2_powered_wait: Wait queue for whether the l2 cache has been powered as - * requested - * @l2_powered: State indicating whether all the l2 caches are powered. - * Non-zero indicates they're *all* powered - * Zero indicates that some (or all) are not powered - * @gpu_cycle_counter_requests: The reference count of active gpu cycle counter - * users - * @gpu_cycle_counter_requests_lock: Lock to protect @gpu_cycle_counter_requests - * @desired_shader_state: A bit mask identifying the shader cores that the - * power policy would like to be on. The current state - * of the cores may be different, but there should be - * transitions in progress that will eventually achieve - * this state (assuming that the policy doesn't change - * its mind in the mean time). - * @powering_on_shader_state: A bit mask indicating which shader cores are - * currently in a power-on transition - * @desired_tiler_state: A bit mask identifying the tiler cores that the power - * policy would like to be on. 
See @desired_shader_state - * @powering_on_tiler_state: A bit mask indicating which tiler core are - * currently in a power-on transition - * @powering_on_l2_state: A bit mask indicating which l2-caches are currently - * in a power-on transition - * @gpu_in_desired_state: This flag is set if the GPU is powered as requested - * by the desired_xxx_state variables - * @gpu_in_desired_state_wait: Wait queue set when @gpu_in_desired_state != 0 - * @gpu_powered: Set to true when the GPU is powered and register - * accesses are possible, false otherwise - * @instr_enabled: Set to true when instrumentation is enabled, - * false otherwise - * @cg1_disabled: Set if the policy wants to keep the second core group - * powered off - * @driver_ready_for_irqs: Debug state indicating whether sufficient - * initialization of the driver has occurred to handle - * IRQs - * @gpu_powered_lock: Spinlock that must be held when writing @gpu_powered or - * accessing @driver_ready_for_irqs - * @metrics: Structure to hold metrics for the GPU - * @gpu_poweroff_pending: number of poweroff timer ticks until the GPU is - * powered off - * @shader_poweroff_pending_time: number of poweroff timer ticks until shaders - * are powered off - * @gpu_poweroff_timer: Timer for powering off GPU - * @gpu_poweroff_wq: Workqueue to power off GPU on when timer fires - * @gpu_poweroff_work: Workitem used on @gpu_poweroff_wq - * @shader_poweroff_pending: Bit mask of shaders to be powered off on next - * timer callback - * @poweroff_timer_needed: true if the poweroff timer is currently running, - * false otherwise - * @callback_power_on: Callback when the GPU needs to be turned on. See - * &struct kbase_pm_callback_conf - * @callback_power_off: Callback when the GPU may be turned off. See - * &struct kbase_pm_callback_conf - * @callback_power_suspend: Callback when a suspend occurs and the GPU needs to - * be turned off. 
See &struct kbase_pm_callback_conf - * @callback_power_resume: Callback when a resume occurs and the GPU needs to - * be turned on. See &struct kbase_pm_callback_conf - * @callback_power_runtime_on: Callback when the GPU needs to be turned on. See - * &struct kbase_pm_callback_conf - * @callback_power_runtime_off: Callback when the GPU may be turned off. See - * &struct kbase_pm_callback_conf - * @callback_cci_snoop_ctrl: Callback when the GPU L2 power may transition. - * If enable is set then snoops should be enabled - * otherwise snoops should be disabled - * - * Note: - * During an IRQ, @ca_current_policy or @pm_current_policy can be NULL when the - * policy is being changed with kbase_pm_ca_set_policy() or - * kbase_pm_set_policy(). The change is protected under - * kbase_device.pm.power_change_lock. Direct access to this - * from IRQ context must therefore check for NULL. If NULL, then - * kbase_pm_ca_set_policy() or kbase_pm_set_policy() will re-issue the policy - * functions that would have been done under IRQ. 
- */ -struct kbase_pm_backend_data { - const struct kbase_pm_ca_policy *ca_current_policy; - const struct kbase_pm_policy *pm_current_policy; - union kbase_pm_ca_policy_data ca_policy_data; - union kbase_pm_policy_data pm_policy_data; - bool ca_in_transition; - bool reset_done; - wait_queue_head_t reset_done_wait; - wait_queue_head_t l2_powered_wait; - int l2_powered; - int gpu_cycle_counter_requests; - spinlock_t gpu_cycle_counter_requests_lock; - - u64 desired_shader_state; - u64 powering_on_shader_state; - u64 desired_tiler_state; - u64 powering_on_tiler_state; - u64 powering_on_l2_state; - - bool gpu_in_desired_state; - wait_queue_head_t gpu_in_desired_state_wait; - - bool gpu_powered; - - bool instr_enabled; - - bool cg1_disabled; - -#ifdef CONFIG_MALI_DEBUG - bool driver_ready_for_irqs; -#endif /* CONFIG_MALI_DEBUG */ - - spinlock_t gpu_powered_lock; - - - struct kbasep_pm_metrics_data metrics; - - int gpu_poweroff_pending; - int shader_poweroff_pending_time; - - struct hrtimer gpu_poweroff_timer; - struct workqueue_struct *gpu_poweroff_wq; - struct work_struct gpu_poweroff_work; - - u64 shader_poweroff_pending; - - bool poweroff_timer_needed; - - int (*callback_power_on)(struct kbase_device *kbdev); - void (*callback_power_off)(struct kbase_device *kbdev); - void (*callback_power_suspend)(struct kbase_device *kbdev); - void (*callback_power_resume)(struct kbase_device *kbdev); - int (*callback_power_runtime_on)(struct kbase_device *kbdev); - void (*callback_power_runtime_off)(struct kbase_device *kbdev); - -}; - - -/* List of policy IDs */ -enum kbase_pm_policy_id { - KBASE_PM_POLICY_ID_DEMAND = 1, - KBASE_PM_POLICY_ID_ALWAYS_ON, - KBASE_PM_POLICY_ID_COARSE_DEMAND, -#if !MALI_CUSTOMER_RELEASE - KBASE_PM_POLICY_ID_DEMAND_ALWAYS_POWERED, - KBASE_PM_POLICY_ID_FAST_START -#endif -}; - -typedef u32 kbase_pm_policy_flags; - -/** - * struct kbase_pm_policy - Power policy structure. 
- * - * Each power policy exposes a (static) instance of this structure which - * contains function pointers to the policy's methods. - * - * @name: The name of this policy - * @init: Function called when the policy is selected - * @term: Function called when the policy is unselected - * @get_core_mask: Function called to get the current shader core mask - * @get_core_active: Function called to get the current overall GPU power - * state - * @flags: Field indicating flags for this policy - * @id: Field indicating an ID for this policy. This is not - * necessarily the same as its index in the list returned - * by kbase_pm_list_policies(). - * It is used purely for debugging. - */ -struct kbase_pm_policy { - char *name; - - /** - * Function called when the policy is selected - * - * This should initialize the kbdev->pm.pm_policy_data structure. It - * should not attempt to make any changes to hardware state. - * - * It is undefined what state the cores are in when the function is - * called. - * - * @kbdev: The kbase device structure for the device (must be a - * valid pointer) - */ - void (*init)(struct kbase_device *kbdev); - - /** - * Function called when the policy is unselected. - * - * @kbdev: The kbase device structure for the device (must be a - * valid pointer) - */ - void (*term)(struct kbase_device *kbdev); - - /** - * Function called to get the current shader core mask - * - * The returned mask should meet or exceed (kbdev->shader_needed_bitmap - * | kbdev->shader_inuse_bitmap). - * - * @kbdev: The kbase device structure for the device (must be a - * valid pointer) - * - * Return: The mask of shader cores to be powered - */ - u64 (*get_core_mask)(struct kbase_device *kbdev); - - /** - * Function called to get the current overall GPU power state - * - * This function should consider the state of kbdev->pm.active_count. If - * this count is greater than 0 then there is at least one active - * context on the device and the GPU should be powered. 
If it is equal - * to 0 then there are no active contexts and the GPU could be powered - * off if desired. - * - * @kbdev: The kbase device structure for the device (must be a - * valid pointer) - * - * Return: true if the GPU should be powered, false otherwise - */ - bool (*get_core_active)(struct kbase_device *kbdev); - - kbase_pm_policy_flags flags; - enum kbase_pm_policy_id id; -}; - - -enum kbase_pm_ca_policy_id { - KBASE_PM_CA_POLICY_ID_FIXED = 1, -#if !MALI_CUSTOMER_RELEASE - KBASE_PM_CA_POLICY_ID_RANDOM, - KBASE_PM_CA_POLICY_ID_DEMAND, -#endif -}; - -typedef u32 kbase_pm_ca_policy_flags; - -/** - * struct kbase_pm_ca_policy - Core availability policy structure. - * - * Each core availability policy exposes a (static) instance of this structure - * which contains function pointers to the policy's methods. - * - * @name: The name of this policy - * @init: Function called when the policy is selected - * @term: Function called when the policy is unselected - * @get_core_mask: Function called to get the current shader core - * availability mask - * @update_core_status: Function called to update the current core status - * @flags: Field indicating flags for this policy - * @id: Field indicating an ID for this policy. This is not - * necessarily the same as its index in the list returned - * by kbase_pm_list_policies(). - * It is used purely for debugging. - */ -struct kbase_pm_ca_policy { - char *name; - - /** - * Function called when the policy is selected - * - * This should initialize the kbdev->pm.ca_policy_data structure. It - * should not attempt to make any changes to hardware state. - * - * It is undefined what state the cores are in when the function is - * called. - * - * @kbdev The kbase device structure for the device (must be a - * valid pointer) - */ - void (*init)(struct kbase_device *kbdev); - - /** - * Function called when the policy is unselected. 
- * - * @kbdev The kbase device structure for the device (must be a - * valid pointer) - */ - void (*term)(struct kbase_device *kbdev); - - /** - * Function called to get the current shader core availability mask - * - * When a change in core availability is occurring, the policy must set - * kbdev->pm.ca_in_transition to true. This is to indicate that - * reporting changes in power state cannot be optimized out, even if - * kbdev->pm.desired_shader_state remains unchanged. This must be done - * by any functions internal to the Core Availability Policy that change - * the return value of kbase_pm_ca_policy::get_core_mask. - * - * @kbdev The kbase device structure for the device (must be a - * valid pointer) - * - * Return: The current core availability mask - */ - u64 (*get_core_mask)(struct kbase_device *kbdev); - - /** - * Function called to update the current core status - * - * If none of the cores in core group 0 are ready or transitioning, then - * the policy must ensure that the next call to get_core_mask does not - * return 0 for all cores in core group 0. It is an error to disable - * core group 0 through the core availability policy. - * - * When a change in core availability has finished, the policy must set - * kbdev->pm.ca_in_transition to false. This is to indicate that - * changes in power state can once again be optimized out when - * kbdev->pm.desired_shader_state is unchanged. - * - * @kbdev: The kbase device structure for the device - * (must be a valid pointer) - * @cores_ready: The mask of cores currently powered and - * ready to run jobs - * @cores_transitioning: The mask of cores currently transitioning - * power state - */ - void (*update_core_status)(struct kbase_device *kbdev, u64 cores_ready, - u64 cores_transitioning); - - kbase_pm_ca_policy_flags flags; - - /** - * Field indicating an ID for this policy. This is not necessarily the - * same as its index in the list returned by kbase_pm_list_policies(). 
- * It is used purely for debugging. - */ - enum kbase_pm_ca_policy_id id; -}; - -#endif /* _KBASE_PM_HWACCESS_DEFS_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c deleted file mode 100755 index 9dac2303bd00a..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c +++ /dev/null @@ -1,72 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * A simple demand based power management policy - */ - -#include -#include - -static u64 demand_get_core_mask(struct kbase_device *kbdev) -{ - u64 desired = kbdev->shader_needed_bitmap | kbdev->shader_inuse_bitmap; - - if (0 == kbdev->pm.active_count) - return 0; - - return desired; -} - -static bool demand_get_core_active(struct kbase_device *kbdev) -{ - if (0 == kbdev->pm.active_count && !(kbdev->shader_needed_bitmap | - kbdev->shader_inuse_bitmap)) - return false; - - return true; -} - -static void demand_init(struct kbase_device *kbdev) -{ - CSTD_UNUSED(kbdev); -} - -static void demand_term(struct kbase_device *kbdev) -{ - CSTD_UNUSED(kbdev); -} - -/* - * The struct kbase_pm_policy structure for the demand power policy. - * - * This is the static structure that defines the demand power policy's callback - * and name. 
- */ -const struct kbase_pm_policy kbase_pm_demand_policy_ops = { - "demand", /* name */ - demand_init, /* init */ - demand_term, /* term */ - demand_get_core_mask, /* get_core_mask */ - demand_get_core_active, /* get_core_active */ - 0u, /* flags */ - KBASE_PM_POLICY_ID_DEMAND, /* id */ -}; - -KBASE_EXPORT_TEST_API(kbase_pm_demand_policy_ops); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h deleted file mode 100755 index c0c84b6e91891..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * A simple demand based power management policy - */ - -#ifndef MALI_KBASE_PM_DEMAND_H -#define MALI_KBASE_PM_DEMAND_H - -/** - * DOC: Demand power management policy - * - * The demand power management policy has the following characteristics: - * - When KBase indicates that the GPU will be powered up, but we don't yet - * know which Job Chains are to be run: - * - The Shader Cores are not powered up - * - * - When KBase indicates that a set of Shader Cores are needed to submit the - * currently queued Job Chains: - * - Only those Shader Cores are powered up - * - * - When KBase indicates that the GPU need not be powered: - * - The Shader Cores are powered off, and the GPU itself is powered off too. 
- * - * Note: - * - KBase indicates the GPU will be powered up when it has a User Process that - * has just started to submit Job Chains. - * - * - KBase indicates the GPU need not be powered when all the Job Chains from - * User Processes have finished, and it is waiting for a User Process to - * submit some more Job Chains. - */ - -/** - * struct kbasep_pm_policy_demand - Private structure for policy instance data - * - * @dummy: No state is needed, a dummy variable - * - * This contains data that is private to the demand power policy. - */ -struct kbasep_pm_policy_demand { - int dummy; -}; - -extern const struct kbase_pm_policy kbase_pm_demand_policy_ops; - -#endif /* MALI_KBASE_PM_DEMAND_H */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c deleted file mode 100755 index bcaf20c43af23..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c +++ /dev/null @@ -1,1498 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/* - * Base kernel Power Management hardware control - */ - -#include -#include -#include -#if defined(CONFIG_MALI_GATOR_SUPPORT) -#include -#endif -#if defined(CONFIG_MALI_MIPE_ENABLED) -#include -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#if MALI_MOCK_TEST -#define MOCKABLE(function) function##_original -#else -#define MOCKABLE(function) function -#endif /* MALI_MOCK_TEST */ - -/* Special value to indicate that the JM_CONFIG reg isn't currently used. */ -#define KBASE_JM_CONFIG_UNUSED (1<<31) - -/** - * enum kbasep_pm_action - Actions that can be performed on a core. - * - * This enumeration is private to the file. Its values are set to allow - * core_type_to_reg() function, which decodes this enumeration, to be simpler - * and more efficient. - * - * @ACTION_PRESENT: The cores that are present - * @ACTION_READY: The cores that are ready - * @ACTION_PWRON: Power on the cores specified - * @ACTION_PWROFF: Power off the cores specified - * @ACTION_PWRTRANS: The cores that are transitioning - * @ACTION_PWRACTIVE: The cores that are active - */ -enum kbasep_pm_action { - ACTION_PRESENT = 0, - ACTION_READY = (SHADER_READY_LO - SHADER_PRESENT_LO), - ACTION_PWRON = (SHADER_PWRON_LO - SHADER_PRESENT_LO), - ACTION_PWROFF = (SHADER_PWROFF_LO - SHADER_PRESENT_LO), - ACTION_PWRTRANS = (SHADER_PWRTRANS_LO - SHADER_PRESENT_LO), - ACTION_PWRACTIVE = (SHADER_PWRACTIVE_LO - SHADER_PRESENT_LO) -}; - -static u64 kbase_pm_get_state( - struct kbase_device *kbdev, - enum kbase_pm_core_type core_type, - enum kbasep_pm_action action); - -/** - * core_type_to_reg - Decode a core type and action to a register. - * - * Given a core type (defined by kbase_pm_core_type) and an action (defined - * by kbasep_pm_action) this function will return the register offset that - * will perform the action on the core type. 
The register returned is the _LO - * register and an offset must be applied to use the _HI register. - * - * @core_type: The type of core - * @action: The type of action - * - * Return: The register offset of the _LO register that performs an action of - * type @action on a core of type @core_type. - */ -static u32 core_type_to_reg(enum kbase_pm_core_type core_type, - enum kbasep_pm_action action) -{ - return (u32)core_type + (u32)action; -} - - -/** - * kbase_pm_invoke - Invokes an action on a core set - * - * This function performs the action given by @action on a set of cores of a - * type given by @core_type. It is a static function used by - * kbase_pm_transition_core_type() - * - * @kbdev: The kbase device structure of the device - * @core_type: The type of core that the action should be performed on - * @cores: A bit mask of cores to perform the action on (low 32 bits) - * @action: The action to perform on the cores - */ -static void kbase_pm_invoke(struct kbase_device *kbdev, - enum kbase_pm_core_type core_type, - u64 cores, - enum kbasep_pm_action action) -{ - u32 reg; - u32 lo = cores & 0xFFFFFFFF; - u32 hi = (cores >> 32) & 0xFFFFFFFF; - - lockdep_assert_held(&kbdev->pm.power_change_lock); - - reg = core_type_to_reg(core_type, action); - - KBASE_DEBUG_ASSERT(reg); -#if defined(CONFIG_MALI_GATOR_SUPPORT) - if (cores) { - if (action == ACTION_PWRON) - kbase_trace_mali_pm_power_on(core_type, cores); - else if (action == ACTION_PWROFF) - kbase_trace_mali_pm_power_off(core_type, cores); - } -#endif -#if defined(CONFIG_MALI_MIPE_ENABLED) - if (cores) { - u64 state = kbase_pm_get_state(kbdev, core_type, ACTION_READY); - - if (action == ACTION_PWRON) - state |= cores; - else if (action == ACTION_PWROFF) - state &= ~cores; - kbase_tlstream_aux_pm_state(core_type, state); - } -#endif - /* Tracing */ - if (cores) { - if (action == ACTION_PWRON) - switch (core_type) { - case KBASE_PM_CORE_SHADER: - KBASE_TRACE_ADD(kbdev, PM_PWRON, NULL, NULL, 0u, - lo); - break; - 
case KBASE_PM_CORE_TILER: - KBASE_TRACE_ADD(kbdev, PM_PWRON_TILER, NULL, - NULL, 0u, lo); - break; - case KBASE_PM_CORE_L2: - KBASE_TRACE_ADD(kbdev, PM_PWRON_L2, NULL, NULL, - 0u, lo); - break; - default: - break; - } - else if (action == ACTION_PWROFF) - switch (core_type) { - case KBASE_PM_CORE_SHADER: - KBASE_TRACE_ADD(kbdev, PM_PWROFF, NULL, NULL, - 0u, lo); - break; - case KBASE_PM_CORE_TILER: - KBASE_TRACE_ADD(kbdev, PM_PWROFF_TILER, NULL, - NULL, 0u, lo); - break; - case KBASE_PM_CORE_L2: - KBASE_TRACE_ADD(kbdev, PM_PWROFF_L2, NULL, NULL, - 0u, lo); - break; - default: - break; - } - } - - if (lo != 0) - kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo, NULL); - - if (hi != 0) - kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi, NULL); -} - -/** - * kbase_pm_get_state - Get information about a core set - * - * This function gets information (chosen by @action) about a set of cores of - * a type given by @core_type. It is a static function used by - * kbase_pm_get_present_cores(), kbase_pm_get_active_cores(), - * kbase_pm_get_trans_cores() and kbase_pm_get_ready_cores(). 
- * - * @kbdev: The kbase device structure of the device - * @core_type: The type of core that the should be queried - * @action: The property of the cores to query - * - * Return: A bit mask specifying the state of the cores - */ -static u64 kbase_pm_get_state(struct kbase_device *kbdev, - enum kbase_pm_core_type core_type, - enum kbasep_pm_action action) -{ - u32 reg; - u32 lo, hi; - - reg = core_type_to_reg(core_type, action); - - KBASE_DEBUG_ASSERT(reg); - - lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg), NULL); - hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4), NULL); - - return (((u64) hi) << 32) | ((u64) lo); -} - -void kbasep_pm_read_present_cores(struct kbase_device *kbdev) -{ - kbdev->shader_inuse_bitmap = 0; - kbdev->shader_needed_bitmap = 0; - kbdev->shader_available_bitmap = 0; - kbdev->tiler_available_bitmap = 0; - kbdev->l2_users_count = 0; - kbdev->l2_available_bitmap = 0; - kbdev->tiler_needed_cnt = 0; - kbdev->tiler_inuse_cnt = 0; - - memset(kbdev->shader_needed_cnt, 0, sizeof(kbdev->shader_needed_cnt)); -} - -KBASE_EXPORT_TEST_API(kbasep_pm_read_present_cores); - -/** - * kbase_pm_get_present_cores - Get the cores that are present - * - * @kbdev: Kbase device - * @type: The type of cores to query - * - * Return: Bitmask of the cores that are present - */ -u64 kbase_pm_get_present_cores(struct kbase_device *kbdev, - enum kbase_pm_core_type type) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - switch (type) { - case KBASE_PM_CORE_L2: - return kbdev->gpu_props.props.raw_props.l2_present; - case KBASE_PM_CORE_SHADER: - return kbdev->gpu_props.props.raw_props.shader_present; - case KBASE_PM_CORE_TILER: - return kbdev->gpu_props.props.raw_props.tiler_present; - } - KBASE_DEBUG_ASSERT(0); - return 0; -} - -KBASE_EXPORT_TEST_API(kbase_pm_get_present_cores); - -/** - * kbase_pm_get_active_cores - Get the cores that are "active" - * (busy processing work) - * - * @kbdev: Kbase device - * @type: The type of cores to query - * - * Return: Bitmask of cores 
that are active - */ -u64 kbase_pm_get_active_cores(struct kbase_device *kbdev, - enum kbase_pm_core_type type) -{ - return kbase_pm_get_state(kbdev, type, ACTION_PWRACTIVE); -} - -KBASE_EXPORT_TEST_API(kbase_pm_get_active_cores); - -/** - * kbase_pm_get_trans_cores - Get the cores that are transitioning between - * power states - * - * @kbdev: Kbase device - * @type: The type of cores to query - * - * Return: Bitmask of cores that are transitioning - */ -u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev, - enum kbase_pm_core_type type) -{ - return kbase_pm_get_state(kbdev, type, ACTION_PWRTRANS); -} - -KBASE_EXPORT_TEST_API(kbase_pm_get_trans_cores); - -/** - * kbase_pm_get_ready_cores - Get the cores that are powered on - * - * @kbdev: Kbase device - * @type: The type of cores to query - * - * Return: Bitmask of cores that are ready (powered on) - */ -u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev, - enum kbase_pm_core_type type) -{ - u64 result; - - result = kbase_pm_get_state(kbdev, type, ACTION_READY); - - switch (type) { - case KBASE_PM_CORE_SHADER: - KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED, NULL, NULL, 0u, - (u32) result); - break; - case KBASE_PM_CORE_TILER: - KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_TILER, NULL, NULL, 0u, - (u32) result); - break; - case KBASE_PM_CORE_L2: - KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_L2, NULL, NULL, 0u, - (u32) result); - break; - default: - break; - } - - return result; -} - -KBASE_EXPORT_TEST_API(kbase_pm_get_ready_cores); - -/** - * kbase_pm_transition_core_type - Perform power transitions for a particular - * core type. - * - * This function will perform any available power transitions to make the actual - * hardware state closer to the desired state. If a core is currently - * transitioning then changes to the power state of that call cannot be made - * until the transition has finished. 
Cores which are not present in the - * hardware are ignored if they are specified in the desired_state bitmask, - * however the return value will always be 0 in this case. - * - * @kbdev: The kbase device - * @type: The core type to perform transitions for - * @desired_state: A bit mask of the desired state of the cores - * @in_use: A bit mask of the cores that are currently running - * jobs. These cores have to be kept powered up because - * there are jobs running (or about to run) on them. - * @available: Receives a bit mask of the cores that the job - * scheduler can use to submit jobs to. May be NULL if - * this is not needed. - * @powering_on: Bit mask to update with cores that are - * transitioning to a power-on state. - * - * Return: true if the desired state has been reached, false otherwise - */ -static bool kbase_pm_transition_core_type(struct kbase_device *kbdev, - enum kbase_pm_core_type type, - u64 desired_state, - u64 in_use, - u64 * const available, - u64 *powering_on) -{ - u64 present; - u64 ready; - u64 trans; - u64 powerup; - u64 powerdown; - u64 powering_on_trans; - u64 desired_state_in_use; - - lockdep_assert_held(&kbdev->pm.power_change_lock); - - /* Get current state */ - present = kbase_pm_get_present_cores(kbdev, type); - trans = kbase_pm_get_trans_cores(kbdev, type); - ready = kbase_pm_get_ready_cores(kbdev, type); - /* mask off ready from trans in case transitions finished between the - * register reads */ - trans &= ~ready; - - powering_on_trans = trans & *powering_on; - *powering_on = powering_on_trans; - - if (available != NULL) - *available = (ready | powering_on_trans) & desired_state; - - /* Update desired state to include the in-use cores. 
These have to be - * kept powered up because there are jobs running or about to run on - * these cores - */ - desired_state_in_use = desired_state | in_use; - - /* Update state of whether l2 caches are powered */ - if (type == KBASE_PM_CORE_L2) { - if ((ready == present) && (desired_state_in_use == ready) && - (trans == 0)) { - /* All are ready, none will be turned off, and none are - * transitioning */ - kbdev->pm.backend.l2_powered = 1; - if (kbdev->l2_users_count > 0) { - /* Notify any registered l2 cache users - * (optimized out when no users waiting) */ - wake_up(&kbdev->pm.backend.l2_powered_wait); - } - } else - kbdev->pm.backend.l2_powered = 0; - } - - if (desired_state_in_use == ready && (trans == 0)) - return true; - - /* Restrict the cores to those that are actually present */ - powerup = desired_state_in_use & present; - powerdown = (~desired_state_in_use) & present; - - /* Restrict to cores that are not already in the desired state */ - powerup &= ~ready; - powerdown &= ready; - - /* Don't transition any cores that are already transitioning, except for - * Mali cores that support the following case: - * - * If the SHADER_PWRON or TILER_PWRON registers are written to turn on - * a core that is currently transitioning to power off, then this is - * remembered and the shader core is automatically powered up again once - * the original transition completes. Once the automatic power on is - * complete any job scheduled on the shader core should start. 
- */ - powerdown &= ~trans; - - if (kbase_hw_has_feature(kbdev, - BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS)) - if (KBASE_PM_CORE_SHADER == type || KBASE_PM_CORE_TILER == type) - trans = powering_on_trans; /* for exception cases, only - * mask off cores in power on - * transitions */ - - powerup &= ~trans; - - /* Perform transitions if any */ - kbase_pm_invoke(kbdev, type, powerup, ACTION_PWRON); - kbase_pm_invoke(kbdev, type, powerdown, ACTION_PWROFF); - - /* Recalculate cores transitioning on, and re-evaluate our state */ - powering_on_trans |= powerup; - *powering_on = powering_on_trans; - if (available != NULL) - *available = (ready | powering_on_trans) & desired_state; - - return false; -} - -KBASE_EXPORT_TEST_API(kbase_pm_transition_core_type); - -/** - * get_desired_cache_status - Determine which caches should be on for a - * particular core state - * - * This function takes a bit mask of the present caches and the cores (or - * caches) that are attached to the caches that will be powered. It then - * computes which caches should be turned on to allow the cores requested to be - * powered up. 
- * - * @present: The bit mask of present caches - * @cores_powered: A bit mask of cores (or L2 caches) that are desired to - * be powered - * - * Return: A bit mask of the caches that should be turned on - */ -static u64 get_desired_cache_status(u64 present, u64 cores_powered) -{ - u64 desired = 0; - - while (present) { - /* Find out which is the highest set bit */ - u64 bit = fls64(present) - 1; - u64 bit_mask = 1ull << bit; - /* Create a mask which has all bits from 'bit' upwards set */ - - u64 mask = ~(bit_mask - 1); - - /* If there are any cores powered at this bit or above (that - * haven't previously been processed) then we need this core on - */ - if (cores_powered & mask) - desired |= bit_mask; - - /* Remove bits from cores_powered and present */ - cores_powered &= ~mask; - present &= ~bit_mask; - } - - return desired; -} - -KBASE_EXPORT_TEST_API(get_desired_cache_status); - -bool -MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev) -{ - bool cores_are_available = false; - bool in_desired_state = true; - u64 desired_l2_state; - u64 cores_powered; - u64 tiler_available_bitmap; - u64 shader_available_bitmap; - u64 shader_ready_bitmap; - u64 shader_transitioning_bitmap; - u64 l2_available_bitmap; - u64 prev_l2_available_bitmap; - - KBASE_DEBUG_ASSERT(NULL != kbdev); - lockdep_assert_held(&kbdev->pm.power_change_lock); - - spin_lock(&kbdev->pm.backend.gpu_powered_lock); - if (kbdev->pm.backend.gpu_powered == false) { - spin_unlock(&kbdev->pm.backend.gpu_powered_lock); - if (kbdev->pm.backend.desired_shader_state == 0 && - kbdev->pm.backend.desired_tiler_state == 0) - return true; - return false; - } - - /* Trace that a change-state is being requested, and that it took - * (effectively) no time to start it. 
This is useful for counting how - * many state changes occurred, in a way that's backwards-compatible - * with processing the trace data */ - kbase_timeline_pm_send_event(kbdev, - KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE); - kbase_timeline_pm_handle_event(kbdev, - KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE); - - /* If any cores are already powered then, we must keep the caches on */ - cores_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER); - - cores_powered |= kbdev->pm.backend.desired_shader_state; - - /* If there are l2 cache users registered, keep all l2s powered even if - * all other cores are off. */ - if (kbdev->l2_users_count > 0) - cores_powered |= kbdev->gpu_props.props.raw_props.l2_present; - - desired_l2_state = get_desired_cache_status( - kbdev->gpu_props.props.raw_props.l2_present, - cores_powered); - - /* If any l2 cache is on, then enable l2 #0, for use by job manager */ - if (0 != desired_l2_state) { - desired_l2_state |= 1; - /* Also enable tiler if l2 cache is powered */ - kbdev->pm.backend.desired_tiler_state = - kbdev->gpu_props.props.raw_props.tiler_present; - } else { - kbdev->pm.backend.desired_tiler_state = 0; - } - - prev_l2_available_bitmap = kbdev->l2_available_bitmap; - in_desired_state &= kbase_pm_transition_core_type(kbdev, - KBASE_PM_CORE_L2, desired_l2_state, 0, - &l2_available_bitmap, - &kbdev->pm.backend.powering_on_l2_state); - - if (kbdev->l2_available_bitmap != l2_available_bitmap) - KBASE_TIMELINE_POWER_L2(kbdev, l2_available_bitmap); - - kbdev->l2_available_bitmap = l2_available_bitmap; - - if (in_desired_state) { - in_desired_state &= kbase_pm_transition_core_type(kbdev, - KBASE_PM_CORE_TILER, - kbdev->pm.backend.desired_tiler_state, - 0, &tiler_available_bitmap, - &kbdev->pm.backend.powering_on_tiler_state); - in_desired_state &= kbase_pm_transition_core_type(kbdev, - KBASE_PM_CORE_SHADER, - kbdev->pm.backend.desired_shader_state, - kbdev->shader_inuse_bitmap, - &shader_available_bitmap, - 
&kbdev->pm.backend.powering_on_shader_state); - - if (kbdev->shader_available_bitmap != shader_available_bitmap) { - KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL, - NULL, 0u, - (u32) shader_available_bitmap); - KBASE_TIMELINE_POWER_SHADER(kbdev, - shader_available_bitmap); - } - - kbdev->shader_available_bitmap = shader_available_bitmap; - - if (kbdev->tiler_available_bitmap != tiler_available_bitmap) { - KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER, - NULL, NULL, 0u, - (u32) tiler_available_bitmap); - KBASE_TIMELINE_POWER_TILER(kbdev, - tiler_available_bitmap); - } - - kbdev->tiler_available_bitmap = tiler_available_bitmap; - - } else if ((l2_available_bitmap & - kbdev->gpu_props.props.raw_props.tiler_present) != - kbdev->gpu_props.props.raw_props.tiler_present) { - tiler_available_bitmap = 0; - - if (kbdev->tiler_available_bitmap != tiler_available_bitmap) - KBASE_TIMELINE_POWER_TILER(kbdev, - tiler_available_bitmap); - - kbdev->tiler_available_bitmap = tiler_available_bitmap; - } - - /* State updated for slow-path waiters */ - kbdev->pm.backend.gpu_in_desired_state = in_desired_state; - - shader_ready_bitmap = kbase_pm_get_ready_cores(kbdev, - KBASE_PM_CORE_SHADER); - shader_transitioning_bitmap = kbase_pm_get_trans_cores(kbdev, - KBASE_PM_CORE_SHADER); - - /* Determine whether the cores are now available (even if the set of - * available cores is empty). 
Note that they can be available even if - * we've not finished transitioning to the desired state */ - if ((kbdev->shader_available_bitmap & - kbdev->pm.backend.desired_shader_state) - == kbdev->pm.backend.desired_shader_state && - (kbdev->tiler_available_bitmap & - kbdev->pm.backend.desired_tiler_state) - == kbdev->pm.backend.desired_tiler_state) { - cores_are_available = true; - - KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE, NULL, NULL, 0u, - (u32)(kbdev->shader_available_bitmap & - kbdev->pm.backend.desired_shader_state)); - KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE_TILER, NULL, NULL, 0u, - (u32)(kbdev->tiler_available_bitmap & - kbdev->pm.backend.desired_tiler_state)); - - /* Log timelining information about handling events that power - * up cores, to match up either with immediate submission either - * because cores already available, or from PM IRQ */ - if (!in_desired_state) - kbase_timeline_pm_send_event(kbdev, - KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED); - } - - if (in_desired_state) { - KBASE_DEBUG_ASSERT(cores_are_available); - -#if defined(CONFIG_MALI_GATOR_SUPPORT) - kbase_trace_mali_pm_status(KBASE_PM_CORE_L2, - kbase_pm_get_ready_cores(kbdev, - KBASE_PM_CORE_L2)); - kbase_trace_mali_pm_status(KBASE_PM_CORE_SHADER, - kbase_pm_get_ready_cores(kbdev, - KBASE_PM_CORE_SHADER)); - kbase_trace_mali_pm_status(KBASE_PM_CORE_TILER, - kbase_pm_get_ready_cores(kbdev, - KBASE_PM_CORE_TILER)); -#endif -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_aux_pm_state( - KBASE_PM_CORE_L2, - kbase_pm_get_ready_cores( - kbdev, KBASE_PM_CORE_L2)); - kbase_tlstream_aux_pm_state( - KBASE_PM_CORE_SHADER, - kbase_pm_get_ready_cores( - kbdev, KBASE_PM_CORE_SHADER)); - kbase_tlstream_aux_pm_state( - KBASE_PM_CORE_TILER, - kbase_pm_get_ready_cores( - kbdev, - KBASE_PM_CORE_TILER)); -#endif - - KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED, NULL, NULL, - kbdev->pm.backend.gpu_in_desired_state, - (u32)kbdev->pm.backend.desired_shader_state); - KBASE_TRACE_ADD(kbdev, 
PM_DESIRED_REACHED_TILER, NULL, NULL, 0u, - (u32)kbdev->pm.backend.desired_tiler_state); - - /* Log timelining information for synchronous waiters */ - kbase_timeline_pm_send_event(kbdev, - KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED); - /* Wake slow-path waiters. Job scheduler does not use this. */ - KBASE_TRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, NULL, 0u, 0); - wake_up(&kbdev->pm.backend.gpu_in_desired_state_wait); - } - - spin_unlock(&kbdev->pm.backend.gpu_powered_lock); - - /* kbase_pm_ca_update_core_status can cause one-level recursion into - * this function, so it must only be called once all changes to kbdev - * have been committed, and after the gpu_powered_lock has been - * dropped. */ - if (kbdev->shader_ready_bitmap != shader_ready_bitmap || - kbdev->shader_transitioning_bitmap != shader_transitioning_bitmap) { - kbdev->shader_ready_bitmap = shader_ready_bitmap; - kbdev->shader_transitioning_bitmap = - shader_transitioning_bitmap; - - kbase_pm_ca_update_core_status(kbdev, shader_ready_bitmap, - shader_transitioning_bitmap); - } - - /* The core availability policy is not allowed to keep core group 0 - * turned off (unless it was changing the l2 power state) */ - if (!((shader_ready_bitmap | shader_transitioning_bitmap) & - kbdev->gpu_props.props.coherency_info.group[0].core_mask) && - (prev_l2_available_bitmap == desired_l2_state) && - !(kbase_pm_ca_get_core_mask(kbdev) & - kbdev->gpu_props.props.coherency_info.group[0].core_mask)) - BUG(); - - /* The core availability policy is allowed to keep core group 1 off, - * but all jobs specifically targeting CG1 must fail */ - if (!((shader_ready_bitmap | shader_transitioning_bitmap) & - kbdev->gpu_props.props.coherency_info.group[1].core_mask) && - !(kbase_pm_ca_get_core_mask(kbdev) & - kbdev->gpu_props.props.coherency_info.group[1].core_mask)) - kbdev->pm.backend.cg1_disabled = true; - else - kbdev->pm.backend.cg1_disabled = false; - - return cores_are_available; -} 
-KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_nolock); - -/* Timeout for kbase_pm_check_transitions_sync when wait_event_killable has - * aborted due to a fatal signal. If the time spent waiting has exceeded this - * threshold then there is most likely a hardware issue. */ -#define PM_TIMEOUT (5*HZ) /* 5s */ - -void kbase_pm_check_transitions_sync(struct kbase_device *kbdev) -{ - unsigned long flags; - unsigned long timeout; - bool cores_are_available; - int ret; - - /* Force the transition to be checked and reported - the cores may be - * 'available' (for job submission) but not fully powered up. */ - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - cores_are_available = kbase_pm_check_transitions_nolock(kbdev); - /* Don't need 'cores_are_available', because we don't return anything */ - CSTD_UNUSED(cores_are_available); - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - timeout = jiffies + PM_TIMEOUT; - - /* Wait for cores */ - ret = wait_event_killable(kbdev->pm.backend.gpu_in_desired_state_wait, - kbdev->pm.backend.gpu_in_desired_state); - - if (ret < 0 && time_after(jiffies, timeout)) { - dev_err(kbdev->dev, "Power transition timed out unexpectedly\n"); - dev_err(kbdev->dev, "Desired state :\n"); - dev_err(kbdev->dev, "\tShader=%016llx\n", - kbdev->pm.backend.desired_shader_state); - dev_err(kbdev->dev, "\tTiler =%016llx\n", - kbdev->pm.backend.desired_tiler_state); - dev_err(kbdev->dev, "Current state :\n"); - dev_err(kbdev->dev, "\tShader=%08x%08x\n", - kbase_reg_read(kbdev, - GPU_CONTROL_REG(SHADER_READY_HI), NULL), - kbase_reg_read(kbdev, - GPU_CONTROL_REG(SHADER_READY_LO), - NULL)); - dev_err(kbdev->dev, "\tTiler =%08x%08x\n", - kbase_reg_read(kbdev, - GPU_CONTROL_REG(TILER_READY_HI), NULL), - kbase_reg_read(kbdev, - GPU_CONTROL_REG(TILER_READY_LO), NULL)); - dev_err(kbdev->dev, "\tL2 =%08x%08x\n", - kbase_reg_read(kbdev, - GPU_CONTROL_REG(L2_READY_HI), NULL), - kbase_reg_read(kbdev, - GPU_CONTROL_REG(L2_READY_LO), NULL)); - 
dev_err(kbdev->dev, "Cores transitioning :\n"); - dev_err(kbdev->dev, "\tShader=%08x%08x\n", - kbase_reg_read(kbdev, GPU_CONTROL_REG( - SHADER_PWRTRANS_HI), NULL), - kbase_reg_read(kbdev, GPU_CONTROL_REG( - SHADER_PWRTRANS_LO), NULL)); - dev_err(kbdev->dev, "\tTiler =%08x%08x\n", - kbase_reg_read(kbdev, GPU_CONTROL_REG( - TILER_PWRTRANS_HI), NULL), - kbase_reg_read(kbdev, GPU_CONTROL_REG( - TILER_PWRTRANS_LO), NULL)); - dev_err(kbdev->dev, "\tL2 =%08x%08x\n", - kbase_reg_read(kbdev, GPU_CONTROL_REG( - L2_PWRTRANS_HI), NULL), - kbase_reg_read(kbdev, GPU_CONTROL_REG( - L2_PWRTRANS_LO), NULL)); -#if KBASE_GPU_RESET_EN - dev_err(kbdev->dev, "Sending reset to GPU - all running jobs will be lost\n"); - if (kbase_prepare_to_reset_gpu(kbdev)) - kbase_reset_gpu(kbdev); -#endif /* KBASE_GPU_RESET_EN */ - } else { - /* Log timelining information that a change in state has - * completed */ - kbase_timeline_pm_handle_event(kbdev, - KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED); - } -} -KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_sync); - -void kbase_pm_enable_interrupts(struct kbase_device *kbdev) -{ - unsigned long flags; - - KBASE_DEBUG_ASSERT(NULL != kbdev); - /* - * Clear all interrupts, - * and unmask them all. 
- */ - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL, - NULL); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL, - NULL); - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF, - NULL); - kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF, NULL); - - kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL); - kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFFFFFF, NULL); -} - -KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts); - -void kbase_pm_enable_interrupts_mmu_mask(struct kbase_device *kbdev, u32 mask) -{ - unsigned long flags; - - KBASE_DEBUG_ASSERT(NULL != kbdev); - /* - * Clear all interrupts, - * and unmask them all. - */ - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL, - NULL); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL, - NULL); - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF, - NULL); - kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF, NULL); - - kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL); - kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), mask, NULL); -} - -void kbase_pm_disable_interrupts(struct kbase_device *kbdev) -{ - unsigned long flags; - - KBASE_DEBUG_ASSERT(NULL != kbdev); - /* - * Mask all interrupts, - * and clear them all. 
- */ - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0, NULL); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL, - NULL); - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0, NULL); - kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF, - NULL); - - kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL); - kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL); -} - -KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts); - -/* - * pmu layout: - * 0x0000: PMU TAG (RO) (0xCAFECAFE) - * 0x0004: PMU VERSION ID (RO) (0x00000000) - * 0x0008: CLOCK ENABLE (RW) (31:1 SBZ, 0 CLOCK STATE) - */ -void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume) -{ - bool reset_required = is_resume; - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - unsigned long flags; - int i; - - KBASE_DEBUG_ASSERT(NULL != kbdev); - lockdep_assert_held(&js_devdata->runpool_mutex); - lockdep_assert_held(&kbdev->pm.lock); - - if (kbdev->pm.backend.gpu_powered) { - /* Already turned on */ - if (kbdev->poweroff_pending) - kbase_pm_enable_interrupts(kbdev); - kbdev->poweroff_pending = false; - KBASE_DEBUG_ASSERT(!is_resume); - return; - } - - kbdev->poweroff_pending = false; - - KBASE_TRACE_ADD(kbdev, PM_GPU_ON, NULL, NULL, 0u, 0u); - - if (is_resume && kbdev->pm.backend.callback_power_resume) { - kbdev->pm.backend.callback_power_resume(kbdev); - } else if (kbdev->pm.backend.callback_power_on) { - kbdev->pm.backend.callback_power_on(kbdev); - /* If your platform properly keeps the GPU state you may use the - * return value of the callback_power_on function to - * conditionally reset the GPU on power up. Currently we are - * conservative and always reset the GPU. 
*/ - reset_required = true; - } - - spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags); - kbdev->pm.backend.gpu_powered = true; - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags); - - if (reset_required) { - /* GPU state was lost, reset GPU to ensure it is in a - * consistent state */ - kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS); - } - - /* Reprogram the GPU's MMU */ - for (i = 0; i < kbdev->nr_hw_address_spaces; i++) { - struct kbase_as *as = &kbdev->as[i]; - - mutex_lock(&as->transaction_mutex); - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - if (js_devdata->runpool_irq.per_as_data[i].kctx) - kbase_mmu_update( - js_devdata->runpool_irq.per_as_data[i].kctx); - else - kbase_mmu_disable_as(kbdev, i); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - mutex_unlock(&as->transaction_mutex); - } - - /* Lastly, enable the interrupts */ - kbase_pm_enable_interrupts(kbdev); -} - -KBASE_EXPORT_TEST_API(kbase_pm_clock_on); - -bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend) -{ - unsigned long flags; - - KBASE_DEBUG_ASSERT(NULL != kbdev); - lockdep_assert_held(&kbdev->pm.lock); - - /* ASSERT that the cores should now be unavailable. No lock needed. */ - KBASE_DEBUG_ASSERT(kbdev->shader_available_bitmap == 0u); - - kbdev->poweroff_pending = true; - - if (!kbdev->pm.backend.gpu_powered) { - /* Already turned off */ - if (is_suspend && kbdev->pm.backend.callback_power_suspend) - kbdev->pm.backend.callback_power_suspend(kbdev); - return true; - } - - KBASE_TRACE_ADD(kbdev, PM_GPU_OFF, NULL, NULL, 0u, 0u); - - /* Disable interrupts. This also clears any outstanding interrupts */ - kbase_pm_disable_interrupts(kbdev); - /* Ensure that any IRQ handlers have finished */ - kbase_synchronize_irqs(kbdev); - - spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags); - - if (atomic_read(&kbdev->faults_pending)) { - /* Page/bus faults are still being processed. 
The GPU can not - * be powered off until they have completed */ - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, - flags); - return false; - } - - - /* The GPU power may be turned off from this point */ - kbdev->pm.backend.gpu_powered = false; - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags); - - if (is_suspend && kbdev->pm.backend.callback_power_suspend) - kbdev->pm.backend.callback_power_suspend(kbdev); - else if (kbdev->pm.backend.callback_power_off) - kbdev->pm.backend.callback_power_off(kbdev); - return true; -} - -KBASE_EXPORT_TEST_API(kbase_pm_clock_off); - -struct kbasep_reset_timeout_data { - struct hrtimer timer; - bool timed_out; - struct kbase_device *kbdev; -}; - -void kbase_pm_reset_done(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - kbdev->pm.backend.reset_done = true; - wake_up(&kbdev->pm.backend.reset_done_wait); -} - -/** - * kbase_pm_wait_for_reset - Wait for a reset to happen - * - * Wait for the %RESET_COMPLETED IRQ to occur, then reset the waiting state. 
- * - * @kbdev: Kbase device - */ -static void kbase_pm_wait_for_reset(struct kbase_device *kbdev) -{ - lockdep_assert_held(&kbdev->pm.lock); - - wait_event(kbdev->pm.backend.reset_done_wait, - (kbdev->pm.backend.reset_done)); - kbdev->pm.backend.reset_done = false; -} - -KBASE_EXPORT_TEST_API(kbase_pm_reset_done); - -static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer) -{ - struct kbasep_reset_timeout_data *rtdata = - container_of(timer, struct kbasep_reset_timeout_data, timer); - - rtdata->timed_out = 1; - - /* Set the wait queue to wake up kbase_pm_init_hw even though the reset - * hasn't completed */ - kbase_pm_reset_done(rtdata->kbdev); - - return HRTIMER_NORESTART; -} - -static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev) -{ - struct device_node *np = kbdev->dev->of_node; - u32 jm_values[4]; - const u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; - const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >> - GPU_ID_VERSION_PRODUCT_ID_SHIFT; - const u32 major = (gpu_id & GPU_ID_VERSION_MAJOR) >> - GPU_ID_VERSION_MAJOR_SHIFT; - - kbdev->hw_quirks_sc = 0; - - /* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE. See PRLAM-8443. - * and needed due to MIDGLES-3539. See PRLAM-11035 */ - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443) || - kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11035)) - kbdev->hw_quirks_sc |= SC_LS_PAUSEBUFFER_DISABLE; - - /* Needed due to MIDBASE-2054: SDC_DISABLE_OQ_DISCARD. See PRLAM-10327. - */ - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10327)) - kbdev->hw_quirks_sc |= SC_SDC_DISABLE_OQ_DISCARD; - - /* Enable alternative hardware counter selection if configured. */ - if (DEFAULT_ALTERNATIVE_HWC) - kbdev->hw_quirks_sc |= SC_ALT_COUNTERS; - - /* Needed due to MIDBASE-2795. ENABLE_TEXGRD_FLAGS. See PRLAM-10797. 
*/ - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10797)) - kbdev->hw_quirks_sc |= SC_ENABLE_TEXGRD_FLAGS; - - kbdev->hw_quirks_tiler = kbase_reg_read(kbdev, - GPU_CONTROL_REG(TILER_CONFIG), NULL); - - /* Set tiler clock gate override if required */ - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3953)) - kbdev->hw_quirks_tiler |= TC_CLOCK_GATE_OVERRIDE; - - /* Limit the GPU bus bandwidth if the platform needs this. */ - kbdev->hw_quirks_mmu = kbase_reg_read(kbdev, - GPU_CONTROL_REG(L2_MMU_CONFIG), NULL); - - /* Limit read ID width for AXI */ - kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS); - kbdev->hw_quirks_mmu |= (DEFAULT_ARID_LIMIT & 0x3) << - L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT; - - /* Limit write ID width for AXI */ - kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES); - kbdev->hw_quirks_mmu |= (DEFAULT_AWID_LIMIT & 0x3) << - L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT; - - - /* Only for T86x/T88x-based products after r2p0 */ - if (prod_id >= 0x860 && prod_id <= 0x880 && major >= 2) { - /* The JM_CONFIG register is specified as follows in the - T86x/T88x Engineering Specification Supplement: - The values are read from device tree in order. - */ -#define TIMESTAMP_OVERRIDE 1 -#define CLOCK_GATE_OVERRIDE (1<<1) -#define JOB_THROTTLE_ENABLE (1<<2) -#define JOB_THROTTLE_LIMIT_SHIFT 3 - - /* 6 bits in the register */ - const u32 jm_max_limit = 0x3F; - - if (of_property_read_u32_array(np, - "jm_config", - &jm_values[0], - ARRAY_SIZE(jm_values))) { - /* Entry not in device tree, use defaults */ - jm_values[0] = 0; - jm_values[1] = 0; - jm_values[2] = 0; - jm_values[3] = jm_max_limit; /* Max value */ - } - - /* Limit throttle limit to 6 bits*/ - if (jm_values[3] > jm_max_limit) { - dev_dbg(kbdev->dev, "JOB_THROTTLE_LIMIT supplied in device tree is too large. Limiting to MAX (63)."); - jm_values[3] = jm_max_limit; - } - - /* Aggregate to one integer. */ - kbdev->hw_quirks_jm = (jm_values[0] ? 
TIMESTAMP_OVERRIDE : 0); - kbdev->hw_quirks_jm |= (jm_values[1] ? CLOCK_GATE_OVERRIDE : 0); - kbdev->hw_quirks_jm |= (jm_values[2] ? JOB_THROTTLE_ENABLE : 0); - kbdev->hw_quirks_jm |= (jm_values[3] << - JOB_THROTTLE_LIMIT_SHIFT); - } else { - kbdev->hw_quirks_jm = KBASE_JM_CONFIG_UNUSED; - } - - -} - -static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev) -{ - if (kbdev->hw_quirks_sc) - kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_CONFIG), - kbdev->hw_quirks_sc, NULL); - - kbase_reg_write(kbdev, GPU_CONTROL_REG(TILER_CONFIG), - kbdev->hw_quirks_tiler, NULL); - - kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), - kbdev->hw_quirks_mmu, NULL); - - - if (kbdev->hw_quirks_jm != KBASE_JM_CONFIG_UNUSED) - kbase_reg_write(kbdev, GPU_CONTROL_REG(JM_CONFIG), - kbdev->hw_quirks_jm, NULL); - -} - - -int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags) -{ - unsigned long irq_flags; - struct kbasep_reset_timeout_data rtdata; - u64 core_ready; - u64 l2_ready; - u64 tiler_ready; - u32 value; - - KBASE_DEBUG_ASSERT(NULL != kbdev); - lockdep_assert_held(&kbdev->pm.lock); - - /* Ensure the clock is on before attempting to access the hardware */ - if (!kbdev->pm.backend.gpu_powered) { - if (kbdev->pm.backend.callback_power_on) - kbdev->pm.backend.callback_power_on(kbdev); - - spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, - irq_flags); - kbdev->pm.backend.gpu_powered = true; - spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, - irq_flags); - } - - /* Ensure interrupts are off to begin with, this also clears any - * outstanding interrupts */ - kbase_pm_disable_interrupts(kbdev); - /* Prepare for the soft-reset */ - kbdev->pm.backend.reset_done = false; - - /* The cores should be made unavailable due to the reset */ - spin_lock_irqsave(&kbdev->pm.power_change_lock, irq_flags); - if (kbdev->shader_available_bitmap != 0u) - KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL, - NULL, 0u, (u32)0u); - if (kbdev->tiler_available_bitmap != 
0u) - KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER, - NULL, NULL, 0u, (u32)0u); - kbdev->shader_available_bitmap = 0u; - kbdev->tiler_available_bitmap = 0u; - kbdev->l2_available_bitmap = 0u; - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, irq_flags); - - /* Soft reset the GPU */ - KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0); -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_jd_gpu_soft_reset(kbdev); -#endif -#if 0 - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), - GPU_COMMAND_SOFT_RESET, NULL); -#else - //dev_err(kbdev->dev, "%s, %d \n", __FILE__, __LINE__); - core_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER); - l2_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2); - tiler_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_TILER); - - //printk("core_ready=%ld, l2_ready=%ld, tiler_ready=%ld\n", core_ready, l2_ready, tiler_ready); - if (core_ready) - kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER, core_ready, ACTION_PWROFF); - - if (l2_ready) - kbase_pm_invoke(kbdev, KBASE_PM_CORE_L2, l2_ready, ACTION_PWROFF); - - if (tiler_ready) - kbase_pm_invoke(kbdev, KBASE_PM_CORE_TILER, tiler_ready, ACTION_PWROFF); - - /* do external reset */ - -//JOHNT -#if 0 - // Level reset mail - Wr(P_RESET0_MASK, ~(0x1<<20)); - Wr(P_RESET0_LEVEL, ~(0x1<<20)); - - // Level reset mail - Wr(P_RESET2_MASK, ~(0x1<<14)); - Wr(P_RESET2_LEVEL, ~(0x1<<14)); - - Wr(P_RESET2_LEVEL, 0xffffffff); - Wr(P_RESET0_LEVEL, 0xffffffff); -#else - if (reg_base_hiubus) { - value = Rd(RESET0_MASK); - value = value & (~(0x1<<20)); - //printk("line(%d), value=%x\n", __LINE__, value); - Wr(RESET0_MASK, value); - - value = Rd(RESET0_LEVEL); - value = value & (~(0x1<<20)); - //printk("line(%d), value=%x\n", __LINE__, value); - Wr(RESET0_LEVEL, value); - /////////////// - value = Rd(RESET2_MASK); - value = value & (~(0x1<<14)); - //printk("line(%d), value=%x\n", __LINE__, value); - Wr(RESET2_MASK, value); - - value = Rd(RESET2_LEVEL); - value = 
value & (~(0x1<<14)); - //printk("line(%d), value=%x\n", __LINE__, value); - Wr(RESET2_LEVEL, value); - - value = Rd(RESET0_LEVEL); - value = value | ((0x1<<20)); - //printk("line(%d), value=%x\n", __LINE__, value); - Wr(RESET0_LEVEL, value); - - value = Rd(RESET2_LEVEL); - value = value | ((0x1<<14)); - //printk("line(%d), value=%x\n", __LINE__, value); - Wr(RESET2_LEVEL, value); - } else { - dev_err(kbdev->dev, "reg_base_hiubus is null !!!\n"); - } -#endif - //writel(..../*e.g. PWR_RESET, EXTERNAL_PWR_REGISTER*/); - - /* any other action necessary, like a simple delay */ - kbase_reg_write(kbdev, GPU_CONTROL_REG(PWR_KEY), 0x2968A819, NULL); - kbase_reg_write(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE1), 0xfff | (0x20<<16), NULL); -#endif - - /* Unmask the reset complete interrupt only */ - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), RESET_COMPLETED, - NULL); - - /* Initialize a structure for tracking the status of the reset */ - rtdata.kbdev = kbdev; - rtdata.timed_out = 0; - - /* Create a timer to use as a timeout on the reset */ - hrtimer_init_on_stack(&rtdata.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - rtdata.timer.function = kbasep_reset_timeout; - - hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT), - HRTIMER_MODE_REL); - - /* Wait for the RESET_COMPLETED interrupt to be raised */ - kbase_pm_wait_for_reset(kbdev); - - if (rtdata.timed_out == 0) { - /* GPU has been reset */ - hrtimer_cancel(&rtdata.timer); - destroy_hrtimer_on_stack(&rtdata.timer); - goto out; - } - - /* No interrupt has been received - check if the RAWSTAT register says - * the reset has completed */ - if (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) & - RESET_COMPLETED) { - /* The interrupt is set in the RAWSTAT; this suggests that the - * interrupts are not getting to the CPU */ - dev_warn(kbdev->dev, "Reset interrupt didn't reach CPU. Check interrupt assignments.\n"); - /* If interrupts aren't working we can't continue. 
*/ - destroy_hrtimer_on_stack(&rtdata.timer); - goto out; - } - - /* The GPU doesn't seem to be responding to the reset so try a hard - * reset */ - dev_err(kbdev->dev, "Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n", - RESET_TIMEOUT); - KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0); - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), - GPU_COMMAND_HARD_RESET, NULL); - - /* Restart the timer to wait for the hard reset to complete */ - rtdata.timed_out = 0; - - hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT), - HRTIMER_MODE_REL); - - /* Wait for the RESET_COMPLETED interrupt to be raised */ - kbase_pm_wait_for_reset(kbdev); - - if (rtdata.timed_out == 0) { - /* GPU has been reset */ - hrtimer_cancel(&rtdata.timer); - destroy_hrtimer_on_stack(&rtdata.timer); - goto out; - } - - destroy_hrtimer_on_stack(&rtdata.timer); - - dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n", - RESET_TIMEOUT); - - /* The GPU still hasn't reset, give up */ - return -EINVAL; - -out: - - if (flags & PM_HW_ISSUES_DETECT) - kbase_pm_hw_issues_detect(kbdev); - - kbase_pm_hw_issues_apply(kbdev); - - - /* If cycle counter was in use re-enable it, enable_irqs will only be - * false when called from kbase_pm_powerup */ - if (kbdev->pm.backend.gpu_cycle_counter_requests && - (flags & PM_ENABLE_IRQS)) { - /* enable interrupts as the L2 may have to be powered on */ - kbase_pm_enable_interrupts(kbdev); - kbase_pm_request_l2_caches(kbdev); - - /* Re-enable the counters if we need to */ - spin_lock_irqsave( - &kbdev->pm.backend.gpu_cycle_counter_requests_lock, - irq_flags); - if (kbdev->pm.backend.gpu_cycle_counter_requests) - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), - GPU_COMMAND_CYCLE_COUNT_START, NULL); - spin_unlock_irqrestore( - &kbdev->pm.backend.gpu_cycle_counter_requests_lock, - irq_flags); - - kbase_pm_release_l2_caches(kbdev); - kbase_pm_disable_interrupts(kbdev); - } - - if (flags & 
PM_ENABLE_IRQS) - kbase_pm_enable_interrupts(kbdev); - - return 0; -} - -/** - * kbase_pm_request_gpu_cycle_counter_do_request - Request cycle counters - * - * Increase the count of cycle counter users and turn the cycle counters on if - * they were previously off - * - * This function is designed to be called by - * kbase_pm_request_gpu_cycle_counter() or - * kbase_pm_request_gpu_cycle_counter_l2_is_on() only - * - * When this function is called the l2 cache must be on and the l2 cache users - * count must have been incremented by a call to ( - * kbase_pm_request_l2_caches() or kbase_pm_request_l2_caches_l2_on() ) - * - * @kbdev: The kbase device structure of the device - */ -static void -kbase_pm_request_gpu_cycle_counter_do_request(struct kbase_device *kbdev) -{ - unsigned long flags; - - spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock, - flags); - - ++kbdev->pm.backend.gpu_cycle_counter_requests; - - if (1 == kbdev->pm.backend.gpu_cycle_counter_requests) - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), - GPU_COMMAND_CYCLE_COUNT_START, NULL); - - spin_unlock_irqrestore( - &kbdev->pm.backend.gpu_cycle_counter_requests_lock, - flags); -} - -void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered); - - KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests < - INT_MAX); - - kbase_pm_request_l2_caches(kbdev); - - kbase_pm_request_gpu_cycle_counter_do_request(kbdev); -} - -KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter); - -void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered); - - KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests < - INT_MAX); - - kbase_pm_request_l2_caches_l2_is_on(kbdev); - - kbase_pm_request_gpu_cycle_counter_do_request(kbdev); -} - 
-KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter_l2_is_on); - -void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev) -{ - unsigned long flags; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock, - flags); - - KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests > 0); - - --kbdev->pm.backend.gpu_cycle_counter_requests; - - if (0 == kbdev->pm.backend.gpu_cycle_counter_requests) - kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), - GPU_COMMAND_CYCLE_COUNT_STOP, NULL); - - spin_unlock_irqrestore( - &kbdev->pm.backend.gpu_cycle_counter_requests_lock, - flags); - - kbase_pm_release_l2_caches(kbdev); -} - -KBASE_EXPORT_TEST_API(kbase_pm_release_gpu_cycle_counter); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h deleted file mode 100755 index bcca37d054021..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h +++ /dev/null @@ -1,516 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Power management API definitions used internally by GPU backend - */ - -#ifndef _KBASE_BACKEND_PM_INTERNAL_H_ -#define _KBASE_BACKEND_PM_INTERNAL_H_ - -#include - -#include "mali_kbase_pm_ca.h" -#include "mali_kbase_pm_policy.h" - - -/** - * kbase_pm_dev_idle - The GPU is idle. 
- * - * The OS may choose to turn off idle devices - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_dev_idle(struct kbase_device *kbdev); - -/** - * kbase_pm_dev_activate - The GPU is active. - * - * The OS should avoid opportunistically turning off the GPU while it is active - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_dev_activate(struct kbase_device *kbdev); - -/** - * kbase_pm_get_present_cores - Get details of the cores that are present in - * the device. - * - * This function can be called by the active power policy to return a bitmask of - * the cores (of a specified type) present in the GPU device and also a count of - * the number of cores. - * - * @kbdev: The kbase device structure for the device (must be a valid - * pointer) - * @type: The type of core (see the enum kbase_pm_core_type enumeration) - * - * Return: The bit mask of cores present - */ -u64 kbase_pm_get_present_cores(struct kbase_device *kbdev, - enum kbase_pm_core_type type); - -/** - * kbase_pm_get_active_cores - Get details of the cores that are currently - * active in the device. - * - * This function can be called by the active power policy to return a bitmask of - * the cores (of a specified type) that are actively processing work (i.e. - * turned on *and* busy). - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * @type: The type of core (see the enum kbase_pm_core_type enumeration) - * - * Return: The bit mask of active cores - */ -u64 kbase_pm_get_active_cores(struct kbase_device *kbdev, - enum kbase_pm_core_type type); - -/** - * kbase_pm_get_trans_cores - Get details of the cores that are currently - * transitioning between power states. - * - * This function can be called by the active power policy to return a bitmask of - * the cores (of a specified type) that are currently transitioning between - * power states. 
- * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * @type: The type of core (see the enum kbase_pm_core_type enumeration) - * - * Return: The bit mask of transitioning cores - */ -u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev, - enum kbase_pm_core_type type); - -/** - * kbase_pm_get_ready_cores - Get details of the cores that are currently - * powered and ready for jobs. - * - * This function can be called by the active power policy to return a bitmask of - * the cores (of a specified type) that are powered and ready for jobs (they may - * or may not be currently executing jobs). - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * @type: The type of core (see the enum kbase_pm_core_type enumeration) - * - * Return: The bit mask of ready cores - */ -u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev, - enum kbase_pm_core_type type); - -/** - * kbase_pm_clock_on - Turn the clock for the device on, and enable device - * interrupts. - * - * This function can be used by a power policy to turn the clock for the GPU on. - * It should be modified during integration to perform the necessary actions to - * ensure that the GPU is fully powered and clocked. - * - * @kbdev: The kbase device structure for the device (must be a valid - * pointer) - * @is_resume: true if clock on due to resume after suspend, false otherwise - */ -void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume); - -/** - * kbase_pm_clock_off - Disable device interrupts, and turn the clock for the - * device off. - * - * This function can be used by a power policy to turn the clock for the GPU - * off. It should be modified during integration to perform the necessary - * actions to turn the clock off (if this is possible in the integration). 
- * - * @kbdev: The kbase device structure for the device (must be a valid - * pointer) - * @is_suspend: true if clock off due to suspend, false otherwise - * - * Return: true if clock was turned off, or - * false if clock can not be turned off due to pending page/bus fault - * workers. Caller must flush MMU workqueues and retry - */ -bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend); - -/** - * kbase_pm_enable_interrupts - Enable interrupts on the device. - * - * Interrupts are also enabled after a call to kbase_pm_clock_on(). - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_enable_interrupts(struct kbase_device *kbdev); - -/** - * kbase_pm_enable_interrupts_mmu_mask - Enable interrupts on the device, using - * the provided mask to set MMU_IRQ_MASK. - * - * Interrupts are also enabled after a call to kbase_pm_clock_on(). - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * @mask: The mask to use for MMU_IRQ_MASK - */ -void kbase_pm_enable_interrupts_mmu_mask(struct kbase_device *kbdev, u32 mask); - -/** - * kbase_pm_disable_interrupts - Disable interrupts on the device. - * - * This prevents delivery of Power Management interrupts to the CPU so that - * kbase_pm_check_transitions_nolock() will not be called from the IRQ handler - * until kbase_pm_enable_interrupts() or kbase_pm_clock_on() is called. - * - * Interrupts are also disabled after a call to kbase_pm_clock_off(). - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_disable_interrupts(struct kbase_device *kbdev); - -/** - * kbase_pm_init_hw - Initialize the hardware. 
- * @kbdev: The kbase device structure for the device (must be a valid pointer) - * @flags: Flags specifying the type of PM init - * - * This function checks the GPU ID register to ensure that the GPU is supported - * by the driver and performs a reset on the device so that it is in a known - * state before the device is used. - * - * Return: 0 if the device is supported and successfully reset. - */ -int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags); - -/** - * kbase_pm_reset_done - The GPU has been reset successfully. - * - * This function must be called by the GPU interrupt handler when the - * RESET_COMPLETED bit is set. It signals to the power management initialization - * code that the GPU has been successfully reset. - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_reset_done(struct kbase_device *kbdev); - - -/** - * kbase_pm_check_transitions_nolock - Check if there are any power transitions - * to make, and if so start them. - * - * This function will check the desired_xx_state members of - * struct kbase_pm_device_data and the actual status of the hardware to see if - * any power transitions can be made at this time to make the hardware state - * closer to the state desired by the power policy. - * - * The return value can be used to check whether all the desired cores are - * available, and so whether it's worth submitting a job (e.g. from a Power - * Management IRQ). - * - * Note that this still returns true when desired_xx_state has no - * cores. That is: of the no cores desired, none were *un*available. In - * this case, the caller may still need to try submitting jobs. This is because - * the Core Availability Policy might have taken us to an intermediate state - * where no cores are powered, before powering on more cores (e.g. 
for core - * rotation) - * - * The caller must hold kbase_device.pm.power_change_lock - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * - * Return: non-zero when all desired cores are available. That is, - * it's worthwhile for the caller to submit a job. - * false otherwise - */ -bool kbase_pm_check_transitions_nolock(struct kbase_device *kbdev); - -/** - * kbase_pm_check_transitions_sync - Synchronous and locking variant of - * kbase_pm_check_transitions_nolock() - * - * On returning, the desired state at the time of the call will have been met. - * - * There is nothing to stop the core being switched off by calls to - * kbase_pm_release_cores() or kbase_pm_unrequest_cores(). Therefore, the - * caller must have already made a call to - * kbase_pm_request_cores()/kbase_pm_request_cores_sync() previously. - * - * The usual use-case for this is to ensure cores are 'READY' after performing - * a GPU Reset. - * - * Unlike kbase_pm_check_transitions_nolock(), the caller must not hold - * kbase_device.pm.power_change_lock, because this function will take that - * lock itself. - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_check_transitions_sync(struct kbase_device *kbdev); - -/** - * kbase_pm_update_cores_state_nolock - Variant of kbase_pm_update_cores_state() - * where the caller must hold - * kbase_device.pm.power_change_lock - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev); - -/** - * kbase_pm_update_cores_state - Update the desired state of shader cores from - * the Power Policy, and begin any power - * transitions. - * - * This function will update the desired_xx_state members of - * struct kbase_pm_device_data by calling into the current Power Policy. It will - * then begin power transitions to make the hardware acheive the desired shader - * core state. 
- * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_update_cores_state(struct kbase_device *kbdev); - -/** - * kbase_pm_cancel_deferred_poweroff - Cancel any pending requests to power off - * the GPU and/or shader cores. - * - * This should be called by any functions which directly power off the GPU. - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev); - -/** - * kbasep_pm_read_present_cores - Read the bitmasks of present cores. - * - * This information is cached to avoid having to perform register reads whenever - * the information is required. - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbasep_pm_read_present_cores(struct kbase_device *kbdev); - -/** - * kbasep_pm_metrics_init - Initialize the metrics gathering framework. - * - * This must be called before other metric gathering APIs are called. - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * - * Return: 0 on success, error code on error - */ -int kbasep_pm_metrics_init(struct kbase_device *kbdev); - -/** - * kbasep_pm_metrics_term - Terminate the metrics gathering framework. - * - * This must be called when metric gathering is no longer required. It is an - * error to call any metrics gathering function (other than - * kbasep_pm_metrics_init()) after calling this function. - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbasep_pm_metrics_term(struct kbase_device *kbdev); - -/** - * kbase_pm_report_vsync - Function to be called by the frame buffer driver to - * update the vsync metric. - * - * This function should be called by the frame buffer driver to update whether - * the system is hitting the vsync target or not. 
buffer_updated should be true - * if the vsync corresponded with a new frame being displayed, otherwise it - * should be false. This function does not need to be called every vsync, but - * only when the value of @buffer_updated differs from a previous call. - * - * @kbdev: The kbase device structure for the device (must be a - * valid pointer) - * @buffer_updated: True if the buffer has been updated on this VSync, - * false otherwise - */ -void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated); - -/** - * kbase_pm_get_dvfs_action - Determine whether the DVFS system should change - * the clock speed of the GPU. - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * - * This function should be called regularly by the DVFS system to check whether - * the clock speed of the GPU needs updating. - */ -void kbase_pm_get_dvfs_action(struct kbase_device *kbdev); - -/** - * kbase_pm_request_gpu_cycle_counter - Mark that the GPU cycle counter is - * needed - * - * If the caller is the first caller then the GPU cycle counters will be enabled - * along with the l2 cache - * - * The GPU must be powered when calling this function (i.e. - * kbase_pm_context_active() must have been called). - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev); - -/** - * kbase_pm_request_gpu_cycle_counter_l2_is_on - Mark GPU cycle counter is - * needed (l2 cache already on) - * - * This is a version of the above function - * (kbase_pm_request_gpu_cycle_counter()) suitable for being called when the - * l2 cache is known to be on and assured to be on until the subsequent call of - * kbase_pm_release_gpu_cycle_counter() such as when a job is submitted. It does - * not sleep and can be called from atomic functions. - * - * The GPU must be powered when calling this function (i.e. 
- * kbase_pm_context_active() must have been called) and the l2 cache must be - * powered on. - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev); - -/** - * kbase_pm_release_gpu_cycle_counter - Mark that the GPU cycle counter is no - * longer in use - * - * If the caller is the - * last caller then the GPU cycle counters will be disabled. A request must have - * been made before a call to this. - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev); - -/** - * kbase_pm_register_access_enable - Enable access to GPU registers - * - * Enables access to the GPU registers before power management has powered up - * the GPU with kbase_pm_powerup(). - * - * Access to registers should be done using kbase_os_reg_read()/write() at this - * stage, not kbase_reg_read()/write(). - * - * This results in the power management callbacks provided in the driver - * configuration to get called to turn on power and/or clocks to the GPU. See - * kbase_pm_callback_conf. - * - * This should only be used before power management is powered up with - * kbase_pm_powerup() - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_register_access_enable(struct kbase_device *kbdev); - -/** - * kbase_pm_register_access_disable - Disable early register access - * - * Disables access to the GPU registers enabled earlier by a call to - * kbase_pm_register_access_enable(). - * - * This results in the power management callbacks provided in the driver - * configuration to get called to turn off power and/or clocks to the GPU. 
See - * kbase_pm_callback_conf - * - * This should only be used before power management is powered up with - * kbase_pm_powerup() - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_register_access_disable(struct kbase_device *kbdev); - -/* NOTE: kbase_pm_is_suspending is in mali_kbase.h, because it is an inline - * function */ - -/** - * kbase_pm_metrics_is_active - Check if the power management metrics - * collection is active. - * - * Note that this returns if the power management metrics collection was - * active at the time of calling, it is possible that after the call the metrics - * collection enable may have changed state. - * - * The caller must handle the consequence that the state may have changed. - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * Return: true if metrics collection was active else false. - */ -bool kbase_pm_metrics_is_active(struct kbase_device *kbdev); - -/** - * kbase_pm_do_poweron - Power on the GPU, and any cores that are requested. - * - * @kbdev: The kbase device structure for the device (must be a valid - * pointer) - * @is_resume: true if power on due to resume after suspend, - * false otherwise - */ -void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume); - -/** - * kbase_pm_do_poweroff - Power off the GPU, and any cores that have been - * requested. - * - * @kbdev: The kbase device structure for the device (must be a valid - * pointer) - * @is_suspend: true if power off due to suspend, - * false otherwise - * Return: - * true if power was turned off, else - * false if power can not be turned off due to pending page/bus - * fault workers. 
Caller must flush MMU workqueues and retry - */ -bool kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend); - -#ifdef CONFIG_PM_DEVFREQ -void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev, - unsigned long *total, unsigned long *busy); -void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev); -#endif - -#ifdef CONFIG_MALI_MIDGARD_DVFS - -/** - * kbase_platform_dvfs_event - Report utilisation to DVFS code - * - * Function provided by platform specific code when DVFS is enabled to allow - * the power management metrics system to report utilisation. - * - * @kbdev: The kbase device structure for the device (must be a - * valid pointer) - * @utilisation: The current calculated utilisation by the metrics system. - * @util_gl_share: The current calculated gl share of utilisation. - * @util_cl_share: The current calculated cl share of utilisation per core - * group. - * Return: Returns 0 on failure and non zero on success. - */ - -int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation, - u32 util_gl_share, u32 util_cl_share[2]); -#endif - -void kbase_pm_power_changed(struct kbase_device *kbdev); - -/** - * kbase_pm_metrics_update - Inform the metrics system that an atom is either - * about to be run or has just completed. 
- * @kbdev: The kbase device structure for the device (must be a valid pointer) - * @now: Pointer to the timestamp of the change, or NULL to use current time - * - * Caller must hold runpool_irq.lock - */ -void kbase_pm_metrics_update(struct kbase_device *kbdev, - ktime_t *now); - - -#endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c deleted file mode 100755 index ae632564b96ab..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c +++ /dev/null @@ -1,400 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Metrics for power management - */ - -#include -#include -#include -#include - -/* When VSync is being hit aim for utilisation between 70-90% */ -#define KBASE_PM_VSYNC_MIN_UTILISATION 70 -#define KBASE_PM_VSYNC_MAX_UTILISATION 90 -/* Otherwise aim for 10-40% */ -#define KBASE_PM_NO_VSYNC_MIN_UTILISATION 10 -#define KBASE_PM_NO_VSYNC_MAX_UTILISATION 40 - -/* Shift used for kbasep_pm_metrics_data.time_busy/idle - units of (1 << 8) ns - * This gives a maximum period between samples of 2^(32+8)/100 ns = slightly - * under 11s. Exceeding this will cause overflow */ -#define KBASE_PM_TIME_SHIFT 8 - -/* Maximum time between sampling of utilization data, without resetting the - * counters. 
*/ -#define MALI_UTILIZATION_MAX_PERIOD 100000 /* ns = 100ms */ - -#ifdef CONFIG_MALI_MIDGARD_DVFS -static enum hrtimer_restart dvfs_callback(struct hrtimer *timer) -{ - unsigned long flags; - struct kbasep_pm_metrics_data *metrics; - - KBASE_DEBUG_ASSERT(timer != NULL); - - metrics = container_of(timer, struct kbasep_pm_metrics_data, timer); - kbase_pm_get_dvfs_action(metrics->kbdev); - - spin_lock_irqsave(&metrics->lock, flags); - - if (metrics->timer_active) - hrtimer_start(timer, - HR_TIMER_DELAY_MSEC(metrics->kbdev->pm.dvfs_period), - HRTIMER_MODE_REL); - - spin_unlock_irqrestore(&metrics->lock, flags); - - return HRTIMER_NORESTART; -} -#endif /* CONFIG_MALI_MIDGARD_DVFS */ - -int kbasep_pm_metrics_init(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - kbdev->pm.backend.metrics.kbdev = kbdev; - - kbdev->pm.backend.metrics.time_period_start = ktime_get(); - kbdev->pm.backend.metrics.time_busy = 0; - kbdev->pm.backend.metrics.time_idle = 0; - kbdev->pm.backend.metrics.prev_busy = 0; - kbdev->pm.backend.metrics.prev_idle = 0; - kbdev->pm.backend.metrics.gpu_active = false; - kbdev->pm.backend.metrics.active_cl_ctx[0] = 0; - kbdev->pm.backend.metrics.active_cl_ctx[1] = 0; - kbdev->pm.backend.metrics.active_gl_ctx[0] = 0; - kbdev->pm.backend.metrics.active_gl_ctx[1] = 0; - kbdev->pm.backend.metrics.busy_cl[0] = 0; - kbdev->pm.backend.metrics.busy_cl[1] = 0; - kbdev->pm.backend.metrics.busy_gl = 0; - - spin_lock_init(&kbdev->pm.backend.metrics.lock); - -#ifdef CONFIG_MALI_MIDGARD_DVFS - kbdev->pm.backend.metrics.timer_active = true; - hrtimer_init(&kbdev->pm.backend.metrics.timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - kbdev->pm.backend.metrics.timer.function = dvfs_callback; - - hrtimer_start(&kbdev->pm.backend.metrics.timer, - HR_TIMER_DELAY_MSEC(kbdev->pm.dvfs_period), - HRTIMER_MODE_REL); -#endif /* CONFIG_MALI_MIDGARD_DVFS */ - - return 0; -} - -KBASE_EXPORT_TEST_API(kbasep_pm_metrics_init); - -void kbasep_pm_metrics_term(struct 
kbase_device *kbdev) -{ -#ifdef CONFIG_MALI_MIDGARD_DVFS - unsigned long flags; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags); - kbdev->pm.backend.metrics.timer_active = false; - spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags); - - hrtimer_cancel(&kbdev->pm.backend.metrics.timer); -#endif /* CONFIG_MALI_MIDGARD_DVFS */ -} - -KBASE_EXPORT_TEST_API(kbasep_pm_metrics_term); - -/* caller needs to hold kbdev->pm.backend.metrics.lock before calling this - * function - */ -static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev, - ktime_t now) -{ - ktime_t diff; - - lockdep_assert_held(&kbdev->pm.backend.metrics.lock); - - diff = ktime_sub(now, kbdev->pm.backend.metrics.time_period_start); - if (ktime_to_ns(diff) < 0) - return; - - if (kbdev->pm.backend.metrics.gpu_active) { - u32 ns_time = (u32) (ktime_to_ns(diff) >> KBASE_PM_TIME_SHIFT); - - kbdev->pm.backend.metrics.time_busy += ns_time; - if (kbdev->pm.backend.metrics.active_cl_ctx[0]) - kbdev->pm.backend.metrics.busy_cl[0] += ns_time; - if (kbdev->pm.backend.metrics.active_cl_ctx[1]) - kbdev->pm.backend.metrics.busy_cl[1] += ns_time; - if (kbdev->pm.backend.metrics.active_gl_ctx[0]) - kbdev->pm.backend.metrics.busy_gl += ns_time; - if (kbdev->pm.backend.metrics.active_gl_ctx[1]) - kbdev->pm.backend.metrics.busy_gl += ns_time; - } else { - kbdev->pm.backend.metrics.time_idle += (u32) (ktime_to_ns(diff) - >> KBASE_PM_TIME_SHIFT); - } - - kbdev->pm.backend.metrics.time_period_start = now; -} - -#if defined(CONFIG_PM_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS) -/* Caller needs to hold kbdev->pm.backend.metrics.lock before calling this - * function. 
- */ -static void kbase_pm_reset_dvfs_utilisation_unlocked(struct kbase_device *kbdev, - ktime_t now) -{ - /* Store previous value */ - kbdev->pm.backend.metrics.prev_idle = - kbdev->pm.backend.metrics.time_idle; - kbdev->pm.backend.metrics.prev_busy = - kbdev->pm.backend.metrics.time_busy; - - /* Reset current values */ - kbdev->pm.backend.metrics.time_period_start = now; - kbdev->pm.backend.metrics.time_idle = 0; - kbdev->pm.backend.metrics.time_busy = 0; - kbdev->pm.backend.metrics.busy_cl[0] = 0; - kbdev->pm.backend.metrics.busy_cl[1] = 0; - kbdev->pm.backend.metrics.busy_gl = 0; -} - -void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev) -{ - unsigned long flags; - - spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags); - kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, ktime_get()); - spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags); -} - -void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev, - unsigned long *total_out, unsigned long *busy_out) -{ - ktime_t now = ktime_get(); - unsigned long flags, busy, total; - - spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags); - kbase_pm_get_dvfs_utilisation_calc(kbdev, now); - - busy = kbdev->pm.backend.metrics.time_busy; - total = busy + kbdev->pm.backend.metrics.time_idle; - - /* Reset stats if older than MALI_UTILIZATION_MAX_PERIOD (default - * 100ms) */ - if (total >= MALI_UTILIZATION_MAX_PERIOD) { - kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now); - } else if (total < (MALI_UTILIZATION_MAX_PERIOD / 2)) { - total += kbdev->pm.backend.metrics.prev_idle + - kbdev->pm.backend.metrics.prev_busy; - busy += kbdev->pm.backend.metrics.prev_busy; - } - - *total_out = total; - *busy_out = busy; - spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags); -} -#endif - -#ifdef CONFIG_MALI_MIDGARD_DVFS - -/* caller needs to hold kbdev->pm.backend.metrics.lock before calling this - * function - */ -int kbase_pm_get_dvfs_utilisation_old(struct kbase_device *kbdev, - int 
*util_gl_share, - int util_cl_share[2], - ktime_t now) -{ - int utilisation; - int busy; - - kbase_pm_get_dvfs_utilisation_calc(kbdev, now); - - if (kbdev->pm.backend.metrics.time_idle + - kbdev->pm.backend.metrics.time_busy == 0) { - /* No data - so we return NOP */ - utilisation = -1; - if (util_gl_share) - *util_gl_share = -1; - if (util_cl_share) { - util_cl_share[0] = -1; - util_cl_share[1] = -1; - } - goto out; - } - - utilisation = (100 * kbdev->pm.backend.metrics.time_busy) / - (kbdev->pm.backend.metrics.time_idle + - kbdev->pm.backend.metrics.time_busy); - - busy = kbdev->pm.backend.metrics.busy_gl + - kbdev->pm.backend.metrics.busy_cl[0] + - kbdev->pm.backend.metrics.busy_cl[1]; - - if (busy != 0) { - if (util_gl_share) - *util_gl_share = - (100 * kbdev->pm.backend.metrics.busy_gl) / - busy; - if (util_cl_share) { - util_cl_share[0] = - (100 * kbdev->pm.backend.metrics.busy_cl[0]) / - busy; - util_cl_share[1] = - (100 * kbdev->pm.backend.metrics.busy_cl[1]) / - busy; - } - } else { - if (util_gl_share) - *util_gl_share = -1; - if (util_cl_share) { - util_cl_share[0] = -1; - util_cl_share[1] = -1; - } - } - -out: - return utilisation; -} - -void kbase_pm_get_dvfs_action(struct kbase_device *kbdev) -{ - unsigned long flags; - int utilisation, util_gl_share; - int util_cl_share[2]; - ktime_t now; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags); - - now = ktime_get(); - - utilisation = kbase_pm_get_dvfs_utilisation_old(kbdev, &util_gl_share, - util_cl_share, now); - - if (utilisation < 0 || util_gl_share < 0 || util_cl_share[0] < 0 || - util_cl_share[1] < 0) { - utilisation = 0; - util_gl_share = 0; - util_cl_share[0] = 0; - util_cl_share[1] = 0; - goto out; - } - -out: -#ifdef CONFIG_MALI_MIDGARD_DVFS - kbase_platform_dvfs_event(kbdev, utilisation, util_gl_share, - util_cl_share); -#endif /*CONFIG_MALI_MIDGARD_DVFS */ - - kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now); - - 
spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags); -} - -bool kbase_pm_metrics_is_active(struct kbase_device *kbdev) -{ - bool isactive; - unsigned long flags; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags); - isactive = kbdev->pm.backend.metrics.timer_active; - spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags); - - return isactive; -} -KBASE_EXPORT_TEST_API(kbase_pm_metrics_is_active); - -#endif /* CONFIG_MALI_MIDGARD_DVFS */ - -/** - * kbase_pm_metrics_active_calc - Update PM active counts based on currently - * running atoms - * @kbdev: Device pointer - * - * The caller must hold kbdev->pm.backend.metrics.lock - */ -static void kbase_pm_metrics_active_calc(struct kbase_device *kbdev) -{ - int js; - - lockdep_assert_held(&kbdev->pm.backend.metrics.lock); - - kbdev->pm.backend.metrics.active_gl_ctx[0] = 0; - kbdev->pm.backend.metrics.active_gl_ctx[1] = 0; - kbdev->pm.backend.metrics.active_cl_ctx[0] = 0; - kbdev->pm.backend.metrics.active_cl_ctx[1] = 0; - kbdev->pm.backend.metrics.gpu_active = false; - - for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) { - struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0); - - /* Head atom may have just completed, so if it isn't running - * then try the next atom */ - if (katom && katom->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) - katom = kbase_gpu_inspect(kbdev, js, 1); - - if (katom && katom->gpu_rb_state == - KBASE_ATOM_GPU_RB_SUBMITTED) { - if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) { - int device_nr = (katom->core_req & - BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) - ? 
katom->device_nr : 0; - WARN_ON(device_nr >= 2); - kbdev->pm.backend.metrics.active_cl_ctx[ - device_nr] = 1; - } else { - /* Slot 2 should not be running non-compute - * atoms */ - WARN_ON(js >= 2); - kbdev->pm.backend.metrics.active_gl_ctx[js] = 1; - } - kbdev->pm.backend.metrics.gpu_active = true; - } - } -} - -/* called when job is submitted to or removed from a GPU slot */ -void kbase_pm_metrics_update(struct kbase_device *kbdev, ktime_t *timestamp) -{ - unsigned long flags; - ktime_t now; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags); - - if (!timestamp) { - now = ktime_get(); - timestamp = &now; - } - - /* Track how long CL and/or GL jobs have been busy for */ - kbase_pm_get_dvfs_utilisation_calc(kbdev, *timestamp); - - kbase_pm_metrics_active_calc(kbdev); - - spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c deleted file mode 100755 index e3c4829019ae0..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c +++ /dev/null @@ -1,934 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/* - * Power policy API implementations - */ - -#include -#include -#include -#include -#include -#include - -static const struct kbase_pm_policy *const policy_list[] = { -#ifdef CONFIG_MALI_NO_MALI - &kbase_pm_always_on_policy_ops, - &kbase_pm_demand_policy_ops, - &kbase_pm_coarse_demand_policy_ops, -#if !MALI_CUSTOMER_RELEASE - &kbase_pm_demand_always_powered_policy_ops, - &kbase_pm_fast_start_policy_ops, -#endif -#else /* CONFIG_MALI_NO_MALI */ - &kbase_pm_demand_policy_ops, - &kbase_pm_always_on_policy_ops, - &kbase_pm_coarse_demand_policy_ops, -#if !MALI_CUSTOMER_RELEASE - &kbase_pm_demand_always_powered_policy_ops, - &kbase_pm_fast_start_policy_ops, -#endif -#endif /* CONFIG_MALI_NO_MALI */ -}; - -/* The number of policies available in the system. - * This is derived from the number of functions listed in policy_get_functions. - */ -#define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list)) - - -/* Function IDs for looking up Timeline Trace codes in - * kbase_pm_change_state_trace_code */ -enum kbase_pm_func_id { - KBASE_PM_FUNC_ID_REQUEST_CORES_START, - KBASE_PM_FUNC_ID_REQUEST_CORES_END, - KBASE_PM_FUNC_ID_RELEASE_CORES_START, - KBASE_PM_FUNC_ID_RELEASE_CORES_END, - /* Note: kbase_pm_unrequest_cores() is on the slow path, and we neither - * expect to hit it nor tend to hit it very much anyway. We can detect - * whether we need more instrumentation by a difference between - * PM_CHECKTRANS events and PM_SEND/HANDLE_EVENT. 
*/ - - /* Must be the last */ - KBASE_PM_FUNC_ID_COUNT -}; - - -/* State changes during request/unrequest/release-ing cores */ -enum { - KBASE_PM_CHANGE_STATE_SHADER = (1u << 0), - KBASE_PM_CHANGE_STATE_TILER = (1u << 1), - - /* These two must be last */ - KBASE_PM_CHANGE_STATE_MASK = (KBASE_PM_CHANGE_STATE_TILER | - KBASE_PM_CHANGE_STATE_SHADER), - KBASE_PM_CHANGE_STATE_COUNT = KBASE_PM_CHANGE_STATE_MASK + 1 -}; -typedef u32 kbase_pm_change_state; - - -#ifdef CONFIG_MALI_TRACE_TIMELINE -/* Timeline Trace code lookups for each function */ -static u32 kbase_pm_change_state_trace_code[KBASE_PM_FUNC_ID_COUNT] - [KBASE_PM_CHANGE_STATE_COUNT] = { - /* kbase_pm_request_cores */ - [KBASE_PM_FUNC_ID_REQUEST_CORES_START][0] = 0, - [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] = - SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_START, - [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_TILER] = - SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_START, - [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER | - KBASE_PM_CHANGE_STATE_TILER] = - SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_START, - - [KBASE_PM_FUNC_ID_REQUEST_CORES_END][0] = 0, - [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] = - SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_END, - [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_TILER] = - SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_END, - [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER | - KBASE_PM_CHANGE_STATE_TILER] = - SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_END, - - /* kbase_pm_release_cores */ - [KBASE_PM_FUNC_ID_RELEASE_CORES_START][0] = 0, - [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] = - SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_START, - [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_TILER] = - SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_START, - 
[KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER | - KBASE_PM_CHANGE_STATE_TILER] = - SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_START, - - [KBASE_PM_FUNC_ID_RELEASE_CORES_END][0] = 0, - [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] = - SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_END, - [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_TILER] = - SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_END, - [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER | - KBASE_PM_CHANGE_STATE_TILER] = - SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_END -}; - -static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev, - enum kbase_pm_func_id func_id, - kbase_pm_change_state state) -{ - int trace_code; - - KBASE_DEBUG_ASSERT(func_id >= 0 && func_id < KBASE_PM_FUNC_ID_COUNT); - KBASE_DEBUG_ASSERT(state != 0 && (state & KBASE_PM_CHANGE_STATE_MASK) == - state); - - trace_code = kbase_pm_change_state_trace_code[func_id][state]; - KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code); -} - -#else /* CONFIG_MALI_TRACE_TIMELINE */ -static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev, - enum kbase_pm_func_id func_id, kbase_pm_change_state state) -{ -} - -#endif /* CONFIG_MALI_TRACE_TIMELINE */ - -/** - * kbasep_pm_do_poweroff_cores - Process a poweroff request and power down any - * requested shader cores - * @kbdev: Device pointer - */ -static void kbasep_pm_do_poweroff_cores(struct kbase_device *kbdev) -{ - u64 prev_shader_state = kbdev->pm.backend.desired_shader_state; - - lockdep_assert_held(&kbdev->pm.power_change_lock); - - kbdev->pm.backend.desired_shader_state &= - ~kbdev->pm.backend.shader_poweroff_pending; - - kbdev->pm.backend.shader_poweroff_pending = 0; - - if (prev_shader_state != kbdev->pm.backend.desired_shader_state - || kbdev->pm.backend.ca_in_transition) { - bool cores_are_available; - - KBASE_TIMELINE_PM_CHECKTRANS(kbdev, - 
SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START); - cores_are_available = kbase_pm_check_transitions_nolock(kbdev); - KBASE_TIMELINE_PM_CHECKTRANS(kbdev, - SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END); - - /* Don't need 'cores_are_available', - * because we don't return anything */ - CSTD_UNUSED(cores_are_available); - } -} - -static enum hrtimer_restart -kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer) -{ - struct kbase_device *kbdev; - - kbdev = container_of(timer, struct kbase_device, - pm.backend.gpu_poweroff_timer); - - /* It is safe for this call to do nothing if the work item is already - * queued. The worker function will read the must up-to-date state of - * kbdev->pm.backend.gpu_poweroff_pending under lock. - * - * If a state change occurs while the worker function is processing, - * this call will succeed as a work item can be requeued once it has - * started processing. - */ - if (kbdev->pm.backend.gpu_poweroff_pending) - queue_work(kbdev->pm.backend.gpu_poweroff_wq, - &kbdev->pm.backend.gpu_poweroff_work); - - if (kbdev->pm.backend.shader_poweroff_pending) { - unsigned long flags; - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - if (kbdev->pm.backend.shader_poweroff_pending) { - kbdev->pm.backend.shader_poweroff_pending_time--; - - KBASE_DEBUG_ASSERT( - kbdev->pm.backend.shader_poweroff_pending_time - >= 0); - - if (!kbdev->pm.backend.shader_poweroff_pending_time) - kbasep_pm_do_poweroff_cores(kbdev); - } - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - } - - if (kbdev->pm.backend.poweroff_timer_needed) { - hrtimer_add_expires(timer, kbdev->pm.gpu_poweroff_time); - - return HRTIMER_RESTART; - } - - return HRTIMER_NORESTART; -} - -static void kbasep_pm_do_gpu_poweroff_wq(struct work_struct *data) -{ - unsigned long flags; - struct kbase_device *kbdev; - bool do_poweroff = false; - - kbdev = container_of(data, struct kbase_device, - pm.backend.gpu_poweroff_work); - - mutex_lock(&kbdev->pm.lock); - - 
if (kbdev->pm.backend.gpu_poweroff_pending == 0) { - mutex_unlock(&kbdev->pm.lock); - return; - } - - kbdev->pm.backend.gpu_poweroff_pending--; - - if (kbdev->pm.backend.gpu_poweroff_pending > 0) { - mutex_unlock(&kbdev->pm.lock); - return; - } - - KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_poweroff_pending == 0); - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - /* Only power off the GPU if a request is still pending */ - if (!kbdev->pm.backend.pm_current_policy->get_core_active(kbdev)) - do_poweroff = true; - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - if (do_poweroff) { - kbdev->pm.backend.poweroff_timer_needed = false; - hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer); - /* Power off the GPU */ - if (!kbase_pm_do_poweroff(kbdev, false)) { - /* GPU can not be powered off at present */ - kbdev->pm.backend.poweroff_timer_needed = true; - hrtimer_start(&kbdev->pm.backend.gpu_poweroff_timer, - kbdev->pm.gpu_poweroff_time, - HRTIMER_MODE_REL); - } - } - - mutex_unlock(&kbdev->pm.lock); -} - -int kbase_pm_policy_init(struct kbase_device *kbdev) -{ - struct workqueue_struct *wq; - - wq = alloc_workqueue("kbase_pm_do_poweroff", - WQ_HIGHPRI | WQ_UNBOUND, 1); - if (!wq) - return -ENOMEM; - - kbdev->pm.backend.gpu_poweroff_wq = wq; - INIT_WORK(&kbdev->pm.backend.gpu_poweroff_work, - kbasep_pm_do_gpu_poweroff_wq); - hrtimer_init(&kbdev->pm.backend.gpu_poweroff_timer, - CLOCK_MONOTONIC, HRTIMER_MODE_REL); - kbdev->pm.backend.gpu_poweroff_timer.function = - kbasep_pm_do_gpu_poweroff_callback; - kbdev->pm.backend.pm_current_policy = policy_list[0]; - kbdev->pm.backend.pm_current_policy->init(kbdev); - kbdev->pm.gpu_poweroff_time = - HR_TIMER_DELAY_NSEC(DEFAULT_PM_GPU_POWEROFF_TICK_NS); - kbdev->pm.poweroff_shader_ticks = DEFAULT_PM_POWEROFF_TICK_SHADER; - kbdev->pm.poweroff_gpu_ticks = DEFAULT_PM_POWEROFF_TICK_GPU; - - return 0; -} - -void kbase_pm_policy_term(struct kbase_device *kbdev) -{ - 
kbdev->pm.backend.pm_current_policy->term(kbdev); - destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wq); -} - -void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev) -{ - unsigned long flags; - - lockdep_assert_held(&kbdev->pm.lock); - - kbdev->pm.backend.poweroff_timer_needed = false; - hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer); - - /* If wq is already running but is held off by pm.lock, make sure it has - * no effect */ - kbdev->pm.backend.gpu_poweroff_pending = 0; - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - kbdev->pm.backend.shader_poweroff_pending = 0; - kbdev->pm.backend.shader_poweroff_pending_time = 0; - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); -} - -void kbase_pm_update_active(struct kbase_device *kbdev) -{ - unsigned long flags; - bool active; - - lockdep_assert_held(&kbdev->pm.lock); - - /* pm_current_policy will never be NULL while pm.lock is held */ - KBASE_DEBUG_ASSERT(kbdev->pm.backend.pm_current_policy); - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - active = kbdev->pm.backend.pm_current_policy->get_core_active(kbdev); - - if (active) { - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - if (kbdev->pm.backend.gpu_poweroff_pending) { - /* Cancel any pending power off request */ - kbdev->pm.backend.gpu_poweroff_pending = 0; - - /* If a request was pending then the GPU was still - * powered, so no need to continue */ - if (!kbdev->poweroff_pending) - return; - } - - if (!kbdev->pm.backend.poweroff_timer_needed && - !kbdev->pm.backend.gpu_powered && - (kbdev->pm.poweroff_gpu_ticks || - kbdev->pm.poweroff_shader_ticks)) { - kbdev->pm.backend.poweroff_timer_needed = true; - hrtimer_start(&kbdev->pm.backend.gpu_poweroff_timer, - kbdev->pm.gpu_poweroff_time, - HRTIMER_MODE_REL); - } - - /* Power on the GPU and any cores requested by the policy */ - kbase_pm_do_poweron(kbdev, false); - } else { - /* It is an error for the power policy to power off the GPU - * 
when there are contexts active */ - KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0); - - if (kbdev->pm.backend.shader_poweroff_pending) { - kbdev->pm.backend.shader_poweroff_pending = 0; - kbdev->pm.backend.shader_poweroff_pending_time = 0; - } - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - - /* Request power off */ - if (kbdev->pm.backend.gpu_powered) { - if (kbdev->pm.poweroff_gpu_ticks) { - kbdev->pm.backend.gpu_poweroff_pending = - kbdev->pm.poweroff_gpu_ticks; - if (!kbdev->pm.backend.poweroff_timer_needed) { - /* Start timer if not running (eg if - * power policy has been changed from - * always_on to something else). This - * will ensure the GPU is actually - * powered off */ - kbdev->pm.backend.poweroff_timer_needed - = true; - hrtimer_start( - &kbdev->pm.backend.gpu_poweroff_timer, - kbdev->pm.gpu_poweroff_time, - HRTIMER_MODE_REL); - } - } else { - /* Power off the GPU immediately */ - if (!kbase_pm_do_poweroff(kbdev, false)) { - /* GPU can not be powered off at present - */ - kbdev->pm.backend.poweroff_timer_needed - = true; - hrtimer_start( - &kbdev->pm.backend.gpu_poweroff_timer, - kbdev->pm.gpu_poweroff_time, - HRTIMER_MODE_REL); - } - } - } - } -} - -void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev) -{ - u64 desired_bitmap; - bool cores_are_available; - bool do_poweroff = false; - - lockdep_assert_held(&kbdev->pm.power_change_lock); - - if (kbdev->pm.backend.pm_current_policy == NULL) - return; - - desired_bitmap = - kbdev->pm.backend.pm_current_policy->get_core_mask(kbdev); - desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev); - - /* Enable core 0 if tiler required, regardless of core availability */ - if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0) - desired_bitmap |= 1; - - if (kbdev->pm.backend.desired_shader_state != desired_bitmap) - KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, NULL, 0u, - (u32)desired_bitmap); - /* Are any cores being powered on? 
*/ - if (~kbdev->pm.backend.desired_shader_state & desired_bitmap || - kbdev->pm.backend.ca_in_transition) { - /* Check if we are powering off any cores before updating shader - * state */ - if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap) { - /* Start timer to power off cores */ - kbdev->pm.backend.shader_poweroff_pending |= - (kbdev->pm.backend.desired_shader_state & - ~desired_bitmap); - - if (kbdev->pm.poweroff_shader_ticks) - kbdev->pm.backend.shader_poweroff_pending_time = - kbdev->pm.poweroff_shader_ticks; - else - do_poweroff = true; - } - - kbdev->pm.backend.desired_shader_state = desired_bitmap; - - /* If any cores are being powered on, transition immediately */ - cores_are_available = kbase_pm_check_transitions_nolock(kbdev); - } else if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap) { - /* Start timer to power off cores */ - kbdev->pm.backend.shader_poweroff_pending |= - (kbdev->pm.backend.desired_shader_state & - ~desired_bitmap); - if (kbdev->pm.poweroff_shader_ticks) - kbdev->pm.backend.shader_poweroff_pending_time = - kbdev->pm.poweroff_shader_ticks; - else - kbasep_pm_do_poweroff_cores(kbdev); - } else if (kbdev->pm.active_count == 0 && desired_bitmap != 0 && - kbdev->pm.backend.poweroff_timer_needed) { - /* If power policy is keeping cores on despite there being no - * active contexts then disable poweroff timer as it isn't - * required. 
- * Only reset poweroff_timer_needed if we're not in the middle - * of the power off callback */ - kbdev->pm.backend.poweroff_timer_needed = false; - hrtimer_try_to_cancel(&kbdev->pm.backend.gpu_poweroff_timer); - } - - /* Ensure timer does not power off wanted cores and make sure to power - * off unwanted cores */ - if (kbdev->pm.backend.shader_poweroff_pending != 0) { - kbdev->pm.backend.shader_poweroff_pending &= - ~(kbdev->pm.backend.desired_shader_state & - desired_bitmap); - if (kbdev->pm.backend.shader_poweroff_pending == 0) - kbdev->pm.backend.shader_poweroff_pending_time = 0; - } - - /* Shader poweroff is deferred to the end of the function, to eliminate - * issues caused by the core availability policy recursing into this - * function */ - if (do_poweroff) - kbasep_pm_do_poweroff_cores(kbdev); - - /* Don't need 'cores_are_available', because we don't return anything */ - CSTD_UNUSED(cores_are_available); -} - -void kbase_pm_update_cores_state(struct kbase_device *kbdev) -{ - unsigned long flags; - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - kbase_pm_update_cores_state_nolock(kbdev); - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); -} - -int kbase_pm_list_policies(const struct kbase_pm_policy * const **list) -{ - if (!list) - return POLICY_COUNT; - - *list = policy_list; - - return POLICY_COUNT; -} - -KBASE_EXPORT_TEST_API(kbase_pm_list_policies); - -const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - return kbdev->pm.backend.pm_current_policy; -} - -KBASE_EXPORT_TEST_API(kbase_pm_get_policy); - -void kbase_pm_set_policy(struct kbase_device *kbdev, - const struct kbase_pm_policy *new_policy) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - const struct kbase_pm_policy *old_policy; - unsigned long flags; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(new_policy != NULL); - - KBASE_TRACE_ADD(kbdev, PM_SET_POLICY, NULL, NULL, 
0u, new_policy->id); - - /* During a policy change we pretend the GPU is active */ - /* A suspend won't happen here, because we're in a syscall from a - * userspace thread */ - kbase_pm_context_active(kbdev); - - mutex_lock(&js_devdata->runpool_mutex); - mutex_lock(&kbdev->pm.lock); - - /* Remove the policy to prevent IRQ handlers from working on it */ - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - old_policy = kbdev->pm.backend.pm_current_policy; - kbdev->pm.backend.pm_current_policy = NULL; - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u, - old_policy->id); - if (old_policy->term) - old_policy->term(kbdev); - - KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_INIT, NULL, NULL, 0u, - new_policy->id); - if (new_policy->init) - new_policy->init(kbdev); - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - kbdev->pm.backend.pm_current_policy = new_policy; - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - /* If any core power state changes were previously attempted, but - * couldn't be made because the policy was changing (current_policy was - * NULL), then re-try them here. 
*/ - kbase_pm_update_active(kbdev); - kbase_pm_update_cores_state(kbdev); - - mutex_unlock(&kbdev->pm.lock); - mutex_unlock(&js_devdata->runpool_mutex); - - /* Now the policy change is finished, we release our fake context active - * reference */ - kbase_pm_context_idle(kbdev); -} - -KBASE_EXPORT_TEST_API(kbase_pm_set_policy); - -/* Check whether a state change has finished, and trace it as completed */ -static void -kbase_pm_trace_check_and_finish_state_change(struct kbase_device *kbdev) -{ - if ((kbdev->shader_available_bitmap & - kbdev->pm.backend.desired_shader_state) - == kbdev->pm.backend.desired_shader_state && - (kbdev->tiler_available_bitmap & - kbdev->pm.backend.desired_tiler_state) - == kbdev->pm.backend.desired_tiler_state) - kbase_timeline_pm_check_handle_event(kbdev, - KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED); -} - -void kbase_pm_request_cores(struct kbase_device *kbdev, - bool tiler_required, u64 shader_cores) -{ - unsigned long flags; - u64 cores; - - kbase_pm_change_state change_gpu_state = 0u; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - cores = shader_cores; - while (cores) { - int bitnum = fls64(cores) - 1; - u64 bit = 1ULL << bitnum; - - /* It should be almost impossible for this to overflow. It would - * require 2^32 atoms to request a particular core, which would - * require 2^24 contexts to submit. This would require an amount - * of memory that is impossible on a 32-bit system and extremely - * unlikely on a 64-bit system. 
*/ - int cnt = ++kbdev->shader_needed_cnt[bitnum]; - - if (1 == cnt) { - kbdev->shader_needed_bitmap |= bit; - change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER; - } - - cores &= ~bit; - } - - if (tiler_required) { - int cnt = ++kbdev->tiler_needed_cnt; - - if (1 == cnt) - change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER; - - KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt != 0); - } - - if (change_gpu_state) { - KBASE_TRACE_ADD(kbdev, PM_REQUEST_CHANGE_SHADER_NEEDED, NULL, - NULL, 0u, (u32) kbdev->shader_needed_bitmap); - - kbase_timeline_pm_cores_func(kbdev, - KBASE_PM_FUNC_ID_REQUEST_CORES_START, - change_gpu_state); - kbase_pm_update_cores_state_nolock(kbdev); - kbase_timeline_pm_cores_func(kbdev, - KBASE_PM_FUNC_ID_REQUEST_CORES_END, - change_gpu_state); - } - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); -} - -KBASE_EXPORT_TEST_API(kbase_pm_request_cores); - -void kbase_pm_unrequest_cores(struct kbase_device *kbdev, - bool tiler_required, u64 shader_cores) -{ - unsigned long flags; - - kbase_pm_change_state change_gpu_state = 0u; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - while (shader_cores) { - int bitnum = fls64(shader_cores) - 1; - u64 bit = 1ULL << bitnum; - int cnt; - - KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0); - - cnt = --kbdev->shader_needed_cnt[bitnum]; - - if (0 == cnt) { - kbdev->shader_needed_bitmap &= ~bit; - - change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER; - } - - shader_cores &= ~bit; - } - - if (tiler_required) { - int cnt; - - KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt > 0); - - cnt = --kbdev->tiler_needed_cnt; - - if (0 == cnt) - change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER; - } - - if (change_gpu_state) { - KBASE_TRACE_ADD(kbdev, PM_UNREQUEST_CHANGE_SHADER_NEEDED, NULL, - NULL, 0u, (u32) kbdev->shader_needed_bitmap); - - kbase_pm_update_cores_state_nolock(kbdev); - - /* Trace that any state change effectively completes immediately - * - no-one will 
wait on the state change */ - kbase_pm_trace_check_and_finish_state_change(kbdev); - } - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); -} - -KBASE_EXPORT_TEST_API(kbase_pm_unrequest_cores); - -enum kbase_pm_cores_ready -kbase_pm_register_inuse_cores(struct kbase_device *kbdev, - bool tiler_required, u64 shader_cores) -{ - unsigned long flags; - u64 prev_shader_needed; /* Just for tracing */ - u64 prev_shader_inuse; /* Just for tracing */ - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - prev_shader_needed = kbdev->shader_needed_bitmap; - prev_shader_inuse = kbdev->shader_inuse_bitmap; - - /* If desired_shader_state does not contain the requested cores, then - * power management is not attempting to powering those cores (most - * likely due to core availability policy) and a new job affinity must - * be chosen */ - if ((kbdev->pm.backend.desired_shader_state & shader_cores) != - shader_cores) { - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - return KBASE_NEW_AFFINITY; - } - - if ((kbdev->shader_available_bitmap & shader_cores) != shader_cores || - (tiler_required && !kbdev->tiler_available_bitmap)) { - /* Trace ongoing core transition */ - kbase_timeline_pm_l2_transition_start(kbdev); - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - return KBASE_CORES_NOT_READY; - } - - /* If we started to trace a state change, then trace it has being - * finished by now, at the very latest */ - kbase_pm_trace_check_and_finish_state_change(kbdev); - /* Trace core transition done */ - kbase_timeline_pm_l2_transition_done(kbdev); - - while (shader_cores) { - int bitnum = fls64(shader_cores) - 1; - u64 bit = 1ULL << bitnum; - int cnt; - - KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0); - - cnt = --kbdev->shader_needed_cnt[bitnum]; - - if (0 == cnt) - kbdev->shader_needed_bitmap &= ~bit; - - /* shader_inuse_cnt should not overflow because there can only - * be a very limited number of jobs on the h/w at one time 
*/ - - kbdev->shader_inuse_cnt[bitnum]++; - kbdev->shader_inuse_bitmap |= bit; - - shader_cores &= ~bit; - } - - if (tiler_required) { - KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt > 0); - - --kbdev->tiler_needed_cnt; - - kbdev->tiler_inuse_cnt++; - - KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt != 0); - } - - if (prev_shader_needed != kbdev->shader_needed_bitmap) - KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_NEEDED, NULL, - NULL, 0u, (u32) kbdev->shader_needed_bitmap); - - if (prev_shader_inuse != kbdev->shader_inuse_bitmap) - KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_INUSE, NULL, - NULL, 0u, (u32) kbdev->shader_inuse_bitmap); - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - - return KBASE_CORES_READY; -} - -KBASE_EXPORT_TEST_API(kbase_pm_register_inuse_cores); - -void kbase_pm_release_cores(struct kbase_device *kbdev, - bool tiler_required, u64 shader_cores) -{ - unsigned long flags; - kbase_pm_change_state change_gpu_state = 0u; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - while (shader_cores) { - int bitnum = fls64(shader_cores) - 1; - u64 bit = 1ULL << bitnum; - int cnt; - - KBASE_DEBUG_ASSERT(kbdev->shader_inuse_cnt[bitnum] > 0); - - cnt = --kbdev->shader_inuse_cnt[bitnum]; - - if (0 == cnt) { - kbdev->shader_inuse_bitmap &= ~bit; - change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER; - } - - shader_cores &= ~bit; - } - - if (tiler_required) { - int cnt; - - KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt > 0); - - cnt = --kbdev->tiler_inuse_cnt; - - if (0 == cnt) - change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER; - } - - if (change_gpu_state) { - KBASE_TRACE_ADD(kbdev, PM_RELEASE_CHANGE_SHADER_INUSE, NULL, - NULL, 0u, (u32) kbdev->shader_inuse_bitmap); - - kbase_timeline_pm_cores_func(kbdev, - KBASE_PM_FUNC_ID_RELEASE_CORES_START, - change_gpu_state); - kbase_pm_update_cores_state_nolock(kbdev); - kbase_timeline_pm_cores_func(kbdev, - KBASE_PM_FUNC_ID_RELEASE_CORES_END, - 
change_gpu_state); - - /* Trace that any state change completed immediately */ - kbase_pm_trace_check_and_finish_state_change(kbdev); - } - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); -} - -KBASE_EXPORT_TEST_API(kbase_pm_release_cores); - -void kbase_pm_request_cores_sync(struct kbase_device *kbdev, - bool tiler_required, - u64 shader_cores) -{ - kbase_pm_request_cores(kbdev, tiler_required, shader_cores); - - kbase_pm_check_transitions_sync(kbdev); -} - -KBASE_EXPORT_TEST_API(kbase_pm_request_cores_sync); - -void kbase_pm_request_l2_caches(struct kbase_device *kbdev) -{ - unsigned long flags; - u32 prior_l2_users_count; - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - prior_l2_users_count = kbdev->l2_users_count++; - - KBASE_DEBUG_ASSERT(kbdev->l2_users_count != 0); - - /* if the GPU is reset while the l2 is on, l2 will be off but - * prior_l2_users_count will be > 0. l2_available_bitmap will have been - * set to 0 though by kbase_pm_init_hw */ - if (!prior_l2_users_count || !kbdev->l2_available_bitmap) - kbase_pm_check_transitions_nolock(kbdev); - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - wait_event(kbdev->pm.backend.l2_powered_wait, - kbdev->pm.backend.l2_powered == 1); - - /* Trace that any state change completed immediately */ - kbase_pm_trace_check_and_finish_state_change(kbdev); -} - -KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches); - -void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev) -{ - unsigned long flags; - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - kbdev->l2_users_count++; - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); -} - -KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches_l2_is_on); - -void kbase_pm_release_l2_caches(struct kbase_device *kbdev) -{ - unsigned long flags; - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - KBASE_DEBUG_ASSERT(kbdev->l2_users_count > 0); - - --kbdev->l2_users_count; - - if (!kbdev->l2_users_count) 
{ - kbase_pm_check_transitions_nolock(kbdev); - /* Trace that any state change completed immediately */ - kbase_pm_trace_check_and_finish_state_change(kbdev); - } - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); -} - -KBASE_EXPORT_TEST_API(kbase_pm_release_l2_caches); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h deleted file mode 100755 index 611a90e66e659..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h +++ /dev/null @@ -1,227 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/* - * Power policy API definitions - */ - -#ifndef _KBASE_PM_POLICY_H_ -#define _KBASE_PM_POLICY_H_ - -/** - * kbase_pm_policy_init - Initialize power policy framework - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * - * Must be called before calling any other policy function - * - * Return: 0 if the power policy framework was successfully - * initialized, -errno otherwise. 
- */ -int kbase_pm_policy_init(struct kbase_device *kbdev); - -/** - * kbase_pm_policy_term - Terminate power policy framework - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_policy_term(struct kbase_device *kbdev); - -/** - * kbase_pm_update_active - Update the active power state of the GPU - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * - * Calls into the current power policy - */ -void kbase_pm_update_active(struct kbase_device *kbdev); - -/** - * kbase_pm_update_cores - Update the desired core state of the GPU - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * - * Calls into the current power policy - */ -void kbase_pm_update_cores(struct kbase_device *kbdev); - - -enum kbase_pm_cores_ready { - KBASE_CORES_NOT_READY = 0, - KBASE_NEW_AFFINITY = 1, - KBASE_CORES_READY = 2 -}; - - -/** - * kbase_pm_request_cores_sync - Synchronous variant of kbase_pm_request_cores() - * - * @kbdev: The kbase device structure for the device - * @tiler_required: true if the tiler is required, false otherwise - * @shader_cores: A bitmask of shader cores which are necessary for the job - * - * When this function returns, the @shader_cores will be in the READY state. - * - * This is safe variant of kbase_pm_check_transitions_sync(): it handles the - * work of ensuring the requested cores will remain powered until a matching - * call to kbase_pm_unrequest_cores()/kbase_pm_release_cores() (as appropriate) - * is made. 
- */ -void kbase_pm_request_cores_sync(struct kbase_device *kbdev, - bool tiler_required, u64 shader_cores); - -/** - * kbase_pm_request_cores - Mark one or more cores as being required - * for jobs to be submitted - * - * @kbdev: The kbase device structure for the device - * @tiler_required: true if the tiler is required, false otherwise - * @shader_cores: A bitmask of shader cores which are necessary for the job - * - * This function is called by the job scheduler to mark one or more cores as - * being required to submit jobs that are ready to run. - * - * The cores requested are reference counted and a subsequent call to - * kbase_pm_register_inuse_cores() or kbase_pm_unrequest_cores() should be - * made to dereference the cores as being 'needed'. - * - * The active power policy will meet or exceed the requirements of the - * requested cores in the system. Any core transitions needed will be begun - * immediately, but they might not complete/the cores might not be available - * until a Power Management IRQ. - * - * Return: 0 if the cores were successfully requested, or -errno otherwise. - */ -void kbase_pm_request_cores(struct kbase_device *kbdev, - bool tiler_required, u64 shader_cores); - -/** - * kbase_pm_unrequest_cores - Unmark one or more cores as being required for - * jobs to be submitted. - * - * @kbdev: The kbase device structure for the device - * @tiler_required: true if the tiler is required, false otherwise - * @shader_cores: A bitmask of shader cores (as given to - * kbase_pm_request_cores() ) - * - * This function undoes the effect of kbase_pm_request_cores(). It should be - * used when a job is not going to be submitted to the hardware (e.g. the job is - * cancelled before it is enqueued). - * - * The active power policy will meet or exceed the requirements of the - * requested cores in the system. Any core transitions needed will be begun - * immediately, but they might not complete until a Power Management IRQ. 
- * - * The policy may use this as an indication that it can power down cores. - */ -void kbase_pm_unrequest_cores(struct kbase_device *kbdev, - bool tiler_required, u64 shader_cores); - -/** - * kbase_pm_register_inuse_cores - Register a set of cores as in use by a job - * - * @kbdev: The kbase device structure for the device - * @tiler_required: true if the tiler is required, false otherwise - * @shader_cores: A bitmask of shader cores (as given to - * kbase_pm_request_cores() ) - * - * This function should be called after kbase_pm_request_cores() when the job - * is about to be submitted to the hardware. It will check that the necessary - * cores are available and if so update the 'needed' and 'inuse' bitmasks to - * reflect that the job is now committed to being run. - * - * If the necessary cores are not currently available then the function will - * return %KBASE_CORES_NOT_READY and have no effect. - * - * Return: %KBASE_CORES_NOT_READY if the cores are not immediately ready, - * - * %KBASE_NEW_AFFINITY if the affinity requested is not allowed, - * - * %KBASE_CORES_READY if the cores requested are already available - */ -enum kbase_pm_cores_ready kbase_pm_register_inuse_cores( - struct kbase_device *kbdev, - bool tiler_required, - u64 shader_cores); - -/** - * kbase_pm_release_cores - Release cores after a job has run - * - * @kbdev: The kbase device structure for the device - * @tiler_required: true if the tiler is required, false otherwise - * @shader_cores: A bitmask of shader cores (as given to - * kbase_pm_register_inuse_cores() ) - * - * This function should be called when a job has finished running on the - * hardware. A call to kbase_pm_register_inuse_cores() must have previously - * occurred. The reference counts of the specified cores will be decremented - * which may cause the bitmask of 'inuse' cores to be reduced. The power policy - * may then turn off any cores which are no longer 'inuse'. 
- */ -void kbase_pm_release_cores(struct kbase_device *kbdev, - bool tiler_required, u64 shader_cores); - -/** - * kbase_pm_request_l2_caches - Request l2 caches - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * - * Request the use of l2 caches for all core groups, power up, wait and prevent - * the power manager from powering down the l2 caches. - * - * This tells the power management that the caches should be powered up, and - * they should remain powered, irrespective of the usage of shader cores. This - * does not return until the l2 caches are powered up. - * - * The caller must call kbase_pm_release_l2_caches() when they are finished - * to allow normal power management of the l2 caches to resume. - * - * This should only be used when power management is active. - */ -void kbase_pm_request_l2_caches(struct kbase_device *kbdev); - -/** - * kbase_pm_request_l2_caches_l2_is_on - Request l2 caches but don't power on - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * - * Increment the count of l2 users but do not attempt to power on the l2 - * - * It is the callers responsibility to ensure that the l2 is already powered up - * and to eventually call kbase_pm_release_l2_caches() - */ -void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev); - -/** - * kbase_pm_request_l2_caches - Release l2 caches - * - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * - * Release the use of l2 caches for all core groups and allow the power manager - * to power them down when necessary. - * - * This tells the power management that the caches can be powered down if - * necessary, with respect to the usage of shader cores. - * - * The caller must have called kbase_pm_request_l2_caches() prior to a call - * to this. - * - * This should only be used when power management is active. 
- */ -void kbase_pm_release_l2_caches(struct kbase_device *kbdev); - -#endif /* _KBASE_PM_POLICY_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c deleted file mode 100755 index 4bcde85f3ee13..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#include -#include -#include -#include - -void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter, - u64 *system_time, struct timespec *ts) -{ - u32 hi1, hi2; - - kbase_pm_request_gpu_cycle_counter(kbdev); - - /* Read hi, lo, hi to ensure that overflow from lo to hi is handled - * correctly */ - do { - hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), - NULL); - *cycle_counter = kbase_reg_read(kbdev, - GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL); - hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), - NULL); - *cycle_counter |= (((u64) hi1) << 32); - } while (hi1 != hi2); - - /* Read hi, lo, hi to ensure that overflow from lo to hi is handled - * correctly */ - do { - hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), - NULL); - *system_time = kbase_reg_read(kbdev, - GPU_CONTROL_REG(TIMESTAMP_LO), NULL); - hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), - NULL); - *system_time |= (((u64) hi1) << 32); - } while (hi1 != hi2); - - /* 
Record the CPU's idea of current time */ - getrawmonotonic(ts); - - kbase_pm_release_gpu_cycle_counter(kbdev); -} - -/** - * kbase_wait_write_flush - Wait for GPU write flush - * @kctx: Context pointer - * - * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush - * its write buffer. - * - * Only in use for BASE_HW_ISSUE_6367 - * - * Note : If GPU resets occur then the counters are reset to zero, the delay may - * not be as expected. - */ -#ifndef CONFIG_MALI_NO_MALI -void kbase_wait_write_flush(struct kbase_context *kctx) -{ - u32 base_count = 0; - - /* A suspend won't happen here, because we're in a syscall from a - * userspace thread */ - - kbase_pm_context_active(kctx->kbdev); - kbase_pm_request_gpu_cycle_counter(kctx->kbdev); - - while (true) { - u32 new_count; - - new_count = kbase_reg_read(kctx->kbdev, - GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL); - /* First time around, just store the count. */ - if (base_count == 0) { - base_count = new_count; - continue; - } - - /* No need to handle wrapping, unsigned maths works for this. */ - if ((new_count - base_count) > 1000) - break; - } - - kbase_pm_release_gpu_cycle_counter(kctx->kbdev); - kbase_pm_context_idle(kctx->kbdev); -} -#endif /* CONFIG_MALI_NO_MALI */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h deleted file mode 100755 index 35088abc8fe5e..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifndef _KBASE_BACKEND_TIME_H_ -#define _KBASE_BACKEND_TIME_H_ - -/** - * kbase_backend_get_gpu_time() - Get current GPU time - * @kbdev: Device pointer - * @cycle_counter: Pointer to u64 to store cycle counter in - * @system_time: Pointer to u64 to store system time in - * @ts: Pointer to struct timespec to store current monotonic - * time in - */ -void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter, - u64 *system_time, struct timespec *ts); - -/** - * kbase_wait_write_flush() - Wait for GPU write flush - * @kctx: Context pointer - * - * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush - * its write buffer. - * - * If GPU resets occur then the counters are reset to zero, the delay may not be - * as expected. - * - * This function is only in use for BASE_HW_ISSUE_6367 - */ -#ifdef CONFIG_MALI_NO_MALI -static inline void kbase_wait_write_flush(struct kbase_context *kctx) -{ -} -#else -void kbase_wait_write_flush(struct kbase_context *kctx); -#endif - -#endif /* _KBASE_BACKEND_TIME_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/docs/Doxyfile b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/docs/Doxyfile deleted file mode 100755 index 35ff2f1ce4a06..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/docs/Doxyfile +++ /dev/null @@ -1,126 +0,0 @@ -# -# (C) COPYRIGHT 2011-2013, 2015 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. 
-# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -############################################################################## - -# This file contains per-module Doxygen configuration. Please do not add -# extra settings to this file without consulting all stakeholders, as they -# may cause override project-wide settings. -# -# Additionally, when defining aliases, macros, sections etc, use the module -# name as a prefix e.g. gles_my_alias. - -############################################################################## - -@INCLUDE = ../../bldsys/Doxyfile_common - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. - -INPUT += ../../kernel/drivers/gpu/arm/midgard/ - -############################################################################## -# Everything below here is optional, and in most cases not required -############################################################################## - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user-defined paragraph with heading "Side Effects:". -# You can put \n's in the value part of an alias to insert newlines. - -ALIASES += - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if sectionname ... \endif. 
- -ENABLED_SECTIONS += - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx -# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 - -FILE_PATTERNS += - -# The EXCLUDE tag can be used to specify files and/or directories that should -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -EXCLUDE += ../../kernel/drivers/gpu/arm/midgard/platform ../../kernel/drivers/gpu/arm/midgard/platform_dummy ../../kernel/drivers/gpu/arm/midgard/scripts ../../kernel/drivers/gpu/arm/midgard/tests ../../kernel/drivers/gpu/arm/midgard/Makefile ../../kernel/drivers/gpu/arm/midgard/Makefile.kbase ../../kernel/drivers/gpu/arm/midgard/Kbuild ../../kernel/drivers/gpu/arm/midgard/Kconfig ../../kernel/drivers/gpu/arm/midgard/sconscript ../../kernel/drivers/gpu/arm/midgard/docs ../../kernel/drivers/gpu/arm/midgard/pm_test_script.sh ../../kernel/drivers/gpu/arm/midgard/mali_uk.h ../../kernel/drivers/gpu/arm/midgard/Makefile - - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. Note that the wildcards are matched -# against the file with absolute path, so to exclude all test directories -# for example use the pattern */test/* - -EXCLUDE_PATTERNS += - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. 
Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test - -EXCLUDE_SYMBOLS += - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). - -EXAMPLE_PATH += - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). - -IMAGE_PATH += - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. - -INCLUDE_PATH += - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. To prevent a macro definition from being -# undefined via #undef or recursively expanded use the := operator -# instead of the = operator. - -PREDEFINED += - -# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition. - -EXPAND_AS_DEFINED += - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). 
- -DOTFILE_DIRS += ../../kernel/drivers/gpu/arm/midgard/docs - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/docs/policy_operation_diagram.dot b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/docs/policy_operation_diagram.dot deleted file mode 100755 index 7ae05c2f8ded4..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/docs/policy_operation_diagram.dot +++ /dev/null @@ -1,112 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -digraph policy_objects_diagram { - rankdir=LR; - size="12,8"; - compound=true; - - node [ shape = box ]; - - subgraph cluster_policy_queues { - low_queue [ shape=record label = "LowP | {ctx_lo | ... | ctx_i | ... | ctx_hi}" ]; - queues_middle_sep [ label="" shape=plaintext width=0 height=0 ]; - - rt_queue [ shape=record label = "RT | {ctx_lo | ... | ctx_j | ... 
| ctx_hi}" ]; - - label = "Policy's Queue(s)"; - } - - call_enqueue [ shape=plaintext label="enqueue_ctx()" ]; - - { - rank=same; - ordering=out; - call_dequeue [ shape=plaintext label="dequeue_head_ctx()\n+ runpool_add_ctx()" ]; - call_ctxfinish [ shape=plaintext label="runpool_remove_ctx()" ]; - - call_ctxdone [ shape=plaintext label="don't requeue;\n/* ctx has no more jobs */" ]; - } - - subgraph cluster_runpool { - - as0 [ width=2 height = 0.25 label="AS0: Job_1, ..., Job_n" ]; - as1 [ width=2 height = 0.25 label="AS1: Job_1, ..., Job_m" ]; - as2 [ width=2 height = 0.25 label="AS2: Job_1, ..., Job_p" ]; - as3 [ width=2 height = 0.25 label="AS3: Job_1, ..., Job_q" ]; - - label = "Policy's Run Pool"; - } - - { - rank=same; - call_jdequeue [ shape=plaintext label="dequeue_job()" ]; - sstop_dotfixup [ shape=plaintext label="" width=0 height=0 ]; - } - - { - rank=same; - ordering=out; - sstop [ shape=ellipse label="SS-Timer expires" ] - jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ]; - - irq [ label="IRQ" shape=ellipse ]; - - job_finish [ shape=plaintext label="don't requeue;\n/* job done */" ]; - } - - hstop [ shape=ellipse label="HS-Timer expires" ] - - /* - * Edges - */ - - call_enqueue -> queues_middle_sep [ lhead=cluster_policy_queues ]; - - low_queue:qr -> call_dequeue:w; - rt_queue:qr -> call_dequeue:w; - - call_dequeue -> as1 [lhead=cluster_runpool]; - - as1->call_jdequeue [ltail=cluster_runpool]; - call_jdequeue->jobslots:0; - call_jdequeue->sstop_dotfixup [ arrowhead=none]; - sstop_dotfixup->sstop [label="Spawn SS-Timer"]; - sstop->jobslots [label="SoftStop"]; - sstop->hstop [label="Spawn HS-Timer"]; - hstop->jobslots:ne [label="HardStop"]; - - - as3->call_ctxfinish:ne [ ltail=cluster_runpool ]; - call_ctxfinish:sw->rt_queue:qm [ lhead=cluster_policy_queues label="enqueue_ctx()\n/* ctx still has jobs */" ]; - - call_ctxfinish->call_ctxdone [constraint=false]; - - call_ctxdone->call_enqueue [weight=0.1 labeldistance=20.0 
labelangle=0.0 taillabel="Job submitted to the ctx" style=dotted constraint=false]; - - - { - jobslots->irq [constraint=false]; - - irq->job_finish [constraint=false]; - } - - irq->as2 [lhead=cluster_runpool label="requeue_job()\n/* timeslice expired */" ]; - -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/docs/policy_overview.dot b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/docs/policy_overview.dot deleted file mode 100755 index 159b993b7d61f..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/docs/policy_overview.dot +++ /dev/null @@ -1,63 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -digraph policy_objects_diagram { - rankdir=LR - size="6,6" - compound=true; - - node [ shape = box ]; - - call_enqueue [ shape=plaintext label="enqueue ctx" ]; - - - policy_queue [ label="Policy's Queue" ]; - - { - rank=same; - runpool [ label="Policy's Run Pool" ]; - - ctx_finish [ label="ctx finished" ]; - } - - { - rank=same; - jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ]; - - job_finish [ label="Job finished" ]; - } - - - - /* - * Edges - */ - - call_enqueue -> policy_queue; - - policy_queue->runpool [label="dequeue ctx" weight=0.1]; - runpool->policy_queue [label="requeue ctx" weight=0.1]; - - runpool->ctx_finish [ style=dotted ]; - - runpool->jobslots [label="dequeue job" weight=0.1]; - jobslots->runpool [label="requeue job" weight=0.1]; - - jobslots->job_finish [ style=dotted ]; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h deleted file mode 100755 index 5a1523034c15e..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h +++ /dev/null @@ -1,165 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/* AUTOMATICALLY GENERATED FILE. 
If you want to amend the issues/features, - * please update base/tools/hwconfig_generator/hwc_{issues,features}.py - * For more information see base/tools/hwconfig_generator/README - */ - -#ifndef _BASE_HWCONFIG_FEATURES_H_ -#define _BASE_HWCONFIG_FEATURES_H_ - -enum base_hw_feature { - BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, - BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, - BASE_HW_FEATURE_33BIT_VA, - BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, - BASE_HW_FEATURE_MRT, - BASE_HW_FEATURE_BRNDOUT_CC, - BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, - BASE_HW_FEATURE_LD_ST_TILEBUFFER, - BASE_HW_FEATURE_MSAA_16X, - BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, - BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, - BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK, - BASE_HW_FEATURE_T7XX_PAIRING_RULES, - BASE_HW_FEATURE_LD_ST_LEA_TEX, - BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, - BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4, - BASE_HW_FEATURE_IMAGES_IN_FRAGMENT_SHADERS, - BASE_HW_FEATURE_TEST4_DATUM_MODE, - BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, - BASE_HW_FEATURE_BRNDOUT_KILL, - BASE_HW_FEATURE_WARPING, - BASE_HW_FEATURE_FLUSH_REDUCTION, - BASE_HW_FEATURE_V4, - BASE_HW_FEATURE_PROTECTED_MODE, - BASE_HW_FEATURE_END -}; - -static const enum base_hw_feature base_hw_features_generic[] = { - BASE_HW_FEATURE_END -}; - -static const enum base_hw_feature base_hw_features_t60x[] = { - BASE_HW_FEATURE_LD_ST_LEA_TEX, - BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, - BASE_HW_FEATURE_V4, - BASE_HW_FEATURE_END -}; - -static const enum base_hw_feature base_hw_features_t62x[] = { - BASE_HW_FEATURE_LD_ST_LEA_TEX, - BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, - BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, - BASE_HW_FEATURE_V4, - BASE_HW_FEATURE_END -}; - -static const enum base_hw_feature base_hw_features_t72x[] = { - BASE_HW_FEATURE_33BIT_VA, - BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, - BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, - BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, - BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK, - BASE_HW_FEATURE_T7XX_PAIRING_RULES, - 
BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4, - BASE_HW_FEATURE_WARPING, - BASE_HW_FEATURE_V4, - BASE_HW_FEATURE_END -}; - -static const enum base_hw_feature base_hw_features_t76x[] = { - BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, - BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, - BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, - BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, - BASE_HW_FEATURE_BRNDOUT_CC, - BASE_HW_FEATURE_LD_ST_LEA_TEX, - BASE_HW_FEATURE_LD_ST_TILEBUFFER, - BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, - BASE_HW_FEATURE_MRT, - BASE_HW_FEATURE_MSAA_16X, - BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, - BASE_HW_FEATURE_T7XX_PAIRING_RULES, - BASE_HW_FEATURE_TEST4_DATUM_MODE, - BASE_HW_FEATURE_END -}; - -static const enum base_hw_feature base_hw_features_tFxx[] = { - BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, - BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, - BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, - BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, - BASE_HW_FEATURE_BRNDOUT_CC, - BASE_HW_FEATURE_BRNDOUT_KILL, - BASE_HW_FEATURE_LD_ST_LEA_TEX, - BASE_HW_FEATURE_LD_ST_TILEBUFFER, - BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, - BASE_HW_FEATURE_MRT, - BASE_HW_FEATURE_MSAA_16X, - BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, - BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, - BASE_HW_FEATURE_T7XX_PAIRING_RULES, - BASE_HW_FEATURE_TEST4_DATUM_MODE, - BASE_HW_FEATURE_END -}; - -static const enum base_hw_feature base_hw_features_t83x[] = { - BASE_HW_FEATURE_33BIT_VA, - BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, - BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, - BASE_HW_FEATURE_WARPING, - BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, - BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, - BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, - BASE_HW_FEATURE_BRNDOUT_CC, - BASE_HW_FEATURE_BRNDOUT_KILL, - BASE_HW_FEATURE_LD_ST_LEA_TEX, - BASE_HW_FEATURE_LD_ST_TILEBUFFER, - BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, - BASE_HW_FEATURE_MRT, - BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, - BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, - BASE_HW_FEATURE_T7XX_PAIRING_RULES, - 
BASE_HW_FEATURE_TEST4_DATUM_MODE, - BASE_HW_FEATURE_END -}; - -static const enum base_hw_feature base_hw_features_t82x[] = { - BASE_HW_FEATURE_33BIT_VA, - BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, - BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, - BASE_HW_FEATURE_WARPING, - BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, - BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, - BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, - BASE_HW_FEATURE_BRNDOUT_CC, - BASE_HW_FEATURE_BRNDOUT_KILL, - BASE_HW_FEATURE_LD_ST_LEA_TEX, - BASE_HW_FEATURE_LD_ST_TILEBUFFER, - BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, - BASE_HW_FEATURE_MRT, - BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, - BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, - BASE_HW_FEATURE_T7XX_PAIRING_RULES, - BASE_HW_FEATURE_TEST4_DATUM_MODE, - BASE_HW_FEATURE_END -}; - - -#endif /* _BASE_HWCONFIG_FEATURES_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h deleted file mode 100755 index 9fae0f6c806ac..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h +++ /dev/null @@ -1,760 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/* AUTOMATICALLY GENERATED FILE. 
If you want to amend the issues/features, - * please update base/tools/hwconfig_generator/hwc_{issues,features}.py - * For more information see base/tools/hwconfig_generator/README - */ - -#ifndef _BASE_HWCONFIG_ISSUES_H_ -#define _BASE_HWCONFIG_ISSUES_H_ - -enum base_hw_issue { - BASE_HW_ISSUE_5736, - BASE_HW_ISSUE_6367, - BASE_HW_ISSUE_6398, - BASE_HW_ISSUE_6402, - BASE_HW_ISSUE_6787, - BASE_HW_ISSUE_7027, - BASE_HW_ISSUE_7144, - BASE_HW_ISSUE_7304, - BASE_HW_ISSUE_8073, - BASE_HW_ISSUE_8186, - BASE_HW_ISSUE_8215, - BASE_HW_ISSUE_8245, - BASE_HW_ISSUE_8250, - BASE_HW_ISSUE_8260, - BASE_HW_ISSUE_8280, - BASE_HW_ISSUE_8316, - BASE_HW_ISSUE_8381, - BASE_HW_ISSUE_8394, - BASE_HW_ISSUE_8401, - BASE_HW_ISSUE_8408, - BASE_HW_ISSUE_8443, - BASE_HW_ISSUE_8456, - BASE_HW_ISSUE_8564, - BASE_HW_ISSUE_8634, - BASE_HW_ISSUE_8778, - BASE_HW_ISSUE_8791, - BASE_HW_ISSUE_8833, - BASE_HW_ISSUE_8879, - BASE_HW_ISSUE_8896, - BASE_HW_ISSUE_8975, - BASE_HW_ISSUE_8986, - BASE_HW_ISSUE_8987, - BASE_HW_ISSUE_9010, - BASE_HW_ISSUE_9275, - BASE_HW_ISSUE_9418, - BASE_HW_ISSUE_9423, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_9510, - BASE_HW_ISSUE_9566, - BASE_HW_ISSUE_9630, - BASE_HW_ISSUE_10127, - BASE_HW_ISSUE_10327, - BASE_HW_ISSUE_10410, - BASE_HW_ISSUE_10471, - BASE_HW_ISSUE_10472, - BASE_HW_ISSUE_10487, - BASE_HW_ISSUE_10607, - BASE_HW_ISSUE_10632, - BASE_HW_ISSUE_10676, - BASE_HW_ISSUE_10682, - BASE_HW_ISSUE_10684, - BASE_HW_ISSUE_10797, - BASE_HW_ISSUE_10817, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10931, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_10959, - BASE_HW_ISSUE_10969, - BASE_HW_ISSUE_10984, - BASE_HW_ISSUE_10995, - BASE_HW_ISSUE_11012, - BASE_HW_ISSUE_11020, - BASE_HW_ISSUE_11024, - BASE_HW_ISSUE_11035, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_26, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3542, - BASE_HW_ISSUE_T76X_3556, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, 
- BASE_HW_ISSUE_T76X_3960, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_generic[] = { - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = { - BASE_HW_ISSUE_6367, - BASE_HW_ISSUE_6398, - BASE_HW_ISSUE_6402, - BASE_HW_ISSUE_6787, - BASE_HW_ISSUE_7027, - BASE_HW_ISSUE_7144, - BASE_HW_ISSUE_7304, - BASE_HW_ISSUE_8073, - BASE_HW_ISSUE_8186, - BASE_HW_ISSUE_8215, - BASE_HW_ISSUE_8245, - BASE_HW_ISSUE_8250, - BASE_HW_ISSUE_8260, - BASE_HW_ISSUE_8280, - BASE_HW_ISSUE_8316, - BASE_HW_ISSUE_8381, - BASE_HW_ISSUE_8394, - BASE_HW_ISSUE_8401, - BASE_HW_ISSUE_8408, - BASE_HW_ISSUE_8443, - BASE_HW_ISSUE_8456, - BASE_HW_ISSUE_8564, - BASE_HW_ISSUE_8634, - BASE_HW_ISSUE_8778, - BASE_HW_ISSUE_8791, - BASE_HW_ISSUE_8833, - BASE_HW_ISSUE_8896, - BASE_HW_ISSUE_8975, - BASE_HW_ISSUE_8986, - BASE_HW_ISSUE_8987, - BASE_HW_ISSUE_9010, - BASE_HW_ISSUE_9275, - BASE_HW_ISSUE_9418, - BASE_HW_ISSUE_9423, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_9510, - BASE_HW_ISSUE_9566, - BASE_HW_ISSUE_9630, - BASE_HW_ISSUE_10410, - BASE_HW_ISSUE_10471, - BASE_HW_ISSUE_10472, - BASE_HW_ISSUE_10487, - BASE_HW_ISSUE_10607, - BASE_HW_ISSUE_10632, - BASE_HW_ISSUE_10676, - BASE_HW_ISSUE_10682, - BASE_HW_ISSUE_10684, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10931, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_10969, - BASE_HW_ISSUE_10984, - BASE_HW_ISSUE_10995, - BASE_HW_ISSUE_11012, - BASE_HW_ISSUE_11020, - BASE_HW_ISSUE_11035, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t60x_r0p0_eac[] = { - BASE_HW_ISSUE_6367, - BASE_HW_ISSUE_6402, - BASE_HW_ISSUE_6787, - BASE_HW_ISSUE_7027, - BASE_HW_ISSUE_7304, - BASE_HW_ISSUE_8408, - BASE_HW_ISSUE_8564, - BASE_HW_ISSUE_8778, - BASE_HW_ISSUE_8975, - BASE_HW_ISSUE_9010, - BASE_HW_ISSUE_9275, - BASE_HW_ISSUE_9418, - BASE_HW_ISSUE_9423, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_9510, - BASE_HW_ISSUE_10410, - BASE_HW_ISSUE_10471, - 
BASE_HW_ISSUE_10472, - BASE_HW_ISSUE_10487, - BASE_HW_ISSUE_10607, - BASE_HW_ISSUE_10632, - BASE_HW_ISSUE_10676, - BASE_HW_ISSUE_10682, - BASE_HW_ISSUE_10684, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10931, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_10969, - BASE_HW_ISSUE_11012, - BASE_HW_ISSUE_11020, - BASE_HW_ISSUE_11035, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t60x_r0p1[] = { - BASE_HW_ISSUE_6367, - BASE_HW_ISSUE_6402, - BASE_HW_ISSUE_6787, - BASE_HW_ISSUE_7027, - BASE_HW_ISSUE_7304, - BASE_HW_ISSUE_8408, - BASE_HW_ISSUE_8564, - BASE_HW_ISSUE_8778, - BASE_HW_ISSUE_8975, - BASE_HW_ISSUE_9010, - BASE_HW_ISSUE_9275, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_9510, - BASE_HW_ISSUE_10410, - BASE_HW_ISSUE_10471, - BASE_HW_ISSUE_10472, - BASE_HW_ISSUE_10487, - BASE_HW_ISSUE_10607, - BASE_HW_ISSUE_10632, - BASE_HW_ISSUE_10676, - BASE_HW_ISSUE_10682, - BASE_HW_ISSUE_10684, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10931, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_11012, - BASE_HW_ISSUE_11020, - BASE_HW_ISSUE_11035, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t62x_r0p1[] = { - BASE_HW_ISSUE_6402, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10127, - BASE_HW_ISSUE_10327, - BASE_HW_ISSUE_10410, - BASE_HW_ISSUE_10471, - BASE_HW_ISSUE_10472, - BASE_HW_ISSUE_10487, - BASE_HW_ISSUE_10607, - BASE_HW_ISSUE_10632, - BASE_HW_ISSUE_10676, - BASE_HW_ISSUE_10682, - BASE_HW_ISSUE_10684, - BASE_HW_ISSUE_10817, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10931, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_10959, - BASE_HW_ISSUE_11012, - BASE_HW_ISSUE_11020, - BASE_HW_ISSUE_11024, - BASE_HW_ISSUE_11035, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t62x_r1p0[] = { - BASE_HW_ISSUE_6402, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10471, - BASE_HW_ISSUE_10472, - 
BASE_HW_ISSUE_10684, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10931, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_10959, - BASE_HW_ISSUE_11012, - BASE_HW_ISSUE_11020, - BASE_HW_ISSUE_11024, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t62x_r1p1[] = { - BASE_HW_ISSUE_6402, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10471, - BASE_HW_ISSUE_10472, - BASE_HW_ISSUE_10684, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10931, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_10959, - BASE_HW_ISSUE_11012, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t76x_r0p0[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_11020, - BASE_HW_ISSUE_11024, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_26, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3542, - BASE_HW_ISSUE_T76X_3556, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3960, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t76x_r0p1[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_11020, - BASE_HW_ISSUE_11024, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_26, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3542, - BASE_HW_ISSUE_T76X_3556, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3960, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t76x_r0p1_50rel0[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - 
BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_26, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3542, - BASE_HW_ISSUE_T76X_3556, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3960, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t76x_r0p2[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_11020, - BASE_HW_ISSUE_11024, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_26, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3542, - BASE_HW_ISSUE_T76X_3556, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3960, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t76x_r0p3[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_26, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3542, - BASE_HW_ISSUE_T76X_3556, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3960, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t76x_r1p0[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3960, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t72x_r0p0[] = { - BASE_HW_ISSUE_6402, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10471, - BASE_HW_ISSUE_10684, - BASE_HW_ISSUE_10797, - 
BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t72x_r1p0[] = { - BASE_HW_ISSUE_6402, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10471, - BASE_HW_ISSUE_10684, - BASE_HW_ISSUE_10797, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t72x_r1p1[] = { - BASE_HW_ISSUE_6402, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10471, - BASE_HW_ISSUE_10684, - BASE_HW_ISSUE_10797, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_model_t72x[] = { - BASE_HW_ISSUE_5736, - BASE_HW_ISSUE_6402, - BASE_HW_ISSUE_9275, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10471, - BASE_HW_ISSUE_10797, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_model_t76x[] = { - BASE_HW_ISSUE_5736, - BASE_HW_ISSUE_9275, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_11020, - BASE_HW_ISSUE_11024, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_model_t60x[] = { - BASE_HW_ISSUE_5736, - BASE_HW_ISSUE_6402, - BASE_HW_ISSUE_8778, - BASE_HW_ISSUE_9275, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10472, - BASE_HW_ISSUE_10931, - BASE_HW_ISSUE_11012, - BASE_HW_ISSUE_11020, - BASE_HW_ISSUE_11024, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_model_t62x[] = { - BASE_HW_ISSUE_5736, - 
BASE_HW_ISSUE_6402, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10472, - BASE_HW_ISSUE_10931, - BASE_HW_ISSUE_11012, - BASE_HW_ISSUE_11020, - BASE_HW_ISSUE_11024, - BASE_HW_ISSUE_11042, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_tFRx_r0p1[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3960, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_tFRx_r0p2[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_tFRx_r1p0[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_tFRx_r2p0[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_model_tFRx[] = { - BASE_HW_ISSUE_5736, - BASE_HW_ISSUE_9275, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_END -}; - -static const enum 
base_hw_issue base_hw_issues_t86x_r0p2[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t86x_r1p0[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t86x_r2p0[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3966, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_model_t86x[] = { - BASE_HW_ISSUE_5736, - BASE_HW_ISSUE_9275, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t83x_r0p1[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3960, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t83x_r1p0[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3960, - 
BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_model_t83x[] = { - BASE_HW_ISSUE_5736, - BASE_HW_ISSUE_9275, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t82x_r0p0[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3960, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t82x_r0p1[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3960, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_t82x_r1p0[] = { - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_10821, - BASE_HW_ISSUE_10883, - BASE_HW_ISSUE_10946, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_T76X_3953, - BASE_HW_ISSUE_T76X_3960, - BASE_HW_ISSUE_END -}; - -static const enum base_hw_issue base_hw_issues_model_t82x[] = { - BASE_HW_ISSUE_5736, - BASE_HW_ISSUE_9275, - BASE_HW_ISSUE_9435, - BASE_HW_ISSUE_T76X_1909, - BASE_HW_ISSUE_T76X_1963, - BASE_HW_ISSUE_T76X_3086, - BASE_HW_ISSUE_T76X_3700, - BASE_HW_ISSUE_T76X_3793, - BASE_HW_ISSUE_END -}; - - - -#endif /* _BASE_HWCONFIG_ISSUES_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_kernel.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_kernel.h deleted file mode 100755 index 56621309c5ad5..0000000000000 --- 
a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_kernel.h +++ /dev/null @@ -1,1599 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file - * Base structures shared with the kernel. - */ - -#ifndef _BASE_KERNEL_H_ -#define _BASE_KERNEL_H_ - -#ifndef __user -#define __user -#endif - -/* Support UK6 IOCTLS */ -#define BASE_LEGACY_UK6_SUPPORT 1 - -/* Support UK7 IOCTLS */ -/* NB: To support UK6 we also need to support UK7 */ -#define BASE_LEGACY_UK7_SUPPORT 1 - -/* Support UK8 IOCTLS */ -#define BASE_LEGACY_UK8_SUPPORT 1 - -/* Support UK9 IOCTLS */ -#define BASE_LEGACY_UK9_SUPPORT 1 - -typedef u64 base_mem_handle; - -#include "mali_base_mem_priv.h" -#include "mali_kbase_profiling_gator_api.h" - -/* - * Dependency stuff, keep it private for now. May want to expose it if - * we decide to make the number of semaphores a configurable - * option. 
- */ -#define BASE_JD_ATOM_COUNT 256 - -#define BASEP_JD_SEM_PER_WORD_LOG2 5 -#define BASEP_JD_SEM_PER_WORD (1 << BASEP_JD_SEM_PER_WORD_LOG2) -#define BASEP_JD_SEM_WORD_NR(x) ((x) >> BASEP_JD_SEM_PER_WORD_LOG2) -#define BASEP_JD_SEM_MASK_IN_WORD(x) (1 << ((x) & (BASEP_JD_SEM_PER_WORD - 1))) -#define BASEP_JD_SEM_ARRAY_SIZE BASEP_JD_SEM_WORD_NR(BASE_JD_ATOM_COUNT) - -#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3 - -#define BASE_MAX_COHERENT_GROUPS 16 - -#if defined CDBG_ASSERT -#define LOCAL_ASSERT CDBG_ASSERT -#elif defined KBASE_DEBUG_ASSERT -#define LOCAL_ASSERT KBASE_DEBUG_ASSERT -#else -#error assert macro not defined! -#endif - -#if defined PAGE_MASK -#define LOCAL_PAGE_LSB ~PAGE_MASK -#else -#include - -#if defined OSU_CONFIG_CPU_PAGE_SIZE_LOG2 -#define LOCAL_PAGE_LSB ((1ul << OSU_CONFIG_CPU_PAGE_SIZE_LOG2) - 1) -#else -#error Failed to find page size -#endif -#endif - -/** 32/64-bit neutral way to represent pointers */ -typedef union kbase_pointer { - void __user *value; /**< client should store their pointers here */ - u32 compat_value; /**< 64-bit kernels should fetch value here when handling 32-bit clients */ - u64 sizer; /**< Force 64-bit storage for all clients regardless */ -} kbase_pointer; - -/** - * @addtogroup base_user_api User-side Base APIs - * @{ - */ - -/** - * @addtogroup base_user_api_memory User-side Base Memory APIs - * @{ - */ - -/** - * @brief Memory allocation, access/hint flags - * - * A combination of MEM_PROT/MEM_HINT flags must be passed to each allocator - * in order to determine the best cache policy. Some combinations are - * of course invalid (eg @c MEM_PROT_CPU_WR | @c MEM_HINT_CPU_RD), - * which defines a @a write-only region on the CPU side, which is - * heavily read by the CPU... - * Other flags are only meaningful to a particular allocator. - * More flags can be added to this list, as long as they don't clash - * (see ::BASE_MEM_FLAGS_NR_BITS for the number of the first free bit). 
- */ -typedef u32 base_mem_alloc_flags; - -/** - * @brief Memory allocation, access/hint flags - * - * See ::base_mem_alloc_flags. - * - */ -enum { -/* IN */ - BASE_MEM_PROT_CPU_RD = (1U << 0), /**< Read access CPU side */ - BASE_MEM_PROT_CPU_WR = (1U << 1), /**< Write access CPU side */ - BASE_MEM_PROT_GPU_RD = (1U << 2), /**< Read access GPU side */ - BASE_MEM_PROT_GPU_WR = (1U << 3), /**< Write access GPU side */ - BASE_MEM_PROT_GPU_EX = (1U << 4), /**< Execute allowed on the GPU - side */ - - /* BASE_MEM_HINT flags have been removed, but their values are reserved - * for backwards compatibility with older user-space drivers. The values - * can be re-used once support for r5p0 user-space drivers is removed, - * presumably in r7p0. - * - * RESERVED: (1U << 5) - * RESERVED: (1U << 6) - * RESERVED: (1U << 7) - * RESERVED: (1U << 8) - */ - - BASE_MEM_GROW_ON_GPF = (1U << 9), /**< Grow backing store on GPU - Page Fault */ - - BASE_MEM_COHERENT_SYSTEM = (1U << 10), /**< Page coherence Outer - shareable, if available */ - BASE_MEM_COHERENT_LOCAL = (1U << 11), /**< Page coherence Inner - shareable */ - BASE_MEM_CACHED_CPU = (1U << 12), /**< Should be cached on the - CPU */ - -/* IN/OUT */ - BASE_MEM_SAME_VA = (1U << 13), /**< Must have same VA on both the GPU - and the CPU */ -/* OUT */ - BASE_MEM_NEED_MMAP = (1U << 14), /**< Must call mmap to aquire a GPU - address for the alloc */ -/* IN */ - BASE_MEM_COHERENT_SYSTEM_REQUIRED = (1U << 15), /**< Page coherence - Outer shareable, required. */ - BASE_MEM_SECURE = (1U << 16) /**< Secure memory */ - -}; - -/** - * @brief Number of bits used as flags for base memory management - * - * Must be kept in sync with the ::base_mem_alloc_flags flags - */ -#define BASE_MEM_FLAGS_NR_BITS 17 - -/** - * A mask for all output bits, excluding IN/OUT bits. - */ -#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP - -/** - * A mask for all input bits, including IN/OUT bits. 
- */ -#define BASE_MEM_FLAGS_INPUT_MASK \ - (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK) - - -/** - * @brief Memory types supported by @a base_mem_import - * - * Each type defines what the supported handle type is. - * - * If any new type is added here ARM must be contacted - * to allocate a numeric value for it. - * Do not just add a new type without synchronizing with ARM - * as future releases from ARM might include other new types - * which could clash with your custom types. - */ -typedef enum base_mem_import_type { - BASE_MEM_IMPORT_TYPE_INVALID = 0, - /** UMP import. Handle type is ump_secure_id. */ - BASE_MEM_IMPORT_TYPE_UMP = 1, - /** UMM import. Handle type is a file descriptor (int) */ - BASE_MEM_IMPORT_TYPE_UMM = 2 -} base_mem_import_type; - -/** - * @brief Invalid memory handle type. - * Return value from functions returning @a base_mem_handle on error. - */ -#define BASE_MEM_INVALID_HANDLE (0ull << 12) -#define BASE_MEM_MMU_DUMP_HANDLE (1ull << 12) -#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12) -#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12) -#define BASE_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12) -/* reserved handles ..-64< for future special handles */ -#define BASE_MEM_COOKIE_BASE (64ul << 12) -#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \ - BASE_MEM_COOKIE_BASE) - -/* Mask to detect 4GB boundary alignment */ -#define BASE_MEM_MASK_4GB 0xfffff000UL - -/* Bit mask of cookies used for for memory allocation setup */ -#define KBASE_COOKIE_MASK ~1UL /* bit 0 is reserved */ - - -/** - * @brief Result codes of changing the size of the backing store allocated to a tmem region - */ -typedef enum base_backing_threshold_status { - BASE_BACKING_THRESHOLD_OK = 0, /**< Resize successful */ - BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE = -1, /**< Not a growable tmem object */ - BASE_BACKING_THRESHOLD_ERROR_OOM = -2, /**< Increase failed due to an out-of-memory condition */ - BASE_BACKING_THRESHOLD_ERROR_MAPPED = -3, 
/**< Resize attempted on buffer while it was mapped, which is not permitted */ - BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS = -4 /**< Invalid arguments (not tmem, illegal size request, etc.) */ -} base_backing_threshold_status; - -/** - * @addtogroup base_user_api_memory_defered User-side Base Defered Memory Coherency APIs - * @{ - */ - -/** - * @brief a basic memory operation (sync-set). - * - * The content of this structure is private, and should only be used - * by the accessors. - */ -typedef struct base_syncset { - struct basep_syncset basep_sset; -} base_syncset; - -/** @} end group base_user_api_memory_defered */ - -/** - * Handle to represent imported memory object. - * Simple opague handle to imported memory, can't be used - * with anything but base_external_resource_init to bind to an atom. - */ -typedef struct base_import_handle { - struct { - u64 handle; - } basep; -} base_import_handle; - -/** @} end group base_user_api_memory */ - -/** - * @addtogroup base_user_api_job_dispatch User-side Base Job Dispatcher APIs - * @{ - */ - -typedef int platform_fence_type; -#define INVALID_PLATFORM_FENCE ((platform_fence_type)-1) - -/** - * Base stream handle. - * - * References an underlying base stream object. - */ -typedef struct base_stream { - struct { - int fd; - } basep; -} base_stream; - -/** - * Base fence handle. - * - * References an underlying base fence object. - */ -typedef struct base_fence { - struct { - int fd; - int stream_fd; - } basep; -} base_fence; - -/** - * @brief Per-job data - * - * This structure is used to store per-job data, and is completly unused - * by the Base driver. It can be used to store things such as callback - * function pointer, data to handle job completion. It is guaranteed to be - * untouched by the Base driver. - */ -typedef struct base_jd_udata { - u64 blob[2]; /**< per-job data array */ -} base_jd_udata; - -/** - * @brief Memory aliasing info - * - * Describes a memory handle to be aliased. 
- * A subset of the handle can be chosen for aliasing, given an offset and a - * length. - * A special handle BASE_MEM_WRITE_ALLOC_PAGES_HANDLE is used to represent a - * region where a special page is mapped with a write-alloc cache setup, - * typically used when the write result of the GPU isn't needed, but the GPU - * must write anyway. - * - * Offset and length are specified in pages. - * Offset must be within the size of the handle. - * Offset+length must not overrun the size of the handle. - * - * @handle Handle to alias, can be BASE_MEM_WRITE_ALLOC_PAGES_HANDLE - * @offset Offset within the handle to start aliasing from, in pages. - * Not used with BASE_MEM_WRITE_ALLOC_PAGES_HANDLE. - * @length Length to alias, in pages. For BASE_MEM_WRITE_ALLOC_PAGES_HANDLE - * specifies the number of times the special page is needed. - */ -struct base_mem_aliasing_info { - base_mem_handle handle; - u64 offset; - u64 length; -}; - -/** - * @brief Job dependency type. - * - * A flags field will be inserted into the atom structure to specify whether a dependency is a data or - * ordering dependency (by putting it before/after 'core_req' in the structure it should be possible to add without - * changing the structure size). - * When the flag is set for a particular dependency to signal that it is an ordering only dependency then - * errors will not be propagated. - */ -typedef u8 base_jd_dep_type; - - -#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */ -#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */ -#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */ - -/** - * @brief Job chain hardware requirements. - * - * A job chain must specify what GPU features it needs to allow the - * driver to schedule the job correctly. By not specifying the - * correct settings can/will cause an early job termination. Multiple - * values can be ORed together to specify multiple requirements. 
- * Special case is ::BASE_JD_REQ_DEP, which is used to express complex - * dependencies, and that doesn't execute anything on the hardware. - */ -typedef u16 base_jd_core_req; - -/* Requirements that come from the HW */ -#define BASE_JD_REQ_DEP 0 /**< No requirement, dependency only */ -#define BASE_JD_REQ_FS (1U << 0) /**< Requires fragment shaders */ -/** - * Requires compute shaders - * This covers any of the following Midgard Job types: - * - Vertex Shader Job - * - Geometry Shader Job - * - An actual Compute Shader Job - * - * Compare this with @ref BASE_JD_REQ_ONLY_COMPUTE, which specifies that the - * job is specifically just the "Compute Shader" job type, and not the "Vertex - * Shader" nor the "Geometry Shader" job type. - */ -#define BASE_JD_REQ_CS (1U << 1) -#define BASE_JD_REQ_T (1U << 2) /**< Requires tiling */ -#define BASE_JD_REQ_CF (1U << 3) /**< Requires cache flushes */ -#define BASE_JD_REQ_V (1U << 4) /**< Requires value writeback */ - -/* SW-only requirements - the HW does not expose these as part of the job slot capabilities */ - -/* Requires fragment job with AFBC encoding */ -#define BASE_JD_REQ_FS_AFBC (1U << 13) - -/** - * SW Only requirement: the job chain requires a coherent core group. We don't - * mind which coherent core group is used. - */ -#define BASE_JD_REQ_COHERENT_GROUP (1U << 6) - -/** - * SW Only requirement: The performance counters should be enabled only when - * they are needed, to reduce power consumption. - */ - -#define BASE_JD_REQ_PERMON (1U << 7) - -/** - * SW Only requirement: External resources are referenced by this atom. - * When external resources are referenced no syncsets can be bundled with the atom - * but should instead be part of a NULL jobs inserted into the dependency tree. - * The first pre_dep object must be configured for the external resouces to use, - * the second pre_dep object can be used to create other dependencies. 
- */ -#define BASE_JD_REQ_EXTERNAL_RESOURCES (1U << 8) - -/** - * SW Only requirement: Software defined job. Jobs with this bit set will not be submitted - * to the hardware but will cause some action to happen within the driver - */ -#define BASE_JD_REQ_SOFT_JOB (1U << 9) - -#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME (BASE_JD_REQ_SOFT_JOB | 0x1) -#define BASE_JD_REQ_SOFT_FENCE_TRIGGER (BASE_JD_REQ_SOFT_JOB | 0x2) -#define BASE_JD_REQ_SOFT_FENCE_WAIT (BASE_JD_REQ_SOFT_JOB | 0x3) - -/** - * SW Only requirement : Replay job. - * - * If the preceeding job fails, the replay job will cause the jobs specified in - * the list of base_jd_replay_payload pointed to by the jc pointer to be - * replayed. - * - * A replay job will only cause jobs to be replayed up to BASEP_JD_REPLAY_LIMIT - * times. If a job fails more than BASEP_JD_REPLAY_LIMIT times then the replay - * job is failed, as well as any following dependencies. - * - * The replayed jobs will require a number of atom IDs. If there are not enough - * free atom IDs then the replay job will fail. - * - * If the preceeding job does not fail, then the replay job is returned as - * completed. - * - * The replayed jobs will never be returned to userspace. The preceeding failed - * job will be returned to userspace as failed; the status of this job should - * be ignored. Completion should be determined by the status of the replay soft - * job. - * - * In order for the jobs to be replayed, the job headers will have to be - * modified. The Status field will be reset to NOT_STARTED. If the Job Type - * field indicates a Vertex Shader Job then it will be changed to Null Job. - * - * The replayed jobs have the following assumptions : - * - * - No external resources. Any required external resources will be held by the - * replay atom. - * - Pre-dependencies are created based on job order. - * - Atom numbers are automatically assigned. - * - device_nr is set to 0. 
This is not relevant as - * BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set. - * - Priority is inherited from the replay job. - */ -#define BASE_JD_REQ_SOFT_REPLAY (BASE_JD_REQ_SOFT_JOB | 0x4) - -/** - * HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders) - * - * This indicates that the Job Chain contains Midgard Jobs of the 'Compute Shaders' type. - * - * In contrast to @ref BASE_JD_REQ_CS, this does \b not indicate that the Job - * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs. - * - * @note This is a more flexible variant of the @ref BASE_CONTEXT_HINT_ONLY_COMPUTE flag, - * allowing specific jobs to be marked as 'Only Compute' instead of the entire context - */ -#define BASE_JD_REQ_ONLY_COMPUTE (1U << 10) - -/** - * HW Requirement: Use the base_jd_atom::device_nr field to specify a - * particular core group - * - * If both BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag takes priority - * - * This is only guaranteed to work for BASE_JD_REQ_ONLY_COMPUTE atoms. - * - * If the core availability policy is keeping the required core group turned off, then - * the job will fail with a BASE_JD_EVENT_PM_EVENT error code. - */ -#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP (1U << 11) - -/** - * SW Flag: If this bit is set then the successful completion of this atom - * will not cause an event to be sent to userspace - */ -#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE (1U << 12) - -/** - * SW Flag: If this bit is set then completion of this atom will not cause an - * event to be sent to userspace, whether successful or not. - */ -#define BASEP_JD_REQ_EVENT_NEVER (1U << 14) - -/** -* These requirement bits are currently unused in base_jd_core_req (currently a u16) -*/ - -#define BASEP_JD_REQ_RESERVED_BIT5 (1U << 5) -#define BASEP_JD_REQ_RESERVED_BIT15 (1U << 15) - -/** -* Mask of all the currently unused requirement bits in base_jd_core_req. 
-*/ - -#define BASEP_JD_REQ_RESERVED (BASEP_JD_REQ_RESERVED_BIT5 | \ - BASEP_JD_REQ_RESERVED_BIT15) - -/** - * Mask of all bits in base_jd_core_req that control the type of the atom. - * - * This allows dependency only atoms to have flags set - */ -#define BASEP_JD_REQ_ATOM_TYPE (~(BASEP_JD_REQ_RESERVED | BASE_JD_REQ_EVENT_ONLY_ON_FAILURE |\ - BASE_JD_REQ_EXTERNAL_RESOURCES | BASEP_JD_REQ_EVENT_NEVER)) - -/** - * @brief States to model state machine processed by kbasep_js_job_check_ref_cores(), which - * handles retaining cores for power management and affinity management. - * - * The state @ref KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY prevents an attack - * where lots of atoms could be submitted before powerup, and each has an - * affinity chosen that causes other atoms to have an affinity - * violation. Whilst the affinity was not causing violations at the time it - * was chosen, it could cause violations thereafter. For example, 1000 jobs - * could have had their affinity chosen during the powerup time, so any of - * those 1000 jobs could cause an affinity violation later on. - * - * The attack would otherwise occur because other atoms/contexts have to wait for: - * -# the currently running atoms (which are causing the violation) to - * finish - * -# and, the atoms that had their affinity chosen during powerup to - * finish. These are run preferrentially because they don't cause a - * violation, but instead continue to cause the violation in others. - * -# or, the attacker is scheduled out (which might not happen for just 2 - * contexts) - * - * By re-choosing the affinity (which is designed to avoid violations at the - * time it's chosen), we break condition (2) of the wait, which minimizes the - * problem to just waiting for current jobs to finish (which can be bounded if - * the Job Scheduling Policy has a timer). - */ -enum kbase_atom_coreref_state { - /** Starting state: No affinity chosen, and cores must be requested. 
kbase_jd_atom::affinity==0 */ - KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED, - /** Cores requested, but waiting for them to be powered. Requested cores given by kbase_jd_atom::affinity */ - KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES, - /** Cores given by kbase_jd_atom::affinity are powered, but affinity might be out-of-date, so must recheck */ - KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY, - /** Cores given by kbase_jd_atom::affinity are powered, and affinity is up-to-date, but must check for violations */ - KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS, - /** Cores are powered, kbase_jd_atom::affinity up-to-date, no affinity violations: atom can be submitted to HW */ - KBASE_ATOM_COREREF_STATE_READY -}; - -/* - * Base Atom priority - * - * Only certain priority levels are actually implemented, as specified by the - * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority - * level that is not one of those defined below. - * - * Priority levels only affect scheduling between atoms of the same type within - * a base context, and only after the atoms have had dependencies resolved. - * Fragment atoms does not affect non-frament atoms with lower priorities, and - * the other way around. For example, a low priority atom that has had its - * dependencies resolved might run before a higher priority atom that has not - * had its dependencies resolved. - * - * The scheduling between base contexts/processes and between atoms from - * different base contexts/processes is unaffected by atom priority. 
- * - * The atoms are scheduled as follows with respect to their priorities: - * - Let atoms 'X' and 'Y' be for the same job slot who have dependencies - * resolved, and atom 'X' has a higher priority than atom 'Y' - * - If atom 'Y' is currently running on the HW, then it is interrupted to - * allow atom 'X' to run soon after - * - If instead neither atom 'Y' nor atom 'X' are running, then when choosing - * the next atom to run, atom 'X' will always be chosen instead of atom 'Y' - * - Any two atoms that have the same priority could run in any order with - * respect to each other. That is, there is no ordering constraint between - * atoms of the same priority. - */ -typedef u8 base_jd_prio; - -/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */ -#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0) -/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and - * BASE_JD_PRIO_LOW */ -#define BASE_JD_PRIO_HIGH ((base_jd_prio)1) -/* Low atom priority. */ -#define BASE_JD_PRIO_LOW ((base_jd_prio)2) - -/* Count of the number of priority levels. 
This itself is not a valid - * base_jd_prio setting */ -#define BASE_JD_NR_PRIO_LEVELS 3 - -enum kbase_jd_atom_state { - /** Atom is not used */ - KBASE_JD_ATOM_STATE_UNUSED, - /** Atom is queued in JD */ - KBASE_JD_ATOM_STATE_QUEUED, - /** Atom has been given to JS (is runnable/running) */ - KBASE_JD_ATOM_STATE_IN_JS, - /** Atom has been completed, but not yet handed back to job dispatcher - * for dependency resolution */ - KBASE_JD_ATOM_STATE_HW_COMPLETED, - /** Atom has been completed, but not yet handed back to userspace */ - KBASE_JD_ATOM_STATE_COMPLETED -}; - -typedef u8 base_atom_id; /**< Type big enough to store an atom number in */ - -struct base_dependency { - base_atom_id atom_id; /**< An atom number */ - base_jd_dep_type dependency_type; /**< Dependency type */ -}; - -typedef struct base_jd_atom_v2 { - u64 jc; /**< job-chain GPU address */ - struct base_jd_udata udata; /**< user data */ - kbase_pointer extres_list; /**< list of external resources */ - u16 nr_extres; /**< nr of external resources */ - base_jd_core_req core_req; /**< core requirements */ - struct base_dependency pre_dep[2]; /**< pre-dependencies, one need to use SETTER function to assign this field, - this is done in order to reduce possibility of improper assigment of a dependency field */ - base_atom_id atom_number; /**< unique number to identify the atom */ - base_jd_prio prio; /**< Atom priority. 
Refer to @ref base_jd_prio for more details */ - u8 device_nr; /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */ - u8 padding[5]; -} base_jd_atom_v2; - -#ifdef BASE_LEGACY_UK6_SUPPORT -struct base_jd_atom_v2_uk6 { - u64 jc; /**< job-chain GPU address */ - struct base_jd_udata udata; /**< user data */ - kbase_pointer extres_list; /**< list of external resources */ - u16 nr_extres; /**< nr of external resources */ - base_jd_core_req core_req; /**< core requirements */ - base_atom_id pre_dep[2]; /**< pre-dependencies */ - base_atom_id atom_number; /**< unique number to identify the atom */ - base_jd_prio prio; /**< priority - smaller is higher priority */ - u8 device_nr; /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */ - u8 padding[7]; -}; -#endif - -typedef enum base_external_resource_access { - BASE_EXT_RES_ACCESS_SHARED, - BASE_EXT_RES_ACCESS_EXCLUSIVE -} base_external_resource_access; - -typedef struct base_external_resource { - u64 ext_resource; -} base_external_resource; - -/** - * @brief Setter for a dependency structure - * - * @param[in] dep The kbase jd atom dependency to be initialized. - * @param id The atom_id to be assigned. - * @param dep_type The dep_type to be assigned. - * - */ -static inline void base_jd_atom_dep_set(struct base_dependency *dep, - base_atom_id id, base_jd_dep_type dep_type) -{ - LOCAL_ASSERT(dep != NULL); - - /* - * make sure we don't set not allowed combinations - * of atom_id/dependency_type. - */ - LOCAL_ASSERT((id == 0 && dep_type == BASE_JD_DEP_TYPE_INVALID) || - (id > 0 && dep_type != BASE_JD_DEP_TYPE_INVALID)); - - dep->atom_id = id; - dep->dependency_type = dep_type; -} - -/** - * @brief Make a copy of a dependency structure - * - * @param[in,out] dep The kbase jd atom dependency to be written. - * @param[in] from The dependency to make a copy from. 
- * - */ -static inline void base_jd_atom_dep_copy(struct base_dependency *dep, - const struct base_dependency *from) -{ - LOCAL_ASSERT(dep != NULL); - - base_jd_atom_dep_set(dep, from->atom_id, from->dependency_type); -} - -/** - * @brief Soft-atom fence trigger setup. - * - * Sets up an atom to be a SW-only atom signaling a fence - * when it reaches the run state. - * - * Using the existing base dependency system the fence can - * be set to trigger when a GPU job has finished. - * - * The base fence object must not be terminated until the atom - * has been submitted to @a base_jd_submit_bag and @a base_jd_submit_bag has returned. - * - * @a fence must be a valid fence set up with @a base_fence_init. - * Calling this function with a uninitialized fence results in undefined behavior. - * - * @param[out] atom A pre-allocated atom to configure as a fence trigger SW atom - * @param[in] fence The base fence object to trigger. - */ -static inline void base_jd_fence_trigger_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence) -{ - LOCAL_ASSERT(atom); - LOCAL_ASSERT(fence); - LOCAL_ASSERT(fence->basep.fd == INVALID_PLATFORM_FENCE); - LOCAL_ASSERT(fence->basep.stream_fd >= 0); - atom->jc = (uintptr_t) fence; - atom->core_req = BASE_JD_REQ_SOFT_FENCE_TRIGGER; -} - -/** - * @brief Soft-atom fence wait setup. - * - * Sets up an atom to be a SW-only atom waiting on a fence. - * When the fence becomes triggered the atom becomes runnable - * and completes immediately. - * - * Using the existing base dependency system the fence can - * be set to block a GPU job until it has been triggered. - * - * The base fence object must not be terminated until the atom - * has been submitted to @a base_jd_submit_bag and @a base_jd_submit_bag has returned. - * - * @a fence must be a valid fence set up with @a base_fence_init or @a base_fence_import. - * Calling this function with a uninitialized fence results in undefined behavior. 
- * - * @param[out] atom A pre-allocated atom to configure as a fence wait SW atom - * @param[in] fence The base fence object to wait on - */ -static inline void base_jd_fence_wait_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence) -{ - LOCAL_ASSERT(atom); - LOCAL_ASSERT(fence); - LOCAL_ASSERT(fence->basep.fd >= 0); - atom->jc = (uintptr_t) fence; - atom->core_req = BASE_JD_REQ_SOFT_FENCE_WAIT; -} - -/** - * @brief External resource info initialization. - * - * Sets up a external resource object to reference - * a memory allocation and the type of access requested. - * - * @param[in] res The resource object to initialize - * @param handle The handle to the imported memory object - * @param access The type of access requested - */ -static inline void base_external_resource_init(struct base_external_resource *res, struct base_import_handle handle, base_external_resource_access access) -{ - u64 address; - - address = handle.basep.handle; - - LOCAL_ASSERT(res != NULL); - LOCAL_ASSERT(0 == (address & LOCAL_PAGE_LSB)); - LOCAL_ASSERT(access == BASE_EXT_RES_ACCESS_SHARED || access == BASE_EXT_RES_ACCESS_EXCLUSIVE); - - res->ext_resource = address | (access & LOCAL_PAGE_LSB); -} - -/** - * @brief Job chain event code bits - * Defines the bits used to create ::base_jd_event_code - */ -enum { - BASE_JD_SW_EVENT_KERNEL = (1u << 15), /**< Kernel side event */ - BASE_JD_SW_EVENT = (1u << 14), /**< SW defined event */ - BASE_JD_SW_EVENT_SUCCESS = (1u << 13), /**< Event idicates success (SW events only) */ - BASE_JD_SW_EVENT_JOB = (0u << 11), /**< Job related event */ - BASE_JD_SW_EVENT_BAG = (1u << 11), /**< Bag related event */ - BASE_JD_SW_EVENT_INFO = (2u << 11), /**< Misc/info event */ - BASE_JD_SW_EVENT_RESERVED = (3u << 11), /**< Reserved event type */ - BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11) /**< Mask to extract the type from an event code */ -}; - -/** - * @brief Job chain event codes - * - * HW and low-level SW events are represented by event codes. 
- * The status of jobs which succeeded are also represented by - * an event code (see ::BASE_JD_EVENT_DONE). - * Events are usually reported as part of a ::base_jd_event. - * - * The event codes are encoded in the following way: - * @li 10:0 - subtype - * @li 12:11 - type - * @li 13 - SW success (only valid if the SW bit is set) - * @li 14 - SW event (HW event if not set) - * @li 15 - Kernel event (should never be seen in userspace) - * - * Events are split up into ranges as follows: - * - BASE_JD_EVENT_RANGE_\_START - * - BASE_JD_EVENT_RANGE_\_END - * - * \a code is in \'s range when: - * - BASE_JD_EVENT_RANGE_\_START <= code < BASE_JD_EVENT_RANGE_\_END - * - * Ranges can be asserted for adjacency by testing that the END of the previous - * is equal to the START of the next. This is useful for optimizing some tests - * for range. - * - * A limitation is that the last member of this enum must explicitly be handled - * (with an assert-unreachable statement) in switch statements that use - * variables of this type. Otherwise, the compiler warns that we have not - * handled that enum value. 
- */ -typedef enum base_jd_event_code { - /* HW defined exceptions */ - - /** Start of HW Non-fault status codes - * - * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault, - * because the job was hard-stopped - */ - BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0, - - /* non-fatal exceptions */ - BASE_JD_EVENT_NOT_STARTED = 0x00, /**< Can't be seen by userspace, treated as 'previous job done' */ - BASE_JD_EVENT_DONE = 0x01, - BASE_JD_EVENT_STOPPED = 0x03, /**< Can't be seen by userspace, becomes TERMINATED, DONE or JOB_CANCELLED */ - BASE_JD_EVENT_TERMINATED = 0x04, /**< This is actually a fault status code - the job was hard stopped */ - BASE_JD_EVENT_ACTIVE = 0x08, /**< Can't be seen by userspace, jobs only returned on complete/fail/cancel */ - - /** End of HW Non-fault status codes - * - * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault, - * because the job was hard-stopped - */ - BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40, - - /** Start of HW fault and SW Error status codes */ - BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40, - - /* job exceptions */ - BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40, - BASE_JD_EVENT_JOB_POWER_FAULT = 0x41, - BASE_JD_EVENT_JOB_READ_FAULT = 0x42, - BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43, - BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44, - BASE_JD_EVENT_JOB_BUS_FAULT = 0x48, - BASE_JD_EVENT_INSTR_INVALID_PC = 0x50, - BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51, - BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52, - BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53, - BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54, - BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55, - BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56, - BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58, - BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59, - BASE_JD_EVENT_STATE_FAULT = 0x5A, - BASE_JD_EVENT_OUT_OF_MEMORY = 0x60, - BASE_JD_EVENT_UNKNOWN = 0x7F, - - /* GPU exceptions */ - BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80, - BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88, - - /* MMU exceptions */ - 
BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1, - BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2, - BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3, - BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4, - BASE_JD_EVENT_PERMISSION_FAULT = 0xC8, - BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1, - BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2, - BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3, - BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4, - BASE_JD_EVENT_ACCESS_FLAG = 0xD8, - - /* SW defined exceptions */ - BASE_JD_EVENT_MEM_GROWTH_FAILED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000, - BASE_JD_EVENT_TIMED_OUT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001, - BASE_JD_EVENT_JOB_CANCELLED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002, - BASE_JD_EVENT_JOB_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003, - BASE_JD_EVENT_PM_EVENT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004, - BASE_JD_EVENT_FORCE_REPLAY = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x005, - - BASE_JD_EVENT_BAG_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003, - - /** End of HW fault and SW Error status codes */ - BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_RESERVED | 0x3FF, - - /** Start of SW Success status codes */ - BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | 0x000, - - BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000, - BASE_JD_EVENT_BAG_DONE = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_BAG | 0x000, - BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000, - - /** End of SW Success status codes */ - BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF, - - /** Start of Kernel-only status codes. 
Such codes are never returned to user-space */ - BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | 0x000, - BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000, - - /** End of Kernel-only status codes. */ - BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF -} base_jd_event_code; - -/** - * @brief Event reporting structure - * - * This structure is used by the kernel driver to report information - * about GPU events. The can either be HW-specific events or low-level - * SW events, such as job-chain completion. - * - * The event code contains an event type field which can be extracted - * by ANDing with ::BASE_JD_SW_EVENT_TYPE_MASK. - * - * Based on the event type base_jd_event::data holds: - * @li ::BASE_JD_SW_EVENT_JOB : the offset in the ring-buffer for the completed - * job-chain - * @li ::BASE_JD_SW_EVENT_BAG : The address of the ::base_jd_bag that has - * been completed (ie all contained job-chains have been completed). - * @li ::BASE_JD_SW_EVENT_INFO : base_jd_event::data not used - */ -typedef struct base_jd_event_v2 { - base_jd_event_code event_code; /**< event code */ - base_atom_id atom_number; /**< the atom number that has completed */ - struct base_jd_udata udata; /**< user data */ -} base_jd_event_v2; - -/** - * Padding required to ensure that the @ref struct base_dump_cpu_gpu_counters structure fills - * a full cache line. - */ - -#define BASE_CPU_GPU_CACHE_LINE_PADDING (36) - - -/** - * @brief Structure for BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS jobs. - * - * This structure is stored into the memory pointed to by the @c jc field of @ref base_jd_atom. - * - * This structure must be padded to ensure that it will occupy whole cache lines. 
This is to avoid - * cases where access to pages containing the structure is shared between cached and un-cached - * memory regions, which would cause memory corruption. Here we set the structure size to be 64 bytes - * which is the cache line for ARM A15 processors. - */ - -typedef struct base_dump_cpu_gpu_counters { - u64 system_time; - u64 cycle_counter; - u64 sec; - u32 usec; - u8 padding[BASE_CPU_GPU_CACHE_LINE_PADDING]; -} base_dump_cpu_gpu_counters; - - - -/** @} end group base_user_api_job_dispatch */ - -#define GPU_MAX_JOB_SLOTS 16 - -/** - * @page page_base_user_api_gpuprops User-side Base GPU Property Query API - * - * The User-side Base GPU Property Query API encapsulates two - * sub-modules: - * - * - @ref base_user_api_gpuprops_dyn "Dynamic GPU Properties" - * - @ref base_plat_config_gpuprops "Base Platform Config GPU Properties" - * - * There is a related third module outside of Base, which is owned by the MIDG - * module: - * - @ref gpu_props_static "Midgard Compile-time GPU Properties" - * - * Base only deals with properties that vary between different Midgard - * implementations - the Dynamic GPU properties and the Platform Config - * properties. - * - * For properties that are constant for the Midgard Architecture, refer to the - * MIDG module. However, we will discuss their relevance here just to - * provide background information. - * - * @section sec_base_user_api_gpuprops_about About the GPU Properties in Base and MIDG modules - * - * The compile-time properties (Platform Config, Midgard Compile-time - * properties) are exposed as pre-processor macros. - * - * Complementing the compile-time properties are the Dynamic GPU - * Properties, which act as a conduit for the Midgard Configuration - * Discovery. - * - * In general, the dynamic properties are present to verify that the platform - * has been configured correctly with the right set of Platform Config - * Compile-time Properties. 
- * - * As a consistant guide across the entire DDK, the choice for dynamic or - * compile-time should consider the following, in order: - * -# Can the code be written so that it doesn't need to know the - * implementation limits at all? - * -# If you need the limits, get the information from the Dynamic Property - * lookup. This should be done once as you fetch the context, and then cached - * as part of the context data structure, so it's cheap to access. - * -# If there's a clear and arguable inefficiency in using Dynamic Properties, - * then use a Compile-Time Property (Platform Config, or Midgard Compile-time - * property). Examples of where this might be sensible follow: - * - Part of a critical inner-loop - * - Frequent re-use throughout the driver, causing significant extra load - * instructions or control flow that would be worthwhile optimizing out. - * - * We cannot provide an exhaustive set of examples, neither can we provide a - * rule for every possible situation. Use common sense, and think about: what - * the rest of the driver will be doing; how the compiler might represent the - * value if it is a compile-time constant; whether an OEM shipping multiple - * devices would benefit much more from a single DDK binary, instead of - * insignificant micro-optimizations. - * - * @section sec_base_user_api_gpuprops_dyn Dynamic GPU Properties - * - * Dynamic GPU properties are presented in two sets: - * -# the commonly used properties in @ref base_gpu_props, which have been - * unpacked from GPU register bitfields. - * -# The full set of raw, unprocessed properties in @ref gpu_raw_gpu_props - * (also a member of @ref base_gpu_props). All of these are presented in - * the packed form, as presented by the GPU registers themselves. - * - * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to - * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device - * behaving differently?". 
In this case, all information about the - * configuration is potentially useful, but it does not need to be processed - * by the driver. Instead, the raw registers can be processed by the Mali - * Tools software on the host PC. - * - * The properties returned extend the Midgard Configuration Discovery - * registers. For example, GPU clock speed is not specified in the Midgard - * Architecture, but is necessary for OpenCL's clGetDeviceInfo() function. - * - * The GPU properties are obtained by a call to - * _mali_base_get_gpu_props(). This simply returns a pointer to a const - * base_gpu_props structure. It is constant for the life of a base - * context. Multiple calls to _mali_base_get_gpu_props() to a base context - * return the same pointer to a constant structure. This avoids cache pollution - * of the common data. - * - * This pointer must not be freed, because it does not point to the start of a - * region allocated by the memory allocator; instead, just close the @ref - * base_context. - * - * - * @section sec_base_user_api_gpuprops_config Platform Config Compile-time Properties - * - * The Platform Config File sets up gpu properties that are specific to a - * certain platform. Properties that are 'Implementation Defined' in the - * Midgard Architecture spec are placed here. - * - * @note Reference configurations are provided for Midgard Implementations, such as - * the Mali-T600 family. The customer need not repeat this information, and can select one of - * these reference configurations. For example, VA_BITS, PA_BITS and the - * maximum number of samples per pixel might vary between Midgard Implementations, but - * \b not for platforms using the Mali-T604. This information is placed in - * the reference configuration files. - * - * The System Integrator creates the following structure: - * - platform_XYZ - * - platform_XYZ/plat - * - platform_XYZ/plat/plat_config.h - * - * They then edit plat_config.h, using the example plat_config.h files as a - * guide. 
- * - * At the very least, the customer must set @ref CONFIG_GPU_CORE_TYPE, and will - * receive a helpful \#error message if they do not do this correctly. This - * selects the Reference Configuration for the Midgard Implementation. The rationale - * behind this decision (against asking the customer to write \#include - * in their plat_config.h) is as follows: - * - This mechanism 'looks' like a regular config file (such as Linux's - * .config) - * - It is difficult to get wrong in a way that will produce strange build - * errors: - * - They need not know where the mali_t600.h, other_midg_gpu.h etc. files are stored - and - * so they won't accidentally pick another file with 'mali_t600' in its name - * - When the build doesn't work, the System Integrator may think the DDK is - * doesn't work, and attempt to fix it themselves: - * - For the @ref CONFIG_GPU_CORE_TYPE mechanism, the only way to get past the - * error is to set @ref CONFIG_GPU_CORE_TYPE, and this is what the \#error tells - * you. - * - For a \#include mechanism, checks must still be made elsewhere, which the - * System Integrator may try working around by setting \#defines (such as - * VA_BITS) themselves in their plat_config.h. In the worst case, they may - * set the prevention-mechanism \#define of - * "A_CORRECT_MIDGARD_CORE_WAS_CHOSEN". - * - In this case, they would believe they are on the right track, because - * the build progresses with their fix, but with errors elsewhere. - * - * However, there is nothing to prevent the customer using \#include to organize - * their own configurations files hierarchically. 
- * - * The mechanism for the header file processing is as follows: - * - * @dot - digraph plat_config_mechanism { - rankdir=BT - size="6,6" - - "mali_base.h"; - "gpu/mali_gpu.h"; - - node [ shape=box ]; - { - rank = same; ordering = out; - - "gpu/mali_gpu_props.h"; - "base/midg_gpus/mali_t600.h"; - "base/midg_gpus/other_midg_gpu.h"; - } - { rank = same; "plat/plat_config.h"; } - { - rank = same; - "gpu/mali_gpu.h" [ shape=box ]; - gpu_chooser [ label="" style="invisible" width=0 height=0 fixedsize=true ]; - select_gpu [ label="Mali-T600 | Other\n(select_gpu.h)" shape=polygon,sides=4,distortion=0.25 width=3.3 height=0.99 fixedsize=true ] ; - } - node [ shape=box ]; - { rank = same; "plat/plat_config.h"; } - { rank = same; "mali_base.h"; } - - "mali_base.h" -> "gpu/mali_gpu.h" -> "gpu/mali_gpu_props.h"; - "mali_base.h" -> "plat/plat_config.h" ; - "mali_base.h" -> select_gpu ; - - "plat/plat_config.h" -> gpu_chooser [style="dotted,bold" dir=none weight=4] ; - gpu_chooser -> select_gpu [style="dotted,bold"] ; - - select_gpu -> "base/midg_gpus/mali_t600.h" ; - select_gpu -> "base/midg_gpus/other_midg_gpu.h" ; - } - @enddot - * - * - * @section sec_base_user_api_gpuprops_kernel Kernel Operation - * - * During Base Context Create time, user-side makes a single kernel call: - * - A call to fill user memory with GPU information structures - * - * The kernel-side will fill the provided the entire processed @ref base_gpu_props - * structure, because this information is required in both - * user and kernel side; it does not make sense to decode it twice. - * - * Coherency groups must be derived from the bitmasks, but this can be done - * kernel side, and just once at kernel startup: Coherency groups must already - * be known kernel-side, to support chains that specify a 'Only Coherent Group' - * SW requirement, or 'Only Coherent Group with Tiler' SW requirement. 
- * - * @section sec_base_user_api_gpuprops_cocalc Coherency Group calculation - * Creation of the coherent group data is done at device-driver startup, and so - * is one-time. This will most likely involve a loop with CLZ, shifting, and - * bit clearing on the L2_PRESENT mask, depending on whether the - * system is L2 Coherent. The number of shader cores is done by a - * population count, since faulty cores may be disabled during production, - * producing a non-contiguous mask. - * - * The memory requirements for this algoirthm can be determined either by a u64 - * population count on the L2_PRESENT mask (a LUT helper already is - * requried for the above), or simple assumption that there can be no more than - * 16 coherent groups, since core groups are typically 4 cores. - */ - -/** - * @addtogroup base_user_api_gpuprops User-side Base GPU Property Query APIs - * @{ - */ - -/** - * @addtogroup base_user_api_gpuprops_dyn Dynamic HW Properties - * @{ - */ - -#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3 - -#define BASE_MAX_COHERENT_GROUPS 16 - -struct mali_base_gpu_core_props { - /** - * Product specific value. - */ - u32 product_id; - - /** - * Status of the GPU release. - * No defined values, but starts at 0 and increases by one for each release - * status (alpha, beta, EAC, etc.). - * 4 bit values (0-15). - */ - u16 version_status; - - /** - * Minor release number of the GPU. "P" part of an "RnPn" release number. - * 8 bit values (0-255). - */ - u16 minor_revision; - - /** - * Major release number of the GPU. "R" part of an "RnPn" release number. - * 4 bit values (0-15). - */ - u16 major_revision; - - u16 padding; - - /** - * @usecase GPU clock speed is not specified in the Midgard Architecture, but is - * necessary for OpenCL's clGetDeviceInfo() function. - */ - u32 gpu_speed_mhz; - - /** - * @usecase GPU clock max/min speed is required for computing best/worst case - * in tasks as job scheduling ant irq_throttling. 
(It is not specified in the - * Midgard Architecture). - */ - u32 gpu_freq_khz_max; - u32 gpu_freq_khz_min; - - /** - * Size of the shader program counter, in bits. - */ - u32 log2_program_counter_size; - - /** - * TEXTURE_FEATURES_x registers, as exposed by the GPU. This is a - * bitpattern where a set bit indicates that the format is supported. - * - * Before using a texture format, it is recommended that the corresponding - * bit be checked. - */ - u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS]; - - /** - * Theoretical maximum memory available to the GPU. It is unlikely that a - * client will be able to allocate all of this memory for their own - * purposes, but this at least provides an upper bound on the memory - * available to the GPU. - * - * This is required for OpenCL's clGetDeviceInfo() call when - * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL GPU devices. The - * client will not be expecting to allocate anywhere near this value. - */ - u64 gpu_available_memory_size; -}; - -/** - * - * More information is possible - but associativity and bus width are not - * required by upper-level apis. - */ -struct mali_base_gpu_l2_cache_props { - u8 log2_line_size; - u8 log2_cache_size; - u8 num_l2_slices; /* Number of L2C slices. 1 or higher */ - u8 padding[5]; -}; - -struct mali_base_gpu_tiler_props { - u32 bin_size_bytes; /* Max is 4*2^15 */ - u32 max_active_levels; /* Max is 2^15 */ -}; - -/** - * GPU threading system details. - */ -struct mali_base_gpu_thread_props { - u32 max_threads; /* Max. number of threads per core */ - u32 max_workgroup_size; /* Max. number of threads per workgroup */ - u32 max_barrier_size; /* Max. number of threads that can synchronize on a simple barrier */ - u16 max_registers; /* Total size [1..65535] of the register file available per core. */ - u8 max_task_queue; /* Max. tasks [1..255] which may be sent to a core before it becomes blocked. */ - u8 max_thread_group_split; /* Max. 
allowed value [1..15] of the Thread Group Split field. */ - u8 impl_tech; /* 0 = Not specified, 1 = Silicon, 2 = FPGA, 3 = SW Model/Emulation */ - u8 padding[7]; -}; - -/** - * @brief descriptor for a coherent group - * - * \c core_mask exposes all cores in that coherent group, and \c num_cores - * provides a cached population-count for that mask. - * - * @note Whilst all cores are exposed in the mask, not all may be available to - * the application, depending on the Kernel Power policy. - * - * @note if u64s must be 8-byte aligned, then this structure has 32-bits of wastage. - */ -struct mali_base_gpu_coherent_group { - u64 core_mask; /**< Core restriction mask required for the group */ - u16 num_cores; /**< Number of cores in the group */ - u16 padding[3]; -}; - -/** - * @brief Coherency group information - * - * Note that the sizes of the members could be reduced. However, the \c group - * member might be 8-byte aligned to ensure the u64 core_mask is 8-byte - * aligned, thus leading to wastage if the other members sizes were reduced. - * - * The groups are sorted by core mask. The core masks are non-repeating and do - * not intersect. - */ -struct mali_base_gpu_coherent_group_info { - u32 num_groups; - - /** - * Number of core groups (coherent or not) in the GPU. Equivalent to the number of L2 Caches. - * - * The GPU Counter dumping writes 2048 bytes per core group, regardless of - * whether the core groups are coherent or not. Hence this member is needed - * to calculate how much memory is required for dumping. - * - * @note Do not use it to work out how many valid elements are in the - * group[] member. Use num_groups instead. 
- */ - u32 num_core_groups; - - /** - * Coherency features of the memory, accessed by @ref gpu_mem_features - * methods - */ - u32 coherency; - - u32 padding; - - /** - * Descriptors of coherent groups - */ - struct mali_base_gpu_coherent_group group[BASE_MAX_COHERENT_GROUPS]; -}; - -/** - * A complete description of the GPU's Hardware Configuration Discovery - * registers. - * - * The information is presented inefficiently for access. For frequent access, - * the values should be better expressed in an unpacked form in the - * base_gpu_props structure. - * - * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to - * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device - * behaving differently?". In this case, all information about the - * configuration is potentially useful, but it does not need to be processed - * by the driver. Instead, the raw registers can be processed by the Mali - * Tools software on the host PC. - * - */ -struct gpu_raw_gpu_props { - u64 shader_present; - u64 tiler_present; - u64 l2_present; - u32 coherency_enabled; - u32 unused_1; /* keep for backward compatibility */ - - u32 l2_features; - u32 suspend_size; /* API 8.2+ */ - u32 mem_features; - u32 mmu_features; - - u32 as_present; - - u32 js_present; - u32 js_features[GPU_MAX_JOB_SLOTS]; - u32 tiler_features; - u32 texture_features[3]; - - u32 gpu_id; - - u32 thread_max_threads; - u32 thread_max_workgroup_size; - u32 thread_max_barrier_size; - u32 thread_features; - - u32 coherency_features; -}; - -/** - * Return structure for _mali_base_get_gpu_props(). - * - * NOTE: the raw_props member in this datastructure contains the register - * values from which the value of the other members are derived. The derived - * members exist to allow for efficient access and/or shielding the details - * of the layout of the registers. 
- * - */ -typedef struct mali_base_gpu_props { - struct mali_base_gpu_core_props core_props; - struct mali_base_gpu_l2_cache_props l2_props; - u64 unused_1; /* keep for backwards compatibility */ - struct mali_base_gpu_tiler_props tiler_props; - struct mali_base_gpu_thread_props thread_props; - - /** This member is large, likely to be 128 bytes */ - struct gpu_raw_gpu_props raw_props; - - /** This must be last member of the structure */ - struct mali_base_gpu_coherent_group_info coherency_info; -} base_gpu_props; - -/** @} end group base_user_api_gpuprops_dyn */ - -/** @} end group base_user_api_gpuprops */ - -/** - * @addtogroup base_user_api_core User-side Base core APIs - * @{ - */ - -/** - * \enum base_context_create_flags - * - * Flags to pass to ::base_context_init. - * Flags can be ORed together to enable multiple things. - * - * These share the same space as @ref basep_context_private_flags, and so must - * not collide with them. - */ -enum base_context_create_flags { - /** No flags set */ - BASE_CONTEXT_CREATE_FLAG_NONE = 0, - - /** Base context is embedded in a cctx object (flag used for CINSTR software counter macros) */ - BASE_CONTEXT_CCTX_EMBEDDED = (1u << 0), - - /** Base context is a 'System Monitor' context for Hardware counters. - * - * One important side effect of this is that job submission is disabled. */ - BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED = (1u << 1), - - /** Base context flag indicating a 'hint' that this context uses Compute - * Jobs only. - * - * Specifially, this means that it only sends atoms that do not - * contain the following @ref base_jd_core_req : - * - BASE_JD_REQ_FS - * - BASE_JD_REQ_T - * - * Violation of these requirements will cause the Job-Chains to be rejected. 
- * - * In addition, it is inadvisable for the atom's Job-Chains to contain Jobs - * of the following @ref gpu_job_type (whilst it may work now, it may not - * work in future) : - * - @ref GPU_JOB_VERTEX - * - @ref GPU_JOB_GEOMETRY - * - * @note An alternative to using this is to specify the BASE_JD_REQ_ONLY_COMPUTE - * requirement in atoms. - */ - BASE_CONTEXT_HINT_ONLY_COMPUTE = (1u << 2) -}; - -/** - * Bitpattern describing the ::base_context_create_flags that can be passed to base_context_init() - */ -#define BASE_CONTEXT_CREATE_ALLOWED_FLAGS \ - (((u32)BASE_CONTEXT_CCTX_EMBEDDED) | \ - ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) | \ - ((u32)BASE_CONTEXT_HINT_ONLY_COMPUTE)) - -/** - * Bitpattern describing the ::base_context_create_flags that can be passed to the kernel - */ -#define BASE_CONTEXT_CREATE_KERNEL_FLAGS \ - (((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) | \ - ((u32)BASE_CONTEXT_HINT_ONLY_COMPUTE)) - -/** - * Private flags used on the base context - * - * These start at bit 31, and run down to zero. - * - * They share the same space as @ref base_context_create_flags, and so must - * not collide with them. - */ -enum basep_context_private_flags { - /** Private flag tracking whether job descriptor dumping is disabled */ - BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED = (1 << 31) -}; - -/** @} end group base_user_api_core */ - -/** @} end group base_user_api */ - -/** - * @addtogroup base_plat_config_gpuprops Base Platform Config GPU Properties - * @{ - * - * C Pre-processor macros are exposed here to do with Platform - * Config. - * - * These include: - * - GPU Properties that are constant on a particular Midgard Family - * Implementation e.g. Maximum samples per pixel on Mali-T600. - * - General platform config for the GPU, such as the GPU major and minor - * revison. - */ - -/** @} end group base_plat_config_gpuprops */ - -/** - * @addtogroup base_api Base APIs - * @{ - */ - -/** - * @brief The payload for a replay job. 
This must be in GPU memory. - */ -typedef struct base_jd_replay_payload { - /** - * Pointer to the first entry in the base_jd_replay_jc list. These - * will be replayed in @b reverse order (so that extra ones can be added - * to the head in future soft jobs without affecting this soft job) - */ - u64 tiler_jc_list; - - /** - * Pointer to the fragment job chain. - */ - u64 fragment_jc; - - /** - * Pointer to the tiler heap free FBD field to be modified. - */ - u64 tiler_heap_free; - - /** - * Hierarchy mask for the replayed fragment jobs. May be zero. - */ - u16 fragment_hierarchy_mask; - - /** - * Hierarchy mask for the replayed tiler jobs. May be zero. - */ - u16 tiler_hierarchy_mask; - - /** - * Default weight to be used for hierarchy levels not in the original - * mask. - */ - u32 hierarchy_default_weight; - - /** - * Core requirements for the tiler job chain - */ - base_jd_core_req tiler_core_req; - - /** - * Core requirements for the fragment job chain - */ - base_jd_core_req fragment_core_req; - - u8 padding[4]; -} base_jd_replay_payload; - -/** - * @brief An entry in the linked list of job chains to be replayed. This must - * be in GPU memory. - */ -typedef struct base_jd_replay_jc { - /** - * Pointer to next entry in the list. A setting of NULL indicates the - * end of the list. - */ - u64 next; - - /** - * Pointer to the job chain. 
- */ - u64 jc; - -} base_jd_replay_jc; - -/* Maximum number of jobs allowed in a fragment chain in the payload of a - * replay job */ -#define BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT 256 - -/** @} end group base_api */ - -typedef struct base_profiling_controls { - u32 profiling_controls[FBDUMP_CONTROL_MAX]; -} base_profiling_controls; - -#endif /* _BASE_KERNEL_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_kernel_sync.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_kernel_sync.h deleted file mode 100755 index a24791f4af7c7..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_kernel_sync.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file - * Base cross-proccess sync API. - */ - -#ifndef _BASE_KERNEL_SYNC_H_ -#define _BASE_KERNEL_SYNC_H_ - -#include - -#define STREAM_IOC_MAGIC '~' - -/* Fence insert. - * - * Inserts a fence on the stream operated on. - * Fence can be waited via a base fence wait soft-job - * or triggered via a base fence trigger soft-job. - * - * Fences must be cleaned up with close when no longer needed. - * - * No input/output arguments. 
- * Returns - * >=0 fd - * <0 error code - */ -#define STREAM_IOC_FENCE_INSERT _IO(STREAM_IOC_MAGIC, 0) - -#endif /* _BASE_KERNEL_SYNC_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_mem_priv.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_mem_priv.h deleted file mode 100755 index 4a98a72cc37a7..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_mem_priv.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef _BASE_MEM_PRIV_H_ -#define _BASE_MEM_PRIV_H_ - -#define BASE_SYNCSET_OP_MSYNC (1U << 0) -#define BASE_SYNCSET_OP_CSYNC (1U << 1) - -/* - * This structure describe a basic memory coherency operation. - * It can either be: - * @li a sync from CPU to Memory: - * - type = ::BASE_SYNCSET_OP_MSYNC - * - mem_handle = a handle to the memory object on which the operation - * is taking place - * - user_addr = the address of the range to be synced - * - size = the amount of data to be synced, in bytes - * - offset is ignored. - * @li a sync from Memory to CPU: - * - type = ::BASE_SYNCSET_OP_CSYNC - * - mem_handle = a handle to the memory object on which the operation - * is taking place - * - user_addr = the address of the range to be synced - * - size = the amount of data to be synced, in bytes. - * - offset is ignored. 
- */ -struct basep_syncset { - base_mem_handle mem_handle; - u64 user_addr; - u64 size; - u8 type; - u8 padding[7]; -}; - -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_vendor_specific_func.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_vendor_specific_func.h deleted file mode 100755 index be454a216a399..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_base_vendor_specific_func.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010, 2012-2013, 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -#ifndef _BASE_VENDOR_SPEC_FUNC_H_ -#define _BASE_VENDOR_SPEC_FUNC_H_ - -int kbase_get_vendor_specific_cpu_clock_speed(u32 * const); - -#endif /*_BASE_VENDOR_SPEC_FUNC_H_*/ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase.h deleted file mode 100755 index 8840d60ab1be6..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase.h +++ /dev/null @@ -1,558 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef _KBASE_H_ -#define _KBASE_H_ - -#include - -#include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "mali_base_kernel.h" -#include -#include - -#include "mali_kbase_pm.h" -#include "mali_kbase_mem_lowlevel.h" -#include "mali_kbase_defs.h" -#include "mali_kbase_trace_timeline.h" -#include "mali_kbase_js.h" -#include "mali_kbase_mem.h" -#include "mali_kbase_security.h" -#include "mali_kbase_utility.h" -#include "mali_kbase_gpu_memory_debugfs.h" -#include "mali_kbase_mem_profile_debugfs.h" -#include "mali_kbase_debug_job_fault.h" -#include "mali_kbase_jd_debugfs.h" -#include "mali_kbase_gpuprops.h" -#include "mali_kbase_jm.h" -#include "mali_kbase_vinstr.h" -#include "mali_kbase_ipa.h" -#ifdef CONFIG_GPU_TRACEPOINTS -#include -#endif -/** - * @page page_base_kernel_main Kernel-side Base (KBase) APIs - * - * The Kernel-side Base (KBase) APIs are divided up as follows: - * - @subpage page_kbase_js_policy - */ - -/** - * @defgroup base_kbase_api Kernel-side Base (KBase) APIs - */ - -struct kbase_device *kbase_device_alloc(void); -/* -* note: configuration attributes member of kbdev needs to have -* been setup before calling kbase_device_init -*/ - -/* -* API to acquire device list semaphone and return pointer -* to the device list head -*/ -const struct list_head *kbase_dev_list_get(void); -/* API to release the device list semaphore */ -void kbase_dev_list_put(const struct list_head *dev_list); - -int kbase_device_init(struct kbase_device * const kbdev); -void kbase_device_term(struct kbase_device *kbdev); -void kbase_device_free(struct kbase_device *kbdev); -int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature); - -/* 
Needed for gator integration and for reporting vsync information */ -struct kbase_device *kbase_find_device(int minor); -void kbase_release_device(struct kbase_device *kbdev); - -void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value); - -u32 kbase_get_profiling_control(struct kbase_device *kbdev, u32 control); - -struct kbase_context * -kbase_create_context(struct kbase_device *kbdev, bool is_compat); -void kbase_destroy_context(struct kbase_context *kctx); -int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags); - -int kbase_jd_init(struct kbase_context *kctx); -void kbase_jd_exit(struct kbase_context *kctx); -#ifdef BASE_LEGACY_UK6_SUPPORT -int kbase_jd_submit(struct kbase_context *kctx, - const struct kbase_uk_job_submit *submit_data, - int uk6_atom); -#else -int kbase_jd_submit(struct kbase_context *kctx, - const struct kbase_uk_job_submit *submit_data); -#endif - -/** - * kbase_jd_done_worker - Handle a job completion - * @data: a &struct work_struct - * - * This function requeues the job from the runpool (if it was soft-stopped or - * removed from NEXT registers). - * - * Removes it from the system if it finished/failed/was cancelled. - * - * Resolves dependencies to add dependent jobs to the context, potentially - * starting them if necessary (which may add more references to the context) - * - * Releases the reference to the context from the no-longer-running job. - * - * Handles retrying submission outside of IRQ context if it failed from within - * IRQ context. 
- */ -void kbase_jd_done_worker(struct work_struct *data); - -void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp, - kbasep_js_atom_done_code done_code); -void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom); -void kbase_jd_evict(struct kbase_device *kbdev, struct kbase_jd_atom *katom); -void kbase_jd_zap_context(struct kbase_context *kctx); -bool jd_done_nolock(struct kbase_jd_atom *katom, - struct list_head *completed_jobs_ctx); -void kbase_jd_free_external_resources(struct kbase_jd_atom *katom); -bool jd_submit_atom(struct kbase_context *kctx, - const struct base_jd_atom_v2 *user_atom, - struct kbase_jd_atom *katom); - -void kbase_job_done(struct kbase_device *kbdev, u32 done); - -void kbase_gpu_cacheclean(struct kbase_device *kbdev, - struct kbase_jd_atom *katom); -/** - * kbase_job_slot_ctx_priority_check_locked(): - Check for lower priority atoms - * and soft stop them - * @kctx: Pointer to context to check. - * @katom: Pointer to priority atom. - * - * Atoms from @kctx on the same job slot as @katom, which have lower priority - * than @katom will be soft stopped and put back in the queue, so that atoms - * with higher priority can run. - * - * The js_data.runpool_irq.lock must be held when calling this function. 
- */ -void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx, - struct kbase_jd_atom *katom); - -void kbase_job_slot_softstop(struct kbase_device *kbdev, int js, - struct kbase_jd_atom *target_katom); -void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js, - struct kbase_jd_atom *target_katom, u32 sw_flags); -void kbase_job_slot_hardstop(struct kbase_context *kctx, int js, - struct kbase_jd_atom *target_katom); -void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action, - u16 core_reqs, struct kbase_jd_atom *target_katom); -void kbase_job_check_leave_disjoint(struct kbase_device *kbdev, - struct kbase_jd_atom *target_katom); - -void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event); -int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent); -int kbase_event_pending(struct kbase_context *ctx); -int kbase_event_init(struct kbase_context *kctx); -void kbase_event_close(struct kbase_context *kctx); -void kbase_event_cleanup(struct kbase_context *kctx); -void kbase_event_wakeup(struct kbase_context *kctx); - -int kbase_process_soft_job(struct kbase_jd_atom *katom); -int kbase_prepare_soft_job(struct kbase_jd_atom *katom); -void kbase_finish_soft_job(struct kbase_jd_atom *katom); -void kbase_cancel_soft_job(struct kbase_jd_atom *katom); -void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev); - -bool kbase_replay_process(struct kbase_jd_atom *katom); - -/* api used internally for register access. 
Contains validation and tracing */ -void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value); -void kbase_device_trace_buffer_install(struct kbase_context *kctx, u32 *tb, size_t size); -void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx); - -/* api to be ported per OS, only need to do the raw register access */ -void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value); -u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset); - - -void kbasep_as_do_poke(struct work_struct *work); - -/** Returns the name associated with a Mali exception code - * - * This function is called from the interrupt handler when a GPU fault occurs. - * It reports the details of the fault using KBASE_DEBUG_PRINT_WARN. - * - * @param[in] kbdev The kbase device that the GPU fault occurred from. - * @param[in] exception_code exception code - * @return name associated with the exception code - */ -const char *kbase_exception_name(struct kbase_device *kbdev, - u32 exception_code); - -/** - * Check whether a system suspend is in progress, or has already been suspended - * - * The caller should ensure that either kbdev->pm.active_count_lock is held, or - * a dmb was executed recently (to ensure the value is most - * up-to-date). However, without a lock the value could change afterwards. 
- * - * @return false if a suspend is not in progress - * @return !=false otherwise - */ -static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev) -{ - return kbdev->pm.suspending; -} - -/** - * Return the atom's ID, as was originally supplied by userspace in - * base_jd_atom_v2::atom_number - */ -static inline int kbase_jd_atom_id(struct kbase_context *kctx, struct kbase_jd_atom *katom) -{ - int result; - - KBASE_DEBUG_ASSERT(kctx); - KBASE_DEBUG_ASSERT(katom); - KBASE_DEBUG_ASSERT(katom->kctx == kctx); - - result = katom - &kctx->jctx.atoms[0]; - KBASE_DEBUG_ASSERT(result >= 0 && result <= BASE_JD_ATOM_COUNT); - return result; -} - -/** - * kbase_jd_atom_from_id - Return the atom structure for the given atom ID - * @kctx: Context pointer - * @id: ID of atom to retrieve - * - * Return: Pointer to struct kbase_jd_atom associated with the supplied ID - */ -static inline struct kbase_jd_atom *kbase_jd_atom_from_id( - struct kbase_context *kctx, int id) -{ - return &kctx->jctx.atoms[id]; -} - -/** - * Initialize the disjoint state - * - * The disjoint event count and state are both set to zero. - * - * Disjoint functions usage: - * - * The disjoint event count should be incremented whenever a disjoint event occurs. - * - * There are several cases which are regarded as disjoint behavior. Rather than just increment - * the counter during disjoint events we also increment the counter when jobs may be affected - * by what the GPU is currently doing. To facilitate this we have the concept of disjoint state. - * - * Disjoint state is entered during GPU reset and for the entire time that an atom is replaying - * (as part of the replay workaround). Increasing the disjoint state also increases the count of - * disjoint events. - * - * The disjoint state is then used to increase the count of disjoint events during job submission - * and job completion. Any atom submitted or completed while the disjoint state is greater than - * zero is regarded as a disjoint event. 
- * - * The disjoint event counter is also incremented immediately whenever a job is soft stopped - * and during context creation. - * - * @param kbdev The kbase device - */ -void kbase_disjoint_init(struct kbase_device *kbdev); - -/** - * Increase the count of disjoint events - * called when a disjoint event has happened - * - * @param kbdev The kbase device - */ -void kbase_disjoint_event(struct kbase_device *kbdev); - -/** - * Increase the count of disjoint events only if the GPU is in a disjoint state - * - * This should be called when something happens which could be disjoint if the GPU - * is in a disjoint state. The state refcount keeps track of this. - * - * @param kbdev The kbase device - */ -void kbase_disjoint_event_potential(struct kbase_device *kbdev); - -/** - * Returns the count of disjoint events - * - * @param kbdev The kbase device - * @return the count of disjoint events - */ -u32 kbase_disjoint_event_get(struct kbase_device *kbdev); - -/** - * Increment the refcount state indicating that the GPU is in a disjoint state. 
- * - * Also Increment the disjoint event count (calls @ref kbase_disjoint_event) - * eventually after the disjoint state has completed @ref kbase_disjoint_state_down - * should be called - * - * @param kbdev The kbase device - */ -void kbase_disjoint_state_up(struct kbase_device *kbdev); - -/** - * Decrement the refcount state - * - * Also Increment the disjoint event count (calls @ref kbase_disjoint_event) - * - * Called after @ref kbase_disjoint_state_up once the disjoint state is over - * - * @param kbdev The kbase device - */ -void kbase_disjoint_state_down(struct kbase_device *kbdev); - -/** - * If a job is soft stopped and the number of contexts is >= this value - * it is reported as a disjoint event - */ -#define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2 - -#if KBASE_TRACE_ENABLE -void kbasep_trace_debugfs_init(struct kbase_device *kbdev); - -#ifndef CONFIG_MALI_SYSTEM_TRACE -/** Add trace values about a job-slot - * - * @note Any functions called through this macro will still be evaluated in - * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any - * functions called to get the parameters supplied to this macro must: - * - be static or static inline - * - must just return 0 and have no other statements present in the body. - */ -#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot) \ - kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \ - KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, 0) - -/** Add trace values about a job-slot, with info - * - * @note Any functions called through this macro will still be evaluated in - * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any - * functions called to get the parameters supplied to this macro must: - * - be static or static inline - * - must just return 0 and have no other statements present in the body. 
- */ -#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val) \ - kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \ - KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, info_val) - -/** Add trace values about a ctx refcount - * - * @note Any functions called through this macro will still be evaluated in - * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any - * functions called to get the parameters supplied to this macro must: - * - be static or static inline - * - must just return 0 and have no other statements present in the body. - */ -#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount) \ - kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \ - KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, 0) -/** Add trace values about a ctx refcount, and info - * - * @note Any functions called through this macro will still be evaluated in - * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any - * functions called to get the parameters supplied to this macro must: - * - be static or static inline - * - must just return 0 and have no other statements present in the body. - */ -#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val) \ - kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \ - KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, info_val) - -/** Add trace values (no slot or refcount) - * - * @note Any functions called through this macro will still be evaluated in - * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any - * functions called to get the parameters supplied to this macro must: - * - be static or static inline - * - must just return 0 and have no other statements present in the body. 
- */ -#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val) \ - kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \ - 0, 0, 0, info_val) - -/** Clear the trace */ -#define KBASE_TRACE_CLEAR(kbdev) \ - kbasep_trace_clear(kbdev) - -/** Dump the slot trace */ -#define KBASE_TRACE_DUMP(kbdev) \ - kbasep_trace_dump(kbdev) - -/** PRIVATE - do not use directly. Use KBASE_TRACE_ADD() instead */ -void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val); -/** PRIVATE - do not use directly. Use KBASE_TRACE_CLEAR() instead */ -void kbasep_trace_clear(struct kbase_device *kbdev); -#else /* #ifndef CONFIG_MALI_SYSTEM_TRACE */ -/* Dispatch kbase trace events as system trace events */ -#include -#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\ - trace_mali_##code(jobslot, 0) - -#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\ - trace_mali_##code(jobslot, info_val) - -#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\ - trace_mali_##code(refcount, 0) - -#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\ - trace_mali_##code(refcount, info_val) - -#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)\ - trace_mali_##code(gpu_addr, info_val) - -#define KBASE_TRACE_CLEAR(kbdev)\ - do {\ - CSTD_UNUSED(kbdev);\ - CSTD_NOP(0);\ - } while (0) -#define KBASE_TRACE_DUMP(kbdev)\ - do {\ - CSTD_UNUSED(kbdev);\ - CSTD_NOP(0);\ - } while (0) - -#endif /* #ifndef CONFIG_MALI_SYSTEM_TRACE */ -#else -#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\ - do {\ - CSTD_UNUSED(kbdev);\ - CSTD_NOP(code);\ - CSTD_UNUSED(ctx);\ - CSTD_UNUSED(katom);\ - CSTD_UNUSED(gpu_addr);\ - CSTD_UNUSED(jobslot);\ - } while (0) - -#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, 
katom, gpu_addr, jobslot, info_val)\ - do {\ - CSTD_UNUSED(kbdev);\ - CSTD_NOP(code);\ - CSTD_UNUSED(ctx);\ - CSTD_UNUSED(katom);\ - CSTD_UNUSED(gpu_addr);\ - CSTD_UNUSED(jobslot);\ - CSTD_UNUSED(info_val);\ - CSTD_NOP(0);\ - } while (0) - -#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\ - do {\ - CSTD_UNUSED(kbdev);\ - CSTD_NOP(code);\ - CSTD_UNUSED(ctx);\ - CSTD_UNUSED(katom);\ - CSTD_UNUSED(gpu_addr);\ - CSTD_UNUSED(refcount);\ - CSTD_NOP(0);\ - } while (0) - -#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\ - do {\ - CSTD_UNUSED(kbdev);\ - CSTD_NOP(code);\ - CSTD_UNUSED(ctx);\ - CSTD_UNUSED(katom);\ - CSTD_UNUSED(gpu_addr);\ - CSTD_UNUSED(info_val);\ - CSTD_NOP(0);\ - } while (0) - -#define KBASE_TRACE_ADD(kbdev, code, subcode, ctx, katom, val)\ - do {\ - CSTD_UNUSED(kbdev);\ - CSTD_NOP(code);\ - CSTD_UNUSED(subcode);\ - CSTD_UNUSED(ctx);\ - CSTD_UNUSED(katom);\ - CSTD_UNUSED(val);\ - CSTD_NOP(0);\ - } while (0) - -#define KBASE_TRACE_CLEAR(kbdev)\ - do {\ - CSTD_UNUSED(kbdev);\ - CSTD_NOP(0);\ - } while (0) -#define KBASE_TRACE_DUMP(kbdev)\ - do {\ - CSTD_UNUSED(kbdev);\ - CSTD_NOP(0);\ - } while (0) -#endif /* KBASE_TRACE_ENABLE */ -/** PRIVATE - do not use directly. Use KBASE_TRACE_DUMP() instead */ -void kbasep_trace_dump(struct kbase_device *kbdev); - -#ifdef CONFIG_MALI_DEBUG -/** - * kbase_set_driver_inactive - Force driver to go inactive - * @kbdev: Device pointer - * @inactive: true if driver should go inactive, false otherwise - * - * Forcing the driver inactive will cause all future IOCTLs to wait until the - * driver is made active again. This is intended solely for the use of tests - * which require that no jobs are running while the test executes. 
- */ -void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive); -#endif /* CONFIG_MALI_DEBUG */ - -#ifndef RESET0_LEVEL -extern u32 override_value_aml; -extern void *reg_base_hiubus; -#define RESET0_MASK 0x01 -#define RESET1_MASK 0x02 -#define RESET2_MASK 0x03 - -#define RESET0_LEVEL 0x10 -#define RESET1_LEVEL 0x11 -#define RESET2_LEVEL 0x12 -#define Rd(r) readl((reg_base_hiubus) + ((r)<<2)) -#define Wr(r, v) writel((v), ((reg_base_hiubus) + ((r)<<2))) -#define Mali_WrReg(unit, core, regnum, value) kbase_reg_write(kbdev, (regnum), (value), NULL) -#define Mali_RdReg(unit, core, regnum) kbase_reg_read(kbdev, (regnum), NULL) -#define stimulus_print printk -#define stimulus_display printk -#define Mali_pwr_on(x) Mali_pwr_on_with_kdev(kbdev, (x)) -#define Mali_pwr_off(x) Mali_pwr_off_with_kdev(kbdev, (x)) -#endif - - -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c deleted file mode 100755 index 933aa2854697b..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c +++ /dev/null @@ -1,209 +0,0 @@ -/* - * - * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - -#include -#include -#include - -/* This function is used to solve an HW issue with single iterator GPUs. 
- * If a fragment job is soft-stopped on the edge of its bounding box, can happen that the - * restart index is out of bounds and the rerun causes a tile range fault. If this happens - * we try to clamp the restart index to a correct value and rerun the job. - */ -/* Mask of X and Y coordinates for the coordinates words in the descriptors*/ -#define X_COORDINATE_MASK 0x00000FFF -#define Y_COORDINATE_MASK 0x0FFF0000 -/* Max number of words needed from the fragment shader job descriptor */ -#define JOB_HEADER_SIZE_IN_WORDS 10 -#define JOB_HEADER_SIZE (JOB_HEADER_SIZE_IN_WORDS*sizeof(u32)) - -/* Word 0: Status Word */ -#define JOB_DESC_STATUS_WORD 0 -/* Word 1: Restart Index */ -#define JOB_DESC_RESTART_INDEX_WORD 1 -/* Word 2: Fault address low word */ -#define JOB_DESC_FAULT_ADDR_LOW_WORD 2 -/* Word 8: Minimum Tile Coordinates */ -#define FRAG_JOB_DESC_MIN_TILE_COORD_WORD 8 -/* Word 9: Maximum Tile Coordinates */ -#define FRAG_JOB_DESC_MAX_TILE_COORD_WORD 9 - -int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom) -{ - struct device *dev = katom->kctx->kbdev->dev; - u32 clamped = 0; - struct kbase_va_region *region; - phys_addr_t *page_array; - u64 page_index; - u32 offset = katom->jc & (~PAGE_MASK); - u32 *page_1 = NULL; - u32 *page_2 = NULL; - u32 job_header[JOB_HEADER_SIZE_IN_WORDS]; - void *dst = job_header; - u32 minX, minY, maxX, maxY; - u32 restartX, restartY; - struct page *p; - u32 copy_size; - - dev_warn(dev, "Called TILE_RANGE_FAULT workaround clamping function.\n"); - if (!(katom->core_req & BASE_JD_REQ_FS)) - return 0; - - kbase_gpu_vm_lock(katom->kctx); - region = kbase_region_tracker_find_region_enclosing_address(katom->kctx, - katom->jc); - if (!region || (region->flags & KBASE_REG_FREE)) - goto out_unlock; - - page_array = kbase_get_cpu_phy_pages(region); - if (!page_array) - goto out_unlock; - - page_index = (katom->jc >> PAGE_SHIFT) - region->start_pfn; - - p = pfn_to_page(PFN_DOWN(page_array[page_index])); - - /* we need the 
first 10 words of the fragment shader job descriptor. - * We need to check that the offset + 10 words is less that the page - * size otherwise we need to load the next page. - * page_size_overflow will be equal to 0 in case the whole descriptor - * is within the page > 0 otherwise. - */ - copy_size = MIN(PAGE_SIZE - offset, JOB_HEADER_SIZE); - - page_1 = kmap_atomic(p); - - /* page_1 is a u32 pointer, offset is expressed in bytes */ - page_1 += offset>>2; - - kbase_sync_single_for_cpu(katom->kctx->kbdev, - kbase_dma_addr(p) + offset, - copy_size, DMA_BIDIRECTIONAL); - - memcpy(dst, page_1, copy_size); - - /* The data needed overflows page the dimension, - * need to map the subsequent page */ - if (copy_size < JOB_HEADER_SIZE) { - p = pfn_to_page(PFN_DOWN(page_array[page_index + 1])); - page_2 = kmap_atomic(p); - - kbase_sync_single_for_cpu(katom->kctx->kbdev, - kbase_dma_addr(p), - JOB_HEADER_SIZE - copy_size, DMA_BIDIRECTIONAL); - - memcpy(dst + copy_size, page_2, JOB_HEADER_SIZE - copy_size); - } - - /* We managed to correctly map one or two pages (in case of overflow) */ - /* Get Bounding Box data and restart index from fault address low word */ - minX = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & X_COORDINATE_MASK; - minY = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & Y_COORDINATE_MASK; - maxX = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & X_COORDINATE_MASK; - maxY = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & Y_COORDINATE_MASK; - restartX = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & X_COORDINATE_MASK; - restartY = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & Y_COORDINATE_MASK; - - dev_warn(dev, "Before Clamping:\n" - "Jobstatus: %08x\n" - "restartIdx: %08x\n" - "Fault_addr_low: %08x\n" - "minCoordsX: %08x minCoordsY: %08x\n" - "maxCoordsX: %08x maxCoordsY: %08x\n", - job_header[JOB_DESC_STATUS_WORD], - job_header[JOB_DESC_RESTART_INDEX_WORD], - job_header[JOB_DESC_FAULT_ADDR_LOW_WORD], - minX, minY, - maxX, maxY); - - /* Set the restart index to 
the one which generated the fault*/ - job_header[JOB_DESC_RESTART_INDEX_WORD] = - job_header[JOB_DESC_FAULT_ADDR_LOW_WORD]; - - if (restartX < minX) { - job_header[JOB_DESC_RESTART_INDEX_WORD] = (minX) | restartY; - dev_warn(dev, - "Clamping restart X index to minimum. %08x clamped to %08x\n", - restartX, minX); - clamped = 1; - } - if (restartY < minY) { - job_header[JOB_DESC_RESTART_INDEX_WORD] = (minY) | restartX; - dev_warn(dev, - "Clamping restart Y index to minimum. %08x clamped to %08x\n", - restartY, minY); - clamped = 1; - } - if (restartX > maxX) { - job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxX) | restartY; - dev_warn(dev, - "Clamping restart X index to maximum. %08x clamped to %08x\n", - restartX, maxX); - clamped = 1; - } - if (restartY > maxY) { - job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxY) | restartX; - dev_warn(dev, - "Clamping restart Y index to maximum. %08x clamped to %08x\n", - restartY, maxY); - clamped = 1; - } - - if (clamped) { - /* Reset the fault address low word - * and set the job status to STOPPED */ - job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] = 0x0; - job_header[JOB_DESC_STATUS_WORD] = BASE_JD_EVENT_STOPPED; - dev_warn(dev, "After Clamping:\n" - "Jobstatus: %08x\n" - "restartIdx: %08x\n" - "Fault_addr_low: %08x\n" - "minCoordsX: %08x minCoordsY: %08x\n" - "maxCoordsX: %08x maxCoordsY: %08x\n", - job_header[JOB_DESC_STATUS_WORD], - job_header[JOB_DESC_RESTART_INDEX_WORD], - job_header[JOB_DESC_FAULT_ADDR_LOW_WORD], - minX, minY, - maxX, maxY); - - /* Flush CPU cache to update memory for future GPU reads*/ - memcpy(page_1, dst, copy_size); - p = pfn_to_page(PFN_DOWN(page_array[page_index])); - - kbase_sync_single_for_device(katom->kctx->kbdev, - kbase_dma_addr(p) + offset, - copy_size, DMA_TO_DEVICE); - - if (copy_size < JOB_HEADER_SIZE) { - memcpy(page_2, dst + copy_size, - JOB_HEADER_SIZE - copy_size); - p = pfn_to_page(PFN_DOWN(page_array[page_index + 1])); - - kbase_sync_single_for_device(katom->kctx->kbdev, - 
kbase_dma_addr(p), - JOB_HEADER_SIZE - copy_size, - DMA_TO_DEVICE); - } - } - if (copy_size < JOB_HEADER_SIZE) - kunmap_atomic(page_2); - - kunmap_atomic(page_1); - -out_unlock: - kbase_gpu_vm_unlock(katom->kctx); - return clamped; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h deleted file mode 100755 index 099a29861672e..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * - * (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifndef _KBASE_10969_WORKAROUND_ -#define _KBASE_10969_WORKAROUND_ - -int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom); - -#endif /* _KBASE_10969_WORKAROUND_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c deleted file mode 100755 index 2fb5e3edf49f6..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Cache Policy API. - */ - -#include "mali_kbase_cache_policy.h" - -/* - * The output flags should be a combination of the following values: - * KBASE_REG_CPU_CACHED: CPU cache should be enabled. - */ -u32 kbase_cache_enabled(u32 flags, u32 nr_pages) -{ - u32 cache_flags = 0; - - CSTD_UNUSED(nr_pages); - - if (flags & BASE_MEM_CACHED_CPU) - cache_flags |= KBASE_REG_CPU_CACHED; - - return cache_flags; -} - - -void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - dma_sync_single_for_device(kbdev->dev, handle, size, dir); -} - - -void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - dma_sync_single_for_cpu(kbdev->dev, handle, size, dir); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_cache_policy.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_cache_policy.h deleted file mode 100755 index 0c18bdb357b0a..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_cache_policy.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Cache Policy API. 
- */ - -#ifndef _KBASE_CACHE_POLICY_H_ -#define _KBASE_CACHE_POLICY_H_ - -#include "mali_kbase.h" -#include "mali_base_kernel.h" - -/** - * kbase_cache_enabled - Choose the cache policy for a specific region - * @flags: flags describing attributes of the region - * @nr_pages: total number of pages (backed or not) for the region - * - * Tells whether the CPU and GPU caches should be enabled or not for a specific - * region. - * This function can be modified to customize the cache policy depending on the - * flags and size of the region. - * - * Return: a combination of %KBASE_REG_CPU_CACHED and %KBASE_REG_GPU_CACHED - * depending on the cache policy - */ -u32 kbase_cache_enabled(u32 flags, u32 nr_pages); - -#endif /* _KBASE_CACHE_POLICY_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_config.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_config.c deleted file mode 100755 index fb615ae02ead9..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_config.c +++ /dev/null @@ -1,51 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#include -#include -#include - -int kbasep_platform_device_init(struct kbase_device *kbdev) -{ - struct kbase_platform_funcs_conf *platform_funcs_p; - - platform_funcs_p = (struct kbase_platform_funcs_conf *)PLATFORM_FUNCS; - if (platform_funcs_p && platform_funcs_p->platform_init_func) - return platform_funcs_p->platform_init_func(kbdev); - - return 0; -} - -void kbasep_platform_device_term(struct kbase_device *kbdev) -{ - struct kbase_platform_funcs_conf *platform_funcs_p; - - platform_funcs_p = (struct kbase_platform_funcs_conf *)PLATFORM_FUNCS; - if (platform_funcs_p && platform_funcs_p->platform_term_func) - platform_funcs_p->platform_term_func(kbdev); -} - -int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed) -{ - KBASE_DEBUG_ASSERT(NULL != clock_speed); - - *clock_speed = 100; - return 0; -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_config.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_config.h deleted file mode 100755 index 0285e25840291..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_config.h +++ /dev/null @@ -1,326 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/** - * @file mali_kbase_config.h - * Configuration API and Attributes for KBase - */ - -#ifndef _KBASE_CONFIG_H_ -#define _KBASE_CONFIG_H_ - -#include - -#include -#include - -/** - * @addtogroup base_api - * @{ - */ - -/** - * @addtogroup base_kbase_api - * @{ - */ - -/** - * @addtogroup kbase_config Configuration API and Attributes - * @{ - */ - -#if !MALI_CUSTOMER_RELEASE -/* This flag is set for internal builds so we can run tests without credentials. */ -#define KBASE_HWCNT_DUMP_BYPASS_ROOT 1 -#else -#define KBASE_HWCNT_DUMP_BYPASS_ROOT 0 -#endif - -#include - -/* Forward declaration of struct kbase_device */ -struct kbase_device; - -/** - * kbase_platform_funcs_conf - Specifies platform init/term function pointers - * - * Specifies the functions pointers for platform specific initialization and - * termination. By default no functions are required. No additional platform - * specific control is necessary. - */ -struct kbase_platform_funcs_conf { - /** - * platform_init_func - platform specific init function pointer - * @kbdev - kbase_device pointer - * - * Returns 0 on success, negative error code otherwise. - * - * Function pointer for platform specific initialization or NULL if no - * initialization function is required. At the point this the GPU is - * not active and its power and clocks are in unknown (platform specific - * state) as kbase doesn't yet have control of power and clocks. - * - * The platform specific private pointer kbase_device::platform_context - * can be accessed (and possibly initialized) in here. - */ - int (*platform_init_func)(struct kbase_device *kbdev); - /** - * platform_term_func - platform specific termination function pointer - * @kbdev - kbase_device pointer - * - * Function pointer for platform specific termination or NULL if no - * termination function is required. At the point this the GPU will be - * idle but still powered and clocked. 
- * - * The platform specific private pointer kbase_device::platform_context - * can be accessed (and possibly terminated) in here. - */ - void (*platform_term_func)(struct kbase_device *kbdev); -}; - -/* - * @brief Specifies the callbacks for power management - * - * By default no callbacks will be made and the GPU must not be powered off. - */ -struct kbase_pm_callback_conf { - /** Callback for when the GPU is idle and the power to it can be switched off. - * - * The system integrator can decide whether to either do nothing, just switch off - * the clocks to the GPU, or to completely power down the GPU. - * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the - * platform \em callbacks responsiblity to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf). - */ - void (*power_off_callback)(struct kbase_device *kbdev); - - /** Callback for when the GPU is about to become active and power must be supplied. - * - * This function must not return until the GPU is powered and clocked sufficiently for register access to - * succeed. The return value specifies whether the GPU was powered down since the call to power_off_callback. - * If the GPU state has been lost then this function must return 1, otherwise it should return 0. - * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the - * platform \em callbacks responsiblity to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf). - * - * The return value of the first call to this function is ignored. - * - * @return 1 if the GPU state may have been lost, 0 otherwise. - */ - int (*power_on_callback)(struct kbase_device *kbdev); - - /** Callback for when the system is requesting a suspend and GPU power - * must be switched off. 
- * - * Note that if this callback is present, then this may be called - * without a preceding call to power_off_callback. Therefore this - * callback must be able to take any action that might otherwise happen - * in power_off_callback. - * - * The platform specific private pointer kbase_device::platform_context - * can be accessed and modified in here. It is the platform \em - * callbacks responsibility to initialize and terminate this pointer if - * used (see @ref kbase_platform_funcs_conf). - */ - void (*power_suspend_callback)(struct kbase_device *kbdev); - - /** Callback for when the system is resuming from a suspend and GPU - * power must be switched on. - * - * Note that if this callback is present, then this may be called - * without a following call to power_on_callback. Therefore this - * callback must be able to take any action that might otherwise happen - * in power_on_callback. - * - * The platform specific private pointer kbase_device::platform_context - * can be accessed and modified in here. It is the platform \em - * callbacks responsibility to initialize and terminate this pointer if - * used (see @ref kbase_platform_funcs_conf). - */ - void (*power_resume_callback)(struct kbase_device *kbdev); - - /** Callback for handling runtime power management initialization. - * - * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback - * will become active from calls made to the OS from within this function. - * The runtime calls can be triggered by calls from @ref power_off_callback and @ref power_on_callback. - * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature. - * - * @return 0 on success, else int erro code. - */ - int (*power_runtime_init_callback)(struct kbase_device *kbdev); - - /** Callback for handling runtime power management termination. 
- * - * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback - * should no longer be called by the OS on completion of this function. - * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature. - */ - void (*power_runtime_term_callback)(struct kbase_device *kbdev); - - /** Callback for runtime power-off power management callback - * - * For linux this callback will be called by the kernel runtime_suspend callback. - * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature. - * - * @return 0 on success, else OS error code. - */ - void (*power_runtime_off_callback)(struct kbase_device *kbdev); - - /** Callback for runtime power-on power management callback - * - * For linux this callback will be called by the kernel runtime_resume callback. - * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature. - */ - int (*power_runtime_on_callback)(struct kbase_device *kbdev); -}; - -/** - * kbase_cpuprops_get_default_clock_speed - default for CPU_SPEED_FUNC - * @clock_speed - see kbase_cpu_clk_speed_func for details on the parameters - * - * Returns 0 on success, negative error code otherwise. - * - * Default implementation of CPU_SPEED_FUNC. This function sets clock_speed - * to 100, so will be an underestimate for any real system. - */ -int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed); - -/** - * kbase_cpu_clk_speed_func - Type of the function pointer for CPU_SPEED_FUNC - * @param clock_speed - pointer to store the current CPU clock speed in MHz - * - * Returns 0 on success, otherwise negative error code. - * - * This is mainly used to implement OpenCL's clGetDeviceInfo(). 
- */ -typedef int (*kbase_cpu_clk_speed_func) (u32 *clock_speed); - -/** - * kbase_gpu_clk_speed_func - Type of the function pointer for GPU_SPEED_FUNC - * @param clock_speed - pointer to store the current GPU clock speed in MHz - * - * Returns 0 on success, otherwise negative error code. - * When an error is returned the caller assumes maximum GPU speed stored in - * gpu_freq_khz_max. - * - * If the system timer is not available then this function is required - * for the OpenCL queue profiling to return correct timing information. - * - */ -typedef int (*kbase_gpu_clk_speed_func) (u32 *clock_speed); - -#ifdef CONFIG_OF -struct kbase_platform_config { -}; -#else - -/* - * @brief Specifies start and end of I/O memory region. - */ -struct kbase_io_memory_region { - u64 start; - u64 end; -}; - -/* - * @brief Specifies I/O related resources like IRQs and memory region for I/O operations. - */ -struct kbase_io_resources { - u32 job_irq_number; - u32 mmu_irq_number; - u32 gpu_irq_number; - struct kbase_io_memory_region io_memory_region; -}; - -struct kbase_platform_config { - const struct kbase_io_resources *io_resources; -}; - -#endif /* CONFIG_OF */ - -/** - * @brief Gets the pointer to platform config. - * - * @return Pointer to the platform config - */ -struct kbase_platform_config *kbase_get_platform_config(void); - -/** - * kbasep_platform_device_init: - Platform specific call to initialize hardware - * @kbdev: kbase device pointer - * - * Function calls a platform defined routine if specified in the configuration - * attributes. The routine can initialize any hardware and context state that - * is required for the GPU block to function. - * - * Return: 0 if no errors have been found in the config. - * Negative error code otherwise. 
- */ -int kbasep_platform_device_init(struct kbase_device *kbdev); - -/** - * kbasep_platform_device_term - Platform specific call to terminate hardware - * @kbdev: Kbase device pointer - * - * Function calls a platform defined routine if specified in the configuration - * attributes. The routine can destroy any platform specific context state and - * shut down any hardware functionality that are outside of the Power Management - * callbacks. - * - */ -void kbasep_platform_device_term(struct kbase_device *kbdev); - - -/** - * kbase_platform_early_init - Early initialisation of the platform code - * - * This function will be called when the module is loaded to perform any - * early initialisation required by the platform code. Such as reading - * platform specific device tree entries for the GPU. - * - * Return: 0 for success, any other fail causes module initialisation to fail - */ -int kbase_platform_early_init(void); - -#ifndef CONFIG_OF -#ifdef CONFIG_MALI_PLATFORM_FAKE -/** - * kbase_platform_fake_register - Register a platform device for the GPU - * - * This can be used to register a platform device on systems where device tree - * is not enabled and the platform initialisation code in the kernel doesn't - * create the GPU device. Where possible device tree should be used instead. 
- * - * Return: 0 for success, any other fail causes module initialisation to fail - */ -int kbase_platform_fake_register(void); - -/** - * kbase_platform_fake_unregister - Unregister a fake platform device - * - * Unregister the platform device created with kbase_platform_fake_register() - */ -void kbase_platform_fake_unregister(void); -#endif -#endif - - /** @} *//* end group kbase_config */ - /** @} *//* end group base_kbase_api */ - /** @} *//* end group base_api */ - -#endif /* _KBASE_CONFIG_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h deleted file mode 100755 index ce5d0703911c2..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h +++ /dev/null @@ -1,259 +0,0 @@ -/* - * - * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * @file mali_kbase_config_defaults.h - * - * Default values for configuration settings - * - */ - -#ifndef _KBASE_CONFIG_DEFAULTS_H_ -#define _KBASE_CONFIG_DEFAULTS_H_ - -/* Include mandatory definitions per platform */ -#include - -/** - * Irq throttle. It is the minimum desired time in between two - * consecutive gpu interrupts (given in 'us'). The irq throttle - * gpu register will be configured after this, taking into - * account the configured max frequency. 
- * - * Attached value: number in micro seconds - */ -#define DEFAULT_IRQ_THROTTLE_TIME_US 20 - -/** - * Default Job Scheduler initial runtime of a context for the CFS Policy, - * in time-slices. - * - * This value is relative to that of the least-run context, and defines - * where in the CFS queue a new context is added. A value of 1 means 'after - * the least-run context has used its timeslice'. Therefore, when all - * contexts consistently use the same amount of time, a value of 1 models a - * FIFO. A value of 0 would model a LIFO. - * - * The value is represented in "numbers of time slices". Multiply this - * value by that defined in @ref DEFAULT_JS_CTX_TIMESLICE_NS to get - * the time value for this in nanoseconds. - */ -#define DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES 1 - -/** - * Default Job Scheduler minimum runtime value of a context for CFS, in - * time_slices relative to that of the least-run context. - * - * This is a measure of how much preferrential treatment is given to a - * context that is not run very often. - * - * Specficially, this value defines how many timeslices such a context is - * (initially) allowed to use at once. Such contexts (e.g. 'interactive' - * processes) will appear near the front of the CFS queue, and can initially - * use more time than contexts that run continuously (e.g. 'batch' - * processes). - * - * This limit \b prevents a "stored-up timeslices" DoS attack, where a ctx - * not run for a long time attacks the system by using a very large initial - * number of timeslices when it finally does run. - * - * @note A value of zero allows not-run-often contexts to get scheduled in - * quickly, but to only use a single timeslice when they get scheduled in. - */ -#define DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES 2 - -/** -* Boolean indicating whether the driver is configured to be secure at -* a potential loss of performance. -* -* This currently affects only r0p0-15dev0 HW and earlier. 
-* -* On r0p0-15dev0 HW and earlier, there are tradeoffs between security and -* performance: -* -* - When this is set to true, the driver remains fully secure, -* but potentially loses performance compared with setting this to -* false. -* - When set to false, the driver is open to certain security -* attacks. -* -* From r0p0-00rel0 and onwards, there is no security loss by setting -* this to false, and no performance loss by setting it to -* true. -*/ -#define DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE false - -enum { - /** - * Use unrestricted Address ID width on the AXI bus. - */ - KBASE_AID_32 = 0x0, - - /** - * Restrict GPU to a half of maximum Address ID count. - * This will reduce performance, but reduce bus load due to GPU. - */ - KBASE_AID_16 = 0x3, - - /** - * Restrict GPU to a quarter of maximum Address ID count. - * This will reduce performance, but reduce bus load due to GPU. - */ - KBASE_AID_8 = 0x2, - - /** - * Restrict GPU to an eighth of maximum Address ID count. - * This will reduce performance, but reduce bus load due to GPU. - */ - KBASE_AID_4 = 0x1 -}; - -/** - * Default setting for read Address ID limiting on AXI bus. - * - * Attached value: u32 register value - * KBASE_AID_32 - use the full 32 IDs (5 ID bits) - * KBASE_AID_16 - use 16 IDs (4 ID bits) - * KBASE_AID_8 - use 8 IDs (3 ID bits) - * KBASE_AID_4 - use 4 IDs (2 ID bits) - * Default value: KBASE_AID_32 (no limit). Note hardware implementation - * may limit to a lower value. - */ -#define DEFAULT_ARID_LIMIT KBASE_AID_32 - -/** - * Default setting for write Address ID limiting on AXI. - * - * Attached value: u32 register value - * KBASE_AID_32 - use the full 32 IDs (5 ID bits) - * KBASE_AID_16 - use 16 IDs (4 ID bits) - * KBASE_AID_8 - use 8 IDs (3 ID bits) - * KBASE_AID_4 - use 4 IDs (2 ID bits) - * Default value: KBASE_AID_32 (no limit). Note hardware implementation - * may limit to a lower value. 
- */ -#define DEFAULT_AWID_LIMIT KBASE_AID_32 - -/** - * Default setting for using alternative hardware counters. - */ -#define DEFAULT_ALTERNATIVE_HWC false - -/** - * Default UMP device mapping. A UMP_DEVICE__SHIFT value which - * defines which UMP device this GPU should be mapped to. - */ -#define DEFAULT_UMP_GPU_DEVICE_SHIFT UMP_DEVICE_Z_SHIFT - -/* - * Default period for DVFS sampling - */ -#define DEFAULT_PM_DVFS_PERIOD 100 /* 100ms */ - -/* - * Power Management poweroff tick granuality. This is in nanoseconds to - * allow HR timer support. - * - * On each scheduling tick, the power manager core may decide to: - * -# Power off one or more shader cores - * -# Power off the entire GPU - */ -#define DEFAULT_PM_GPU_POWEROFF_TICK_NS (400000) /* 400us */ - -/* - * Power Manager number of ticks before shader cores are powered off - */ -#define DEFAULT_PM_POWEROFF_TICK_SHADER (2) /* 400-800us */ - -/* - * Power Manager number of ticks before GPU is powered off - */ -#define DEFAULT_PM_POWEROFF_TICK_GPU (2) /* 400-800us */ - -/* - * Default scheduling tick granuality - */ -#define DEFAULT_JS_SCHEDULING_PERIOD_NS (100000000u) /* 100ms */ - -/* - * Default minimum number of scheduling ticks before jobs are soft-stopped. - * - * This defines the time-slice for a job (which may be different from that of a - * context) - */ -#define DEFAULT_JS_SOFT_STOP_TICKS (1) /* 100ms-200ms */ - -/* - * Default minimum number of scheduling ticks before CL jobs are soft-stopped. - */ -#define DEFAULT_JS_SOFT_STOP_TICKS_CL (1) /* 100ms-200ms */ - -/* - * Default minimum number of scheduling ticks before jobs are hard-stopped - */ -#define DEFAULT_JS_HARD_STOP_TICKS_SS (50) /* 5s */ -#define DEFAULT_JS_HARD_STOP_TICKS_SS_8408 (300) /* 30s */ - -/* - * Default minimum number of scheduling ticks before CL jobs are hard-stopped. 
- */ -#define DEFAULT_JS_HARD_STOP_TICKS_CL (50) /* 5s */ - -/* - * Default minimum number of scheduling ticks before jobs are hard-stopped - * during dumping - */ -#define DEFAULT_JS_HARD_STOP_TICKS_DUMPING (15000) /* 1500s */ - -/* - * Default minimum number of scheduling ticks before the GPU is reset to clear a - * "stuck" job - */ -#define DEFAULT_JS_RESET_TICKS_SS (55) /* 5.5s */ -#define DEFAULT_JS_RESET_TICKS_SS_8408 (450) /* 45s */ - -/* - * Default minimum number of scheduling ticks before the GPU is reset to clear a - * "stuck" CL job. - */ -#define DEFAULT_JS_RESET_TICKS_CL (55) /* 5.5s */ - -/* - * Default minimum number of scheduling ticks before the GPU is reset to clear a - * "stuck" job during dumping. - */ -#define DEFAULT_JS_RESET_TICKS_DUMPING (15020) /* 1502s */ - -/* - * Default number of milliseconds given for other jobs on the GPU to be - * soft-stopped when the GPU needs to be reset. - */ -#define DEFAULT_RESET_TIMEOUT_MS (3000) /* 3s */ - -/* - * Default timeslice that a context is scheduled in for, in nanoseconds. - * - * When a context has used up this amount of time across its jobs, it is - * scheduled out to let another run. - * - * @note the resolution is nanoseconds (ns) here, because that's the format - * often used by the OS. - */ -#define DEFAULT_JS_CTX_TIMESLICE_NS (50000000) /* 50ms */ - -#endif /* _KBASE_CONFIG_DEFAULTS_H_ */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_context.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_context.c deleted file mode 100755 index 3a6e9f867206a..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_context.c +++ /dev/null @@ -1,277 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Base kernel context APIs - */ - -#include -#include -#include - - -/** - * kbase_create_context() - Create a kernel base context. - * @kbdev: Kbase device - * @is_compat: Force creation of a 32-bit context - * - * Allocate and init a kernel base context. - * - * Return: new kbase context - */ -struct kbase_context * -kbase_create_context(struct kbase_device *kbdev, bool is_compat) -{ - struct kbase_context *kctx; - int err; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - /* zero-inited as lot of code assume it's zero'ed out on create */ - kctx = vzalloc(sizeof(*kctx)); - - if (!kctx) - goto out; - - /* creating a context is considered a disjoint event */ - kbase_disjoint_event(kbdev); - - kctx->kbdev = kbdev; - kctx->as_nr = KBASEP_AS_NR_INVALID; - kctx->is_compat = is_compat; -#ifdef CONFIG_MALI_TRACE_TIMELINE - kctx->timeline.owner_tgid = task_tgid_nr(current); -#endif - atomic_set(&kctx->setup_complete, 0); - atomic_set(&kctx->setup_in_progress, 0); - kctx->infinite_cache_active = 0; - spin_lock_init(&kctx->mm_update_lock); - kctx->process_mm = NULL; - atomic_set(&kctx->nonmapped_pages, 0); - kctx->slots_pullable = 0; - - err = kbase_mem_pool_init(&kctx->mem_pool, - kbdev->mem_pool_max_size_default, - kctx->kbdev, &kbdev->mem_pool); - if (err) - goto free_kctx; - - atomic_set(&kctx->used_pages, 0); - - err = kbase_jd_init(kctx); - if (err) - goto free_pool; - - err = kbasep_js_kctx_init(kctx); - if (err) - goto free_jd; /* safe to call kbasep_js_kctx_term in this case */ - - err = 
kbase_event_init(kctx); - if (err) - goto free_jd; - - mutex_init(&kctx->reg_lock); - - INIT_LIST_HEAD(&kctx->waiting_soft_jobs); -#ifdef CONFIG_KDS - INIT_LIST_HEAD(&kctx->waiting_kds_resource); -#endif - - err = kbase_mmu_init(kctx); - if (err) - goto free_event; - - kctx->pgd = kbase_mmu_alloc_pgd(kctx); - if (!kctx->pgd) - goto free_mmu; - - kctx->aliasing_sink_page = kbase_mem_pool_alloc(&kctx->mem_pool); - if (!kctx->aliasing_sink_page) - goto no_sink_page; - - kctx->tgid = current->tgid; - kctx->pid = current->pid; - init_waitqueue_head(&kctx->event_queue); - - kctx->cookies = KBASE_COOKIE_MASK; - - /* Make sure page 0 is not used... */ - err = kbase_region_tracker_init(kctx); - if (err) - goto no_region_tracker; -#ifdef CONFIG_GPU_TRACEPOINTS - atomic_set(&kctx->jctx.work_id, 0); -#endif -#ifdef CONFIG_MALI_TRACE_TIMELINE - atomic_set(&kctx->timeline.jd_atoms_in_flight, 0); -#endif - - kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1; - - mutex_init(&kctx->vinstr_cli_lock); - - return kctx; - -no_region_tracker: - kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false); -no_sink_page: - /* VM lock needed for the call to kbase_mmu_free_pgd */ - kbase_gpu_vm_lock(kctx); - kbase_mmu_free_pgd(kctx); - kbase_gpu_vm_unlock(kctx); -free_mmu: - kbase_mmu_term(kctx); -free_event: - kbase_event_cleanup(kctx); -free_jd: - /* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */ - kbasep_js_kctx_term(kctx); - kbase_jd_exit(kctx); -free_pool: - kbase_mem_pool_term(&kctx->mem_pool); -free_kctx: - vfree(kctx); -out: - return NULL; -} -KBASE_EXPORT_SYMBOL(kbase_create_context); - -static void kbase_reg_pending_dtor(struct kbase_va_region *reg) -{ - dev_dbg(reg->kctx->kbdev->dev, "Freeing pending unmapped region\n"); - kbase_mem_phy_alloc_put(reg->cpu_alloc); - kbase_mem_phy_alloc_put(reg->gpu_alloc); - kfree(reg); -} - -/** - * kbase_destroy_context - Destroy a kernel base context. 
- * @kctx: Context to destroy - * - * Calls kbase_destroy_os_context() to free OS specific structures. - * Will release all outstanding regions. - */ -void kbase_destroy_context(struct kbase_context *kctx) -{ - struct kbase_device *kbdev; - int pages; - unsigned long pending_regions_to_clean; - - KBASE_DEBUG_ASSERT(NULL != kctx); - - kbdev = kctx->kbdev; - KBASE_DEBUG_ASSERT(NULL != kbdev); - - KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u); - - /* Ensure the core is powered up for the destroy process */ - /* A suspend won't happen here, because we're in a syscall from a userspace - * thread. */ - kbase_pm_context_active(kbdev); - - kbase_jd_zap_context(kctx); - kbase_event_cleanup(kctx); - - kbase_gpu_vm_lock(kctx); - - /* MMU is disabled as part of scheduling out the context */ - kbase_mmu_free_pgd(kctx); - - /* drop the aliasing sink page now that it can't be mapped anymore */ - kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false); - - /* free pending region setups */ - pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK; - while (pending_regions_to_clean) { - unsigned int cookie = __ffs(pending_regions_to_clean); - - BUG_ON(!kctx->pending_regions[cookie]); - - kbase_reg_pending_dtor(kctx->pending_regions[cookie]); - - kctx->pending_regions[cookie] = NULL; - pending_regions_to_clean &= ~(1UL << cookie); - } - - kbase_region_tracker_term(kctx); - kbase_gpu_vm_unlock(kctx); - - /* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */ - kbasep_js_kctx_term(kctx); - - kbase_jd_exit(kctx); - - kbase_pm_context_idle(kbdev); - - kbase_mmu_term(kctx); - - pages = atomic_read(&kctx->used_pages); - if (pages != 0) - dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages); - - kbase_mem_pool_term(&kctx->mem_pool); - WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0); - - vfree(kctx); -} -KBASE_EXPORT_SYMBOL(kbase_destroy_context); - -/** - * kbase_context_set_create_flags - Set 
creation flags on a context - * @kctx: Kbase context - * @flags: Flags to set - * - * Return: 0 on success - */ -int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags) -{ - int err = 0; - struct kbasep_js_kctx_info *js_kctx_info; - unsigned long irq_flags; - - KBASE_DEBUG_ASSERT(NULL != kctx); - - js_kctx_info = &kctx->jctx.sched_info; - - /* Validate flags */ - if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) { - err = -EINVAL; - goto out; - } - - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - spin_lock_irqsave(&kctx->kbdev->js_data.runpool_irq.lock, irq_flags); - - /* Translate the flags */ - if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0) - js_kctx_info->ctx.flags &= ~((u32) KBASE_CTX_FLAG_SUBMIT_DISABLED); - - if ((flags & BASE_CONTEXT_HINT_ONLY_COMPUTE) != 0) - js_kctx_info->ctx.flags |= (u32) KBASE_CTX_FLAG_HINT_ONLY_COMPUTE; - - /* Latch the initial attributes into the Job Scheduler */ - kbasep_js_ctx_attr_set_initial_attrs(kctx->kbdev, kctx); - - spin_unlock_irqrestore(&kctx->kbdev->js_data.runpool_irq.lock, - irq_flags); - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - out: - return err; -} -KBASE_EXPORT_SYMBOL(kbase_context_set_create_flags); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_core_linux.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_core_linux.c deleted file mode 100755 index adf484d6ba375..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_core_linux.c +++ /dev/null @@ -1,4137 +0,0 @@ - -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef CONFIG_MALI_DEVFREQ -#include -#endif /* CONFIG_MALI_DEVFREQ */ -#ifdef CONFIG_MALI_NO_MALI -#include "mali_kbase_model_linux.h" -#endif /* CONFIG_MALI_NO_MALI */ -#include "mali_kbase_mem_profile_debugfs_buf_size.h" -#include "mali_kbase_debug_mem_view.h" -#include "mali_kbase_mem.h" -#include "mali_kbase_mem_pool_debugfs.h" -#include -#include -#include - -#ifdef CONFIG_KDS -#include -#include -#include -#endif /* CONFIG_KDS */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* is_compat_task */ -#include -#ifdef CONFIG_MALI_PLATFORM_DEVICETREE -#include -#endif /* CONFIG_MALI_PLATFORM_DEVICETREE */ -#include -#include -#ifdef CONFIG_MALI_PLATFORM_FAKE -#include -#endif /*CONFIG_MALI_PLATFORM_FAKE */ -#ifdef CONFIG_SYNC -#include -#endif /* CONFIG_SYNC */ -#ifdef CONFIG_PM_DEVFREQ -#include -#endif /* CONFIG_PM_DEVFREQ */ -#include -#include - -#include - -#ifdef CONFIG_MACH_MANTA -#include -#endif - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) -#include -#else -#include -#endif - -#if defined(CONFIG_MALI_MIPE_ENABLED) -#include -#endif - -/* GPU IRQ Tags */ -#define JOB_IRQ_TAG 0 -#define MMU_IRQ_TAG 1 -#define GPU_IRQ_TAG 2 - -#if MALI_UNIT_TEST -static struct kbase_exported_test_data shared_kernel_test_data; -EXPORT_SYMBOL(shared_kernel_test_data); -#endif /* MALI_UNIT_TEST */ - -#define KBASE_DRV_NAME "mali" - -static const char kbase_drv_name[] = KBASE_DRV_NAME; - -static int kbase_dev_nr; -#ifdef CONFIG_MALI_MIDGARD_DVFS -extern int mali_pm_statue; -#endif - -static DEFINE_MUTEX(kbase_dev_list_lock); -static LIST_HEAD(kbase_dev_list); - 
-#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)" -static inline void __compile_time_asserts(void) -{ - CSTD_COMPILE_TIME_ASSERT(sizeof(KERNEL_SIDE_DDK_VERSION_STRING) <= KBASE_GET_VERSION_BUFFER_SIZE); -} - -#ifdef CONFIG_KDS - -struct kbasep_kds_resource_set_file_data { - struct kds_resource_set *lock; -}; - -static int kds_resource_release(struct inode *inode, struct file *file); - -static const struct file_operations kds_resource_fops = { - .release = kds_resource_release -}; - -struct kbase_kds_resource_list_data { - struct kds_resource **kds_resources; - unsigned long *kds_access_bitmap; - int num_elems; -}; - -static int kds_resource_release(struct inode *inode, struct file *file) -{ - struct kbasep_kds_resource_set_file_data *data; - - data = (struct kbasep_kds_resource_set_file_data *)file->private_data; - if (NULL != data) { - if (NULL != data->lock) - kds_resource_set_release(&data->lock); - - kfree(data); - } - return 0; -} - -static int kbasep_kds_allocate_resource_list_data(struct kbase_context *kctx, struct base_external_resource *ext_res, int num_elems, struct kbase_kds_resource_list_data *resources_list) -{ - struct base_external_resource *res = ext_res; - int res_id; - - /* assume we have to wait for all */ - - KBASE_DEBUG_ASSERT(0 != num_elems); - resources_list->kds_resources = kmalloc_array(num_elems, - sizeof(struct kds_resource *), GFP_KERNEL); - - if (NULL == resources_list->kds_resources) - return -ENOMEM; - - KBASE_DEBUG_ASSERT(0 != num_elems); - resources_list->kds_access_bitmap = kzalloc( - sizeof(unsigned long) * - ((num_elems + BITS_PER_LONG - 1) / BITS_PER_LONG), - GFP_KERNEL); - - if (NULL == resources_list->kds_access_bitmap) { - kfree(resources_list->kds_access_bitmap); - return -ENOMEM; - } - - kbase_gpu_vm_lock(kctx); - for (res_id = 0; res_id < num_elems; res_id++, res++) { - int exclusive; - struct kbase_va_region *reg; - struct kds_resource *kds_res = NULL; - - exclusive = res->ext_resource & 
BASE_EXT_RES_ACCESS_EXCLUSIVE; - reg = kbase_region_tracker_find_region_enclosing_address(kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE); - - /* did we find a matching region object? */ - if (NULL == reg || (reg->flags & KBASE_REG_FREE)) - break; - - /* no need to check reg->alloc as only regions with an alloc has - * a size, and kbase_region_tracker_find_region_enclosing_address - * only returns regions with size > 0 */ - switch (reg->gpu_alloc->type) { -#if defined(CONFIG_UMP) && defined(CONFIG_KDS) - case KBASE_MEM_TYPE_IMPORTED_UMP: - kds_res = ump_dd_kds_resource_get(reg->gpu_alloc->imported.ump_handle); - break; -#endif /* defined(CONFIG_UMP) && defined(CONFIG_KDS) */ - default: - break; - } - - /* no kds resource for the region ? */ - if (!kds_res) - break; - - resources_list->kds_resources[res_id] = kds_res; - - if (exclusive) - set_bit(res_id, resources_list->kds_access_bitmap); - } - kbase_gpu_vm_unlock(kctx); - - /* did the loop run to completion? */ - if (res_id == num_elems) - return 0; - - /* Clean up as the resource list is not valid. 
*/ - kfree(resources_list->kds_resources); - kfree(resources_list->kds_access_bitmap); - - return -EINVAL; -} - -static bool kbasep_validate_kbase_pointer( - struct kbase_context *kctx, union kbase_pointer *p) -{ - if (kctx->is_compat) { - if (p->compat_value == 0) - return false; - } else { - if (NULL == p->value) - return false; - } - return true; -} - -static int kbase_external_buffer_lock(struct kbase_context *kctx, - struct kbase_uk_ext_buff_kds_data *args, u32 args_size) -{ - struct base_external_resource *ext_res_copy; - size_t ext_resource_size; - int ret = -EINVAL; - int fd = -EBADF; - struct base_external_resource __user *ext_res_user; - int __user *file_desc_usr; - struct kbasep_kds_resource_set_file_data *fdata; - struct kbase_kds_resource_list_data resource_list_data; - - if (args_size != sizeof(struct kbase_uk_ext_buff_kds_data)) - return -EINVAL; - - /* Check user space has provided valid data */ - if (!kbasep_validate_kbase_pointer(kctx, &args->external_resource) || - !kbasep_validate_kbase_pointer(kctx, &args->file_descriptor) || - (0 == args->num_res) || - (args->num_res > KBASE_MAXIMUM_EXT_RESOURCES)) - return -EINVAL; - - ext_resource_size = sizeof(struct base_external_resource) * args->num_res; - - KBASE_DEBUG_ASSERT(0 != ext_resource_size); - ext_res_copy = kmalloc(ext_resource_size, GFP_KERNEL); - - if (!ext_res_copy) - return -EINVAL; -#ifdef CONFIG_COMPAT - if (kctx->is_compat) { - ext_res_user = compat_ptr(args->external_resource.compat_value); - file_desc_usr = compat_ptr(args->file_descriptor.compat_value); - } else { -#endif /* CONFIG_COMPAT */ - ext_res_user = args->external_resource.value; - file_desc_usr = args->file_descriptor.value; -#ifdef CONFIG_COMPAT - } -#endif /* CONFIG_COMPAT */ - - /* Copy the external resources to lock from user space */ - if (copy_from_user(ext_res_copy, ext_res_user, ext_resource_size)) - goto out; - - /* Allocate data to be stored in the file */ - fdata = kmalloc(sizeof(*fdata), GFP_KERNEL); - - if 
(!fdata) { - ret = -ENOMEM; - goto out; - } - - /* Parse given elements and create resource and access lists */ - ret = kbasep_kds_allocate_resource_list_data(kctx, - ext_res_copy, args->num_res, &resource_list_data); - if (!ret) { - long err; - - fdata->lock = NULL; - - fd = anon_inode_getfd("kds_ext", &kds_resource_fops, fdata, 0); - - err = copy_to_user(file_desc_usr, &fd, sizeof(fd)); - - /* If the file descriptor was valid and we successfully copied - * it to user space, then we can try and lock the requested - * kds resources. - */ - if ((fd >= 0) && (0 == err)) { - struct kds_resource_set *lock; - - lock = kds_waitall(args->num_res, - resource_list_data.kds_access_bitmap, - resource_list_data.kds_resources, - KDS_WAIT_BLOCKING); - - if (IS_ERR_OR_NULL(lock)) { - ret = -EINVAL; - } else { - ret = 0; - fdata->lock = lock; - } - } else { - ret = -EINVAL; - } - - kfree(resource_list_data.kds_resources); - kfree(resource_list_data.kds_access_bitmap); - } - - if (ret) { - /* If the file was opened successfully then close it which will - * clean up the file data, otherwise we clean up the file data - * ourself. - */ - if (fd >= 0) - sys_close(fd); - else - kfree(fdata); - } -out: - kfree(ext_res_copy); - - return ret; -} -#endif /* CONFIG_KDS */ - -#ifdef CONFIG_MALI_MIPE_ENABLED -static void kbase_create_timeline_objects(struct kbase_context *kctx) -{ - struct kbase_device *kbdev = kctx->kbdev; - unsigned int lpu_id; - unsigned int as_nr; - struct kbasep_kctx_list_element *element; - - /* Create LPU objects. */ - for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) { - u32 *lpu = - &kbdev->gpu_props.props.raw_props.js_features[lpu_id]; - kbase_tlstream_tl_summary_new_lpu(lpu, lpu_id, *lpu); - } - - /* Create Address Space objects. */ - for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++) - kbase_tlstream_tl_summary_new_as(&kbdev->as[as_nr], as_nr); - - /* Create GPU object and make it retain all LPUs and address spaces. 
*/ - kbase_tlstream_tl_summary_new_gpu( - kbdev, - kbdev->gpu_props.props.raw_props.gpu_id, - kbdev->gpu_props.num_cores); - - for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) { - void *lpu = - &kbdev->gpu_props.props.raw_props.js_features[lpu_id]; - kbase_tlstream_tl_summary_lifelink_lpu_gpu(lpu, kbdev); - } - for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++) - kbase_tlstream_tl_summary_lifelink_as_gpu( - &kbdev->as[as_nr], - kbdev); - - /* Create object for each known context. */ - mutex_lock(&kbdev->kctx_list_lock); - list_for_each_entry(element, &kbdev->kctx_list, link) { - kbase_tlstream_tl_summary_new_ctx( - element->kctx, - (u32)(element->kctx->id)); - } - /* Before releasing the lock, reset body stream buffers. - * This will prevent context creation message to be directed to both - * summary and body stream. */ - kbase_tlstream_reset_body_streams(); - mutex_unlock(&kbdev->kctx_list_lock); - /* Static object are placed into summary packet that needs to be - * transmitted first. Flush all streams to make it available to - * user space. 
*/ - kbase_tlstream_flush_streams(); -} -#endif - -static void kbase_api_handshake(struct uku_version_check_args *version) -{ - switch (version->major) { -#ifdef BASE_LEGACY_UK6_SUPPORT - case 6: - /* We are backwards compatible with version 6, - * so pretend to be the old version */ - version->major = 6; - version->minor = 1; - break; -#endif /* BASE_LEGACY_UK6_SUPPORT */ -#ifdef BASE_LEGACY_UK7_SUPPORT - case 7: - /* We are backwards compatible with version 7, - * so pretend to be the old version */ - version->major = 7; - version->minor = 1; - break; -#endif /* BASE_LEGACY_UK7_SUPPORT */ -#ifdef BASE_LEGACY_UK8_SUPPORT - case 8: - /* We are backwards compatible with version 8, - * so pretend to be the old version */ - version->major = 8; - version->minor = 4; - break; -#endif /* BASE_LEGACY_UK8_SUPPORT */ -#ifdef BASE_LEGACY_UK9_SUPPORT - case 9: - /* We are backwards compatible with version 9, - * so pretend to be the old version */ - version->major = 9; - version->minor = 0; - break; -#endif /* BASE_LEGACY_UK8_SUPPORT */ - case BASE_UK_VERSION_MAJOR: - /* set minor to be the lowest common */ - version->minor = min_t(int, BASE_UK_VERSION_MINOR, - (int)version->minor); - break; - default: - /* We return our actual version regardless if it - * matches the version returned by userspace - - * userspace can bail if it can't handle this - * version */ - version->major = BASE_UK_VERSION_MAJOR; - version->minor = BASE_UK_VERSION_MINOR; - break; - } -} - -/** - * enum mali_error - Mali error codes shared with userspace - * - * This is subset of those common Mali errors that can be returned to userspace. - * Values of matching user and kernel space enumerators MUST be the same. - * MALI_ERROR_NONE is guaranteed to be 0. 
- */ -enum mali_error { - MALI_ERROR_NONE = 0, - MALI_ERROR_OUT_OF_GPU_MEMORY, - MALI_ERROR_OUT_OF_MEMORY, - MALI_ERROR_FUNCTION_FAILED, -}; - -#ifdef CONFIG_MALI_DEBUG -#define INACTIVE_WAIT_MS (5000) - -void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive) -{ - kbdev->driver_inactive = inactive; - wake_up(&kbdev->driver_inactive_wait); - - /* Wait for any running IOCTLs to complete */ - if (inactive) - msleep(INACTIVE_WAIT_MS); -} -KBASE_EXPORT_TEST_API(kbase_set_driver_inactive); -#endif /* CONFIG_MALI_DEBUG */ - -static int kbase_dispatch(struct kbase_context *kctx, void * const args, u32 args_size) -{ - struct kbase_device *kbdev; - union uk_header *ukh = args; - u32 id; - - KBASE_DEBUG_ASSERT(ukh != NULL); - - kbdev = kctx->kbdev; - id = ukh->id; - ukh->ret = MALI_ERROR_NONE; /* Be optimistic */ - -#ifdef CONFIG_MALI_DEBUG - wait_event(kbdev->driver_inactive_wait, - kbdev->driver_inactive == false); -#endif /* CONFIG_MALI_DEBUG */ - - if (UKP_FUNC_ID_CHECK_VERSION == id) { - struct uku_version_check_args *version_check; - - if (args_size != sizeof(struct uku_version_check_args)) { - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - return 0; - } - version_check = (struct uku_version_check_args *)args; - kbase_api_handshake(version_check); - /* save the proposed version number for later use */ - kctx->api_version = KBASE_API_VERSION(version_check->major, - version_check->minor); - ukh->ret = MALI_ERROR_NONE; - return 0; - } - - /* block calls until version handshake */ - if (kctx->api_version == 0) - return -EINVAL; - - if (!atomic_read(&kctx->setup_complete)) { - struct kbase_uk_set_flags *kbase_set_flags; - - /* setup pending, try to signal that we'll do the setup, - * if setup was already in progress, err this call - */ - if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0) - return -EINVAL; - - /* if unexpected call, will stay stuck in setup mode - * (is it the only call we accept?) 
- */ - if (id != KBASE_FUNC_SET_FLAGS) - return -EINVAL; - - kbase_set_flags = (struct kbase_uk_set_flags *)args; - - /* if not matching the expected call, stay in setup mode */ - if (sizeof(*kbase_set_flags) != args_size) - goto bad_size; - - /* if bad flags, will stay stuck in setup mode */ - if (kbase_context_set_create_flags(kctx, - kbase_set_flags->create_flags) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - - atomic_set(&kctx->setup_complete, 1); - return 0; - } - - /* setup complete, perform normal operation */ - switch (id) { - case KBASE_FUNC_MEM_ALLOC: - { - struct kbase_uk_mem_alloc *mem = args; - struct kbase_va_region *reg; - - if (sizeof(*mem) != args_size) - goto bad_size; - - reg = kbase_mem_alloc(kctx, mem->va_pages, - mem->commit_pages, mem->extent, - &mem->flags, &mem->gpu_va, - &mem->va_alignment); - if (!reg) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } - case KBASE_FUNC_MEM_IMPORT: - { - struct kbase_uk_mem_import *mem_import = args; - int __user *phandle; - int handle; - - if (sizeof(*mem_import) != args_size) - goto bad_size; -#ifdef CONFIG_COMPAT - if (kctx->is_compat) - phandle = compat_ptr(mem_import->phandle.compat_value); - else -#endif - phandle = mem_import->phandle.value; - - switch (mem_import->type) { - case BASE_MEM_IMPORT_TYPE_UMP: - get_user(handle, phandle); - break; - case BASE_MEM_IMPORT_TYPE_UMM: - get_user(handle, phandle); - break; - default: - mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID; - break; - } - - if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID || - kbase_mem_import(kctx, mem_import->type, - handle, &mem_import->gpu_va, - &mem_import->va_pages, - &mem_import->flags)) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } - case KBASE_FUNC_MEM_ALIAS: { - struct kbase_uk_mem_alias *alias = args; - struct base_mem_aliasing_info __user *user_ai; - struct base_mem_aliasing_info *ai; - - if (sizeof(*alias) != args_size) - goto bad_size; - - if (alias->nents > 2048) { - ukh->ret = 
MALI_ERROR_FUNCTION_FAILED; - break; - } - if (!alias->nents) { - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } - -#ifdef CONFIG_COMPAT - if (kctx->is_compat) - user_ai = compat_ptr(alias->ai.compat_value); - else -#endif - user_ai = alias->ai.value; - - ai = vmalloc(sizeof(*ai) * alias->nents); - - if (!ai) { - ukh->ret = MALI_ERROR_OUT_OF_MEMORY; - break; - } - - if (copy_from_user(ai, user_ai, - sizeof(*ai) * alias->nents)) { - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - goto copy_failed; - } - - alias->gpu_va = kbase_mem_alias(kctx, &alias->flags, - alias->stride, - alias->nents, ai, - &alias->va_pages); - if (!alias->gpu_va) { - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - goto no_alias; - } -no_alias: -copy_failed: - vfree(ai); - break; - } - case KBASE_FUNC_MEM_COMMIT: - { - struct kbase_uk_mem_commit *commit = args; - - if (sizeof(*commit) != args_size) - goto bad_size; - - if (commit->gpu_addr & ~PAGE_MASK) { - dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_COMMIT: commit->gpu_addr: passed parameter is invalid"); - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } - - if (kbase_mem_commit(kctx, commit->gpu_addr, - commit->pages, - (base_backing_threshold_status *) - &commit->result_subcode) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - - break; - } - - case KBASE_FUNC_MEM_QUERY: - { - struct kbase_uk_mem_query *query = args; - - if (sizeof(*query) != args_size) - goto bad_size; - - if (query->gpu_addr & ~PAGE_MASK) { - dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->gpu_addr: passed parameter is invalid"); - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } - if (query->query != KBASE_MEM_QUERY_COMMIT_SIZE && - query->query != KBASE_MEM_QUERY_VA_SIZE && - query->query != KBASE_MEM_QUERY_FLAGS) { - dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_QUERY: query->query = %lld unknown", (unsigned long long)query->query); - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } - - if (kbase_mem_query(kctx, 
query->gpu_addr, - query->query, &query->value) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - else - ukh->ret = MALI_ERROR_NONE; - break; - } - break; - - case KBASE_FUNC_MEM_FLAGS_CHANGE: - { - struct kbase_uk_mem_flags_change *fc = args; - - if (sizeof(*fc) != args_size) - goto bad_size; - - if ((fc->gpu_va & ~PAGE_MASK) && (fc->gpu_va >= PAGE_SIZE)) { - dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FLAGS_CHANGE: mem->gpu_va: passed parameter is invalid"); - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } - - if (kbase_mem_flags_change(kctx, fc->gpu_va, - fc->flags, fc->mask) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - - break; - } - case KBASE_FUNC_MEM_FREE: - { - struct kbase_uk_mem_free *mem = args; - - if (sizeof(*mem) != args_size) - goto bad_size; - - if ((mem->gpu_addr & ~PAGE_MASK) && (mem->gpu_addr >= PAGE_SIZE)) { - dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_MEM_FREE: mem->gpu_addr: passed parameter is invalid"); - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } - - if (kbase_mem_free(kctx, mem->gpu_addr) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } - - case KBASE_FUNC_JOB_SUBMIT: - { - struct kbase_uk_job_submit *job = args; - - if (sizeof(*job) != args_size) - goto bad_size; - -#ifdef BASE_LEGACY_UK6_SUPPORT - if (kbase_jd_submit(kctx, job, 0) != 0) -#else - if (kbase_jd_submit(kctx, job) != 0) -#endif /* BASE_LEGACY_UK6_SUPPORT */ - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } - -#ifdef BASE_LEGACY_UK6_SUPPORT - case KBASE_FUNC_JOB_SUBMIT_UK6: - { - struct kbase_uk_job_submit *job = args; - - if (sizeof(*job) != args_size) - goto bad_size; - - if (kbase_jd_submit(kctx, job, 1) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } -#endif - - case KBASE_FUNC_SYNC: - { - struct kbase_uk_sync_now *sn = args; - - if (sizeof(*sn) != args_size) - goto bad_size; - - if (sn->sset.basep_sset.mem_handle & ~PAGE_MASK) { - dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_SYNC: 
sn->sset.basep_sset.mem_handle: passed parameter is invalid"); - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } - -#ifndef CONFIG_MALI_COH_USER - if (kbase_sync_now(kctx, &sn->sset) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; -#endif - break; - } - - case KBASE_FUNC_DISJOINT_QUERY: - { - struct kbase_uk_disjoint_query *dquery = args; - - if (sizeof(*dquery) != args_size) - goto bad_size; - - /* Get the disjointness counter value. */ - dquery->counter = kbase_disjoint_event_get(kctx->kbdev); - break; - } - - case KBASE_FUNC_POST_TERM: - { - kbase_event_close(kctx); - break; - } - - case KBASE_FUNC_HWCNT_SETUP: - { - struct kbase_uk_hwcnt_setup *setup = args; - bool access_allowed; - - if (sizeof(*setup) != args_size) - goto bad_size; - - access_allowed = kbase_security_has_capability( - kctx, - KBASE_SEC_INSTR_HW_COUNTERS_COLLECT, - KBASE_SEC_FLAG_NOAUDIT); - if (!access_allowed) - goto out_bad; - - mutex_lock(&kctx->vinstr_cli_lock); - if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx, - &kctx->vinstr_cli, setup) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - mutex_unlock(&kctx->vinstr_cli_lock); - break; - } - - case KBASE_FUNC_HWCNT_DUMP: - { - /* args ignored */ - mutex_lock(&kctx->vinstr_cli_lock); - if (kbase_vinstr_hwc_dump(kctx->vinstr_cli, - BASE_HWCNT_READER_EVENT_MANUAL) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - mutex_unlock(&kctx->vinstr_cli_lock); - break; - } - - case KBASE_FUNC_HWCNT_CLEAR: - { - /* args ignored */ - mutex_lock(&kctx->vinstr_cli_lock); - if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - mutex_unlock(&kctx->vinstr_cli_lock); - break; - } - - case KBASE_FUNC_HWCNT_READER_SETUP: - { - struct kbase_uk_hwcnt_reader_setup *setup = args; - bool access_allowed; - - if (sizeof(*setup) != args_size) - goto bad_size; - - access_allowed = kbase_security_has_capability( - kctx, - KBASE_SEC_INSTR_HW_COUNTERS_COLLECT, - KBASE_SEC_FLAG_NOAUDIT); - if (!access_allowed) - goto out_bad; - - 
mutex_lock(&kctx->vinstr_cli_lock); - if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx, - setup) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - mutex_unlock(&kctx->vinstr_cli_lock); - break; - } - - case KBASE_FUNC_GPU_PROPS_REG_DUMP: - { - struct kbase_uk_gpuprops *setup = args; - - if (sizeof(*setup) != args_size) - goto bad_size; - - if (kbase_gpuprops_uk_get_props(kctx, setup) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } - case KBASE_FUNC_FIND_CPU_OFFSET: - { - struct kbase_uk_find_cpu_offset *find = args; - - if (sizeof(*find) != args_size) - goto bad_size; - - if (find->gpu_addr & ~PAGE_MASK) { - dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid"); - goto out_bad; - } - - if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) { - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - } else { - int err; - - err = kbasep_find_enclosing_cpu_mapping_offset( - kctx, - find->gpu_addr, - (uintptr_t) find->cpu_addr, - (size_t) find->size, - &find->offset); - - if (err) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - } - break; - } - case KBASE_FUNC_GET_VERSION: - { - struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args; - - if (sizeof(*get_version) != args_size) - goto bad_size; - - /* version buffer size check is made in compile time assert */ - memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING)); - get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING); - break; - } - - case KBASE_FUNC_STREAM_CREATE: - { -#ifdef CONFIG_SYNC - struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args; - - if (sizeof(*screate) != args_size) - goto bad_size; - - if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) { - /* not NULL terminated */ - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } - - if (kbase_stream_create(screate->name, &screate->fd) != 0) - 
ukh->ret = MALI_ERROR_FUNCTION_FAILED; - else - ukh->ret = MALI_ERROR_NONE; -#else /* CONFIG_SYNC */ - ukh->ret = MALI_ERROR_FUNCTION_FAILED; -#endif /* CONFIG_SYNC */ - break; - } - case KBASE_FUNC_FENCE_VALIDATE: - { -#ifdef CONFIG_SYNC - struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args; - - if (sizeof(*fence_validate) != args_size) - goto bad_size; - - if (kbase_fence_validate(fence_validate->fd) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - else - ukh->ret = MALI_ERROR_NONE; -#endif /* CONFIG_SYNC */ - break; - } - - case KBASE_FUNC_EXT_BUFFER_LOCK: - { -#ifdef CONFIG_KDS - switch (kbase_external_buffer_lock(kctx, - (struct kbase_uk_ext_buff_kds_data *)args, - args_size)) { - case 0: - ukh->ret = MALI_ERROR_NONE; - break; - case -ENOMEM: - ukh->ret = MALI_ERROR_OUT_OF_MEMORY; - break; - default: - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - } -#endif /* CONFIG_KDS */ - break; - } - - case KBASE_FUNC_SET_TEST_DATA: - { -#if MALI_UNIT_TEST - struct kbase_uk_set_test_data *set_data = args; - - shared_kernel_test_data = set_data->test_data; - shared_kernel_test_data.kctx.value = (void __user *)kctx; - shared_kernel_test_data.mm.value = (void __user *)current->mm; - ukh->ret = MALI_ERROR_NONE; -#endif /* MALI_UNIT_TEST */ - break; - } - - case KBASE_FUNC_INJECT_ERROR: - { -#ifdef CONFIG_MALI_ERROR_INJECT - unsigned long flags; - struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params; - - /*mutex lock */ - spin_lock_irqsave(&kbdev->reg_op_lock, flags); - if (job_atom_inject_error(¶ms) != 0) - ukh->ret = MALI_ERROR_OUT_OF_MEMORY; - else - ukh->ret = MALI_ERROR_NONE; - spin_unlock_irqrestore(&kbdev->reg_op_lock, flags); - /*mutex unlock */ -#endif /* CONFIG_MALI_ERROR_INJECT */ - break; - } - - case KBASE_FUNC_MODEL_CONTROL: - { -#ifdef CONFIG_MALI_NO_MALI - unsigned long flags; - struct kbase_model_control_params params = - ((struct kbase_uk_model_control_params *)args)->params; - - /*mutex lock */ - 
spin_lock_irqsave(&kbdev->reg_op_lock, flags); - if (gpu_model_control(kbdev->model, ¶ms) != 0) - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - else - ukh->ret = MALI_ERROR_NONE; - spin_unlock_irqrestore(&kbdev->reg_op_lock, flags); - /*mutex unlock */ -#endif /* CONFIG_MALI_NO_MALI */ - break; - } - -#ifdef BASE_LEGACY_UK8_SUPPORT - case KBASE_FUNC_KEEP_GPU_POWERED: - { - dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_KEEP_GPU_POWERED: function is deprecated and disabled\n"); - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - break; - } -#endif /* BASE_LEGACY_UK8_SUPPORT */ - - case KBASE_FUNC_GET_PROFILING_CONTROLS: - { - struct kbase_uk_profiling_controls *controls = - (struct kbase_uk_profiling_controls *)args; - u32 i; - - if (sizeof(*controls) != args_size) - goto bad_size; - - for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++) - controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i); - - break; - } - - /* used only for testing purposes; these controls are to be set by gator through gator API */ - case KBASE_FUNC_SET_PROFILING_CONTROLS: - { - struct kbase_uk_profiling_controls *controls = - (struct kbase_uk_profiling_controls *)args; - u32 i; - - if (sizeof(*controls) != args_size) - goto bad_size; - - for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++) - _mali_profiling_control(i, controls->profiling_controls[i]); - - break; - } - - case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD: - { - struct kbase_uk_debugfs_mem_profile_add *add_data = - (struct kbase_uk_debugfs_mem_profile_add *)args; - char *buf; - char __user *user_buf; - - if (sizeof(*add_data) != args_size) - goto bad_size; - - if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) { - dev_err(kbdev->dev, "buffer too big"); - goto out_bad; - } - -#ifdef CONFIG_COMPAT - if (kctx->is_compat) - user_buf = compat_ptr(add_data->buf.compat_value); - else -#endif - user_buf = add_data->buf.value; - - buf = kmalloc(add_data->len, GFP_KERNEL); - if (!buf) - goto out_bad; - - if (0 != 
copy_from_user(buf, user_buf, add_data->len)) { - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - kfree(buf); - goto out_bad; - } - kbasep_mem_profile_debugfs_insert(kctx, buf, - add_data->len); - - break; - } -#ifdef CONFIG_MALI_MIPE_ENABLED - case KBASE_FUNC_TLSTREAM_ACQUIRE: - { - struct kbase_uk_tlstream_acquire *tlstream_acquire = - args; - - if (sizeof(*tlstream_acquire) != args_size) - goto bad_size; - - if (0 != kbase_tlstream_acquire( - kctx, - &tlstream_acquire->fd)) { - ukh->ret = MALI_ERROR_FUNCTION_FAILED; - } else if (0 <= tlstream_acquire->fd) { - /* Summary stream was cleared during acquire. - * Create static timeline objects that will be - * read by client. */ - kbase_create_timeline_objects(kctx); - } - break; - } - case KBASE_FUNC_TLSTREAM_FLUSH: - { - struct kbase_uk_tlstream_flush *tlstream_flush = - args; - - if (sizeof(*tlstream_flush) != args_size) - goto bad_size; - - kbase_tlstream_flush_streams(); - break; - } -#if MALI_UNIT_TEST - case KBASE_FUNC_TLSTREAM_TEST: - { - struct kbase_uk_tlstream_test *tlstream_test = args; - - if (sizeof(*tlstream_test) != args_size) - goto bad_size; - - kbase_tlstream_test( - tlstream_test->tpw_count, - tlstream_test->msg_delay, - tlstream_test->msg_count, - tlstream_test->aux_msg); - break; - } - case KBASE_FUNC_TLSTREAM_STATS: - { - struct kbase_uk_tlstream_stats *tlstream_stats = args; - - if (sizeof(*tlstream_stats) != args_size) - goto bad_size; - - kbase_tlstream_stats( - &tlstream_stats->bytes_collected, - &tlstream_stats->bytes_generated); - break; - } -#endif /* MALI_UNIT_TEST */ -#endif /* CONFIG_MALI_MIPE_ENABLED */ - - case KBASE_FUNC_GET_CONTEXT_ID: - { - struct kbase_uk_context_id *info = args; - - info->id = kctx->id; - break; - } - - default: - dev_err(kbdev->dev, "unknown ioctl %u", id); - goto out_bad; - } - - return 0; - - bad_size: - dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id); - out_bad: - return -EINVAL; -} - -static struct kbase_device *to_kbase_device(struct 
device *dev) -{ - return dev_get_drvdata(dev); -} - -/* - * API to acquire device list mutex and - * return pointer to the device list head - */ -const struct list_head *kbase_dev_list_get(void) -{ - mutex_lock(&kbase_dev_list_lock); - return &kbase_dev_list; -} -KBASE_EXPORT_TEST_API(kbase_dev_list_get); - -/* API to release the device list mutex */ -void kbase_dev_list_put(const struct list_head *dev_list) -{ - mutex_unlock(&kbase_dev_list_lock); -} -KBASE_EXPORT_TEST_API(kbase_dev_list_put); - -/* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */ -struct kbase_device *kbase_find_device(int minor) -{ - struct kbase_device *kbdev = NULL; - struct list_head *entry; - const struct list_head *dev_list = kbase_dev_list_get(); - - list_for_each(entry, dev_list) { - struct kbase_device *tmp; - - tmp = list_entry(entry, struct kbase_device, entry); - if (tmp->mdev.minor == minor || minor == -1) { - kbdev = tmp; - get_device(kbdev->dev); - break; - } - } - kbase_dev_list_put(dev_list); - - return kbdev; -} -EXPORT_SYMBOL(kbase_find_device); - -void kbase_release_device(struct kbase_device *kbdev) -{ - put_device(kbdev->dev); -} -EXPORT_SYMBOL(kbase_release_device); - -static int kbase_open(struct inode *inode, struct file *filp) -{ - struct kbase_device *kbdev = NULL; - struct kbase_context *kctx; - int ret = 0; -#ifdef CONFIG_DEBUG_FS - char kctx_name[64]; -#endif - - kbdev = kbase_find_device(iminor(inode)); - - if (!kbdev) - return -ENODEV; - - kctx = kbase_create_context(kbdev, is_compat_task()); - if (!kctx) { - ret = -ENOMEM; - goto out; - } - - init_waitqueue_head(&kctx->event_queue); - filp->private_data = kctx; - - kctx->infinite_cache_active = kbdev->infinite_cache_active_default; - -#ifdef CONFIG_DEBUG_FS - snprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id); - - kctx->kctx_dentry = debugfs_create_dir(kctx_name, - kbdev->debugfs_ctx_directory); - - if (IS_ERR_OR_NULL(kctx->kctx_dentry)) { - ret = 
-ENOMEM; - goto out; - } - -#ifdef CONFIG_MALI_COH_USER - /* if cache is completely coherent at hardware level, then remove the - * infinite cache control support from debugfs. - */ -#else - debugfs_create_bool("infinite_cache", 0644, kctx->kctx_dentry, - &kctx->infinite_cache_active); -#endif /* CONFIG_MALI_COH_USER */ - kbasep_mem_profile_debugfs_add(kctx); - - kbasep_jd_debugfs_ctx_add(kctx); - kbase_debug_mem_view_init(filp); - - kbase_debug_job_fault_context_init(kctx); - - kbase_mem_pool_debugfs_add(kctx->kctx_dentry, &kctx->mem_pool); - -#endif /* CONFIG_DEBUGFS */ - - dev_dbg(kbdev->dev, "created base context\n"); - - { - struct kbasep_kctx_list_element *element; - - element = kzalloc(sizeof(*element), GFP_KERNEL); - if (element) { - mutex_lock(&kbdev->kctx_list_lock); - element->kctx = kctx; - list_add(&element->link, &kbdev->kctx_list); -#ifdef CONFIG_MALI_MIPE_ENABLED - kbase_tlstream_tl_new_ctx( - element->kctx, - (u32)(element->kctx->id)); -#endif - mutex_unlock(&kbdev->kctx_list_lock); - } else { - /* we don't treat this as a fail - just warn about it */ - dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n"); - } - } - return 0; - - out: - kbase_release_device(kbdev); - return ret; -} - -static int kbase_release(struct inode *inode, struct file *filp) -{ - struct kbase_context *kctx = filp->private_data; - struct kbase_device *kbdev = kctx->kbdev; - struct kbasep_kctx_list_element *element, *tmp; - bool found_element = false; - -#ifdef CONFIG_MALI_MIPE_ENABLED - kbase_tlstream_tl_del_ctx(kctx); -#endif - -#ifdef CONFIG_DEBUG_FS - debugfs_remove_recursive(kctx->kctx_dentry); - kbasep_mem_profile_debugfs_remove(kctx); - kbase_debug_job_fault_context_exit(kctx); -#endif - - mutex_lock(&kbdev->kctx_list_lock); - list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) { - if (element->kctx == kctx) { - list_del(&element->link); - kfree(element); - found_element = true; - } - } - mutex_unlock(&kbdev->kctx_list_lock); - if (!found_element) - 
dev_warn(kbdev->dev, "kctx not in kctx_list\n"); - - filp->private_data = NULL; - - mutex_lock(&kctx->vinstr_cli_lock); - /* If this client was performing hwcnt dumping and did not explicitly - * detach itself, remove it from the vinstr core now */ - if (kctx->vinstr_cli) { - struct kbase_uk_hwcnt_setup setup; - - setup.dump_buffer = 0llu; - kbase_vinstr_legacy_hwc_setup( - kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup); - } - mutex_unlock(&kctx->vinstr_cli_lock); - - kbase_destroy_context(kctx); - - dev_dbg(kbdev->dev, "deleted base context\n"); - kbase_release_device(kbdev); - return 0; -} - -#define CALL_MAX_SIZE 536 - -static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull }; /* alignment fixup */ - u32 size = _IOC_SIZE(cmd); - struct kbase_context *kctx = filp->private_data; - - if (size > CALL_MAX_SIZE) - return -ENOTTY; - - if (0 != copy_from_user(&msg, (void __user *)arg, size)) { - dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n"); - return -EFAULT; - } - - if (kbase_dispatch(kctx, &msg, size) != 0) - return -EFAULT; - - if (0 != copy_to_user((void __user *)arg, &msg, size)) { - dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n"); - return -EFAULT; - } - return 0; -} - -static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) -{ - struct kbase_context *kctx = filp->private_data; - struct base_jd_event_v2 uevent; - int out_count = 0; - - if (count < sizeof(uevent)) - return -ENOBUFS; - - do { - while (kbase_event_dequeue(kctx, &uevent)) { - if (out_count > 0) - goto out; - - if (filp->f_flags & O_NONBLOCK) - return -EAGAIN; - - if (wait_event_interruptible(kctx->event_queue, - kbase_event_pending(kctx)) != 0) - return -ERESTARTSYS; - } - if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) { - if (out_count == 0) - return -EPIPE; - goto out; - } - - if (copy_to_user(buf, 
&uevent, sizeof(uevent)) != 0) - return -EFAULT; - - buf += sizeof(uevent); - out_count++; - count -= sizeof(uevent); - } while (count >= sizeof(uevent)); - - out: - return out_count * sizeof(uevent); -} - -static unsigned int kbase_poll(struct file *filp, poll_table *wait) -{ - struct kbase_context *kctx = filp->private_data; - - poll_wait(filp, &kctx->event_queue, wait); - if (kbase_event_pending(kctx)) - return POLLIN | POLLRDNORM; - - return 0; -} - -void kbase_event_wakeup(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(kctx); - - wake_up_interruptible(&kctx->event_queue); -} - -KBASE_EXPORT_TEST_API(kbase_event_wakeup); - -static int kbase_check_flags(int flags) -{ - /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always - * closes the file descriptor in a child process. - */ - if (0 == (flags & O_CLOEXEC)) - return -EINVAL; - - return 0; -} - -static unsigned long kbase_get_unmapped_area(struct file *filp, - const unsigned long addr, const unsigned long len, - const unsigned long pgoff, const unsigned long flags) -{ -#ifdef CONFIG_64BIT - /* based on get_unmapped_area, but simplified slightly due to that some - * values are known in advance */ - struct kbase_context *kctx = filp->private_data; - - if (!kctx->is_compat && !addr && - kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) { - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long low_limit, high_limit, gap_start, gap_end; - - /* Hardware has smaller VA than userspace, ensure the page - * comes from a VA which can be used on the GPU */ - - gap_end = (1UL<<33); - if (gap_end < len) - return -ENOMEM; - high_limit = gap_end - len; - low_limit = PAGE_SIZE + len; - - gap_start = mm->highest_vm_end; - if (gap_start <= high_limit) - goto found_highest; - - if (RB_EMPTY_ROOT(&mm->mm_rb)) - return -ENOMEM; - vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); - if (vma->rb_subtree_gap < len) - return -ENOMEM; - - while (true) { - 
gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; - if (gap_start <= high_limit && vma->vm_rb.rb_right) { - struct vm_area_struct *right = - rb_entry(vma->vm_rb.rb_right, - struct vm_area_struct, vm_rb); - if (right->rb_subtree_gap >= len) { - vma = right; - continue; - } - } -check_current: - gap_end = vma->vm_start; - if (gap_end < low_limit) - return -ENOMEM; - if (gap_start <= high_limit && - gap_end - gap_start >= len) - goto found; - - if (vma->vm_rb.rb_left) { - struct vm_area_struct *left = - rb_entry(vma->vm_rb.rb_left, - struct vm_area_struct, vm_rb); - - if (left->rb_subtree_gap >= len) { - vma = left; - continue; - } - } - while (true) { - struct rb_node *prev = &vma->vm_rb; - - if (!rb_parent(prev)) - return -ENOMEM; - vma = rb_entry(rb_parent(prev), - struct vm_area_struct, vm_rb); - if (prev == vma->vm_rb.rb_right) { - gap_start = vma->vm_prev ? - vma->vm_prev->vm_end : 0; - goto check_current; - } - } - } - -found: - if (gap_end > (1UL<<33)) - gap_end = (1UL<<33); - -found_highest: - gap_end -= len; - - VM_BUG_ON(gap_end < PAGE_SIZE); - VM_BUG_ON(gap_end < gap_start); - return gap_end; - } -#endif - /* No special requirements - fallback to the default version */ - return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); -} - -static const struct file_operations kbase_fops = { - .owner = THIS_MODULE, - .open = kbase_open, - .release = kbase_release, - .read = kbase_read, - .poll = kbase_poll, - .unlocked_ioctl = kbase_ioctl, - .compat_ioctl = kbase_ioctl, - .mmap = kbase_mmap, - .check_flags = kbase_check_flags, - .get_unmapped_area = kbase_get_unmapped_area, -}; - -#ifndef CONFIG_MALI_NO_MALI -void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value) -{ - writel(value, kbdev->reg + offset); -} - -u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset) -{ - return readl(kbdev->reg + offset); -} -#endif /* !CONFIG_MALI_NO_MALI */ - - -/** Show callback for the @c power_policy sysfs file. 
- * - * This function is called to get the contents of the @c power_policy sysfs - * file. This is a list of the available policies with the currently active one - * surrounded by square brackets. - * - * @param dev The device this sysfs file is for - * @param attr The attributes of the sysfs file - * @param buf The output buffer for the sysfs file contents - * - * @return The number of bytes output to @c buf. - */ -static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf) -{ - struct kbase_device *kbdev; - const struct kbase_pm_policy *current_policy; - const struct kbase_pm_policy *const *policy_list; - int policy_count; - int i; - ssize_t ret = 0; - - kbdev = to_kbase_device(dev); - - if (!kbdev) - return -ENODEV; - - current_policy = kbase_pm_get_policy(kbdev); - - policy_count = kbase_pm_list_policies(&policy_list); - - for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) { - if (policy_list[i] == current_policy) - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name); - else - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name); - } - - if (ret < PAGE_SIZE - 1) { - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); - } else { - buf[PAGE_SIZE - 2] = '\n'; - buf[PAGE_SIZE - 1] = '\0'; - ret = PAGE_SIZE - 1; - } - - return ret; -} - -/** Store callback for the @c power_policy sysfs file. - * - * This function is called when the @c power_policy sysfs file is written to. - * It matches the requested policy against the available policies and if a - * matching policy is found calls @ref kbase_pm_set_policy to change the - * policy. - * - * @param dev The device with sysfs file is for - * @param attr The attributes of the sysfs file - * @param buf The value written to the sysfs file - * @param count The number of bytes written to the sysfs file - * - * @return @c count if the function succeeded. An error code on failure. 
- */ -static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct kbase_device *kbdev; - const struct kbase_pm_policy *new_policy = NULL; - const struct kbase_pm_policy *const *policy_list; - int policy_count; - int i; - - kbdev = to_kbase_device(dev); - - if (!kbdev) - return -ENODEV; - - policy_count = kbase_pm_list_policies(&policy_list); - - for (i = 0; i < policy_count; i++) { - if (sysfs_streq(policy_list[i]->name, buf)) { - new_policy = policy_list[i]; - break; - } - } - - if (!new_policy) { - dev_err(dev, "power_policy: policy not found\n"); - return -EINVAL; - } - - kbase_pm_set_policy(kbdev, new_policy); - - return count; -} - -/** The sysfs file @c power_policy. - * - * This is used for obtaining information about the available policies, - * determining which policy is currently active, and changing the active - * policy. - */ -static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy); - -/** Show callback for the @c core_availability_policy sysfs file. - * - * This function is called to get the contents of the @c core_availability_policy - * sysfs file. This is a list of the available policies with the currently - * active one surrounded by square brackets. - * - * @param dev The device this sysfs file is for - * @param attr The attributes of the sysfs file - * @param buf The output buffer for the sysfs file contents - * - * @return The number of bytes output to @c buf. 
- */ -static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf) -{ - struct kbase_device *kbdev; - const struct kbase_pm_ca_policy *current_policy; - const struct kbase_pm_ca_policy *const *policy_list; - int policy_count; - int i; - ssize_t ret = 0; - - kbdev = to_kbase_device(dev); - - if (!kbdev) - return -ENODEV; - - current_policy = kbase_pm_ca_get_policy(kbdev); - - policy_count = kbase_pm_ca_list_policies(&policy_list); - - for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) { - if (policy_list[i] == current_policy) - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name); - else - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name); - } - - if (ret < PAGE_SIZE - 1) { - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); - } else { - buf[PAGE_SIZE - 2] = '\n'; - buf[PAGE_SIZE - 1] = '\0'; - ret = PAGE_SIZE - 1; - } - - return ret; -} - -/** Store callback for the @c core_availability_policy sysfs file. - * - * This function is called when the @c core_availability_policy sysfs file is - * written to. It matches the requested policy against the available policies - * and if a matching policy is found calls @ref kbase_pm_set_policy to change - * the policy. - * - * @param dev The device with sysfs file is for - * @param attr The attributes of the sysfs file - * @param buf The value written to the sysfs file - * @param count The number of bytes written to the sysfs file - * - * @return @c count if the function succeeded. An error code on failure. 
- */ -static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct kbase_device *kbdev; - const struct kbase_pm_ca_policy *new_policy = NULL; - const struct kbase_pm_ca_policy *const *policy_list; - int policy_count; - int i; - - kbdev = to_kbase_device(dev); - - if (!kbdev) - return -ENODEV; - - policy_count = kbase_pm_ca_list_policies(&policy_list); - - for (i = 0; i < policy_count; i++) { - if (sysfs_streq(policy_list[i]->name, buf)) { - new_policy = policy_list[i]; - break; - } - } - - if (!new_policy) { - dev_err(dev, "core_availability_policy: policy not found\n"); - return -EINVAL; - } - - kbase_pm_ca_set_policy(kbdev, new_policy); - - return count; -} - -/** The sysfs file @c core_availability_policy - * - * This is used for obtaining information about the available policies, - * determining which policy is currently active, and changing the active - * policy. - */ -static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy); - -/** Show callback for the @c core_mask sysfs file. - * - * This function is called to get the contents of the @c core_mask sysfs - * file. - * - * @param dev The device this sysfs file is for - * @param attr The attributes of the sysfs file - * @param buf The output buffer for the sysfs file contents - * - * @return The number of bytes output to @c buf. - */ -static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf) -{ - struct kbase_device *kbdev; - ssize_t ret = 0; - - kbdev = to_kbase_device(dev); - - if (!kbdev) - return -ENODEV; - - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "Current core mask : 0x%llX\n", kbdev->pm.debug_core_mask); - ret += scnprintf(buf + ret, PAGE_SIZE - ret, - "Available core mask : 0x%llX\n", - kbdev->gpu_props.props.raw_props.shader_present); - - return ret; -} - -/** Store callback for the @c core_mask sysfs file. 
- * - * This function is called when the @c core_mask sysfs file is written to. - * - * @param dev The device with sysfs file is for - * @param attr The attributes of the sysfs file - * @param buf The value written to the sysfs file - * @param count The number of bytes written to the sysfs file - * - * @return @c count if the function succeeded. An error code on failure. - */ -static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct kbase_device *kbdev; - u64 new_core_mask; - int rc; - - kbdev = to_kbase_device(dev); - - if (!kbdev) - return -ENODEV; - - rc = kstrtoull(buf, 16, &new_core_mask); - if (rc) - return rc; - - if ((new_core_mask & kbdev->gpu_props.props.raw_props.shader_present) - != new_core_mask || - !(new_core_mask & kbdev->gpu_props.props.coherency_info.group[0].core_mask)) { - dev_err(dev, "power_policy: invalid core specification\n"); - return -EINVAL; - } - - if (kbdev->pm.debug_core_mask != new_core_mask) { - unsigned long flags; - - spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); - - kbase_pm_set_debug_core_mask(kbdev, new_core_mask); - - spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); - } - - return count; -} - -/** The sysfs file @c core_mask. - * - * This is used to restrict shader core availability for debugging purposes. - * Reading it will show the current core mask and the mask of cores available. - * Writing to it will set the current core mask. - */ -static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask); - -#ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS -/** - * struct sc_split_config - * @tag: Short name - * @human_readable: Long name - * @js0_mask: Mask for job slot 0 - * @js1_mask: Mask for job slot 1 - * @js2_mask: Mask for job slot 2 - * - * Structure containing a single shader affinity split configuration. 
- */ -struct sc_split_config { - char const *tag; - char const *human_readable; - u64 js0_mask; - u64 js1_mask; - u64 js2_mask; -}; - -/* - * Array of available shader affinity split configurations. - */ -static struct sc_split_config const sc_split_configs[] = { - /* All must be the first config (default). */ - { - "all", "All cores", - 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL - }, - { - "mp1", "MP1 shader core", - 0x1, 0x1, 0x1 - }, - { - "mp2", "MP2 shader core", - 0x3, 0x3, 0x3 - }, - { - "mp4", "MP4 shader core", - 0xF, 0xF, 0xF - }, - { - "mp1_vf", "MP1 vertex + MP1 fragment shader core", - 0x2, 0x1, 0xFFFFFFFFFFFFFFFFULL - }, - { - "mp2_vf", "MP2 vertex + MP2 fragment shader core", - 0xA, 0x5, 0xFFFFFFFFFFFFFFFFULL - }, - /* This must be the last config. */ - { - NULL, NULL, - 0x0, 0x0, 0x0 - }, -}; - -/* Pointer to the currently active shader split configuration. */ -static struct sc_split_config const *current_sc_split_config = &sc_split_configs[0]; - -/** Show callback for the @c sc_split sysfs file - * - * Returns the current shader core affinity policy. - */ -static ssize_t show_split(struct device *dev, struct device_attribute *attr, char * const buf) -{ - ssize_t ret; - /* We know we are given a buffer which is PAGE_SIZE long. Our strings are all guaranteed - * to be shorter than that at this time so no length check needed. */ - ret = scnprintf(buf, PAGE_SIZE, "Current sc_split: '%s'\n", current_sc_split_config->tag); - return ret; -} - -/** Store callback for the @c sc_split sysfs file. - * - * This function is called when the @c sc_split sysfs file is written to - * It modifies the system shader core affinity configuration to allow - * system profiling with different hardware configurations. 
- * - * @param dev The device with sysfs file is for - * @param attr The attributes of the sysfs file - * @param buf The value written to the sysfs file - * @param count The number of bytes written to the sysfs file - * - * @return @c count if the function succeeded. An error code on failure. - */ -static ssize_t set_split(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct sc_split_config const *config = &sc_split_configs[0]; - - /* Try to match: loop until we hit the last "NULL" entry */ - while (config->tag) { - if (sysfs_streq(config->tag, buf)) { - current_sc_split_config = config; - mali_js0_affinity_mask = config->js0_mask; - mali_js1_affinity_mask = config->js1_mask; - mali_js2_affinity_mask = config->js2_mask; - dev_dbg(dev, "Setting sc_split: '%s'\n", config->tag); - return count; - } - config++; - } - - /* No match found in config list */ - dev_err(dev, "sc_split: invalid value\n"); - dev_err(dev, " Possible settings: mp[1|2|4], mp[1|2]_vf\n"); - return -ENOENT; -} - -/** The sysfs file @c sc_split - * - * This is used for configuring/querying the current shader core work affinity - * configuration. - */ -static DEVICE_ATTR(sc_split, S_IRUGO|S_IWUSR, show_split, set_split); -#endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */ - - -/** Store callback for the @c js_timeouts sysfs file. - * - * This function is called to get the contents of the @c js_timeouts sysfs - * file. This file contains five values separated by whitespace. The values - * are basically the same as JS_SOFT_STOP_TICKS, JS_HARD_STOP_TICKS_SS, - * JS_HARD_STOP_TICKS_DUMPING, JS_RESET_TICKS_SS, JS_RESET_TICKS_DUMPING - * configuration values (in that order), with the difference that the js_timeout - * values are expressed in MILLISECONDS. - * - * The js_timeouts sysfile file allows the current values in - * use by the job scheduler to get override. Note that a value needs to - * be other than 0 for it to override the current job scheduler value. 
- * - * @param dev The device with sysfs file is for - * @param attr The attributes of the sysfs file - * @param buf The value written to the sysfs file - * @param count The number of bytes written to the sysfs file - * - * @return @c count if the function succeeded. An error code on failure. - */ -static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct kbase_device *kbdev; - int items; - long js_soft_stop_ms; - long js_soft_stop_ms_cl; - long js_hard_stop_ms_ss; - long js_hard_stop_ms_cl; - long js_hard_stop_ms_dumping; - long js_reset_ms_ss; - long js_reset_ms_cl; - long js_reset_ms_dumping; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld", - &js_soft_stop_ms, &js_soft_stop_ms_cl, - &js_hard_stop_ms_ss, &js_hard_stop_ms_cl, - &js_hard_stop_ms_dumping, &js_reset_ms_ss, - &js_reset_ms_cl, &js_reset_ms_dumping); - - if (items == 8) { - u64 ticks; - - if (js_soft_stop_ms >= 0) { - ticks = js_soft_stop_ms * 1000000ULL; - do_div(ticks, kbdev->js_data.scheduling_period_ns); - kbdev->js_soft_stop_ticks = ticks; - } else { - kbdev->js_soft_stop_ticks = -1; - } - - if (js_soft_stop_ms_cl >= 0) { - ticks = js_soft_stop_ms_cl * 1000000ULL; - do_div(ticks, kbdev->js_data.scheduling_period_ns); - kbdev->js_soft_stop_ticks_cl = ticks; - } else { - kbdev->js_soft_stop_ticks_cl = -1; - } - - if (js_hard_stop_ms_ss >= 0) { - ticks = js_hard_stop_ms_ss * 1000000ULL; - do_div(ticks, kbdev->js_data.scheduling_period_ns); - kbdev->js_hard_stop_ticks_ss = ticks; - } else { - kbdev->js_hard_stop_ticks_ss = -1; - } - - if (js_hard_stop_ms_cl >= 0) { - ticks = js_hard_stop_ms_cl * 1000000ULL; - do_div(ticks, kbdev->js_data.scheduling_period_ns); - kbdev->js_hard_stop_ticks_cl = ticks; - } else { - kbdev->js_hard_stop_ticks_cl = -1; - } - - if (js_hard_stop_ms_dumping >= 0) { - ticks = js_hard_stop_ms_dumping * 1000000ULL; - do_div(ticks, 
kbdev->js_data.scheduling_period_ns); - kbdev->js_hard_stop_ticks_dumping = ticks; - } else { - kbdev->js_hard_stop_ticks_dumping = -1; - } - - if (js_reset_ms_ss >= 0) { - ticks = js_reset_ms_ss * 1000000ULL; - do_div(ticks, kbdev->js_data.scheduling_period_ns); - kbdev->js_reset_ticks_ss = ticks; - } else { - kbdev->js_reset_ticks_ss = -1; - } - - if (js_reset_ms_cl >= 0) { - ticks = js_reset_ms_cl * 1000000ULL; - do_div(ticks, kbdev->js_data.scheduling_period_ns); - kbdev->js_reset_ticks_cl = ticks; - } else { - kbdev->js_reset_ticks_cl = -1; - } - - if (js_reset_ms_dumping >= 0) { - ticks = js_reset_ms_dumping * 1000000ULL; - do_div(ticks, kbdev->js_data.scheduling_period_ns); - kbdev->js_reset_ticks_dumping = ticks; - } else { - kbdev->js_reset_ticks_dumping = -1; - } - - kbdev->js_timeouts_updated = true; - - dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n", - (unsigned long)kbdev->js_soft_stop_ticks, - js_soft_stop_ms); - dev_dbg(kbdev->dev, "Overriding JS_SOFT_STOP_TICKS_CL with %lu ticks (%lu ms)\n", - (unsigned long)kbdev->js_soft_stop_ticks_cl, - js_soft_stop_ms_cl); - dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n", - (unsigned long)kbdev->js_hard_stop_ticks_ss, - js_hard_stop_ms_ss); - dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_CL with %lu ticks (%lu ms)\n", - (unsigned long)kbdev->js_hard_stop_ticks_cl, - js_hard_stop_ms_cl); - dev_dbg(kbdev->dev, "Overriding JS_HARD_STOP_TICKS_DUMPING with %lu ticks (%lu ms)\n", - (unsigned long) - kbdev->js_hard_stop_ticks_dumping, - js_hard_stop_ms_dumping); - dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_SS with %lu ticks (%lu ms)\n", - (unsigned long)kbdev->js_reset_ticks_ss, - js_reset_ms_ss); - dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_CL with %lu ticks (%lu ms)\n", - (unsigned long)kbdev->js_reset_ticks_cl, - js_reset_ms_cl); - dev_dbg(kbdev->dev, "Overriding JS_RESET_TICKS_DUMPING with %lu ticks (%lu ms)\n", - (unsigned 
long)kbdev->js_reset_ticks_dumping, - js_reset_ms_dumping); - - return count; - } - - dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n" - "Use format \n" - "Write 0 for no change, -1 to restore default timeout\n"); - return -EINVAL; -} - -/** Show callback for the @c js_timeouts sysfs file. - * - * This function is called to get the contents of the @c js_timeouts sysfs - * file. It returns the last set values written to the js_timeouts sysfs file. - * If the file didn't get written yet, the values will be current setting in - * use. - * @param dev The device this sysfs file is for - * @param attr The attributes of the sysfs file - * @param buf The output buffer for the sysfs file contents - * - * @return The number of bytes output to @c buf. - */ -static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf) -{ - struct kbase_device *kbdev; - ssize_t ret; - u64 ms; - unsigned long js_soft_stop_ms; - unsigned long js_soft_stop_ms_cl; - unsigned long js_hard_stop_ms_ss; - unsigned long js_hard_stop_ms_cl; - unsigned long js_hard_stop_ms_dumping; - unsigned long js_reset_ms_ss; - unsigned long js_reset_ms_cl; - unsigned long js_reset_ms_dumping; - unsigned long ticks; - u32 scheduling_period_ns; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - /* If no contexts have been scheduled since js_timeouts was last written - * to, the new timeouts might not have been latched yet. So check if an - * update is pending and use the new values if necessary. 
*/ - if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0) - scheduling_period_ns = kbdev->js_scheduling_period_ns; - else - scheduling_period_ns = kbdev->js_data.scheduling_period_ns; - - if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0) - ticks = kbdev->js_soft_stop_ticks; - else - ticks = kbdev->js_data.soft_stop_ticks; - ms = (u64)ticks * scheduling_period_ns; - do_div(ms, 1000000UL); - js_soft_stop_ms = (unsigned long)ms; - - if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0) - ticks = kbdev->js_soft_stop_ticks_cl; - else - ticks = kbdev->js_data.soft_stop_ticks_cl; - ms = (u64)ticks * scheduling_period_ns; - do_div(ms, 1000000UL); - js_soft_stop_ms_cl = (unsigned long)ms; - - if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0) - ticks = kbdev->js_hard_stop_ticks_ss; - else - ticks = kbdev->js_data.hard_stop_ticks_ss; - ms = (u64)ticks * scheduling_period_ns; - do_div(ms, 1000000UL); - js_hard_stop_ms_ss = (unsigned long)ms; - - if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0) - ticks = kbdev->js_hard_stop_ticks_cl; - else - ticks = kbdev->js_data.hard_stop_ticks_cl; - ms = (u64)ticks * scheduling_period_ns; - do_div(ms, 1000000UL); - js_hard_stop_ms_cl = (unsigned long)ms; - - if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0) - ticks = kbdev->js_hard_stop_ticks_dumping; - else - ticks = kbdev->js_data.hard_stop_ticks_dumping; - ms = (u64)ticks * scheduling_period_ns; - do_div(ms, 1000000UL); - js_hard_stop_ms_dumping = (unsigned long)ms; - - if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0) - ticks = kbdev->js_reset_ticks_ss; - else - ticks = kbdev->js_data.gpu_reset_ticks_ss; - ms = (u64)ticks * scheduling_period_ns; - do_div(ms, 1000000UL); - js_reset_ms_ss = (unsigned long)ms; - - if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0) - ticks = kbdev->js_reset_ticks_cl; - else - ticks = kbdev->js_data.gpu_reset_ticks_cl; - ms = 
(u64)ticks * scheduling_period_ns; - do_div(ms, 1000000UL); - js_reset_ms_cl = (unsigned long)ms; - - if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0) - ticks = kbdev->js_reset_ticks_dumping; - else - ticks = kbdev->js_data.gpu_reset_ticks_dumping; - ms = (u64)ticks * scheduling_period_ns; - do_div(ms, 1000000UL); - js_reset_ms_dumping = (unsigned long)ms; - - ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n", - js_soft_stop_ms, js_soft_stop_ms_cl, - js_hard_stop_ms_ss, js_hard_stop_ms_cl, - js_hard_stop_ms_dumping, js_reset_ms_ss, - js_reset_ms_cl, js_reset_ms_dumping); - - if (ret >= PAGE_SIZE) { - buf[PAGE_SIZE - 2] = '\n'; - buf[PAGE_SIZE - 1] = '\0'; - ret = PAGE_SIZE - 1; - } - - return ret; -} - -/** The sysfs file @c js_timeouts. - * - * This is used to override the current job scheduler values for - * JS_STOP_STOP_TICKS_SS - * JS_STOP_STOP_TICKS_CL - * JS_HARD_STOP_TICKS_SS - * JS_HARD_STOP_TICKS_CL - * JS_HARD_STOP_TICKS_DUMPING - * JS_RESET_TICKS_SS - * JS_RESET_TICKS_CL - * JS_RESET_TICKS_DUMPING. - */ -static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts); - -/** - * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs - * file - * @dev: The device the sysfs file is for - * @attr: The attributes of the sysfs file - * @buf: The value written to the sysfs file - * @count: The number of bytes written to the sysfs file - * - * This function is called when the js_scheduling_period sysfs file is written - * to. It checks the data written, and if valid updates the js_scheduling_period - * value - * - * Return: @c count if the function succeeded. An error code on failure. 
- */ -static ssize_t set_js_scheduling_period(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct kbase_device *kbdev; - int ret; - unsigned int js_scheduling_period; - u32 new_scheduling_period_ns; - u32 old_period; - u64 ticks; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - ret = kstrtouint(buf, 0, &js_scheduling_period); - if (ret || !js_scheduling_period) { - dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n" - "Use format \n"); - return -EINVAL; - } - - new_scheduling_period_ns = js_scheduling_period * 1000000; - - /* Update scheduling timeouts */ - mutex_lock(&kbdev->js_data.runpool_mutex); - - /* If no contexts have been scheduled since js_timeouts was last written - * to, the new timeouts might not have been latched yet. So check if an - * update is pending and use the new values if necessary. */ - - /* Use previous 'new' scheduling period as a base if present. */ - if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns) - old_period = kbdev->js_scheduling_period_ns; - else - old_period = kbdev->js_data.scheduling_period_ns; - - if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks > 0) - ticks = (u64)kbdev->js_soft_stop_ticks * old_period; - else - ticks = (u64)kbdev->js_data.soft_stop_ticks * - kbdev->js_data.scheduling_period_ns; - do_div(ticks, new_scheduling_period_ns); - kbdev->js_soft_stop_ticks = ticks ? ticks : 1; - - if (kbdev->js_timeouts_updated && kbdev->js_soft_stop_ticks_cl > 0) - ticks = (u64)kbdev->js_soft_stop_ticks_cl * old_period; - else - ticks = (u64)kbdev->js_data.soft_stop_ticks_cl * - kbdev->js_data.scheduling_period_ns; - do_div(ticks, new_scheduling_period_ns); - kbdev->js_soft_stop_ticks_cl = ticks ? 
ticks : 1; - - if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_ss > 0) - ticks = (u64)kbdev->js_hard_stop_ticks_ss * old_period; - else - ticks = (u64)kbdev->js_data.hard_stop_ticks_ss * - kbdev->js_data.scheduling_period_ns; - do_div(ticks, new_scheduling_period_ns); - kbdev->js_hard_stop_ticks_ss = ticks ? ticks : 1; - - if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_cl > 0) - ticks = (u64)kbdev->js_hard_stop_ticks_cl * old_period; - else - ticks = (u64)kbdev->js_data.hard_stop_ticks_cl * - kbdev->js_data.scheduling_period_ns; - do_div(ticks, new_scheduling_period_ns); - kbdev->js_hard_stop_ticks_cl = ticks ? ticks : 1; - - if (kbdev->js_timeouts_updated && kbdev->js_hard_stop_ticks_dumping > 0) - ticks = (u64)kbdev->js_hard_stop_ticks_dumping * old_period; - else - ticks = (u64)kbdev->js_data.hard_stop_ticks_dumping * - kbdev->js_data.scheduling_period_ns; - do_div(ticks, new_scheduling_period_ns); - kbdev->js_hard_stop_ticks_dumping = ticks ? ticks : 1; - - if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_ss > 0) - ticks = (u64)kbdev->js_reset_ticks_ss * old_period; - else - ticks = (u64)kbdev->js_data.gpu_reset_ticks_ss * - kbdev->js_data.scheduling_period_ns; - do_div(ticks, new_scheduling_period_ns); - kbdev->js_reset_ticks_ss = ticks ? ticks : 1; - - if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_cl > 0) - ticks = (u64)kbdev->js_reset_ticks_cl * old_period; - else - ticks = (u64)kbdev->js_data.gpu_reset_ticks_cl * - kbdev->js_data.scheduling_period_ns; - do_div(ticks, new_scheduling_period_ns); - kbdev->js_reset_ticks_cl = ticks ? ticks : 1; - - if (kbdev->js_timeouts_updated && kbdev->js_reset_ticks_dumping > 0) - ticks = (u64)kbdev->js_reset_ticks_dumping * old_period; - else - ticks = (u64)kbdev->js_data.gpu_reset_ticks_dumping * - kbdev->js_data.scheduling_period_ns; - do_div(ticks, new_scheduling_period_ns); - kbdev->js_reset_ticks_dumping = ticks ? 
ticks : 1; - - kbdev->js_scheduling_period_ns = new_scheduling_period_ns; - kbdev->js_timeouts_updated = true; - - mutex_unlock(&kbdev->js_data.runpool_mutex); - - dev_dbg(kbdev->dev, "JS scheduling period: %dms\n", - js_scheduling_period); - - return count; -} - -/** - * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs - * entry. - * @dev: The device this sysfs file is for. - * @attr: The attributes of the sysfs file. - * @buf: The output buffer to receive the GPU information. - * - * This function is called to get the current period used for the JS scheduling - * period. - * - * Return: The number of bytes output to buf. - */ -static ssize_t show_js_scheduling_period(struct device *dev, - struct device_attribute *attr, char * const buf) -{ - struct kbase_device *kbdev; - u32 period; - ssize_t ret; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - if (kbdev->js_timeouts_updated && kbdev->js_scheduling_period_ns > 0) - period = kbdev->js_scheduling_period_ns; - else - period = kbdev->js_data.scheduling_period_ns; - - ret = scnprintf(buf, PAGE_SIZE, "%d\n", - period / 1000000); - - return ret; -} - -static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR, - show_js_scheduling_period, set_js_scheduling_period); - -#if !MALI_CUSTOMER_RELEASE -/** Store callback for the @c force_replay sysfs file. - * - * @param dev The device with sysfs file is for - * @param attr The attributes of the sysfs file - * @param buf The value written to the sysfs file - * @param count The number of bytes written to the sysfs file - * - * @return @c count if the function succeeded. An error code on failure. 
- */ -static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct kbase_device *kbdev; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - if (!strncmp("limit=", buf, MIN(6, count))) { - int force_replay_limit; - int items = sscanf(buf, "limit=%u", &force_replay_limit); - - if (items == 1) { - kbdev->force_replay_random = false; - kbdev->force_replay_limit = force_replay_limit; - kbdev->force_replay_count = 0; - - return count; - } - } else if (!strncmp("random_limit", buf, MIN(12, count))) { - kbdev->force_replay_random = true; - kbdev->force_replay_count = 0; - - return count; - } else if (!strncmp("norandom_limit", buf, MIN(14, count))) { - kbdev->force_replay_random = false; - kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED; - kbdev->force_replay_count = 0; - - return count; - } else if (!strncmp("core_req=", buf, MIN(9, count))) { - unsigned int core_req; - int items = sscanf(buf, "core_req=%x", &core_req); - - if (items == 1) { - kbdev->force_replay_core_req = (base_jd_core_req)core_req; - - return count; - } - } - dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=, random_limit, norandom_limit, core_req=\n"); - return -EINVAL; -} - -/** Show callback for the @c force_replay sysfs file. - * - * This function is called to get the contents of the @c force_replay sysfs - * file. It returns the last set value written to the force_replay sysfs file. - * If the file didn't get written yet, the values will be 0. - * - * @param dev The device this sysfs file is for - * @param attr The attributes of the sysfs file - * @param buf The output buffer for the sysfs file contents - * - * @return The number of bytes output to @c buf. 
- */ -static ssize_t show_force_replay(struct device *dev, - struct device_attribute *attr, char * const buf) -{ - struct kbase_device *kbdev; - ssize_t ret; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - if (kbdev->force_replay_random) - ret = scnprintf(buf, PAGE_SIZE, - "limit=0\nrandom_limit\ncore_req=%x\n", - kbdev->force_replay_core_req); - else - ret = scnprintf(buf, PAGE_SIZE, - "limit=%u\nnorandom_limit\ncore_req=%x\n", - kbdev->force_replay_limit, - kbdev->force_replay_core_req); - - if (ret >= PAGE_SIZE) { - buf[PAGE_SIZE - 2] = '\n'; - buf[PAGE_SIZE - 1] = '\0'; - ret = PAGE_SIZE - 1; - } - - return ret; -} - -/** The sysfs file @c force_replay. - * - */ -static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay, - set_force_replay); -#endif /* !MALI_CUSTOMER_RELEASE */ - -#ifdef CONFIG_MALI_DEBUG -static ssize_t set_js_softstop_always(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct kbase_device *kbdev; - int ret; - int softstop_always; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - ret = kstrtoint(buf, 0, &softstop_always); - if (ret || ((softstop_always != 0) && (softstop_always != 1))) { - dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n" - "Use format \n"); - return -EINVAL; - } - - kbdev->js_data.softstop_always = (bool) softstop_always; - dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n", - (kbdev->js_data.softstop_always) ? 
- "Enabled" : "Disabled"); - return count; -} - -static ssize_t show_js_softstop_always(struct device *dev, - struct device_attribute *attr, char * const buf) -{ - struct kbase_device *kbdev; - ssize_t ret; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always); - - if (ret >= PAGE_SIZE) { - buf[PAGE_SIZE - 2] = '\n'; - buf[PAGE_SIZE - 1] = '\0'; - ret = PAGE_SIZE - 1; - } - - return ret; -} - -/* - * By default, soft-stops are disabled when only a single context is present. The ability to - * enable soft-stop when only a single context is present can be used for debug and unit-testing purposes. - * (see CL t6xx_stress_1 unit-test as an example whereby this feature is used.) - */ -static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always); -#endif /* CONFIG_MALI_DEBUG */ - -#ifdef CONFIG_MALI_DEBUG -typedef void (kbasep_debug_command_func) (struct kbase_device *); - -enum kbasep_debug_command_code { - KBASEP_DEBUG_COMMAND_DUMPTRACE, - - /* This must be the last enum */ - KBASEP_DEBUG_COMMAND_COUNT -}; - -struct kbasep_debug_command { - char *str; - kbasep_debug_command_func *func; -}; - -/** Debug commands supported by the driver */ -static const struct kbasep_debug_command debug_commands[] = { - { - .str = "dumptrace", - .func = &kbasep_trace_dump, - } -}; - -/** Show callback for the @c debug_command sysfs file. - * - * This function is called to get the contents of the @c debug_command sysfs - * file. This is a list of the available debug commands, separated by newlines. - * - * @param dev The device this sysfs file is for - * @param attr The attributes of the sysfs file - * @param buf The output buffer for the sysfs file contents - * - * @return The number of bytes output to @c buf. 
- */ -static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf) -{ - struct kbase_device *kbdev; - int i; - ssize_t ret = 0; - - kbdev = to_kbase_device(dev); - - if (!kbdev) - return -ENODEV; - - for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++) - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str); - - if (ret >= PAGE_SIZE) { - buf[PAGE_SIZE - 2] = '\n'; - buf[PAGE_SIZE - 1] = '\0'; - ret = PAGE_SIZE - 1; - } - - return ret; -} - -/** Store callback for the @c debug_command sysfs file. - * - * This function is called when the @c debug_command sysfs file is written to. - * It matches the requested command against the available commands, and if - * a matching command is found calls the associated function from - * @ref debug_commands to issue the command. - * - * @param dev The device with sysfs file is for - * @param attr The attributes of the sysfs file - * @param buf The value written to the sysfs file - * @param count The number of bytes written to the sysfs file - * - * @return @c count if the function succeeded. An error code on failure. - */ -static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct kbase_device *kbdev; - int i; - - kbdev = to_kbase_device(dev); - - if (!kbdev) - return -ENODEV; - - for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) { - if (sysfs_streq(debug_commands[i].str, buf)) { - debug_commands[i].func(kbdev); - return count; - } - } - - /* Debug Command not found */ - dev_err(dev, "debug_command: command not known\n"); - return -EINVAL; -} - -/** The sysfs file @c debug_command. - * - * This is used to issue general debug commands to the device driver. - * Reading it will produce a list of debug commands, separated by newlines. - * Writing to it with one of those commands will issue said command. 
- */ -static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug); -#endif /* CONFIG_MALI_DEBUG */ - -/** - * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry. - * @dev: The device this sysfs file is for. - * @attr: The attributes of the sysfs file. - * @buf: The output buffer to receive the GPU information. - * - * This function is called to get a description of the present Mali - * GPU via the gpuinfo sysfs entry. This includes the GPU family, the - * number of cores, the hardware version and the raw product id. For - * example: - * - * Mali-T60x MP4 r0p0 0x6956 - * - * Return: The number of bytes output to buf. - */ -static ssize_t kbase_show_gpuinfo(struct device *dev, - struct device_attribute *attr, char *buf) -{ - static const struct gpu_product_id_name { - unsigned id; - char *name; - } gpu_product_id_names[] = { - { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" }, - { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" }, - { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" }, - { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" }, - { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" }, - { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" }, - { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" }, - { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" }, - }; - const char *product_name = "(Unknown Mali GPU)"; - struct kbase_device *kbdev; - u32 gpu_id; - unsigned product_id; - unsigned i; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; - product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT; - - for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) { - if (gpu_product_id_names[i].id == product_id) { - product_name = gpu_product_id_names[i].name; - break; - } - } - - return scnprintf(buf, PAGE_SIZE, "%s MP%d r%dp%d 0x%04X\n", - product_name, kbdev->gpu_props.num_cores, - (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT, - (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT, - product_id); 
-} -static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL); - -/** - * set_dvfs_period - Store callback for the dvfs_period sysfs file. - * @dev: The device with sysfs file is for - * @attr: The attributes of the sysfs file - * @buf: The value written to the sysfs file - * @count: The number of bytes written to the sysfs file - * - * This function is called when the dvfs_period sysfs file is written to. It - * checks the data written, and if valid updates the DVFS period variable, - * - * Return: @c count if the function succeeded. An error code on failure. - */ -static ssize_t set_dvfs_period(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct kbase_device *kbdev; - int ret; - int dvfs_period; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - ret = kstrtoint(buf, 0, &dvfs_period); - if (ret || dvfs_period <= 0) { - dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n" - "Use format \n"); - return -EINVAL; - } - - kbdev->pm.dvfs_period = dvfs_period; - dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period); - - return count; -} - -/** - * show_dvfs_period - Show callback for the dvfs_period sysfs entry. - * @dev: The device this sysfs file is for. - * @attr: The attributes of the sysfs file. - * @buf: The output buffer to receive the GPU information. - * - * This function is called to get the current period used for the DVFS sample - * timer. - * - * Return: The number of bytes output to buf. - */ -static ssize_t show_dvfs_period(struct device *dev, - struct device_attribute *attr, char * const buf) -{ - struct kbase_device *kbdev; - ssize_t ret; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period); - - return ret; -} - -static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period, - set_dvfs_period); - -/** - * set_pm_poweroff - Store callback for the pm_poweroff sysfs file. 
- * @dev: The device with sysfs file is for - * @attr: The attributes of the sysfs file - * @buf: The value written to the sysfs file - * @count: The number of bytes written to the sysfs file - * - * This function is called when the pm_poweroff sysfs file is written to. - * - * This file contains three values separated by whitespace. The values - * are gpu_poweroff_time (the period of the poweroff timer, in ns), - * poweroff_shader_ticks (the number of poweroff timer ticks before an idle - * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer - * ticks before the GPU is powered off), in that order. - * - * Return: @c count if the function succeeded. An error code on failure. - */ -static ssize_t set_pm_poweroff(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct kbase_device *kbdev; - int items; - s64 gpu_poweroff_time; - int poweroff_shader_ticks, poweroff_gpu_ticks; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time, - &poweroff_shader_ticks, - &poweroff_gpu_ticks); - if (items != 3) { - dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n" - "Use format \n"); - return -EINVAL; - } - - kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time); - kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks; - kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks; - - return count; -} - -/** - * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry. - * @dev: The device this sysfs file is for. - * @attr: The attributes of the sysfs file. - * @buf: The output buffer to receive the GPU information. - * - * This function is called to get the current period used for the DVFS sample - * timer. - * - * Return: The number of bytes output to buf. 
- */ -static ssize_t show_pm_poweroff(struct device *dev, - struct device_attribute *attr, char * const buf) -{ - struct kbase_device *kbdev; - ssize_t ret; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - ret = scnprintf(buf, PAGE_SIZE, "%llu %u %u\n", - ktime_to_ns(kbdev->pm.gpu_poweroff_time), - kbdev->pm.poweroff_shader_ticks, - kbdev->pm.poweroff_gpu_ticks); - - return ret; -} - -static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff, - set_pm_poweroff); - -/** - * set_reset_timeout - Store callback for the reset_timeout sysfs file. - * @dev: The device with sysfs file is for - * @attr: The attributes of the sysfs file - * @buf: The value written to the sysfs file - * @count: The number of bytes written to the sysfs file - * - * This function is called when the reset_timeout sysfs file is written to. It - * checks the data written, and if valid updates the reset timeout. - * - * Return: @c count if the function succeeded. An error code on failure. - */ -static ssize_t set_reset_timeout(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct kbase_device *kbdev; - int ret; - int reset_timeout; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - ret = kstrtoint(buf, 0, &reset_timeout); - if (ret || reset_timeout <= 0) { - dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n" - "Use format \n"); - return -EINVAL; - } - - kbdev->reset_timeout_ms = reset_timeout; - dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout); - - return count; -} - -/** - * show_reset_timeout - Show callback for the reset_timeout sysfs entry. - * @dev: The device this sysfs file is for. - * @attr: The attributes of the sysfs file. - * @buf: The output buffer to receive the GPU information. - * - * This function is called to get the current reset timeout. - * - * Return: The number of bytes output to buf. 
- */ -static ssize_t show_reset_timeout(struct device *dev, - struct device_attribute *attr, char * const buf) -{ - struct kbase_device *kbdev; - ssize_t ret; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms); - - return ret; -} - -static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout, - set_reset_timeout); - - - -static ssize_t show_mem_pool_size(struct device *dev, - struct device_attribute *attr, char * const buf) -{ - struct kbase_device *kbdev; - ssize_t ret; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - ret = scnprintf(buf, PAGE_SIZE, "%zu\n", - kbase_mem_pool_size(&kbdev->mem_pool)); - - return ret; -} - -static ssize_t set_mem_pool_size(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct kbase_device *kbdev; - size_t new_size; - int err; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - err = kstrtoul(buf, 0, (unsigned long *)&new_size); - if (err) - return err; - - kbase_mem_pool_trim(&kbdev->mem_pool, new_size); - - return count; -} - -static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size, - set_mem_pool_size); - -static ssize_t show_mem_pool_max_size(struct device *dev, - struct device_attribute *attr, char * const buf) -{ - struct kbase_device *kbdev; - ssize_t ret; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - ret = scnprintf(buf, PAGE_SIZE, "%zu\n", - kbase_mem_pool_max_size(&kbdev->mem_pool)); - - return ret; -} - -static ssize_t set_mem_pool_max_size(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct kbase_device *kbdev; - size_t new_max_size; - int err; - - kbdev = to_kbase_device(dev); - if (!kbdev) - return -ENODEV; - - err = kstrtoul(buf, 0, (unsigned long *)&new_max_size); - if (err) - return -EINVAL; - - kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size); - - 
return count; -} - -static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size, - set_mem_pool_max_size); - - - -static int kbasep_secure_mode_init(struct kbase_device *kbdev) -{ - -#ifdef SECURE_CALLBACKS - kbdev->secure_ops = SECURE_CALLBACKS; - kbdev->secure_mode_support = false; - - if (kbdev->secure_ops) { - int err; - - /* Make sure secure mode is disabled on startup */ - err = kbdev->secure_ops->secure_mode_disable(kbdev); - - /* secure_mode_disable() returns -EINVAL if not supported */ - kbdev->secure_mode_support = (err != -EINVAL); - } -#endif - - return 0; -} - -#ifdef CONFIG_MALI_NO_MALI -static int kbase_common_reg_map(struct kbase_device *kbdev) -{ - return 0; -} -static void kbase_common_reg_unmap(struct kbase_device * const kbdev) -{ -} -#else /* CONFIG_MALI_NO_MALI */ -static int kbase_common_reg_map(struct kbase_device *kbdev) -{ - int err = -ENOMEM; - - if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) { - dev_err(kbdev->dev, "Register window unavailable\n"); - err = -EIO; - goto out_region; - } - - kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size); - if (!kbdev->reg) { - dev_err(kbdev->dev, "Can't remap register window\n"); - err = -EINVAL; - goto out_ioremap; - } - - return 0; - - out_ioremap: - release_mem_region(kbdev->reg_start, kbdev->reg_size); - out_region: - return err; -} - -static void kbase_common_reg_unmap(struct kbase_device * const kbdev) -{ - iounmap(kbdev->reg); - release_mem_region(kbdev->reg_start, kbdev->reg_size); -} -#endif /* CONFIG_MALI_NO_MALI */ - - -#ifdef CONFIG_DEBUG_FS - -#if KBASE_GPU_RESET_EN -#include - -static void trigger_quirks_reload(struct kbase_device *kbdev) -{ - kbase_pm_context_active(kbdev); - if (kbase_prepare_to_reset_gpu(kbdev)) - kbase_reset_gpu(kbdev); - kbase_pm_context_idle(kbdev); -} - -#define MAKE_QUIRK_ACCESSORS(type) \ -static int type##_quirks_set(void *data, u64 val) \ -{ \ - struct kbase_device *kbdev; \ - kbdev = (struct 
kbase_device *)data; \ - kbdev->hw_quirks_##type = (u32)val; \ - trigger_quirks_reload(kbdev); \ - return 0;\ -} \ -\ -static int type##_quirks_get(void *data, u64 *val) \ -{ \ - struct kbase_device *kbdev;\ - kbdev = (struct kbase_device *)data;\ - *val = kbdev->hw_quirks_##type;\ - return 0;\ -} \ -DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\ - type##_quirks_set, "%llu\n") - -MAKE_QUIRK_ACCESSORS(sc); -MAKE_QUIRK_ACCESSORS(tiler); -MAKE_QUIRK_ACCESSORS(mmu); - -#endif /* KBASE_GPU_RESET_EN */ - -static int kbasep_secure_mode_seq_show(struct seq_file *m, void *p) -{ - struct kbase_device *kbdev = m->private; - - if (!kbdev->secure_mode_support) - seq_puts(m, "unsupported\n"); - else - seq_printf(m, "%s\n", kbdev->secure_mode ? "Y" : "N"); - - return 0; -} - -static int kbasep_secure_mode_debugfs_open(struct inode *in, struct file *file) -{ - return single_open(file, kbasep_secure_mode_seq_show, in->i_private); -} - -static const struct file_operations kbasep_secure_mode_debugfs_fops = { - .open = kbasep_secure_mode_debugfs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int kbase_device_debugfs_init(struct kbase_device *kbdev) -{ - struct dentry *debugfs_ctx_defaults_directory; - int err; - - kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname, - NULL); - if (!kbdev->mali_debugfs_directory) { - dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n"); - err = -ENOMEM; - goto out; - } - - kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx", - kbdev->mali_debugfs_directory); - if (!kbdev->debugfs_ctx_directory) { - dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n"); - err = -ENOMEM; - goto out; - } - - debugfs_ctx_defaults_directory = debugfs_create_dir("defaults", - kbdev->debugfs_ctx_directory); - if (!debugfs_ctx_defaults_directory) { - dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n"); - err = -ENOMEM; - goto out; - } - 
kbase_debug_job_fault_dev_init(kbdev); - kbasep_gpu_memory_debugfs_init(kbdev); -#if KBASE_GPU_RESET_EN - debugfs_create_file("quirks_sc", 0644, - kbdev->mali_debugfs_directory, kbdev, - &fops_sc_quirks); - debugfs_create_file("quirks_tiler", 0644, - kbdev->mali_debugfs_directory, kbdev, - &fops_tiler_quirks); - debugfs_create_file("quirks_mmu", 0644, - kbdev->mali_debugfs_directory, kbdev, - &fops_mmu_quirks); -#endif /* KBASE_GPU_RESET_EN */ - -#ifndef CONFIG_MALI_COH_USER - debugfs_create_bool("infinite_cache", 0644, - debugfs_ctx_defaults_directory, - &kbdev->infinite_cache_active_default); -#endif /* CONFIG_MALI_COH_USER */ - - debugfs_create_size_t("mem_pool_max_size", 0644, - debugfs_ctx_defaults_directory, - &kbdev->mem_pool_max_size_default); - -#if KBASE_TRACE_ENABLE - kbasep_trace_debugfs_init(kbdev); -#endif /* KBASE_TRACE_ENABLE */ - -#ifdef CONFIG_MALI_TRACE_TIMELINE - kbasep_trace_timeline_debugfs_init(kbdev); -#endif /* CONFIG_MALI_TRACE_TIMELINE */ - - debugfs_create_file("secure_mode", S_IRUGO, - kbdev->mali_debugfs_directory, kbdev, - &kbasep_secure_mode_debugfs_fops); - - return 0; - -out: - debugfs_remove_recursive(kbdev->mali_debugfs_directory); - return err; -} - -static void kbase_device_debugfs_term(struct kbase_device *kbdev) -{ - debugfs_remove_recursive(kbdev->mali_debugfs_directory); -} - -#else /* CONFIG_DEBUG_FS */ -static inline int kbase_device_debugfs_init(struct kbase_device *kbdev) -{ - return 0; -} - -static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { } -#endif /* CONFIG_DEBUG_FS */ - -static void kbase_device_coherency_init(struct kbase_device *kbdev, u32 gpu_id) -{ - u32 selected_coherency = COHERENCY_NONE; - /* COHERENCY_NONE is always supported */ - u32 supported_coherency_bitmap = COHERENCY_FEATURE_BIT(COHERENCY_NONE); - -#ifdef CONFIG_OF - const void *coherency_override_dts; - u32 override_coherency; -#endif /* CONFIG_OF */ - - kbdev->system_coherency = selected_coherency; - - /* device tree may 
override the coherency */ -#ifdef CONFIG_OF - coherency_override_dts = of_get_property(kbdev->dev->of_node, - "override-coherency", - NULL); - if (coherency_override_dts) { - - override_coherency = be32_to_cpup(coherency_override_dts); - - if ((override_coherency <= COHERENCY_NONE) && - (supported_coherency_bitmap & - COHERENCY_FEATURE_BIT(override_coherency))) { - - kbdev->system_coherency = override_coherency; - - dev_info(kbdev->dev, - "Using coherency override, mode %u set from dtb", - override_coherency); - } else - dev_warn(kbdev->dev, - "Ignoring invalid coherency override, mode %u set from dtb", - override_coherency); - } - -#endif /* CONFIG_OF */ - - kbdev->gpu_props.props.raw_props.coherency_features = - kbdev->system_coherency; -} - -#ifdef CONFIG_MALI_FPGA_BUS_LOGGER - -/* Callback used by the kbase bus logger client, to initiate a GPU reset - * when the bus log is restarted. GPU reset is used as reference point - * in HW bus log analyses. - */ -static void kbase_logging_started_cb(void *data) -{ - struct kbase_device *kbdev = (struct kbase_device *)data; - - if (kbase_prepare_to_reset_gpu(kbdev)) - kbase_reset_gpu(kbdev); - dev_info(kbdev->dev, "KBASE - Bus logger restarted\n"); -} -#endif - - -static int kbase_common_device_init(struct kbase_device *kbdev) -{ - int err; - struct mali_base_gpu_core_props *core_props; - enum { - inited_mem = (1u << 0), - inited_js = (1u << 1), - inited_pm_runtime_init = (1u << 6), -#ifdef CONFIG_MALI_DEVFREQ - inited_devfreq = (1u << 9), -#endif /* CONFIG_MALI_DEVFREQ */ -#ifdef CONFIG_MALI_MIPE_ENABLED - inited_tlstream = (1u << 10), -#endif /* CONFIG_MALI_MIPE_ENABLED */ - inited_backend_early = (1u << 11), - inited_backend_late = (1u << 12), - inited_device = (1u << 13), - inited_vinstr = (1u << 19), - inited_ipa = (1u << 20) - }; - - int inited = 0; - u32 gpu_id; -#if defined(CONFIG_MALI_PLATFORM_VEXPRESS) - u32 ve_logic_tile = 0; -#endif /* CONFIG_MALI_PLATFORM_VEXPRESS */ - - dev_set_drvdata(kbdev->dev, kbdev); - 
- err = kbase_backend_early_init(kbdev); - if (err) - goto out_partial; - inited |= inited_backend_early; - - scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name, - kbase_dev_nr++); - - kbase_disjoint_init(kbdev); - - /* obtain min/max configured gpu frequencies */ - core_props = &(kbdev->gpu_props.props.core_props); - - /* For versatile express platforms, min and max values of GPU frequency - * depend on the type of the logic tile; these values may not be known - * at the build time so in some cases a platform config file with wrong - * GPU freguency values may be included; to ensure the correct value of - * min and max GPU frequency is obtained, the type of the logic tile is - * read from the corresponding register on the platform and frequency - * values assigned accordingly.*/ -#if defined(CONFIG_MALI_PLATFORM_VEXPRESS) - ve_logic_tile = kbase_get_platform_logic_tile_type(); - - switch (ve_logic_tile) { - case 0x217: - /* Virtex 6, HBI0217 */ - core_props->gpu_freq_khz_min = VE_VIRTEX6_GPU_FREQ_MIN; - core_props->gpu_freq_khz_max = VE_VIRTEX6_GPU_FREQ_MAX; - break; - case 0x247: - /* Virtex 7, HBI0247 */ - core_props->gpu_freq_khz_min = VE_VIRTEX7_GPU_FREQ_MIN; - core_props->gpu_freq_khz_max = VE_VIRTEX7_GPU_FREQ_MAX; - break; - default: - /* all other logic tiles, i.e., Virtex 5 HBI0192 - * or unsuccessful reading from the platform - - * fall back to the config_platform default */ - core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN; - core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX; - break; - } -#else - core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN; - core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX; -#endif /* CONFIG_MALI_PLATFORM_VEXPRESS */ - - kbdev->gpu_props.irq_throttle_time_us = DEFAULT_IRQ_THROTTLE_TIME_US; - - err = kbase_device_init(kbdev); - if (err) { - dev_err(kbdev->dev, "Can't initialize device (%d)\n", err); - goto out_partial; - } - - inited |= inited_device; - - kbdev->vinstr_ctx = kbase_vinstr_init(kbdev); - if 
(!kbdev->vinstr_ctx) { - dev_err(kbdev->dev, "Can't initialize virtual instrumentation core\n"); - goto out_partial; - } - - inited |= inited_vinstr; - - kbdev->ipa_ctx = kbase_ipa_init(kbdev); - if (!kbdev->ipa_ctx) { - dev_err(kbdev->dev, "Can't initialize IPA\n"); - goto out_partial; - } - - inited |= inited_ipa; - - if (kbdev->pm.callback_power_runtime_init) { - err = kbdev->pm.callback_power_runtime_init(kbdev); - if (err) - goto out_partial; - - inited |= inited_pm_runtime_init; - } - - err = kbase_mem_init(kbdev); - if (err) - goto out_partial; - - inited |= inited_mem; - - gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; - gpu_id &= GPU_ID_VERSION_PRODUCT_ID; - gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT; - - kbase_device_coherency_init(kbdev, gpu_id); - - err = kbasep_secure_mode_init(kbdev); - if (err) - goto out_partial; - - err = kbasep_js_devdata_init(kbdev); - if (err) - goto out_partial; - - inited |= inited_js; - -#ifdef CONFIG_MALI_MIPE_ENABLED - err = kbase_tlstream_init(); - if (err) { - dev_err(kbdev->dev, "Couldn't initialize timeline stream\n"); - goto out_partial; - } - inited |= inited_tlstream; -#endif /* CONFIG_MALI_MIPE_ENABLED */ - - err = kbase_backend_late_init(kbdev); - if (err) - goto out_partial; - inited |= inited_backend_late; - -#ifdef CONFIG_MALI_DEVFREQ - err = kbase_devfreq_init(kbdev); - if (err) { - dev_err(kbdev->dev, "Couldn't initialize devfreq\n"); - goto out_partial; - } - inited |= inited_devfreq; -#endif /* CONFIG_MALI_DEVFREQ */ - - err = kbase_device_debugfs_init(kbdev); - if (err) - goto out_partial; - - /* intialise the kctx list */ - mutex_init(&kbdev->kctx_list_lock); - INIT_LIST_HEAD(&kbdev->kctx_list); - - kbdev->mdev.minor = MISC_DYNAMIC_MINOR; - kbdev->mdev.name = kbdev->devname; - kbdev->mdev.fops = &kbase_fops; - kbdev->mdev.parent = get_device(kbdev->dev); - - err = misc_register(&kbdev->mdev); - if (err) { - dev_err(kbdev->dev, "Couldn't register misc dev %s\n", kbdev->devname); - goto out_misc; 
- } - - { - const struct list_head *dev_list = kbase_dev_list_get(); - - list_add(&kbdev->entry, &kbase_dev_list); - kbase_dev_list_put(dev_list); - } - - dev_info(kbdev->dev, "Probed as %s\n", dev_name(kbdev->mdev.this_device)); - - return 0; - -out_misc: - put_device(kbdev->dev); - kbase_device_debugfs_term(kbdev); -out_partial: - if (inited & inited_ipa) - kbase_ipa_term(kbdev->ipa_ctx); - if (inited & inited_vinstr) - kbase_vinstr_term(kbdev->vinstr_ctx); -#ifdef CONFIG_MALI_DEVFREQ - if (inited & inited_devfreq) - kbase_devfreq_term(kbdev); -#endif /* CONFIG_MALI_DEVFREQ */ - if (inited & inited_backend_late) - kbase_backend_late_term(kbdev); -#ifdef CONFIG_MALI_MIPE_ENABLED - if (inited & inited_tlstream) - kbase_tlstream_term(); -#endif /* CONFIG_MALI_MIPE_ENABLED */ - - if (inited & inited_js) - kbasep_js_devdata_halt(kbdev); - - if (inited & inited_mem) - kbase_mem_halt(kbdev); - - if (inited & inited_js) - kbasep_js_devdata_term(kbdev); - - if (inited & inited_mem) - kbase_mem_term(kbdev); - - if (inited & inited_pm_runtime_init) { - if (kbdev->pm.callback_power_runtime_term) - kbdev->pm.callback_power_runtime_term(kbdev); - } - - if (inited & inited_device) - kbase_device_term(kbdev); - - if (inited & inited_backend_early) - kbase_backend_early_term(kbdev); - - return err; -} - - -static struct attribute *kbase_attrs[] = { -#ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS - &dev_attr_sc_split.attr, -#endif -#ifdef CONFIG_MALI_DEBUG - &dev_attr_debug_command.attr, - &dev_attr_js_softstop_always.attr, -#endif -#if !MALI_CUSTOMER_RELEASE - &dev_attr_force_replay.attr, -#endif - &dev_attr_js_timeouts.attr, - &dev_attr_gpuinfo.attr, - &dev_attr_dvfs_period.attr, - &dev_attr_pm_poweroff.attr, - &dev_attr_reset_timeout.attr, - &dev_attr_js_scheduling_period.attr, - &dev_attr_power_policy.attr, - &dev_attr_core_availability_policy.attr, - &dev_attr_core_mask.attr, - &dev_attr_mem_pool_size.attr, - &dev_attr_mem_pool_max_size.attr, - NULL -}; - -static const struct 
attribute_group kbase_attr_group = { - .attrs = kbase_attrs, -}; - -static int kbase_common_device_remove(struct kbase_device *kbdev); - -static int kbase_platform_device_probe(struct platform_device *pdev) -{ - struct kbase_device *kbdev; - struct resource *reg_res; - int err = 0; - int i; - -#ifdef CONFIG_OF - err = kbase_platform_early_init(); - if (err) { - dev_err(&pdev->dev, "Early platform initialization failed\n"); - return err; - } -#endif - - kbdev = kbase_device_alloc(); - if (!kbdev) { - dev_err(&pdev->dev, "Can't allocate device\n"); - err = -ENOMEM; - goto out; - } -#ifdef CONFIG_MALI_NO_MALI - err = gpu_device_create(kbdev); - if (err) { - dev_err(&pdev->dev, "Can't initialize dummy model\n"); - goto out_midg; - } -#endif /* CONFIG_MALI_NO_MALI */ - - kbdev->dev = &pdev->dev; - /* 3 IRQ resources */ - for (i = 0; i < 3; i++) { - struct resource *irq_res; - int irqtag; - - irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i); - if (!irq_res) { - dev_err(kbdev->dev, "No IRQ resource at index %d\n", i); - err = -ENOENT; - goto out_platform_irq; - } - -#ifdef CONFIG_OF - if (!strcmp(irq_res->name, "JOB")) { - irqtag = JOB_IRQ_TAG; - } else if (!strcmp(irq_res->name, "MMU")) { - irqtag = MMU_IRQ_TAG; - } else if (!strcmp(irq_res->name, "GPU")) { - irqtag = GPU_IRQ_TAG; - } else { - dev_err(&pdev->dev, "Invalid irq res name: '%s'\n", - irq_res->name); - err = -EINVAL; - goto out_irq_name; - } -#else - irqtag = i; -#endif /* CONFIG_OF */ - kbdev->irqs[irqtag].irq = irq_res->start; - kbdev->irqs[irqtag].flags = (irq_res->flags & IRQF_TRIGGER_MASK); - } - /* the first memory resource is the physical address of the GPU - * registers */ - reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!reg_res) { - dev_err(kbdev->dev, "Invalid register resource\n"); - err = -ENOENT; - goto out_platform_mem; - } - - kbdev->reg_start = reg_res->start; - kbdev->reg_size = resource_size(reg_res); - - err = kbase_common_reg_map(kbdev); - if (err) - goto 
out_reg_map; - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \ - && defined(CONFIG_REGULATOR) - kbdev->regulator = regulator_get_optional(kbdev->dev, "mali"); - if (IS_ERR_OR_NULL(kbdev->regulator)) { - dev_info(kbdev->dev, "Continuing without Mali regulator control\n"); - kbdev->regulator = NULL; - /* Allow probe to continue without regulator */ - } -#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */ - -#ifdef CONFIG_MALI_PLATFORM_DEVICETREE - pm_runtime_enable(kbdev->dev); -#endif - kbdev->clock = clk_get(kbdev->dev, "clk_mali"); - if (IS_ERR_OR_NULL(kbdev->clock)) { - dev_info(kbdev->dev, "Continuing without Mali clock control\n"); - kbdev->clock = NULL; - /* Allow probe to continue without clock. */ - } else { - err = clk_prepare_enable(kbdev->clock); - if (err) { - dev_err(kbdev->dev, - "Failed to prepare and enable clock (%d)\n", err); - goto out_clock_prepare; - } - } - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) && defined(CONFIG_OF) \ - && defined(CONFIG_PM_OPP) - /* Register the OPPs if they are available in device tree */ - if (of_init_opp_table(kbdev->dev) < 0) - dev_dbg(kbdev->dev, "OPP table not found\n"); -#endif - - - err = kbase_common_device_init(kbdev); - if (err) { - dev_err(kbdev->dev, "Failed kbase_common_device_init\n"); - goto out_common_init; - } - - err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group); - if (err) { - dev_err(&pdev->dev, "Failed to create sysfs entries\n"); - goto out_sysfs; - } - -#ifdef CONFIG_MALI_FPGA_BUS_LOGGER - err = bl_core_client_register(kbdev->devname, - kbase_logging_started_cb, - kbdev, &kbdev->buslogger, - THIS_MODULE, NULL); - if (err) { - dev_err(kbdev->dev, "Couldn't register bus log client\n"); - goto out_bl_core_register; - } - - bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024); -#endif - return 0; - -#ifdef CONFIG_MALI_FPGA_BUS_LOGGER -out_bl_core_register: - sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group); -#endif - -out_sysfs: - 
kbase_common_device_remove(kbdev); -out_common_init: -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) - of_free_opp_table(kbdev->dev); -#endif - clk_disable_unprepare(kbdev->clock); -out_clock_prepare: - clk_put(kbdev->clock); -#ifdef CONFIG_MALI_PLATFORM_DEVICETREE - pm_runtime_disable(kbdev->dev); -#endif -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \ - && defined(CONFIG_REGULATOR) - regulator_put(kbdev->regulator); -#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */ - kbase_common_reg_unmap(kbdev); -out_reg_map: -out_platform_mem: -#ifdef CONFIG_OF -out_irq_name: -#endif -out_platform_irq: -#ifdef CONFIG_MALI_NO_MALI - gpu_device_destroy(kbdev); -out_midg: -#endif /* CONFIG_MALI_NO_MALI */ - kbase_device_free(kbdev); -out: - return err; -} - -static int kbase_common_device_remove(struct kbase_device *kbdev) -{ - kbase_ipa_term(kbdev->ipa_ctx); - kbase_vinstr_term(kbdev->vinstr_ctx); - sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group); - -#ifdef CONFIG_MALI_FPGA_BUS_LOGGER - if (kbdev->buslogger) - bl_core_client_unregister(kbdev->buslogger); -#endif - -#ifdef CONFIG_DEBUG_FS - debugfs_remove_recursive(kbdev->mali_debugfs_directory); -#endif -#ifdef CONFIG_MALI_DEVFREQ - kbase_devfreq_term(kbdev); -#endif - - kbase_backend_late_term(kbdev); - - if (kbdev->pm.callback_power_runtime_term) - kbdev->pm.callback_power_runtime_term(kbdev); -#ifdef CONFIG_MALI_PLATFORM_DEVICETREE - pm_runtime_disable(kbdev->dev); -#endif - -#ifdef CONFIG_MALI_MIPE_ENABLED - kbase_tlstream_term(); -#endif /* CONFIG_MALI_MIPE_ENABLED */ - - kbasep_js_devdata_halt(kbdev); - kbase_mem_halt(kbdev); - - kbasep_js_devdata_term(kbdev); - kbase_mem_term(kbdev); - kbase_backend_early_term(kbdev); - - { - const struct list_head *dev_list = kbase_dev_list_get(); - - list_del(&kbdev->entry); - kbase_dev_list_put(dev_list); - } - misc_deregister(&kbdev->mdev); - put_device(kbdev->dev); - kbase_common_reg_unmap(kbdev); - kbase_device_term(kbdev); - if (kbdev->clock) 
{ - clk_disable_unprepare(kbdev->clock); - clk_put(kbdev->clock); - kbdev->clock = NULL; - } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \ - && defined(CONFIG_REGULATOR) - regulator_put(kbdev->regulator); -#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */ -#ifdef CONFIG_MALI_NO_MALI - gpu_device_destroy(kbdev); -#endif /* CONFIG_MALI_NO_MALI */ - kbase_device_free(kbdev); - - return 0; -} - -static int kbase_platform_device_remove(struct platform_device *pdev) -{ - struct kbase_device *kbdev = to_kbase_device(&pdev->dev); - - if (!kbdev) - return -ENODEV; - - return kbase_common_device_remove(kbdev); -} - -/** Suspend callback from the OS. - * - * This is called by Linux when the device should suspend. - * - * @param dev The device to suspend - * - * @return A standard Linux error code - */ -static int kbase_device_suspend(struct device *dev) -{ - struct kbase_device *kbdev = to_kbase_device(dev); - - if (!kbdev) - return -ENODEV; - -#if defined(CONFIG_PM_DEVFREQ) && \ - (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) - devfreq_suspend_device(kbdev->devfreq); -#endif -#if defined(CONFIG_MALI_MIDGARD_DVFS) - mali_pm_statue = 1; -#endif - - kbase_pm_suspend(kbdev); - return 0; -} - -/** Resume callback from the OS. - * - * This is called by Linux when the device should resume from suspension. - * - * @param dev The device to resume - * - * @return A standard Linux error code - */ -static int kbase_device_resume(struct device *dev) -{ - struct kbase_device *kbdev = to_kbase_device(dev); - - if (!kbdev) - return -ENODEV; - - kbase_pm_resume(kbdev); - -#if defined(CONFIG_PM_DEVFREQ) && \ - (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) - devfreq_resume_device(kbdev->devfreq); -#endif -#if defined(CONFIG_MALI_MIDGARD_DVFS) - mali_pm_statue = 0; -#endif - return 0; -} - -/** Runtime suspend callback from the OS. 
- * - * This is called by Linux when the device should prepare for a condition in which it will - * not be able to communicate with the CPU(s) and RAM due to power management. - * - * @param dev The device to suspend - * - * @return A standard Linux error code - */ -#ifdef KBASE_PM_RUNTIME -static int kbase_device_runtime_suspend(struct device *dev) -{ - struct kbase_device *kbdev = to_kbase_device(dev); - - if (!kbdev) - return -ENODEV; - -#if defined(CONFIG_PM_DEVFREQ) && \ - (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) - devfreq_suspend_device(kbdev->devfreq); -#endif -#if defined(CONFIG_MALI_MIDGARD_DVFS) - mali_pm_statue = 1; -#endif - - if (kbdev->pm.backend.callback_power_runtime_off) { - kbdev->pm.backend.callback_power_runtime_off(kbdev); - dev_dbg(dev, "runtime suspend\n"); - } - return 0; -} -#endif /* KBASE_PM_RUNTIME */ - -/** Runtime resume callback from the OS. - * - * This is called by Linux when the device should go into a fully active state. - * - * @param dev The device to suspend - * - * @return A standard Linux error code - */ - -#ifdef KBASE_PM_RUNTIME -int kbase_device_runtime_resume(struct device *dev) -{ - int ret = 0; - struct kbase_device *kbdev = to_kbase_device(dev); - - if (!kbdev) - return -ENODEV; - - if (kbdev->pm.backend.callback_power_runtime_on) { - ret = kbdev->pm.backend.callback_power_runtime_on(kbdev); - dev_dbg(dev, "runtime resume\n"); - } - -#if defined(CONFIG_PM_DEVFREQ) && \ - (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) - devfreq_resume_device(kbdev->devfreq); -#endif -#if defined(CONFIG_MALI_MIDGARD_DVFS) - mali_pm_statue = 0; -#endif - - return ret; -} -#endif /* KBASE_PM_RUNTIME */ - -/** Runtime idle callback from the OS. 
- * - * This is called by Linux when the device appears to be inactive and it might be - * placed into a low power state - * - * @param dev The device to suspend - * - * @return A standard Linux error code - */ - -#ifdef KBASE_PM_RUNTIME -static int kbase_device_runtime_idle(struct device *dev) -{ - /* Avoid pm_runtime_suspend being called */ - return 1; -} -#endif /* KBASE_PM_RUNTIME */ -#ifndef CONFIG_MALI_DEVFREQ -static int mali_os_freeze(struct device *device) -{ - mali_dev_freeze(); - return kbase_device_suspend(device); -} - -static int mali_os_restore(struct device *device) -{ - mali_dev_restore(); - return kbase_device_resume(device); -} -#endif - - -/** The power management operations for the platform driver. - */ -static const struct dev_pm_ops kbase_pm_ops = { - .suspend = kbase_device_suspend, - .resume = kbase_device_resume, -#ifndef CONFIG_MALI_DEVFREQ - .freeze = mali_os_freeze, - .thaw = kbase_device_resume, - .restore = mali_os_restore, -#endif -#ifdef KBASE_PM_RUNTIME - .runtime_suspend = kbase_device_runtime_suspend, - .runtime_resume = kbase_device_runtime_resume, - .runtime_idle = kbase_device_runtime_idle, -#endif /* KBASE_PM_RUNTIME */ -}; - -#ifdef CONFIG_OF -static const struct of_device_id kbase_dt_ids[] = { - { .compatible = "arm,malit6xx" }, - { .compatible = "arm,mali-midgard" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, kbase_dt_ids); -#endif - -static struct platform_driver kbase_platform_driver = { - .probe = kbase_platform_device_probe, - .remove = kbase_platform_device_remove, - .driver = { - .name = kbase_drv_name, - .owner = THIS_MODULE, - .pm = &kbase_pm_ops, - .of_match_table = of_match_ptr(kbase_dt_ids), - }, -}; - -/* - * The driver will not provide a shortcut to create the Mali platform device - * anymore when using Device Tree. 
- */ -#ifdef CONFIG_OF -module_platform_driver(kbase_platform_driver); -#else - -static int __init kbase_driver_init(void) -{ - int ret; - - ret = kbase_platform_early_init(); - if (ret) - return ret; - -#ifndef CONFIG_MACH_MANTA -#ifdef CONFIG_MALI_PLATFORM_FAKE - ret = kbase_platform_fake_register(); - if (ret) - return ret; -#endif -#endif - ret = platform_driver_register(&kbase_platform_driver); -#ifndef CONFIG_MACH_MANTA -#ifdef CONFIG_MALI_PLATFORM_FAKE - if (ret) - kbase_platform_fake_unregister(); -#endif -#endif - return ret; -} - -static void __exit kbase_driver_exit(void) -{ - platform_driver_unregister(&kbase_platform_driver); -#ifndef CONFIG_MACH_MANTA -#ifdef CONFIG_MALI_PLATFORM_FAKE - kbase_platform_fake_unregister(); -#endif -#endif -} - -module_init(kbase_driver_init); -module_exit(kbase_driver_exit); - -#endif /* CONFIG_OF */ - -MODULE_LICENSE("GPL"); -MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \ - __stringify(BASE_UK_VERSION_MAJOR) "." \ - __stringify(BASE_UK_VERSION_MINOR) ")"); - -#if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE) -#define CREATE_TRACE_POINTS -#endif - -#ifdef CONFIG_MALI_GATOR_SUPPORT -/* Create the trace points (otherwise we just get code to call a tracepoint) */ -#include "mali_linux_trace.h" - -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counter); - -void kbase_trace_mali_pm_status(u32 event, u64 value) -{ - trace_mali_pm_status(event, value); -} - -void kbase_trace_mali_pm_power_off(u32 event, u64 value) -{ - trace_mali_pm_power_off(event, value); -} - -void 
kbase_trace_mali_pm_power_on(u32 event, u64 value) -{ - trace_mali_pm_power_on(event, value); -} - -void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id) -{ - trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id); -} - -void kbase_trace_mali_page_fault_insert_pages(int event, u32 value) -{ - trace_mali_page_fault_insert_pages(event, value); -} - -void kbase_trace_mali_mmu_as_in_use(int event) -{ - trace_mali_mmu_as_in_use(event); -} - -void kbase_trace_mali_mmu_as_released(int event) -{ - trace_mali_mmu_as_released(event); -} - -void kbase_trace_mali_total_alloc_pages_change(long long int event) -{ - trace_mali_total_alloc_pages_change(event); -} -#endif /* CONFIG_MALI_GATOR_SUPPORT */ -#ifdef CONFIG_MALI_SYSTEM_TRACE -#include "mali_linux_kbase_trace.h" -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug.c deleted file mode 100755 index fb57ac2e31adb..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug.c +++ /dev/null @@ -1,39 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#include - -static struct kbasep_debug_assert_cb kbasep_debug_assert_registered_cb = { - NULL, - NULL -}; - -void kbase_debug_assert_register_hook(kbase_debug_assert_hook *func, void *param) -{ - kbasep_debug_assert_registered_cb.func = func; - kbasep_debug_assert_registered_cb.param = param; -} - -void kbasep_debug_assert_call_hook(void) -{ - if (kbasep_debug_assert_registered_cb.func != NULL) - kbasep_debug_assert_registered_cb.func(kbasep_debug_assert_registered_cb.param); -} -KBASE_EXPORT_SYMBOL(kbasep_debug_assert_call_hook); - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug.h deleted file mode 100755 index 5fff2892bb554..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug.h +++ /dev/null @@ -1,164 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef _KBASE_DEBUG_H -#define _KBASE_DEBUG_H - -#include - -/** @brief If equals to 0, a trace containing the file, line, and function will be displayed before each message. */ -#define KBASE_DEBUG_SKIP_TRACE 0 - -/** @brief If different from 0, the trace will only contain the file and line. */ -#define KBASE_DEBUG_SKIP_FUNCTION_NAME 0 - -/** @brief Disable the asserts tests if set to 1. Default is to disable the asserts in release. 
*/ -#ifndef KBASE_DEBUG_DISABLE_ASSERTS -#ifdef CONFIG_MALI_DEBUG -#define KBASE_DEBUG_DISABLE_ASSERTS 0 -#else -#define KBASE_DEBUG_DISABLE_ASSERTS 1 -#endif -#endif /* KBASE_DEBUG_DISABLE_ASSERTS */ - -/** Function type that is called on an KBASE_DEBUG_ASSERT() or KBASE_DEBUG_ASSERT_MSG() */ -typedef void (kbase_debug_assert_hook) (void *); - -struct kbasep_debug_assert_cb { - kbase_debug_assert_hook *func; - void *param; -}; - -/** - * @def KBASEP_DEBUG_PRINT_TRACE - * @brief Private macro containing the format of the trace to display before every message - * @sa KBASE_DEBUG_SKIP_TRACE, KBASE_DEBUG_SKIP_FUNCTION_NAME - */ -#if !KBASE_DEBUG_SKIP_TRACE -#define KBASEP_DEBUG_PRINT_TRACE \ - "In file: " __FILE__ " line: " CSTD_STR2(__LINE__) -#if !KBASE_DEBUG_SKIP_FUNCTION_NAME -#define KBASEP_DEBUG_PRINT_FUNCTION __func__ -#else -#define KBASEP_DEBUG_PRINT_FUNCTION "" -#endif -#else -#define KBASEP_DEBUG_PRINT_TRACE "" -#endif - -/** - * @def KBASEP_DEBUG_ASSERT_OUT(trace, function, ...) - * @brief (Private) system printing function associated to the @see KBASE_DEBUG_ASSERT_MSG event. - * @param trace location in the code from where the message is printed - * @param function function from where the message is printed - * @param ... Format string followed by format arguments. - * @note function parameter cannot be concatenated with other strings - */ -/* Select the correct system output function*/ -#ifdef CONFIG_MALI_DEBUG -#define KBASEP_DEBUG_ASSERT_OUT(trace, function, ...)\ - do { \ - pr_err("Mali: %s function:%s ", trace, function);\ - pr_err(__VA_ARGS__);\ - pr_err("\n");\ - } while (false) -#else -#define KBASEP_DEBUG_ASSERT_OUT(trace, function, ...) 
CSTD_NOP() -#endif - -#ifdef CONFIG_MALI_DEBUG -#define KBASE_CALL_ASSERT_HOOK() kbasep_debug_assert_call_hook() -#else -#define KBASE_CALL_ASSERT_HOOK() CSTD_NOP() -#endif - -/** - * @def KBASE_DEBUG_ASSERT(expr) - * @brief Calls @see KBASE_PRINT_ASSERT and prints the expression @a expr if @a expr is false - * - * @note This macro does nothing if the flag @see KBASE_DEBUG_DISABLE_ASSERTS is set to 1 - * - * @param expr Boolean expression - */ -#define KBASE_DEBUG_ASSERT(expr) \ - KBASE_DEBUG_ASSERT_MSG(expr, #expr) - -#if KBASE_DEBUG_DISABLE_ASSERTS -#define KBASE_DEBUG_ASSERT_MSG(expr, ...) CSTD_NOP() -#else - /** - * @def KBASE_DEBUG_ASSERT_MSG(expr, ...) - * @brief Calls @see KBASEP_DEBUG_ASSERT_OUT and prints the given message if @a expr is false - * - * @note This macro does nothing if the flag @see KBASE_DEBUG_DISABLE_ASSERTS is set to 1 - * - * @param expr Boolean expression - * @param ... Message to display when @a expr is false, as a format string followed by format arguments. - */ -#define KBASE_DEBUG_ASSERT_MSG(expr, ...) \ - do { \ - if (!(expr)) { \ - KBASEP_DEBUG_ASSERT_OUT(KBASEP_DEBUG_PRINT_TRACE, KBASEP_DEBUG_PRINT_FUNCTION, __VA_ARGS__);\ - KBASE_CALL_ASSERT_HOOK();\ - BUG();\ - } \ - } while (false) -#endif /* KBASE_DEBUG_DISABLE_ASSERTS */ - -/** - * @def KBASE_DEBUG_CODE( X ) - * @brief Executes the code inside the macro only in debug mode - * - * @param X Code to compile only in debug mode. - */ -#ifdef CONFIG_MALI_DEBUG -#define KBASE_DEBUG_CODE(X) X -#else -#define KBASE_DEBUG_CODE(X) CSTD_NOP() -#endif /* CONFIG_MALI_DEBUG */ - -/** @} */ - -/** - * @brief Register a function to call on ASSERT - * - * Such functions will \b only be called during Debug mode, and for debugging - * features \b only. Do not rely on them to be called in general use. - * - * To disable the hook, supply NULL to \a func. - * - * @note This function is not thread-safe, and should only be used to - * register/deregister once in the module's lifetime. 
- * - * @param[in] func the function to call when an assert is triggered. - * @param[in] param the parameter to pass to \a func when calling it - */ -void kbase_debug_assert_register_hook(kbase_debug_assert_hook *func, void *param); - -/** - * @brief Call a debug assert hook previously registered with kbase_debug_assert_register_hook() - * - * @note This function is not thread-safe with respect to multiple threads - * registering functions and parameters with - * kbase_debug_assert_register_hook(). Otherwise, thread safety is the - * responsibility of the registered hook. - */ -void kbasep_debug_assert_call_hook(void); - -#endif /* _KBASE_DEBUG_H */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.c deleted file mode 100755 index 41ce05130d8f9..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.c +++ /dev/null @@ -1,447 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#include "mali_kbase_debug_job_fault.h" - -#ifdef CONFIG_DEBUG_FS - -static bool kbase_is_job_fault_event_pending(struct list_head *event_list) -{ - bool ret; - - ret = (!list_empty(event_list)); - - return ret; -} - -static bool kbase_ctx_has_no_event_pending( - struct kbase_context *kctx, struct list_head *event_list) -{ - struct base_job_fault_event *event; - - if (list_empty(event_list)) - return true; - list_for_each_entry(event, event_list, head) { - if (event->katom->kctx == kctx) - return false; - } - return false; -} - -/* wait until the fault happen and copy the event */ -static int kbase_job_fault_event_wait(struct kbase_device *kbdev, - struct list_head *event_list, - struct base_job_fault_event *event) -{ - struct base_job_fault_event *event_in; - - if (list_empty(event_list)) { - if (wait_event_interruptible(kbdev->job_fault_wq, - kbase_is_job_fault_event_pending(event_list))) - return -ERESTARTSYS; - } - - event_in = list_entry(event_list->next, - struct base_job_fault_event, head); - - event->event_code = event_in->event_code; - event->katom = event_in->katom; - return 0; - -} - -/* remove the event from the queue */ -static struct base_job_fault_event *kbase_job_fault_event_dequeue( - struct kbase_device *kbdev, struct list_head *event_list) -{ - struct base_job_fault_event *event; - - event = list_entry(event_list->next, - struct base_job_fault_event, head); - list_del(event_list->next); - - return event; - -} - -/* Remove all the following atoms after the failed atom in the same context - * Call the postponed bottom half of job done. - * Then, this context could be rescheduled. 
- */ -static void kbase_job_fault_resume_event_cleanup(struct kbase_context *kctx) -{ - struct list_head *event_list = &kctx->job_fault_resume_event_list; - - while (!list_empty(event_list)) { - struct base_job_fault_event *event; - - event = kbase_job_fault_event_dequeue(kctx->kbdev, - &kctx->job_fault_resume_event_list); - kbase_jd_done_worker(&event->katom->work); - } - -} - -/* Remove all the failed atoms that belong to different contexts - * Resume all the contexts that were suspend due to failed job - */ -static void kbase_job_fault_event_cleanup(struct kbase_device *kbdev) -{ - struct list_head *event_list = &kbdev->job_fault_event_list; - - while (!list_empty(event_list)) { - - kbase_job_fault_event_dequeue(kbdev, event_list); - wake_up(&kbdev->job_fault_resume_wq); - } -} - -static void kbase_job_fault_resume_worker(struct work_struct *data) -{ - struct base_job_fault_event *event = container_of(data, - struct base_job_fault_event, job_fault_work); - struct kbase_context *kctx; - struct kbase_jd_atom *katom; - - katom = event->katom; - kctx = katom->kctx; - - dev_info(kctx->kbdev->dev, "Job dumping wait\n"); - - /* When it was waked up, it need to check if queue is empty or the - * failed atom belongs to different context. If yes, wake up. Both - * of them mean the failed job has been dumped. Please note, it - * should never happen that the job_fault_event_list has the two - * atoms belong to the same context. - */ - wait_event(kctx->kbdev->job_fault_resume_wq, - kbase_ctx_has_no_event_pending(kctx, - &kctx->kbdev->job_fault_event_list)); - - atomic_set(&kctx->job_fault_count, 0); - kbase_jd_done_worker(&katom->work); - - /* In case the following atoms were scheduled during failed job dump - * the job_done_worker was held. 
We need to rerun it after the dump - * was finished - */ - kbase_job_fault_resume_event_cleanup(kctx); - - dev_info(kctx->kbdev->dev, "Job dumping finish, resume scheduler\n"); -} - -static struct base_job_fault_event *kbase_job_fault_event_queue( - struct list_head *event_list, - struct kbase_jd_atom *atom, - u32 completion_code) -{ - struct base_job_fault_event *event; - - event = &atom->fault_event; - - event->katom = atom; - event->event_code = completion_code; - - list_add_tail(&event->head, event_list); - - return event; - -} - -static void kbase_job_fault_event_post(struct kbase_device *kbdev, - struct kbase_jd_atom *katom, u32 completion_code) -{ - struct base_job_fault_event *event; - - event = kbase_job_fault_event_queue(&kbdev->job_fault_event_list, - katom, completion_code); - - wake_up_interruptible(&kbdev->job_fault_wq); - - INIT_WORK(&event->job_fault_work, kbase_job_fault_resume_worker); - queue_work(kbdev->job_fault_resume_workq, &event->job_fault_work); - - dev_info(katom->kctx->kbdev->dev, "Job fault happen, start dump: %d_%d", - katom->kctx->tgid, katom->kctx->id); - -} - -/* - * This function will process the job fault - * Get the register copy - * Send the failed job dump event - * Create a Wait queue to wait until the job dump finish - */ - -bool kbase_debug_job_fault_process(struct kbase_jd_atom *katom, - u32 completion_code) -{ - struct kbase_context *kctx = katom->kctx; - - /* Check if dumping is in the process - * only one atom of each context can be dumped at the same time - * If the atom belongs to different context, it can be dumped - */ - if (atomic_read(&kctx->job_fault_count) > 0) { - kbase_job_fault_event_queue( - &kctx->job_fault_resume_event_list, - katom, completion_code); - dev_info(kctx->kbdev->dev, "queue:%d\n", - kbase_jd_atom_id(kctx, katom)); - return true; - } - - if (kctx->kbdev->job_fault_debug == true) { - - if (completion_code != BASE_JD_EVENT_DONE) { - - if (kbase_job_fault_get_reg_snapshot(kctx) == false) { - 
dev_warn(kctx->kbdev->dev, "get reg dump failed\n"); - return false; - } - - kbase_job_fault_event_post(kctx->kbdev, katom, - completion_code); - atomic_inc(&kctx->job_fault_count); - dev_info(kctx->kbdev->dev, "post:%d\n", - kbase_jd_atom_id(kctx, katom)); - return true; - - } - } - return false; - -} - -static int debug_job_fault_show(struct seq_file *m, void *v) -{ - struct kbase_device *kbdev = m->private; - struct base_job_fault_event *event = (struct base_job_fault_event *)v; - struct kbase_context *kctx = event->katom->kctx; - int i; - - dev_info(kbdev->dev, "debug job fault seq show:%d_%d, %d", - kctx->tgid, kctx->id, event->reg_offset); - - if (kctx->reg_dump == NULL) { - dev_warn(kbdev->dev, "reg dump is NULL"); - return -1; - } - - if (kctx->reg_dump[event->reg_offset] == - REGISTER_DUMP_TERMINATION_FLAG) { - /* Return the error here to stop the read. And the - * following next() will not be called. The stop can - * get the real event resource and release it - */ - return -1; - } - - if (event->reg_offset == 0) - seq_printf(m, "%d_%d\n", kctx->tgid, kctx->id); - - for (i = 0; i < 50; i++) { - if (kctx->reg_dump[event->reg_offset] == - REGISTER_DUMP_TERMINATION_FLAG) { - break; - } - seq_printf(m, "%08x: %08x\n", - kctx->reg_dump[event->reg_offset], - kctx->reg_dump[1+event->reg_offset]); - event->reg_offset += 2; - - } - - - return 0; -} -static void *debug_job_fault_next(struct seq_file *m, void *v, loff_t *pos) -{ - struct kbase_device *kbdev = m->private; - struct base_job_fault_event *event = (struct base_job_fault_event *)v; - - dev_info(kbdev->dev, "debug job fault seq next:%d, %d", - event->reg_offset, (int)*pos); - - return event; -} - -static void *debug_job_fault_start(struct seq_file *m, loff_t *pos) -{ - struct kbase_device *kbdev = m->private; - struct base_job_fault_event *event; - - dev_info(kbdev->dev, "fault job seq start:%d", (int)*pos); - - /* The condition is trick here. 
It needs make sure the - * fault hasn't happened and the dumping hasn't been started, - * or the dumping has finished - */ - if (*pos == 0) { - event = kmalloc(sizeof(*event), GFP_KERNEL); - event->reg_offset = 0; - if (kbase_job_fault_event_wait(kbdev, - &kbdev->job_fault_event_list, event)) { - kfree(event); - return NULL; - } - - /* The cache flush workaround is called in bottom half of - * job done but we delayed it. Now we should clean cache - * earlier. Then the GPU memory dump should be correct. - */ - if (event->katom->need_cache_flush_cores_retained) { - kbase_gpu_cacheclean(kbdev, event->katom); - event->katom->need_cache_flush_cores_retained = 0; - } - - } else - return NULL; - - return event; -} - -static void debug_job_fault_stop(struct seq_file *m, void *v) -{ - struct kbase_device *kbdev = m->private; - - /* here we wake up the kbase_jd_done_worker after stop, it needs - * get the memory dump before the register dump in debug daemon, - * otherwise, the memory dump may be incorrect. 
- */ - - if (v != NULL) { - kfree(v); - dev_info(kbdev->dev, "debug job fault seq stop stage 1"); - - } else { - if (!list_empty(&kbdev->job_fault_event_list)) { - kbase_job_fault_event_dequeue(kbdev, - &kbdev->job_fault_event_list); - wake_up(&kbdev->job_fault_resume_wq); - } - dev_info(kbdev->dev, "debug job fault seq stop stage 2"); - } - -} - -static const struct seq_operations ops = { - .start = debug_job_fault_start, - .next = debug_job_fault_next, - .stop = debug_job_fault_stop, - .show = debug_job_fault_show, -}; - -static int debug_job_fault_open(struct inode *in, struct file *file) -{ - struct kbase_device *kbdev = in->i_private; - - seq_open(file, &ops); - - ((struct seq_file *)file->private_data)->private = kbdev; - dev_info(kbdev->dev, "debug job fault seq open"); - - kbdev->job_fault_debug = true; - - return 0; - -} - -static int debug_job_fault_release(struct inode *in, struct file *file) -{ - struct kbase_device *kbdev = in->i_private; - - seq_release(in, file); - - kbdev->job_fault_debug = false; - - /* Clean the unprocessed job fault. After that, all the suspended - * contexts could be rescheduled. 
- */ - kbase_job_fault_event_cleanup(kbdev); - - dev_info(kbdev->dev, "debug job fault seq close"); - - return 0; -} - -static const struct file_operations kbasep_debug_job_fault_fops = { - .open = debug_job_fault_open, - .read = seq_read, - .llseek = seq_lseek, - .release = debug_job_fault_release, -}; - -static int kbase_job_fault_event_init(struct kbase_device *kbdev) -{ - - INIT_LIST_HEAD(&kbdev->job_fault_event_list); - - init_waitqueue_head(&(kbdev->job_fault_wq)); - init_waitqueue_head(&(kbdev->job_fault_resume_wq)); - - kbdev->job_fault_resume_workq = alloc_workqueue( - "kbase_job_fault_resume_work_queue", WQ_MEM_RECLAIM, 1); - - return 0; -} - -/* - * Initialize debugfs entry for job fault dump - */ -void kbase_debug_job_fault_dev_init(struct kbase_device *kbdev) -{ - debugfs_create_file("job_fault", S_IRUGO, - kbdev->mali_debugfs_directory, kbdev, - &kbasep_debug_job_fault_fops); - - kbase_job_fault_event_init(kbdev); - kbdev->job_fault_debug = false; - -} - -/* - * Initialize the relevant data structure per context - */ -void kbase_debug_job_fault_context_init(struct kbase_context *kctx) -{ - - /* We need allocate double size register range - * Because this memory will keep the register address and value - */ - kctx->reg_dump = kmalloc(0x4000 * 2, GFP_KERNEL); - if (kctx->reg_dump == NULL) - return; - - if (kbase_debug_job_fault_reg_snapshot_init(kctx, 0x4000) == false) { - kfree(kctx->reg_dump); - kctx->reg_dump = NULL; - } - INIT_LIST_HEAD(&kctx->job_fault_resume_event_list); - atomic_set(&kctx->job_fault_count, 0); - -} - -/* - * release the relevant resource per context - */ -void kbase_debug_job_fault_context_exit(struct kbase_context *kctx) -{ - kfree(kctx->reg_dump); -} - -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.h deleted file mode 100755 index 3734046f3fd9f..0000000000000 --- 
a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifndef _KBASE_DEBUG_JOB_FAULT_H -#define _KBASE_DEBUG_JOB_FAULT_H - -#include -#include -#include - -#define REGISTER_DUMP_TERMINATION_FLAG 0xFFFFFFFF - -/** - * kbase_debug_job_fault_dev_init - Initialize job fault debug sysfs - * and create the fault event wait queue per device - * @kbdev: Device pointer - */ -void kbase_debug_job_fault_dev_init(struct kbase_device *kbdev); - -/** - * kbase_debug_job_fault_context_init - Initialize the relevant - * data structure per context - * @kctx: KBase context pointer - */ -void kbase_debug_job_fault_context_init(struct kbase_context *kctx); - -/** - * kbase_debug_job_fault_context_exit - Release the relevant - * resource per context - * @kctx: KBase context pointer - */ -void kbase_debug_job_fault_context_exit(struct kbase_context *kctx); - -/** - * kbase_debug_job_fault_process - Process the failed job. 
- * It will send a event and wake up the job fault waiting queue - * Then create a work queue to wait for job dump finish - * This function should be called in the interrupt handler and before - * jd_done that make sure the jd_done_worker will be delayed until the - * job dump finish - * @katom: The failed atom pointer - * @completion_code: the job status - * @return true if dump is going on - */ -bool kbase_debug_job_fault_process(struct kbase_jd_atom *katom, - u32 completion_code); - - -/** - * kbase_debug_job_fault_reg_snapshot_init - Set the interested registers - * address during the job fault process, the relevant registers will - * be saved when a job fault happen - * @kctx: KBase context pointer - * @reg_range: Maximum register address space - * @return true if initializing successfully - */ -bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx, - int reg_range); - -/** - * kbase_job_fault_get_reg_snapshot - Read the interested registers for - * failed job dump - * @kctx: KBase context pointer - * @return true if getting registers successfully - */ -bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx); - -#endif /*_KBASE_DEBUG_JOB_FAULT_H*/ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c deleted file mode 100755 index 1a3198e5b535b..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c +++ /dev/null @@ -1,251 +0,0 @@ -/* - * - * (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/* - * Debugfs interface to dump the memory visible to the GPU - */ - -#include "mali_kbase_debug_mem_view.h" -#include "mali_kbase.h" - -#include -#include - -#if CONFIG_DEBUG_FS - -struct debug_mem_mapping { - struct list_head node; - - struct kbase_mem_phy_alloc *alloc; - unsigned long flags; - - u64 start_pfn; - size_t nr_pages; -}; - -struct debug_mem_data { - struct list_head mapping_list; - struct kbase_context *kctx; -}; - -struct debug_mem_seq_off { - struct list_head *lh; - size_t offset; -}; - -static void *debug_mem_start(struct seq_file *m, loff_t *_pos) -{ - struct debug_mem_data *mem_data = m->private; - struct debug_mem_seq_off *data; - struct debug_mem_mapping *map; - loff_t pos = *_pos; - - list_for_each_entry(map, &mem_data->mapping_list, node) { - if (pos >= map->nr_pages) { - pos -= map->nr_pages; - } else { - data = kmalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return NULL; - data->lh = &map->node; - data->offset = pos; - return data; - } - } - - /* Beyond the end */ - return NULL; -} - -static void debug_mem_stop(struct seq_file *m, void *v) -{ - kfree(v); -} - -static void *debug_mem_next(struct seq_file *m, void *v, loff_t *pos) -{ - struct debug_mem_data *mem_data = m->private; - struct debug_mem_seq_off *data = v; - struct debug_mem_mapping *map; - - map = list_entry(data->lh, struct debug_mem_mapping, node); - - if (data->offset < map->nr_pages - 1) { - data->offset++; - ++*pos; - return data; - } - - if (list_is_last(data->lh, &mem_data->mapping_list)) - return NULL; - - data->lh = data->lh->next; - data->offset = 0; - ++*pos; - - return data; -} - -static int debug_mem_show(struct seq_file *m, void *v) -{ - struct debug_mem_data *mem_data = m->private; - struct debug_mem_seq_off *data = v; - struct debug_mem_mapping *map; - int 
i, j; - struct page *page; - uint32_t *mapping; - pgprot_t prot = PAGE_KERNEL; - - map = list_entry(data->lh, struct debug_mem_mapping, node); - - kbase_gpu_vm_lock(mem_data->kctx); - - if (data->offset >= map->alloc->nents) { - seq_printf(m, "%016llx: Unbacked page\n\n", (map->start_pfn + - data->offset) << PAGE_SHIFT); - goto out; - } - - if (!(map->flags & KBASE_REG_CPU_CACHED)) - prot = pgprot_writecombine(prot); - - page = pfn_to_page(PFN_DOWN(map->alloc->pages[data->offset])); - mapping = vmap(&page, 1, VM_MAP, prot); - - for (i = 0; i < PAGE_SIZE; i += 4*sizeof(*mapping)) { - seq_printf(m, "%016llx:", i + ((map->start_pfn + - data->offset) << PAGE_SHIFT)); - - for (j = 0; j < 4*sizeof(*mapping); j += sizeof(*mapping)) - seq_printf(m, " %08x", mapping[(i+j)/sizeof(*mapping)]); - seq_putc(m, '\n'); - } - - vunmap(mapping); - - seq_putc(m, '\n'); - -out: - kbase_gpu_vm_unlock(mem_data->kctx); - return 0; -} - -static const struct seq_operations ops = { - .start = debug_mem_start, - .next = debug_mem_next, - .stop = debug_mem_stop, - .show = debug_mem_show, -}; - -static int debug_mem_open(struct inode *i, struct file *file) -{ - struct file *kctx_file = i->i_private; - struct kbase_context *kctx = kctx_file->private_data; - struct rb_node *p; - struct debug_mem_data *mem_data; - int ret; - - ret = seq_open(file, &ops); - - if (ret) - return ret; - - mem_data = kmalloc(sizeof(*mem_data), GFP_KERNEL); - mem_data->kctx = kctx; - - INIT_LIST_HEAD(&mem_data->mapping_list); - - get_file(kctx_file); - - kbase_gpu_vm_lock(kctx); - - for (p = rb_first(&kctx->reg_rbtree); p; p = rb_next(p)) { - struct kbase_va_region *reg; - struct debug_mem_mapping *mapping; - - reg = rb_entry(p, struct kbase_va_region, rblink); - - if (reg->gpu_alloc == NULL) - /* Empty region - ignore */ - continue; - - mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); - - mapping->alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc); - mapping->start_pfn = reg->start_pfn; - mapping->nr_pages = 
reg->nr_pages; - mapping->flags = reg->flags; - list_add_tail(&mapping->node, &mem_data->mapping_list); - } - - kbase_gpu_vm_unlock(kctx); - - ((struct seq_file *)file->private_data)->private = mem_data; - - return 0; -} - -static int debug_mem_release(struct inode *inode, struct file *file) -{ - struct file *kctx_file = inode->i_private; - struct seq_file *sfile = file->private_data; - struct debug_mem_data *mem_data = sfile->private; - struct debug_mem_mapping *mapping; - - seq_release(inode, file); - - while (!list_empty(&mem_data->mapping_list)) { - mapping = list_first_entry(&mem_data->mapping_list, - struct debug_mem_mapping, node); - kbase_mem_phy_alloc_put(mapping->alloc); - list_del(&mapping->node); - kfree(mapping); - } - - kfree(mem_data); - - fput(kctx_file); - - return 0; -} - -static const struct file_operations kbase_debug_mem_view_fops = { - .open = debug_mem_open, - .release = debug_mem_release, - .read = seq_read, - .llseek = seq_lseek -}; - -/** - * kbase_debug_mem_view_init - Initialise the mem_view sysfs file - * @kctx_file: The /dev/mali0 file instance for the context - * - * This function creates a "mem_view" file which can be used to get a view of - * the context's memory as the GPU sees it (i.e. using the GPU's page tables). - * - * The file is cleaned up by a call to debugfs_remove_recursive() deleting the - * parent directory. 
- */ -void kbase_debug_mem_view_init(struct file *kctx_file) -{ - struct kbase_context *kctx = kctx_file->private_data; - - debugfs_create_file("mem_view", S_IRUGO, kctx->kctx_dentry, kctx_file, - &kbase_debug_mem_view_fops); -} - -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.h deleted file mode 100755 index 20ab51a776c6c..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * - * (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifndef _KBASE_DEBUG_MEM_VIEW_H -#define _KBASE_DEBUG_MEM_VIEW_H - -#include - -void kbase_debug_mem_view_init(struct file *kctx_file); - -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_defs.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_defs.h deleted file mode 100755 index 86fc9e40ee1da..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_defs.h +++ /dev/null @@ -1,1224 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_kbase_defs.h - * - * Defintions (types, defines, etcs) common to Kbase. They are placed here to - * allow the hierarchy of header files to work. - */ - -#ifndef _KBASE_DEFS_H_ -#define _KBASE_DEFS_H_ - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#ifdef CONFIG_MALI_FPGA_BUS_LOGGER -#include -#endif - - -#ifdef CONFIG_KDS -#include -#endif /* CONFIG_KDS */ - -#ifdef CONFIG_SYNC -#include "sync.h" -#endif /* CONFIG_SYNC */ - -#ifdef CONFIG_DEBUG_FS -#include -#endif /* CONFIG_DEBUG_FS */ - -#ifdef CONFIG_PM_DEVFREQ -#include -#endif /* CONFIG_DEVFREQ */ - -#include -#include - -#if defined(CONFIG_PM_RUNTIME) || \ - (defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) -#define KBASE_PM_RUNTIME 1 -#endif - -/** Enable SW tracing when set */ -#ifdef CONFIG_MALI_MIDGARD_ENABLE_TRACE -#define KBASE_TRACE_ENABLE 1 -#endif - -#ifndef KBASE_TRACE_ENABLE -#ifdef CONFIG_MALI_DEBUG -#define KBASE_TRACE_ENABLE 1 -#else -#define KBASE_TRACE_ENABLE 0 -#endif /* CONFIG_MALI_DEBUG */ -#endif /* KBASE_TRACE_ENABLE */ - -/** Dump Job slot trace on error (only active if KBASE_TRACE_ENABLE != 0) */ -#define KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR 1 - -/** - * Number of milliseconds before resetting the GPU when a job cannot be "zapped" from the hardware. - * Note that the time is actually ZAP_TIMEOUT+SOFT_STOP_RESET_TIMEOUT between the context zap starting and the GPU - * actually being reset to give other contexts time for their jobs to be soft-stopped and removed from the hardware - * before resetting. 
- */ -#define ZAP_TIMEOUT 1000 - -/** Number of milliseconds before we time out on a GPU soft/hard reset */ -#define RESET_TIMEOUT 500 - -/** - * Prevent soft-stops from occuring in scheduling situations - * - * This is not due to HW issues, but when scheduling is desired to be more predictable. - * - * Therefore, soft stop may still be disabled due to HW issues. - * - * @note Soft stop will still be used for non-scheduling purposes e.g. when terminating a context. - * - * @note if not in use, define this value to 0 instead of \#undef'ing it - */ -#define KBASE_DISABLE_SCHEDULING_SOFT_STOPS 0 - -/** - * Prevent hard-stops from occuring in scheduling situations - * - * This is not due to HW issues, but when scheduling is desired to be more predictable. - * - * @note Hard stop will still be used for non-scheduling purposes e.g. when terminating a context. - * - * @note if not in use, define this value to 0 instead of \#undef'ing it - */ -#define KBASE_DISABLE_SCHEDULING_HARD_STOPS 0 - -/** - * The maximum number of Job Slots to support in the Hardware. - * - * You can optimize this down if your target devices will only ever support a - * small number of job slots. - */ -#define BASE_JM_MAX_NR_SLOTS 3 - -/** - * The maximum number of Address Spaces to support in the Hardware. 
- * - * You can optimize this down if your target devices will only ever support a - * small number of Address Spaces - */ -#define BASE_MAX_NR_AS 16 - -/* mmu */ -#define MIDGARD_MMU_VA_BITS 48 - -#if MIDGARD_MMU_VA_BITS > 39 -#define MIDGARD_MMU_TOPLEVEL 0 -#else -#define MIDGARD_MMU_TOPLEVEL 1 -#endif - -#define GROWABLE_FLAGS_REQUIRED (KBASE_REG_PF_GROW | KBASE_REG_GPU_WR) - -/** setting in kbase_context::as_nr that indicates it's invalid */ -#define KBASEP_AS_NR_INVALID (-1) - -#define KBASE_LOCK_REGION_MAX_SIZE (63) -#define KBASE_LOCK_REGION_MIN_SIZE (11) - -#define KBASE_TRACE_SIZE_LOG2 8 /* 256 entries */ -#define KBASE_TRACE_SIZE (1 << KBASE_TRACE_SIZE_LOG2) -#define KBASE_TRACE_MASK ((1 << KBASE_TRACE_SIZE_LOG2)-1) - -#include "mali_kbase_js_defs.h" -#include "mali_kbase_hwaccess_defs.h" - -#define KBASEP_FORCE_REPLAY_DISABLED 0 - -/* Maximum force replay limit when randomization is enabled */ -#define KBASEP_FORCE_REPLAY_RANDOM_LIMIT 16 - -/** Atom has been previously soft-stoppped */ -#define KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED (1<<1) -/** Atom has been previously retried to execute */ -#define KBASE_KATOM_FLAGS_RERUN (1<<2) -#define KBASE_KATOM_FLAGS_JOBCHAIN (1<<3) -/** Atom has been previously hard-stopped. 
*/ -#define KBASE_KATOM_FLAG_BEEN_HARD_STOPPED (1<<4) -/** Atom has caused us to enter disjoint state */ -#define KBASE_KATOM_FLAG_IN_DISJOINT (1<<5) -/* Atom has fail dependency on same-slot dependency */ -#define KBASE_KATOM_FLAG_FAIL_PREV (1<<6) -/* Atom blocked on cross-slot dependency */ -#define KBASE_KATOM_FLAG_X_DEP_BLOCKED (1<<7) -/* Atom has fail dependency on cross-slot dependency */ -#define KBASE_KATOM_FLAG_FAIL_BLOCKER (1<<8) -/* Atom has been submitted to JSCTX ringbuffers */ -#define KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED (1<<9) -/* Atom is currently holding a context reference */ -#define KBASE_KATOM_FLAG_HOLDING_CTX_REF (1<<10) -/* Atom requires GPU to be in secure mode */ -#define KBASE_KATOM_FLAG_SECURE (1<<11) - -/* SW related flags about types of JS_COMMAND action - * NOTE: These must be masked off by JS_COMMAND_MASK */ - -/** This command causes a disjoint event */ -#define JS_COMMAND_SW_CAUSES_DISJOINT 0x100 - -/** Bitmask of all SW related flags */ -#define JS_COMMAND_SW_BITS (JS_COMMAND_SW_CAUSES_DISJOINT) - -#if (JS_COMMAND_SW_BITS & JS_COMMAND_MASK) -#error JS_COMMAND_SW_BITS not masked off by JS_COMMAND_MASK. Must update JS_COMMAND_SW_<..> bitmasks -#endif - -/** Soft-stop command that causes a Disjoint event. This of course isn't - * entirely masked off by JS_COMMAND_MASK */ -#define JS_COMMAND_SOFT_STOP_WITH_SW_DISJOINT \ - (JS_COMMAND_SW_CAUSES_DISJOINT | JS_COMMAND_SOFT_STOP) - -#define KBASEP_ATOM_ID_INVALID BASE_JD_ATOM_COUNT - -#ifdef CONFIG_DEBUG_FS -struct base_job_fault_event { - - u32 event_code; - struct kbase_jd_atom *katom; - struct work_struct job_fault_work; - struct list_head head; - int reg_offset; -}; - -#endif - -struct kbase_jd_atom_dependency { - struct kbase_jd_atom *atom; - u8 dep_type; -}; - -/** - * @brief The function retrieves a read-only reference to the atom field from - * the kbase_jd_atom_dependency structure - * - * @param[in] dep kbase jd atom dependency. 
- * - * @return readonly reference to dependent ATOM. - */ -static inline const struct kbase_jd_atom *const kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency *dep) -{ - LOCAL_ASSERT(dep != NULL); - - return (const struct kbase_jd_atom * const)(dep->atom); -} - -/** - * @brief The function retrieves a read-only reference to the dependency type field from - * the kbase_jd_atom_dependency structure - * - * @param[in] dep kbase jd atom dependency. - * - * @return A dependency type value. - */ -static inline const u8 kbase_jd_katom_dep_type(const struct kbase_jd_atom_dependency *dep) -{ - LOCAL_ASSERT(dep != NULL); - - return dep->dep_type; -} - -/** - * @brief Setter macro for dep_atom array entry in kbase_jd_atom - * - * @param[in] dep The kbase jd atom dependency. - * @param[in] a The ATOM to be set as a dependency. - * @param type The ATOM dependency type to be set. - * - */ -static inline void kbase_jd_katom_dep_set(const struct kbase_jd_atom_dependency *const_dep, - struct kbase_jd_atom *a, u8 type) -{ - struct kbase_jd_atom_dependency *dep; - - LOCAL_ASSERT(const_dep != NULL); - - dep = (struct kbase_jd_atom_dependency *)const_dep; - - dep->atom = a; - dep->dep_type = type; -} - -/** - * @brief Setter macro for dep_atom array entry in kbase_jd_atom - * - * @param[in] dep The kbase jd atom dependency to be cleared. 
- * - */ -static inline void kbase_jd_katom_dep_clear(const struct kbase_jd_atom_dependency *const_dep) -{ - struct kbase_jd_atom_dependency *dep; - - LOCAL_ASSERT(const_dep != NULL); - - dep = (struct kbase_jd_atom_dependency *)const_dep; - - dep->atom = NULL; - dep->dep_type = BASE_JD_DEP_TYPE_INVALID; -} - -enum kbase_atom_gpu_rb_state { - /* Atom is not currently present in slot ringbuffer */ - KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB, - /* Atom is in slot ringbuffer but is blocked on a previous atom */ - KBASE_ATOM_GPU_RB_WAITING_BLOCKED, - /* Atom is in slot ringbuffer but is waiting for cores to become - * available */ - KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE, - /* Atom is in slot ringbuffer but is blocked on affinity */ - KBASE_ATOM_GPU_RB_WAITING_AFFINITY, - /* Atom is in slot ringbuffer but is waiting for secure mode switch */ - KBASE_ATOM_GPU_RB_WAITING_SECURE_MODE, - /* Atom is in slot ringbuffer and ready to run */ - KBASE_ATOM_GPU_RB_READY, - /* Atom is in slot ringbuffer and has been submitted to the GPU */ - KBASE_ATOM_GPU_RB_SUBMITTED, - /* Atom must be returned to JS as soon as it reaches the head of the - * ringbuffer due to a previous failure */ - KBASE_ATOM_GPU_RB_RETURN_TO_JS -}; - -struct kbase_ext_res { - u64 gpu_address; - struct kbase_mem_phy_alloc *alloc; -}; - -struct kbase_jd_atom { - struct work_struct work; - ktime_t start_timestamp; - u64 time_spent_us; /**< Total time spent on the GPU in microseconds */ - - struct base_jd_udata udata; - struct kbase_context *kctx; - - struct list_head dep_head[2]; - struct list_head dep_item[2]; - const struct kbase_jd_atom_dependency dep[2]; - - u16 nr_extres; - struct kbase_ext_res *extres; - - u32 device_nr; - u64 affinity; - u64 jc; - enum kbase_atom_coreref_state coreref_state; -#ifdef CONFIG_KDS - struct list_head node; - struct kds_resource_set *kds_rset; - bool kds_dep_satisfied; -#endif /* CONFIG_KDS */ -#ifdef CONFIG_SYNC - struct sync_fence *fence; - struct sync_fence_waiter sync_waiter; 
-#endif /* CONFIG_SYNC */ - - /* Note: refer to kbasep_js_atom_retained_state, which will take a copy of some of the following members */ - enum base_jd_event_code event_code; - base_jd_core_req core_req; /**< core requirements */ - /** Job Slot to retry submitting to if submission from IRQ handler failed - * - * NOTE: see if this can be unified into the another member e.g. the event */ - int retry_submit_on_slot; - - union kbasep_js_policy_job_info sched_info; - /* JS atom priority with respect to other atoms on its kctx. */ - int sched_priority; - - int poking; /* BASE_HW_ISSUE_8316 */ - - wait_queue_head_t completed; - enum kbase_jd_atom_state status; -#ifdef CONFIG_GPU_TRACEPOINTS - int work_id; -#endif - /* Assigned after atom is completed. Used to check whether PRLAM-10676 workaround should be applied */ - int slot_nr; - - u32 atom_flags; - - /* Number of times this atom has been retried. Used by replay soft job. - */ - int retry_count; - - enum kbase_atom_gpu_rb_state gpu_rb_state; - - u64 need_cache_flush_cores_retained; - - atomic_t blocked; - - /* Pointer to atom that this atom has cross-slot dependency on */ - struct kbase_jd_atom *x_pre_dep; - /* Pointer to atom that has cross-slot dependency on this atom */ - struct kbase_jd_atom *x_post_dep; - - - struct kbase_jd_atom_backend backend; -#ifdef CONFIG_DEBUG_FS - struct base_job_fault_event fault_event; -#endif -}; - -static inline bool kbase_jd_katom_is_secure(const struct kbase_jd_atom *katom) -{ - return (bool)(katom->atom_flags & KBASE_KATOM_FLAG_SECURE); -} - -/* - * Theory of operations: - * - * Atom objects are statically allocated within the context structure. - * - * Each atom is the head of two lists, one for the "left" set of dependencies, one for the "right" set. - */ - -#define KBASE_JD_DEP_QUEUE_SIZE 256 - -struct kbase_jd_context { - struct mutex lock; - struct kbasep_js_kctx_info sched_info; - struct kbase_jd_atom atoms[BASE_JD_ATOM_COUNT]; - - /** Tracks all job-dispatch jobs. 
This includes those not tracked by - * the scheduler: 'not ready to run' and 'dependency-only' jobs. */ - u32 job_nr; - - /** Waitq that reflects whether there are no jobs (including SW-only - * dependency jobs). This is set when no jobs are present on the ctx, - * and clear when there are jobs. - * - * @note: Job Dispatcher knows about more jobs than the Job Scheduler: - * the Job Scheduler is unaware of jobs that are blocked on dependencies, - * and SW-only dependency jobs. - * - * This waitq can be waited upon to find out when the context jobs are all - * done/cancelled (including those that might've been blocked on - * dependencies) - and so, whether it can be terminated. However, it should - * only be terminated once it is neither present in the policy-queue (see - * kbasep_js_policy_try_evict_ctx() ) nor the run-pool (see - * kbasep_js_kctx_info::ctx::is_scheduled). - * - * Since the waitq is only set under kbase_jd_context::lock, - * the waiter should also briefly obtain and drop kbase_jd_context::lock to - * guarentee that the setter has completed its work on the kbase_context - * - * This must be updated atomically with: - * - kbase_jd_context::job_nr */ - wait_queue_head_t zero_jobs_wait; - - /** Job Done workqueue. */ - struct workqueue_struct *job_done_wq; - - spinlock_t tb_lock; - u32 *tb; - size_t tb_wrap_offset; - -#ifdef CONFIG_KDS - struct kds_callback kds_cb; -#endif /* CONFIG_KDS */ -#ifdef CONFIG_GPU_TRACEPOINTS - atomic_t work_id; -#endif -}; - -struct kbase_device_info { - u32 features; -}; - -/** Poking state for BASE_HW_ISSUE_8316 */ -enum { - KBASE_AS_POKE_STATE_IN_FLIGHT = 1<<0, - KBASE_AS_POKE_STATE_KILLING_POKE = 1<<1 -}; - -/** Poking state for BASE_HW_ISSUE_8316 */ -typedef u32 kbase_as_poke_state; - -struct kbase_mmu_setup { - u64 transtab; - u64 memattr; -}; - -/** - * Important: Our code makes assumptions that a struct kbase_as structure is always at - * kbase_device->as[number]. 
This is used to recover the containing - * struct kbase_device from a struct kbase_as structure. - * - * Therefore, struct kbase_as structures must not be allocated anywhere else. - */ -struct kbase_as { - int number; - - struct workqueue_struct *pf_wq; - struct work_struct work_pagefault; - struct work_struct work_busfault; - enum kbase_mmu_fault_type fault_type; - u32 fault_status; - u64 fault_addr; - struct mutex transaction_mutex; - - struct kbase_mmu_setup current_setup; - - /* BASE_HW_ISSUE_8316 */ - struct workqueue_struct *poke_wq; - struct work_struct poke_work; - /** Protected by kbasep_js_device_data::runpool_irq::lock */ - int poke_refcount; - /** Protected by kbasep_js_device_data::runpool_irq::lock */ - kbase_as_poke_state poke_state; - struct hrtimer poke_timer; -}; - -static inline int kbase_as_has_bus_fault(struct kbase_as *as) -{ - return as->fault_type == KBASE_MMU_FAULT_TYPE_BUS; -} - -static inline int kbase_as_has_page_fault(struct kbase_as *as) -{ - return as->fault_type == KBASE_MMU_FAULT_TYPE_PAGE; -} - -struct kbasep_mem_device { - atomic_t used_pages; /* Tracks usage of OS shared memory. Updated - when OS memory is allocated/freed. 
*/ - -}; - -#define KBASE_TRACE_CODE(X) KBASE_TRACE_CODE_ ## X - -enum kbase_trace_code { - /* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE - * THIS MUST BE USED AT THE START OF THE ENUM */ -#define KBASE_TRACE_CODE_MAKE_CODE(X) KBASE_TRACE_CODE(X) -#include "mali_kbase_trace_defs.h" -#undef KBASE_TRACE_CODE_MAKE_CODE - /* Comma on its own, to extend the list */ - , - /* Must be the last in the enum */ - KBASE_TRACE_CODE_COUNT -}; - -#define KBASE_TRACE_FLAG_REFCOUNT (((u8)1) << 0) -#define KBASE_TRACE_FLAG_JOBSLOT (((u8)1) << 1) - -struct kbase_trace { - struct timespec timestamp; - u32 thread_id; - u32 cpu; - void *ctx; - bool katom; - int atom_number; - u64 atom_udata[2]; - u64 gpu_addr; - unsigned long info_val; - u8 code; - u8 jobslot; - u8 refcount; - u8 flags; -}; - -/** Event IDs for the power management framework. - * - * Any of these events might be missed, so they should not be relied upon to - * find the precise state of the GPU at a particular time in the - * trace. Overall, we should get a high percentage of these events for - * statisical purposes, and so a few missing should not be a problem */ -enum kbase_timeline_pm_event { - /* helper for tests */ - KBASEP_TIMELINE_PM_EVENT_FIRST, - - /** Event reserved for backwards compatibility with 'init' events */ - KBASE_TIMELINE_PM_EVENT_RESERVED_0 = KBASEP_TIMELINE_PM_EVENT_FIRST, - - /** The power state of the device has changed. - * - * Specifically, the device has reached a desired or available state. - */ - KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED, - - /** The GPU is becoming active. - * - * This event is sent when the first context is about to use the GPU. - */ - KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE, - - /** The GPU is becoming idle. - * - * This event is sent when the last context has finished using the GPU. 
- */ - KBASE_TIMELINE_PM_EVENT_GPU_IDLE, - - /** Event reserved for backwards compatibility with 'policy_change' - * events */ - KBASE_TIMELINE_PM_EVENT_RESERVED_4, - - /** Event reserved for backwards compatibility with 'system_suspend' - * events */ - KBASE_TIMELINE_PM_EVENT_RESERVED_5, - - /** Event reserved for backwards compatibility with 'system_resume' - * events */ - KBASE_TIMELINE_PM_EVENT_RESERVED_6, - - /** The job scheduler is requesting to power up/down cores. - * - * This event is sent when: - * - powered down cores are needed to complete a job - * - powered up cores are not needed anymore - */ - KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE, - - KBASEP_TIMELINE_PM_EVENT_LAST = KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE, -}; - -#ifdef CONFIG_MALI_TRACE_TIMELINE -struct kbase_trace_kctx_timeline { - atomic_t jd_atoms_in_flight; - u32 owner_tgid; -}; - -struct kbase_trace_kbdev_timeline { - /* Note: strictly speaking, not needed, because it's in sync with - * kbase_device::jm_slots[]::submitted_nr - * - * But it's kept as an example of how to add global timeline tracking - * information - * - * The caller must hold kbasep_js_device_data::runpool_irq::lock when - * accessing this */ - u8 slot_atoms_submitted[BASE_JM_MAX_NR_SLOTS]; - - /* Last UID for each PM event */ - atomic_t pm_event_uid[KBASEP_TIMELINE_PM_EVENT_LAST+1]; - /* Counter for generating PM event UIDs */ - atomic_t pm_event_uid_counter; - /* - * L2 transition state - true indicates that the transition is ongoing - * Expected to be protected by pm.power_change_lock */ - bool l2_transitioning; -}; -#endif /* CONFIG_MALI_TRACE_TIMELINE */ - - -struct kbasep_kctx_list_element { - struct list_head link; - struct kbase_context *kctx; -}; - -/** - * Data stored per device for power management. - * - * This structure contains data for the power management framework. There is one - * instance of this structure per device in the system. 
- */ -struct kbase_pm_device_data { - /** - * The lock protecting Power Management structures accessed outside of - * IRQ. - * - * This lock must also be held whenever the GPU is being powered on or - * off. - */ - struct mutex lock; - - /** The reference count of active contexts on this device. */ - int active_count; - /** Flag indicating suspending/suspended */ - bool suspending; - /* Wait queue set when active_count == 0 */ - wait_queue_head_t zero_active_count_wait; - - /** - * A bit mask identifying the available shader cores that are specified - * via sysfs - */ - u64 debug_core_mask; - - /** - * Lock protecting the power state of the device. - * - * This lock must be held when accessing the shader_available_bitmap, - * tiler_available_bitmap, l2_available_bitmap, shader_inuse_bitmap and - * tiler_inuse_bitmap fields of kbase_device, and the ca_in_transition - * and shader_poweroff_pending fields of kbase_pm_device_data. It is - * also held when the hardware power registers are being written to, to - * ensure that two threads do not conflict over the power transitions - * that the hardware should make. - */ - spinlock_t power_change_lock; - - /** - * Callback for initializing the runtime power management. - * - * @param kbdev The kbase device - * - * @return 0 on success, else error code - */ - int (*callback_power_runtime_init)(struct kbase_device *kbdev); - - /** - * Callback for terminating the runtime power management. 
- * - * @param kbdev The kbase device - */ - void (*callback_power_runtime_term)(struct kbase_device *kbdev); - - /* Time in milliseconds between each dvfs sample */ - u32 dvfs_period; - - /* Period of GPU poweroff timer */ - ktime_t gpu_poweroff_time; - - /* Number of ticks of GPU poweroff timer before shader is powered off */ - int poweroff_shader_ticks; - - /* Number of ticks of GPU poweroff timer before GPU is powered off */ - int poweroff_gpu_ticks; - - struct kbase_pm_backend_data backend; -}; - -/** - * struct kbase_secure_ops - Platform specific functions for GPU secure mode - * operations - * @secure_mode_enable: Callback to enable secure mode on the GPU - * @secure_mode_disable: Callback to disable secure mode on the GPU - */ -struct kbase_secure_ops { - /** - * secure_mode_enable() - Enable secure mode on the GPU - * @kbdev: The kbase device - * - * Return: 0 on success, non-zero on error - */ - int (*secure_mode_enable)(struct kbase_device *kbdev); - - /** - * secure_mode_disable() - Disable secure mode on the GPU - * @kbdev: The kbase device - * - * Return: 0 on success, non-zero on error - */ - int (*secure_mode_disable)(struct kbase_device *kbdev); -}; - - -/** - * struct kbase_mem_pool - Page based memory pool for kctx/kbdev - * @kbdev: Kbase device where memory is used - * @cur_size: Number of free pages currently in the pool (may exceed @max_size - * in some corner cases) - * @max_size: Maximum number of free pages in the pool - * @pool_lock: Lock protecting the pool - must be held when modifying @cur_size - * and @page_list - * @page_list: List of free pages in the pool - * @reclaim: Shrinker for kernel reclaim of free pages - * @next_pool: Pointer to next pool where pages can be allocated when this pool - * is empty. Pages will spill over to the next pool when this pool - * is full. Can be NULL if there is no next pool. 
- */ -struct kbase_mem_pool { - struct kbase_device *kbdev; - size_t cur_size; - size_t max_size; - spinlock_t pool_lock; - struct list_head page_list; - struct shrinker reclaim; - - struct kbase_mem_pool *next_pool; -}; - - -#define DEVNAME_SIZE 16 - -struct kbase_device { - s8 slot_submit_count_irq[BASE_JM_MAX_NR_SLOTS]; - - u32 hw_quirks_sc; - u32 hw_quirks_tiler; - u32 hw_quirks_mmu; - u32 hw_quirks_jm; - - struct list_head entry; - struct device *dev; - struct miscdevice mdev; - u64 reg_start; - size_t reg_size; - void __iomem *reg; - struct { - int irq; - int flags; - } irqs[3]; -#ifdef CONFIG_HAVE_CLK - struct clk *clock; -#endif -#ifdef CONFIG_REGULATOR - struct regulator *regulator; -#endif - char devname[DEVNAME_SIZE]; - -#ifdef CONFIG_MALI_NO_MALI - void *model; - struct kmem_cache *irq_slab; - struct workqueue_struct *irq_workq; - atomic_t serving_job_irq; - atomic_t serving_gpu_irq; - atomic_t serving_mmu_irq; - spinlock_t reg_op_lock; -#endif /* CONFIG_MALI_NO_MALI */ - - struct kbase_pm_device_data pm; - struct kbasep_js_device_data js_data; - struct kbase_mem_pool mem_pool; - struct kbasep_mem_device memdev; - struct kbase_mmu_mode const *mmu_mode; - - struct kbase_as as[BASE_MAX_NR_AS]; - - spinlock_t mmu_mask_change; - - struct kbase_gpu_props gpu_props; - - /** List of SW workarounds for HW issues */ - unsigned long hw_issues_mask[(BASE_HW_ISSUE_END + BITS_PER_LONG - 1) / BITS_PER_LONG]; - /** List of features available */ - unsigned long hw_features_mask[(BASE_HW_FEATURE_END + BITS_PER_LONG - 1) / BITS_PER_LONG]; - - /* Bitmaps of cores that are currently in use (running jobs). - * These should be kept up to date by the job scheduler. - * - * pm.power_change_lock should be held when accessing these members. - * - * kbase_pm_check_transitions_nolock() should be called when bits are - * cleared to update the power management system and allow transitions to - * occur. 
*/ - u64 shader_inuse_bitmap; - - /* Refcount for cores in use */ - u32 shader_inuse_cnt[64]; - - /* Bitmaps of cores the JS needs for jobs ready to run */ - u64 shader_needed_bitmap; - - /* Refcount for cores needed */ - u32 shader_needed_cnt[64]; - - u32 tiler_inuse_cnt; - - u32 tiler_needed_cnt; - - /* struct for keeping track of the disjoint information - * - * The state is > 0 if the GPU is in a disjoint state. Otherwise 0 - * The count is the number of disjoint events that have occurred on the GPU - */ - struct { - atomic_t count; - atomic_t state; - } disjoint_event; - - /* Refcount for tracking users of the l2 cache, e.g. when using hardware counter instrumentation. */ - u32 l2_users_count; - - /* Bitmaps of cores that are currently available (powered up and the power policy is happy for jobs to be - * submitted to these cores. These are updated by the power management code. The job scheduler should avoid - * submitting new jobs to any cores that are not marked as available. - * - * pm.power_change_lock should be held when accessing these members. 
- */ - u64 shader_available_bitmap; - u64 tiler_available_bitmap; - u64 l2_available_bitmap; - - u64 shader_ready_bitmap; - u64 shader_transitioning_bitmap; - - s8 nr_hw_address_spaces; /**< Number of address spaces in the GPU (constant after driver initialisation) */ - s8 nr_user_address_spaces; /**< Number of address spaces available to user contexts */ - - /* Structure used for instrumentation and HW counters dumping */ - struct { - /* The lock should be used when accessing any of the following members */ - spinlock_t lock; - - struct kbase_context *kctx; - u64 addr; - - struct kbase_context *suspended_kctx; - struct kbase_uk_hwcnt_setup suspended_state; - - struct kbase_instr_backend backend; - } hwcnt; - - struct kbase_vinstr_context *vinstr_ctx; - - /*value to be written to the irq_throttle register each time an irq is served */ - atomic_t irq_throttle_cycles; - -#if KBASE_TRACE_ENABLE - spinlock_t trace_lock; - u16 trace_first_out; - u16 trace_next_in; - struct kbase_trace *trace_rbuf; -#endif - - /* This is used to override the current job scheduler values for - * JS_SCHEDULING_PERIOD_NS - * JS_SOFT_STOP_TICKS - * JS_SOFT_STOP_TICKS_CL - * JS_HARD_STOP_TICKS_SS - * JS_HARD_STOP_TICKS_CL - * JS_HARD_STOP_TICKS_DUMPING - * JS_RESET_TICKS_SS - * JS_RESET_TICKS_CL - * JS_RESET_TICKS_DUMPING. - * - * These values are set via the js_timeouts sysfs file. 
- */ - u32 js_scheduling_period_ns; - int js_soft_stop_ticks; - int js_soft_stop_ticks_cl; - int js_hard_stop_ticks_ss; - int js_hard_stop_ticks_cl; - int js_hard_stop_ticks_dumping; - int js_reset_ticks_ss; - int js_reset_ticks_cl; - int js_reset_ticks_dumping; - bool js_timeouts_updated; - - u32 reset_timeout_ms; - - struct mutex cacheclean_lock; - - /* Platform specific private data to be accessed by mali_kbase_config_xxx.c only */ - void *platform_context; - - /* List of kbase_contexts created */ - struct list_head kctx_list; - struct mutex kctx_list_lock; - -#ifdef CONFIG_MALI_MIDGARD_RT_PM - struct delayed_work runtime_pm_workqueue; -#endif - -#ifdef CONFIG_PM_DEVFREQ - struct devfreq_dev_profile devfreq_profile; - struct devfreq *devfreq; - unsigned long current_freq; - unsigned long current_voltage; -#ifdef CONFIG_DEVFREQ_THERMAL - struct devfreq_cooling_device *devfreq_cooling; -#endif -#endif - - struct kbase_ipa_context *ipa_ctx; - -#ifdef CONFIG_MALI_TRACE_TIMELINE - struct kbase_trace_kbdev_timeline timeline; -#endif - -#ifdef CONFIG_DEBUG_FS - /* directory for debugfs entries */ - struct dentry *mali_debugfs_directory; - /* Root directory for per context entry */ - struct dentry *debugfs_ctx_directory; - - /* failed job dump, used for separate debug process */ - bool job_fault_debug; - wait_queue_head_t job_fault_wq; - wait_queue_head_t job_fault_resume_wq; - struct workqueue_struct *job_fault_resume_workq; - struct list_head job_fault_event_list; - struct kbase_context *kctx_fault; - -#endif /* CONFIG_DEBUG_FS */ - - /* fbdump profiling controls set by gator */ - u32 kbase_profiling_controls[FBDUMP_CONTROL_MAX]; - - -#if MALI_CUSTOMER_RELEASE == 0 - /* Number of jobs that are run before a job is forced to fail and - * replay. May be KBASEP_FORCE_REPLAY_DISABLED, to disable forced - * failures. */ - int force_replay_limit; - /* Count of jobs between forced failures. Incremented on each job. 
A - * job is forced to fail once this is greater than or equal to - * force_replay_limit. */ - int force_replay_count; - /* Core requirement for jobs to be failed and replayed. May be zero. */ - base_jd_core_req force_replay_core_req; - /* true if force_replay_limit should be randomized. The random - * value will be in the range of 1 - KBASEP_FORCE_REPLAY_RANDOM_LIMIT. - */ - bool force_replay_random; -#endif - - /* Total number of created contexts */ - atomic_t ctx_num; - - struct kbase_hwaccess_data hwaccess; - - /* Count of page/bus faults waiting for workqueues to process */ - atomic_t faults_pending; - - /* true if GPU is powered off or power off operation is in progress */ - bool poweroff_pending; - - - /* defaults for new context created for this device */ - u32 infinite_cache_active_default; - size_t mem_pool_max_size_default; - - /* system coherency mode */ - u32 system_coherency; - - /* Secure operations */ - struct kbase_secure_ops *secure_ops; - - /* - * true when GPU is put into secure mode - */ - bool secure_mode; - - /* - * true if secure mode is supported - */ - bool secure_mode_support; - - -#ifdef CONFIG_MALI_DEBUG - wait_queue_head_t driver_inactive_wait; - bool driver_inactive; -#endif /* CONFIG_MALI_DEBUG */ - -#ifdef CONFIG_MALI_FPGA_BUS_LOGGER - /* - * Bus logger integration. - */ - struct bus_logger_client *buslogger; -#endif -}; - -/* JSCTX ringbuffer size must always be a power of 2 */ -#define JSCTX_RB_SIZE 256 -#define JSCTX_RB_MASK (JSCTX_RB_SIZE-1) - -/** - * struct jsctx_rb_entry - Entry in &struct jsctx_rb ring buffer - * @atom_id: Atom ID - */ -struct jsctx_rb_entry { - u16 atom_id; -}; - -/** - * struct jsctx_rb - JS context atom ring buffer - * @entries: Array of size %JSCTX_RB_SIZE which holds the &struct - * kbase_jd_atom pointers which make up the contents of the ring - * buffer. - * @read_idx: Index into @entries. 
Indicates the next entry in @entries to - * read, and is incremented when pulling an atom, and decremented - * when unpulling. - * HW access lock must be held when accessing. - * @write_idx: Index into @entries. Indicates the next entry to use when - * adding atoms into the ring buffer, and is incremented when - * adding a new atom. - * jctx->lock must be held when accessing. - * @running_idx: Index into @entries. Indicates the last valid entry, and is - * incremented when remving atoms from the ring buffer. - * HW access lock must be held when accessing. - * - * &struct jsctx_rb is a ring buffer of &struct kbase_jd_atom. - */ -struct jsctx_rb { - struct jsctx_rb_entry entries[JSCTX_RB_SIZE]; - - u16 read_idx; /* HW access lock must be held when accessing */ - u16 write_idx; /* jctx->lock must be held when accessing */ - u16 running_idx; /* HW access lock must be held when accessing */ -}; - -#define KBASE_API_VERSION(major, minor) ((((major) & 0xFFF) << 20) | \ - (((minor) & 0xFFF) << 8) | \ - ((0 & 0xFF) << 0)) - -struct kbase_context { - struct kbase_device *kbdev; - int id; /* System wide unique id */ - unsigned long api_version; - phys_addr_t pgd; - struct list_head event_list; - struct mutex event_mutex; - bool event_closed; - struct workqueue_struct *event_workq; - - bool is_compat; - - atomic_t setup_complete; - atomic_t setup_in_progress; - - u64 *mmu_teardown_pages; - - struct page *aliasing_sink_page; - - struct mutex reg_lock; /* To be converted to a rwlock? 
*/ - struct rb_root reg_rbtree; /* Red-Black tree of GPU regions (live regions) */ - - unsigned long cookies; - struct kbase_va_region *pending_regions[BITS_PER_LONG]; - - wait_queue_head_t event_queue; - pid_t tgid; - pid_t pid; - - struct kbase_jd_context jctx; - atomic_t used_pages; - atomic_t nonmapped_pages; - - struct kbase_mem_pool mem_pool; - - struct list_head waiting_soft_jobs; -#ifdef CONFIG_KDS - struct list_head waiting_kds_resource; -#endif - /** This is effectively part of the Run Pool, because it only has a valid - * setting (!=KBASEP_AS_NR_INVALID) whilst the context is scheduled in - * - * The kbasep_js_device_data::runpool_irq::lock must be held whilst accessing - * this. - * - * If the context relating to this as_nr is required, you must use - * kbasep_js_runpool_retain_ctx() to ensure that the context doesn't disappear - * whilst you're using it. Alternatively, just hold the kbasep_js_device_data::runpool_irq::lock - * to ensure the context doesn't disappear (but this has restrictions on what other locks - * you can take whilst doing this) */ - int as_nr; - - /* NOTE: - * - * Flags are in jctx.sched_info.ctx.flags - * Mutable flags *must* be accessed under jctx.sched_info.ctx.jsctx_mutex - * - * All other flags must be added there */ - spinlock_t mm_update_lock; - struct mm_struct *process_mm; - -#ifdef CONFIG_MALI_TRACE_TIMELINE - struct kbase_trace_kctx_timeline timeline; -#endif -#ifdef CONFIG_DEBUG_FS - /* Content of mem_profile file */ - char *mem_profile_data; - /* Size of @c mem_profile_data */ - size_t mem_profile_size; - /* Spinlock guarding data */ - spinlock_t mem_profile_lock; - struct dentry *kctx_dentry; - - /* for job fault debug */ - unsigned int *reg_dump; - atomic_t job_fault_count; - /* This list will keep the following atoms during the dump - * in the same context - */ - struct list_head job_fault_resume_event_list; - -#endif /* CONFIG_DEBUG_FS */ - - struct jsctx_rb jsctx_rb - 
[KBASE_JS_ATOM_SCHED_PRIO_COUNT][BASE_JM_MAX_NR_SLOTS]; - - /* Number of atoms currently pulled from this context */ - atomic_t atoms_pulled; - /* Number of atoms currently pulled from this context, per slot */ - atomic_t atoms_pulled_slot[BASE_JM_MAX_NR_SLOTS]; - /* true if last kick() caused atoms to be pulled from this context */ - bool pulled; - /* true if infinite cache is to be enabled for new allocations. Existing - * allocations will not change. bool stored as a u32 per Linux API */ - u32 infinite_cache_active; - /* Bitmask of slots that can be pulled from */ - u32 slots_pullable; - - /* true if address space assignment is pending */ - bool as_pending; - - /* Backend specific data */ - struct kbase_context_backend backend; - - /* Work structure used for deferred ASID assignment */ - struct work_struct work; - - /* Only one userspace vinstr client per kbase context */ - struct kbase_vinstr_client *vinstr_cli; - struct mutex vinstr_cli_lock; - - /* Must hold queue_mutex when accessing */ - bool ctx_active; - - /* List of completed jobs waiting for events to be posted */ - struct list_head completed_jobs; - /* Number of work items currently pending on job_done_wq */ - atomic_t work_count; -}; - -enum kbase_reg_access_type { - REG_READ, - REG_WRITE -}; - -enum kbase_share_attr_bits { - /* (1ULL << 8) bit is reserved */ - SHARE_BOTH_BITS = (2ULL << 8), /* inner and outer shareable coherency */ - SHARE_INNER_BITS = (3ULL << 8) /* inner shareable coherency */ -}; - -/* Conversion helpers for setting up high resolution timers */ -#define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime((x)*1000000U)) -#define HR_TIMER_DELAY_NSEC(x) (ns_to_ktime(x)) - -/* Maximum number of loops polling the GPU for a cache flush before we assume it must have completed */ -#define KBASE_CLEAN_CACHE_MAX_LOOPS 100000 -/* Maximum number of loops polling the GPU for an AS command to complete before we assume the GPU has hung */ -#define KBASE_AS_INACTIVE_MAX_LOOPS 100000 - -/* Maximum number of times 
a job can be replayed */ -#define BASEP_JD_REPLAY_LIMIT 15 - -#endif /* _KBASE_DEFS_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_device.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_device.c deleted file mode 100755 index 6b8a2854a7a7b..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_device.c +++ /dev/null @@ -1,657 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Base kernel device APIs - */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - -/* NOTE: Magic - 0x45435254 (TRCE in ASCII). - * Supports tracing feature provided in the base module. - * Please keep it in sync with the value of base module. 
- */ -#define TRACE_BUFFER_HEADER_SPECIAL 0x45435254 - -#if KBASE_TRACE_ENABLE -static const char *kbasep_trace_code_string[] = { - /* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE - * THIS MUST BE USED AT THE START OF THE ARRAY */ -#define KBASE_TRACE_CODE_MAKE_CODE(X) # X -#include "mali_kbase_trace_defs.h" -#undef KBASE_TRACE_CODE_MAKE_CODE -}; -#endif - -#define DEBUG_MESSAGE_SIZE 256 - -static int kbasep_trace_init(struct kbase_device *kbdev); -static void kbasep_trace_term(struct kbase_device *kbdev); -static void kbasep_trace_hook_wrapper(void *param); - -struct kbase_device *kbase_device_alloc(void) -{ - return kzalloc(sizeof(struct kbase_device), GFP_KERNEL); -} - -static int kbase_device_as_init(struct kbase_device *kbdev, int i) -{ - const char format[] = "mali_mmu%d"; - char name[sizeof(format)]; - const char poke_format[] = "mali_mmu%d_poker"; - char poke_name[sizeof(poke_format)]; - - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) - snprintf(poke_name, sizeof(poke_name), poke_format, i); - - snprintf(name, sizeof(name), format, i); - - kbdev->as[i].number = i; - kbdev->as[i].fault_addr = 0ULL; - - kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1); - if (!kbdev->as[i].pf_wq) - return -EINVAL; - - mutex_init(&kbdev->as[i].transaction_mutex); - INIT_WORK(&kbdev->as[i].work_pagefault, page_fault_worker); - INIT_WORK(&kbdev->as[i].work_busfault, bus_fault_worker); - - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) { - struct hrtimer *poke_timer = &kbdev->as[i].poke_timer; - struct work_struct *poke_work = &kbdev->as[i].poke_work; - - kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1); - if (!kbdev->as[i].poke_wq) { - destroy_workqueue(kbdev->as[i].pf_wq); - return -EINVAL; - } - KBASE_DEBUG_ASSERT(!object_is_on_stack(poke_work)); - INIT_WORK(poke_work, kbasep_as_do_poke); - - hrtimer_init(poke_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - - poke_timer->function = kbasep_as_poke_timer_callback; - - kbdev->as[i].poke_refcount = 0; 
- kbdev->as[i].poke_state = 0u; - } - - return 0; -} - -static void kbase_device_as_term(struct kbase_device *kbdev, int i) -{ - destroy_workqueue(kbdev->as[i].pf_wq); - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) - destroy_workqueue(kbdev->as[i].poke_wq); -} - -static int kbase_device_all_as_init(struct kbase_device *kbdev) -{ - int i, err; - - for (i = 0; i < kbdev->nr_hw_address_spaces; i++) { - err = kbase_device_as_init(kbdev, i); - if (err) - goto free_workqs; - } - - return 0; - -free_workqs: - for (; i > 0; i--) - kbase_device_as_term(kbdev, i); - - return err; -} - -static void kbase_device_all_as_term(struct kbase_device *kbdev) -{ - int i; - - for (i = 0; i < kbdev->nr_hw_address_spaces; i++) - kbase_device_as_term(kbdev, i); -} - -int kbase_device_init(struct kbase_device * const kbdev) -{ - int i, err; - - spin_lock_init(&kbdev->mmu_mask_change); - /* Get the list of workarounds for issues on the current HW - * (identified by the GPU_ID register) - */ - err = kbase_hw_set_issues_mask(kbdev); - if (err) - goto fail; - - /* Set the list of features available on the current HW - * (identified by the GPU_ID register) - */ - kbase_hw_set_features_mask(kbdev); - - /* On Linux 4.0+, dma coherency is determined from device tree */ -#if defined(CONFIG_ARM64) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) - set_dma_ops(kbdev->dev, &noncoherent_swiotlb_dma_ops); -#endif - - /* Workaround a pre-3.13 Linux issue, where dma_mask is NULL when our - * device structure was created by device-tree - */ - if (!kbdev->dev->dma_mask) - kbdev->dev->dma_mask = &kbdev->dev->coherent_dma_mask; - - err = dma_set_mask(kbdev->dev, - DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits)); - if (err) - goto dma_set_mask_failed; - - err = dma_set_coherent_mask(kbdev->dev, - DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits)); - if (err) - goto dma_set_mask_failed; - - kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces; - - err = kbase_device_all_as_init(kbdev); - if (err) - goto 
as_init_failed; - - spin_lock_init(&kbdev->hwcnt.lock); - - err = kbasep_trace_init(kbdev); - if (err) - goto term_as; - - mutex_init(&kbdev->cacheclean_lock); - -#ifdef CONFIG_MALI_TRACE_TIMELINE - for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i) - kbdev->timeline.slot_atoms_submitted[i] = 0; - - for (i = 0; i <= KBASEP_TIMELINE_PM_EVENT_LAST; ++i) - atomic_set(&kbdev->timeline.pm_event_uid[i], 0); -#endif /* CONFIG_MALI_TRACE_TIMELINE */ - - /* fbdump profiling controls set to 0 - fbdump not enabled until changed by gator */ - for (i = 0; i < FBDUMP_CONTROL_MAX; i++) - kbdev->kbase_profiling_controls[i] = 0; - - kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev); - - atomic_set(&kbdev->ctx_num, 0); - - err = kbase_instr_backend_init(kbdev); - if (err) - goto term_trace; - - kbdev->pm.dvfs_period = DEFAULT_PM_DVFS_PERIOD; - - kbdev->reset_timeout_ms = DEFAULT_RESET_TIMEOUT_MS; - - kbdev->mmu_mode = kbase_mmu_mode_get_lpae(); - -#ifdef CONFIG_MALI_DEBUG - init_waitqueue_head(&kbdev->driver_inactive_wait); -#endif /* CONFIG_MALI_DEBUG */ - - return 0; -term_trace: - kbasep_trace_term(kbdev); -term_as: - kbase_device_all_as_term(kbdev); -as_init_failed: -dma_set_mask_failed: -fail: - return err; -} - -void kbase_device_term(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev); - -#if KBASE_TRACE_ENABLE - kbase_debug_assert_register_hook(NULL, NULL); -#endif - - kbase_instr_backend_term(kbdev); - - kbasep_trace_term(kbdev); - - kbase_device_all_as_term(kbdev); -} - -void kbase_device_free(struct kbase_device *kbdev) -{ - kfree(kbdev); -} - -void kbase_device_trace_buffer_install(struct kbase_context *kctx, u32 *tb, size_t size) -{ - unsigned long flags; - - KBASE_DEBUG_ASSERT(kctx); - KBASE_DEBUG_ASSERT(tb); - - /* set up the header */ - /* magic number in the first 4 bytes */ - tb[0] = TRACE_BUFFER_HEADER_SPECIAL; - /* Store (write offset = 0, wrap counter = 0, transaction active = no) - * write offset 0 means never written. 
- * Offsets 1 to (wrap_offset - 1) used to store values when trace started - */ - tb[1] = 0; - - /* install trace buffer */ - spin_lock_irqsave(&kctx->jctx.tb_lock, flags); - kctx->jctx.tb_wrap_offset = size / 8; - kctx->jctx.tb = tb; - spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags); -} - -void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx) -{ - unsigned long flags; - - KBASE_DEBUG_ASSERT(kctx); - spin_lock_irqsave(&kctx->jctx.tb_lock, flags); - kctx->jctx.tb = NULL; - kctx->jctx.tb_wrap_offset = 0; - spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags); -} - -void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value) -{ - unsigned long flags; - - spin_lock_irqsave(&kctx->jctx.tb_lock, flags); - if (kctx->jctx.tb) { - u16 wrap_count; - u16 write_offset; - u32 *tb = kctx->jctx.tb; - u32 header_word; - - header_word = tb[1]; - KBASE_DEBUG_ASSERT(0 == (header_word & 0x1)); - - wrap_count = (header_word >> 1) & 0x7FFF; - write_offset = (header_word >> 16) & 0xFFFF; - - /* mark as transaction in progress */ - tb[1] |= 0x1; - mb(); - - /* calculate new offset */ - write_offset++; - if (write_offset == kctx->jctx.tb_wrap_offset) { - /* wrap */ - write_offset = 1; - wrap_count++; - wrap_count &= 0x7FFF; /* 15bit wrap counter */ - } - - /* store the trace entry at the selected offset */ - tb[write_offset * 2 + 0] = (reg_offset & ~0x3) | ((type == REG_WRITE) ? 
0x1 : 0x0); - tb[write_offset * 2 + 1] = reg_value; - mb(); - - /* new header word */ - header_word = (write_offset << 16) | (wrap_count << 1) | 0x0; /* transaction complete */ - tb[1] = header_word; - } - spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags); -} - -/* - * Device trace functions - */ -#if KBASE_TRACE_ENABLE - -static int kbasep_trace_init(struct kbase_device *kbdev) -{ - struct kbase_trace *rbuf; - - rbuf = kmalloc_array(KBASE_TRACE_SIZE, sizeof(*rbuf), GFP_KERNEL); - - if (!rbuf) - return -EINVAL; - - kbdev->trace_rbuf = rbuf; - spin_lock_init(&kbdev->trace_lock); - return 0; -} - -static void kbasep_trace_term(struct kbase_device *kbdev) -{ - kfree(kbdev->trace_rbuf); -} - -static void kbasep_trace_format_msg(struct kbase_trace *trace_msg, char *buffer, int len) -{ - s32 written = 0; - - /* Initial part of message */ - written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d.%.6d,%d,%d,%s,%p,", (int)trace_msg->timestamp.tv_sec, (int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id, trace_msg->cpu, kbasep_trace_code_string[trace_msg->code], trace_msg->ctx), 0); - - if (trace_msg->katom) - written += MAX(snprintf(buffer + written, MAX(len - written, 0), "atom %d (ud: 0x%llx 0x%llx)", trace_msg->atom_number, trace_msg->atom_udata[0], trace_msg->atom_udata[1]), 0); - - written += MAX(snprintf(buffer + written, MAX(len - written, 0), ",%.8llx,", trace_msg->gpu_addr), 0); - - /* NOTE: Could add function callbacks to handle different message types */ - /* Jobslot present */ - if (trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT) - written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->jobslot), 0); - - written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0); - - /* Refcount present */ - if (trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT) - written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->refcount), 0); - - written += MAX(snprintf(buffer + written, MAX(len - 
written, 0), ","), 0); - - /* Rest of message */ - written += MAX(snprintf(buffer + written, MAX(len - written, 0), "0x%.8lx", trace_msg->info_val), 0); -} - -static void kbasep_trace_dump_msg(struct kbase_device *kbdev, struct kbase_trace *trace_msg) -{ - char buffer[DEBUG_MESSAGE_SIZE]; - - kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE); - dev_dbg(kbdev->dev, "%s", buffer); -} - -void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val) -{ - unsigned long irqflags; - struct kbase_trace *trace_msg; - - spin_lock_irqsave(&kbdev->trace_lock, irqflags); - - trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in]; - - /* Fill the message */ - trace_msg->thread_id = task_pid_nr(current); - trace_msg->cpu = task_cpu(current); - - getnstimeofday(&trace_msg->timestamp); - - trace_msg->code = code; - trace_msg->ctx = ctx; - - if (NULL == katom) { - trace_msg->katom = false; - } else { - trace_msg->katom = true; - trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom); - trace_msg->atom_udata[0] = katom->udata.blob[0]; - trace_msg->atom_udata[1] = katom->udata.blob[1]; - } - - trace_msg->gpu_addr = gpu_addr; - trace_msg->jobslot = jobslot; - trace_msg->refcount = MIN((unsigned int)refcount, 0xFF); - trace_msg->info_val = info_val; - trace_msg->flags = flags; - - /* Update the ringbuffer indices */ - kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK; - if (kbdev->trace_next_in == kbdev->trace_first_out) - kbdev->trace_first_out = (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK; - - /* Done */ - - spin_unlock_irqrestore(&kbdev->trace_lock, irqflags); -} - -void kbasep_trace_clear(struct kbase_device *kbdev) -{ - unsigned long flags; - - spin_lock_irqsave(&kbdev->trace_lock, flags); - kbdev->trace_first_out = kbdev->trace_next_in; - spin_unlock_irqrestore(&kbdev->trace_lock, flags); -} - -void 
kbasep_trace_dump(struct kbase_device *kbdev) -{ - unsigned long flags; - u32 start; - u32 end; - - dev_dbg(kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val"); - spin_lock_irqsave(&kbdev->trace_lock, flags); - start = kbdev->trace_first_out; - end = kbdev->trace_next_in; - - while (start != end) { - struct kbase_trace *trace_msg = &kbdev->trace_rbuf[start]; - - kbasep_trace_dump_msg(kbdev, trace_msg); - - start = (start + 1) & KBASE_TRACE_MASK; - } - dev_dbg(kbdev->dev, "TRACE_END"); - - spin_unlock_irqrestore(&kbdev->trace_lock, flags); - - KBASE_TRACE_CLEAR(kbdev); -} - -static void kbasep_trace_hook_wrapper(void *param) -{ - struct kbase_device *kbdev = (struct kbase_device *)param; - - kbasep_trace_dump(kbdev); -} - -#ifdef CONFIG_DEBUG_FS -struct trace_seq_state { - struct kbase_trace trace_buf[KBASE_TRACE_SIZE]; - u32 start; - u32 end; -}; - -static void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos) -{ - struct trace_seq_state *state = s->private; - int i; - - if (*pos > KBASE_TRACE_SIZE) - return NULL; - i = state->start + *pos; - if ((state->end >= state->start && i >= state->end) || - i >= state->end + KBASE_TRACE_SIZE) - return NULL; - - i &= KBASE_TRACE_MASK; - - return &state->trace_buf[i]; -} - -static void kbasep_trace_seq_stop(struct seq_file *s, void *data) -{ -} - -static void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos) -{ - struct trace_seq_state *state = s->private; - int i; - - (*pos)++; - - i = (state->start + *pos) & KBASE_TRACE_MASK; - if (i == state->end) - return NULL; - - return &state->trace_buf[i]; -} - -static int kbasep_trace_seq_show(struct seq_file *s, void *data) -{ - struct kbase_trace *trace_msg = data; - char buffer[DEBUG_MESSAGE_SIZE]; - - kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE); - seq_printf(s, "%s\n", buffer); - return 0; -} - -static const struct seq_operations kbasep_trace_seq_ops = { - .start = kbasep_trace_seq_start, - 
.next = kbasep_trace_seq_next, - .stop = kbasep_trace_seq_stop, - .show = kbasep_trace_seq_show, -}; - -static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file) -{ - struct kbase_device *kbdev = inode->i_private; - unsigned long flags; - - struct trace_seq_state *state; - - state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state)); - if (!state) - return -ENOMEM; - - spin_lock_irqsave(&kbdev->trace_lock, flags); - state->start = kbdev->trace_first_out; - state->end = kbdev->trace_next_in; - memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf)); - spin_unlock_irqrestore(&kbdev->trace_lock, flags); - - return 0; -} - -static const struct file_operations kbasep_trace_debugfs_fops = { - .open = kbasep_trace_debugfs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release_private, -}; - -void kbasep_trace_debugfs_init(struct kbase_device *kbdev) -{ - debugfs_create_file("mali_trace", S_IRUGO, - kbdev->mali_debugfs_directory, kbdev, - &kbasep_trace_debugfs_fops); -} - -#else -void kbasep_trace_debugfs_init(struct kbase_device *kbdev) -{ -} -#endif /* CONFIG_DEBUG_FS */ - -#else /* KBASE_TRACE_ENABLE */ -static int kbasep_trace_init(struct kbase_device *kbdev) -{ - CSTD_UNUSED(kbdev); - return 0; -} - -static void kbasep_trace_term(struct kbase_device *kbdev) -{ - CSTD_UNUSED(kbdev); -} - -static void kbasep_trace_hook_wrapper(void *param) -{ - CSTD_UNUSED(param); -} - -void kbasep_trace_dump(struct kbase_device *kbdev) -{ - CSTD_UNUSED(kbdev); -} -#endif /* KBASE_TRACE_ENABLE */ - -void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value) -{ - switch (control) { - case FBDUMP_CONTROL_ENABLE: - /* fall through */ - case FBDUMP_CONTROL_RATE: - /* fall through */ - case SW_COUNTER_ENABLE: - /* fall through */ - case FBDUMP_CONTROL_RESIZE_FACTOR: - kbdev->kbase_profiling_controls[control] = value; - break; - default: - dev_err(kbdev->dev, "Profiling control %d not found\n", 
control); - break; - } -} - -u32 kbase_get_profiling_control(struct kbase_device *kbdev, u32 control) -{ - u32 ret_value = 0; - - switch (control) { - case FBDUMP_CONTROL_ENABLE: - /* fall through */ - case FBDUMP_CONTROL_RATE: - /* fall through */ - case SW_COUNTER_ENABLE: - /* fall through */ - case FBDUMP_CONTROL_RESIZE_FACTOR: - ret_value = kbdev->kbase_profiling_controls[control]; - break; - default: - dev_err(kbdev->dev, "Profiling control %d not found\n", control); - break; - } - - return ret_value; -} - -/* - * Called by gator to control the production of - * profiling information at runtime - * */ - -void _mali_profiling_control(u32 action, u32 value) -{ - struct kbase_device *kbdev = NULL; - - /* find the first i.e. call with -1 */ - kbdev = kbase_find_device(-1); - - if (NULL != kbdev) - kbase_set_profiling_control(kbdev, action, value); -} -KBASE_EXPORT_SYMBOL(_mali_profiling_control); - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c deleted file mode 100755 index f70bcccf4050f..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c +++ /dev/null @@ -1,76 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/* - * Base kernel disjoint events helper functions - */ - -#include - -void kbase_disjoint_init(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - atomic_set(&kbdev->disjoint_event.count, 0); - atomic_set(&kbdev->disjoint_event.state, 0); -} - -/* increment the disjoint event count */ -void kbase_disjoint_event(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - atomic_inc(&kbdev->disjoint_event.count); -} - -/* increment the state and the event counter */ -void kbase_disjoint_state_up(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - atomic_inc(&kbdev->disjoint_event.state); - - kbase_disjoint_event(kbdev); -} - -/* decrement the state */ -void kbase_disjoint_state_down(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(atomic_read(&kbdev->disjoint_event.state) > 0); - - kbase_disjoint_event(kbdev); - - atomic_dec(&kbdev->disjoint_event.state); -} - -/* increments the count only if the state is > 0 */ -void kbase_disjoint_event_potential(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - if (atomic_read(&kbdev->disjoint_event.state)) - kbase_disjoint_event(kbdev); -} - -u32 kbase_disjoint_event_get(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev != NULL); - - return atomic_read(&kbdev->disjoint_event.count); -} -KBASE_EXPORT_TEST_API(kbase_disjoint_event_get); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_event.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_event.c deleted file mode 100755 index 25b30f0b976e5..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_event.c +++ /dev/null @@ -1,224 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#include -#include - -#if defined(CONFIG_MALI_MIPE_ENABLED) -#include -#endif - -static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, struct kbase_jd_atom *katom) -{ - struct base_jd_udata data; - - lockdep_assert_held(&kctx->jctx.lock); - - KBASE_DEBUG_ASSERT(kctx != NULL); - KBASE_DEBUG_ASSERT(katom != NULL); - KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED); - - data = katom->udata; - - KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(1, &kctx->timeline.jd_atoms_in_flight)); - -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_tl_nret_atom_ctx(katom, kctx); - kbase_tlstream_tl_del_atom(katom); -#endif - - katom->status = KBASE_JD_ATOM_STATE_UNUSED; - - wake_up(&katom->completed); - - return data; -} - -int kbase_event_pending(struct kbase_context *ctx) -{ - int ret; - - KBASE_DEBUG_ASSERT(ctx); - - mutex_lock(&ctx->event_mutex); - ret = (!list_empty(&ctx->event_list)) || (true == ctx->event_closed); - mutex_unlock(&ctx->event_mutex); - - return ret; -} - -KBASE_EXPORT_TEST_API(kbase_event_pending); - -int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent) -{ - struct kbase_jd_atom *atom; - - KBASE_DEBUG_ASSERT(ctx); - - mutex_lock(&ctx->event_mutex); - - if (list_empty(&ctx->event_list)) { - if (!ctx->event_closed) { - mutex_unlock(&ctx->event_mutex); - return -1; - } - - /* generate the BASE_JD_EVENT_DRV_TERMINATED message on the fly */ - mutex_unlock(&ctx->event_mutex); - uevent->event_code = 
BASE_JD_EVENT_DRV_TERMINATED; - memset(&uevent->udata, 0, sizeof(uevent->udata)); - dev_dbg(ctx->kbdev->dev, - "event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n", - BASE_JD_EVENT_DRV_TERMINATED); - return 0; - } - - /* normal event processing */ - atom = list_entry(ctx->event_list.next, struct kbase_jd_atom, dep_item[0]); - list_del(ctx->event_list.next); - - mutex_unlock(&ctx->event_mutex); - - dev_dbg(ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom); - uevent->event_code = atom->event_code; - uevent->atom_number = (atom - ctx->jctx.atoms); - - if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) - kbase_jd_free_external_resources(atom); - - mutex_lock(&ctx->jctx.lock); - uevent->udata = kbase_event_process(ctx, atom); - mutex_unlock(&ctx->jctx.lock); - - return 0; -} - -KBASE_EXPORT_TEST_API(kbase_event_dequeue); - -/** - * kbase_event_process_noreport_worker - Worker for processing atoms that do not - * return an event but do have external - * resources - * @data: Work structure - */ -static void kbase_event_process_noreport_worker(struct work_struct *data) -{ - struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, - work); - struct kbase_context *kctx = katom->kctx; - - if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) - kbase_jd_free_external_resources(katom); - - mutex_lock(&kctx->jctx.lock); - kbase_event_process(kctx, katom); - mutex_unlock(&kctx->jctx.lock); -} - -/** - * kbase_event_process_noreport - Process atoms that do not return an event - * @kctx: Context pointer - * @katom: Atom to be processed - * - * Atoms that do not have external resources will be processed immediately. - * Atoms that do have external resources will be processed on a workqueue, in - * order to avoid locking issues. 
- */ -static void kbase_event_process_noreport(struct kbase_context *kctx, - struct kbase_jd_atom *katom) -{ - if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) { - INIT_WORK(&katom->work, kbase_event_process_noreport_worker); - queue_work(kctx->event_workq, &katom->work); - } else { - kbase_event_process(kctx, katom); - } -} - -void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom) -{ - if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE) { - if (atom->event_code == BASE_JD_EVENT_DONE) { - /* Don't report the event */ - kbase_event_process_noreport(ctx, atom); - return; - } - } - - if (atom->core_req & BASEP_JD_REQ_EVENT_NEVER) { - /* Don't report the event */ - kbase_event_process_noreport(ctx, atom); - return; - } - - mutex_lock(&ctx->event_mutex); - list_add_tail(&atom->dep_item[0], &ctx->event_list); - mutex_unlock(&ctx->event_mutex); - - kbase_event_wakeup(ctx); -} -KBASE_EXPORT_TEST_API(kbase_event_post); - -void kbase_event_close(struct kbase_context *kctx) -{ - mutex_lock(&kctx->event_mutex); - kctx->event_closed = true; - mutex_unlock(&kctx->event_mutex); - kbase_event_wakeup(kctx); -} - -int kbase_event_init(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(kctx); - - INIT_LIST_HEAD(&kctx->event_list); - mutex_init(&kctx->event_mutex); - kctx->event_closed = false; - kctx->event_workq = alloc_workqueue("kbase_event", WQ_MEM_RECLAIM, 1); - - if (NULL == kctx->event_workq) - return -EINVAL; - - return 0; -} - -KBASE_EXPORT_TEST_API(kbase_event_init); - -void kbase_event_cleanup(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(kctx); - KBASE_DEBUG_ASSERT(kctx->event_workq); - - flush_workqueue(kctx->event_workq); - destroy_workqueue(kctx->event_workq); - - /* We use kbase_event_dequeue to remove the remaining events as that - * deals with all the cleanup needed for the atoms. 
- * - * Note: use of kctx->event_list without a lock is safe because this must be the last - * thread using it (because we're about to terminate the lock) - */ - while (!list_empty(&kctx->event_list)) { - struct base_jd_event_v2 event; - - kbase_event_dequeue(kctx, &event); - } -} - -KBASE_EXPORT_TEST_API(kbase_event_cleanup); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gator.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gator.h deleted file mode 100755 index ce65b5562a2b5..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gator.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/* NB taken from gator */ -/* - * List of possible actions to be controlled by DS-5 Streamline. - * The following numbers are used by gator to control the frame buffer dumping - * and s/w counter reporting. We cannot use the enums in mali_uk_types.h because - * they are unknown inside gator. 
- */ -#ifndef _KBASE_GATOR_H_ -#define _KBASE_GATOR_H_ - -#ifdef CONFIG_MALI_GATOR_SUPPORT -#define GATOR_MAKE_EVENT(type, number) (((type) << 24) | ((number) << 16)) -#define GATOR_JOB_SLOT_START 1 -#define GATOR_JOB_SLOT_STOP 2 -#define GATOR_JOB_SLOT_SOFT_STOPPED 3 - -void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id); -void kbase_trace_mali_pm_status(u32 event, u64 value); -void kbase_trace_mali_pm_power_off(u32 event, u64 value); -void kbase_trace_mali_pm_power_on(u32 event, u64 value); -void kbase_trace_mali_page_fault_insert_pages(int event, u32 value); -void kbase_trace_mali_mmu_as_in_use(int event); -void kbase_trace_mali_mmu_as_released(int event); -void kbase_trace_mali_total_alloc_pages_change(long long int event); - -#endif /* CONFIG_MALI_GATOR_SUPPORT */ - -#endif /* _KBASE_GATOR_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gator_api.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gator_api.c deleted file mode 100755 index a2174b24ac3cd..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gator_api.c +++ /dev/null @@ -1,322 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#include "mali_kbase.h" -#include "mali_kbase_hw.h" -#include "mali_kbase_mem_linux.h" -#include "mali_kbase_gator_api.h" -#include "mali_kbase_gator_hwcnt_names.h" -#include "mali_kbase_instr.h" - -#define MALI_MAX_CORES_PER_GROUP 4 -#define MALI_MAX_NUM_BLOCKS_PER_GROUP 8 -#define MALI_COUNTERS_PER_BLOCK 64 -#define MALI_BYTES_PER_COUNTER 4 - -struct kbase_gator_hwcnt_handles { - struct kbase_device *kbdev; - struct kbase_context *kctx; - u64 hwcnt_gpu_va; - void *hwcnt_cpu_va; - struct kbase_vmap_struct hwcnt_map; -}; - -const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters) -{ - uint32_t gpu_id; - const char * const *hardware_counters; - struct kbase_device *kbdev; - - if (!total_counters) - return NULL; - - /* Get the first device - it doesn't matter in this case */ - kbdev = kbase_find_device(-1); - if (!kbdev) - return NULL; - - gpu_id = kbdev->gpu_props.props.core_props.product_id; - - switch (gpu_id) { - /* If we are using a Mali-T60x device */ - case GPU_ID_PI_T60X: - hardware_counters = hardware_counters_mali_t60x; - *total_counters = ARRAY_SIZE(hardware_counters_mali_t60x); - break; - /* If we are using a Mali-T62x device */ - case GPU_ID_PI_T62X: - hardware_counters = hardware_counters_mali_t62x; - *total_counters = ARRAY_SIZE(hardware_counters_mali_t62x); - break; - /* If we are using a Mali-T72x device */ - case GPU_ID_PI_T72X: - hardware_counters = hardware_counters_mali_t72x; - *total_counters = ARRAY_SIZE(hardware_counters_mali_t72x); - break; - /* If we are using a Mali-T76x device */ - case GPU_ID_PI_T76X: - hardware_counters = hardware_counters_mali_t76x; - *total_counters = ARRAY_SIZE(hardware_counters_mali_t76x); - break; - /* If we are using a Mali-T82x device */ - case GPU_ID_PI_T82X: - hardware_counters = hardware_counters_mali_t82x; - *total_counters = ARRAY_SIZE(hardware_counters_mali_t82x); - break; - /* If we are using a Mali-T83x device */ - case GPU_ID_PI_T83X: - hardware_counters = 
hardware_counters_mali_t83x; - *total_counters = ARRAY_SIZE(hardware_counters_mali_t83x); - break; - /* If we are using a Mali-T86x device */ - case GPU_ID_PI_T86X: - hardware_counters = hardware_counters_mali_t86x; - *total_counters = ARRAY_SIZE(hardware_counters_mali_t86x); - break; - /* If we are using a Mali-T88x device */ - case GPU_ID_PI_TFRX: - hardware_counters = hardware_counters_mali_t88x; - *total_counters = ARRAY_SIZE(hardware_counters_mali_t88x); - break; - default: - hardware_counters = NULL; - *total_counters = 0; - dev_err(kbdev->dev, "Unrecognized gpu ID: %u\n", gpu_id); - break; - } - - /* Release the kbdev reference. */ - kbase_release_device(kbdev); - - /* If we return a string array take a reference on the module (or fail). */ - if (hardware_counters && !try_module_get(THIS_MODULE)) - return NULL; - - return hardware_counters; -} -KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init_names); - -void kbase_gator_hwcnt_term_names(void) -{ - /* Release the module reference. */ - module_put(THIS_MODULE); -} -KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term_names); - -struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info) -{ - struct kbase_gator_hwcnt_handles *hand; - struct kbase_uk_hwcnt_setup setup; - int err; - uint32_t dump_size = 0, i = 0; - struct kbase_va_region *reg; - u64 flags; - u64 nr_pages; - u16 va_alignment = 0; - - if (!in_out_info) - return NULL; - - hand = kzalloc(sizeof(*hand), GFP_KERNEL); - if (!hand) - return NULL; - - /* Get the first device */ - hand->kbdev = kbase_find_device(-1); - if (!hand->kbdev) - goto free_hand; - - /* Create a kbase_context */ - hand->kctx = kbase_create_context(hand->kbdev, true); - if (!hand->kctx) - goto release_device; - - in_out_info->nr_cores = hand->kbdev->gpu_props.num_cores; - in_out_info->nr_core_groups = hand->kbdev->gpu_props.num_core_groups; - in_out_info->gpu_id = hand->kbdev->gpu_props.props.core_props.product_id; - - /* If we are using a v4 device 
(Mali-T6xx or Mali-T72x) */ - if (kbase_hw_has_feature(hand->kbdev, BASE_HW_FEATURE_V4)) { - uint32_t cg, j; - uint64_t core_mask; - - /* There are 8 hardware counters blocks per core group */ - in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) * - MALI_MAX_NUM_BLOCKS_PER_GROUP * - in_out_info->nr_core_groups, GFP_KERNEL); - - if (!in_out_info->hwc_layout) - goto destroy_context; - - dump_size = in_out_info->nr_core_groups * - MALI_MAX_NUM_BLOCKS_PER_GROUP * - MALI_COUNTERS_PER_BLOCK * - MALI_BYTES_PER_COUNTER; - - for (cg = 0; cg < in_out_info->nr_core_groups; cg++) { - core_mask = hand->kbdev->gpu_props.props.coherency_info.group[cg].core_mask; - - for (j = 0; j < MALI_MAX_CORES_PER_GROUP; j++) { - if (core_mask & (1u << j)) - in_out_info->hwc_layout[i++] = SHADER_BLOCK; - else - in_out_info->hwc_layout[i++] = RESERVED_BLOCK; - } - - in_out_info->hwc_layout[i++] = TILER_BLOCK; - in_out_info->hwc_layout[i++] = MMU_L2_BLOCK; - - in_out_info->hwc_layout[i++] = RESERVED_BLOCK; - - if (0 == cg) - in_out_info->hwc_layout[i++] = JM_BLOCK; - else - in_out_info->hwc_layout[i++] = RESERVED_BLOCK; - } - /* If we are using any other device */ - } else { - uint32_t nr_l2, nr_sc, j; - uint64_t core_mask; - - nr_l2 = hand->kbdev->gpu_props.props.l2_props.num_l2_slices; - - core_mask = hand->kbdev->gpu_props.props.coherency_info.group[0].core_mask; - - nr_sc = hand->kbdev->gpu_props.props.coherency_info.group[0].num_cores; - - /* The job manager and tiler sets of counters - * are always present */ - in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) * (2 + nr_sc + nr_l2), GFP_KERNEL); - - if (!in_out_info->hwc_layout) - goto destroy_context; - - dump_size = (2 + nr_sc + nr_l2) * MALI_COUNTERS_PER_BLOCK * MALI_BYTES_PER_COUNTER; - - in_out_info->hwc_layout[i++] = JM_BLOCK; - in_out_info->hwc_layout[i++] = TILER_BLOCK; - - for (j = 0; j < nr_l2; j++) - in_out_info->hwc_layout[i++] = MMU_L2_BLOCK; - - while (core_mask != 0ull) { - if ((core_mask & 1ull) != 0ull) - 
in_out_info->hwc_layout[i++] = SHADER_BLOCK; - else - in_out_info->hwc_layout[i++] = RESERVED_BLOCK; - core_mask >>= 1; - } - } - - in_out_info->nr_hwc_blocks = i; - - in_out_info->size = dump_size; - - flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR; - nr_pages = PFN_UP(dump_size); - reg = kbase_mem_alloc(hand->kctx, nr_pages, nr_pages, 0, - &flags, &hand->hwcnt_gpu_va, &va_alignment); - if (!reg) - goto free_layout; - - hand->hwcnt_cpu_va = kbase_vmap(hand->kctx, hand->hwcnt_gpu_va, - dump_size, &hand->hwcnt_map); - - if (!hand->hwcnt_cpu_va) - goto free_buffer; - - in_out_info->kernel_dump_buffer = hand->hwcnt_cpu_va; - memset(in_out_info->kernel_dump_buffer, 0, nr_pages * PAGE_SIZE); - - /*setup.dump_buffer = (uintptr_t)in_out_info->kernel_dump_buffer;*/ - setup.dump_buffer = hand->hwcnt_gpu_va; - setup.jm_bm = in_out_info->bitmask[0]; - setup.tiler_bm = in_out_info->bitmask[1]; - setup.shader_bm = in_out_info->bitmask[2]; - setup.mmu_l2_bm = in_out_info->bitmask[3]; - - err = kbase_instr_hwcnt_enable(hand->kctx, &setup); - if (err) - goto free_unmap; - - kbase_instr_hwcnt_clear(hand->kctx); - - return hand; - -free_unmap: - kbase_vunmap(hand->kctx, &hand->hwcnt_map); - -free_buffer: - kbase_mem_free(hand->kctx, hand->hwcnt_gpu_va); - -free_layout: - kfree(in_out_info->hwc_layout); - -destroy_context: - kbase_destroy_context(hand->kctx); - -release_device: - kbase_release_device(hand->kbdev); - -free_hand: - kfree(hand); - - return NULL; -} -KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init); - -void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles) -{ - if (in_out_info) - kfree(in_out_info->hwc_layout); - - if (opaque_handles) { - kbase_instr_hwcnt_disable(opaque_handles->kctx); - kbase_vunmap(opaque_handles->kctx, &opaque_handles->hwcnt_map); - kbase_mem_free(opaque_handles->kctx, opaque_handles->hwcnt_gpu_va); - kbase_destroy_context(opaque_handles->kctx); - 
kbase_release_device(opaque_handles->kbdev); - kfree(opaque_handles); - } -} -KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term); - -uint32_t kbase_gator_instr_hwcnt_dump_complete( - struct kbase_gator_hwcnt_handles *opaque_handles, - uint32_t * const success) -{ - bool ret_res, success_res; - - if (opaque_handles && success) { - ret_res = kbase_instr_hwcnt_dump_complete(opaque_handles->kctx, - &success_res); - *success = (uint32_t)success_res; - return (uint32_t)(ret_res != 0); - } - return 0; -} -KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_complete); - -uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles) -{ - if (opaque_handles) - return (kbase_instr_hwcnt_request_dump( - opaque_handles->kctx) == 0); - - return 0; -} -KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_irq); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gator_api.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gator_api.h deleted file mode 100755 index ef9ac0f7b633d..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gator_api.h +++ /dev/null @@ -1,219 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifndef _KBASE_GATOR_API_H_ -#define _KBASE_GATOR_API_H_ - -/** - * @brief This file describes the API used by Gator to fetch hardware counters. - */ - -/* This define is used by the gator kernel module compile to select which DDK - * API calling convention to use. 
If not defined (legacy DDK) gator assumes - * version 1. The version to DDK release mapping is: - * Version 1 API: DDK versions r1px, r2px - * Version 2 API: DDK versions r3px, r4px - * Version 3 API: DDK version r5p0 and newer - * - * API Usage - * ========= - * - * 1] Call kbase_gator_hwcnt_init_names() to return the list of short counter - * names for the GPU present in this device. - * - * 2] Create a kbase_gator_hwcnt_info structure and set the counter enables for - * the counters you want enabled. The enables can all be set for simplicity in - * most use cases, but disabling some will let you minimize bandwidth impact. - * - * 3] Call kbase_gator_hwcnt_init() using the above structure, to create a - * counter context. On successful return the DDK will have populated the - * structure with a variety of useful information. - * - * 4] Call kbase_gator_hwcnt_dump_irq() to queue a non-blocking request for a - * counter dump. If this returns a non-zero value the request has been queued, - * otherwise the driver has been unable to do so (typically because of another - * user of the instrumentation exists concurrently). - * - * 5] Call kbase_gator_hwcnt_dump_complete() to test whether the previously - * requested dump has been succesful. If this returns non-zero the counter dump - * has resolved, but the value of *success must also be tested as the dump - * may have not been successful. If it returns zero the counter dump was - * abandoned due to the device being busy (typically because of another - * user of the instrumentation exists concurrently). - * - * 6] Process the counters stored in the buffer pointed to by ... 
- * - * kbase_gator_hwcnt_info->kernel_dump_buffer - * - * In pseudo code you can find all of the counters via this approach: - * - * - * hwcnt_info # pointer to kbase_gator_hwcnt_info structure - * hwcnt_name # pointer to name list - * - * u32 * hwcnt_data = (u32*)hwcnt_info->kernel_dump_buffer - * - * # Iterate over each 64-counter block in this GPU configuration - * for( i = 0; i < hwcnt_info->nr_hwc_blocks; i++) { - * hwc_type type = hwcnt_info->hwc_layout[i]; - * - * # Skip reserved type blocks - they contain no counters at all - * if( type == RESERVED_BLOCK ) { - * continue; - * } - * - * size_t name_offset = type * 64; - * size_t data_offset = i * 64; - * - * # Iterate over the names of the counters in this block type - * for( j = 0; j < 64; j++) { - * const char * name = hwcnt_name[name_offset+j]; - * - * # Skip empty name strings - there is no counter here - * if( name[0] == '\0' ) { - * continue; - * } - * - * u32 data = hwcnt_data[data_offset+j]; - * - * printk( "COUNTER: %s DATA: %u\n", name, data ); - * } - * } - * - * - * Note that in most implementations you typically want to either SUM or - * AVERAGE multiple instances of the same counter if, for example, you have - * multiple shader cores or multiple L2 caches. The most sensible view for - * analysis is to AVERAGE shader core counters, but SUM L2 cache and MMU - * counters. - * - * 7] Goto 4, repeating until you want to stop collecting counters. - * - * 8] Release the dump resources by calling kbase_gator_hwcnt_term(). - * - * 9] Release the name table resources by calling - * kbase_gator_hwcnt_term_names(). This function must only be called if - * init_names() returned a non-NULL value. 
- **/ - -#define MALI_DDK_GATOR_API_VERSION 3 - -enum hwc_type { - JM_BLOCK = 0, - TILER_BLOCK, - SHADER_BLOCK, - MMU_L2_BLOCK, - RESERVED_BLOCK -}; - -struct kbase_gator_hwcnt_info { - /* Passed from Gator to kbase */ - - /* the bitmask of enabled hardware counters for each counter block */ - uint16_t bitmask[4]; - - /* Passed from kbase to Gator */ - - /* ptr to counter dump memory */ - void *kernel_dump_buffer; - - /* size of counter dump memory */ - uint32_t size; - - /* the ID of the Mali device */ - uint32_t gpu_id; - - /* the number of shader cores in the GPU */ - uint32_t nr_cores; - - /* the number of core groups */ - uint32_t nr_core_groups; - - /* the memory layout of the performance counters */ - enum hwc_type *hwc_layout; - - /* the total number of hardware couter blocks */ - uint32_t nr_hwc_blocks; -}; - -/** - * @brief Opaque block of Mali data which Gator needs to return to the API later. - */ -struct kbase_gator_hwcnt_handles; - -/** - * @brief Initialize the resources Gator needs for performance profiling. - * - * @param in_out_info A pointer to a structure containing the enabled counters passed from Gator and all the Mali - * specific information that will be returned to Gator. On entry Gator must have populated the - * 'bitmask' field with the counters it wishes to enable for each class of counter block. - * Each entry in the array corresponds to a single counter class based on the "hwc_type" - * enumeration, and each bit corresponds to an enable for 4 sequential counters (LSB enables - * the first 4 counters in the block, and so on). See the GPU counter array as returned by - * kbase_gator_hwcnt_get_names() for the index values of each counter for the curernt GPU. - * - * @return Pointer to an opaque handle block on success, NULL on error. - */ -extern struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info); - -/** - * @brief Free all resources once Gator has finished using performance counters. 
- * - * @param in_out_info A pointer to a structure containing the enabled counters passed from Gator and all the - * Mali specific information that will be returned to Gator. - * @param opaque_handles A wrapper structure for kbase structures. - */ -extern void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles); - -/** - * @brief Poll whether a counter dump is successful. - * - * @param opaque_handles A wrapper structure for kbase structures. - * @param[out] success Non-zero on success, zero on failure. - * - * @return Zero if the dump is still pending, non-zero if the dump has completed. Note that a - * completed dump may not have dumped succesfully, so the caller must test for both - * a completed and successful dump before processing counters. - */ -extern uint32_t kbase_gator_instr_hwcnt_dump_complete(struct kbase_gator_hwcnt_handles *opaque_handles, uint32_t * const success); - -/** - * @brief Request the generation of a new counter dump. - * - * @param opaque_handles A wrapper structure for kbase structures. - * - * @return Zero if the hardware device is busy and cannot handle the request, non-zero otherwise. - */ -extern uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles); - -/** - * @brief This function is used to fetch the names table based on the Mali device in use. - * - * @param[out] total_counters The total number of counters short names in the Mali devices' list. - * - * @return Pointer to an array of strings of length *total_counters. - */ -extern const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters); - -/** - * @brief This function is used to terminate the use of the names table. - * - * This function must only be called if the initial call to kbase_gator_hwcnt_init_names returned a non-NULL value. 
- */ -extern void kbase_gator_hwcnt_term_names(void); - -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h deleted file mode 100755 index d124e82edd0ae..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h +++ /dev/null @@ -1,2159 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifndef _KBASE_GATOR_HWCNT_NAMES_H_ -#define _KBASE_GATOR_HWCNT_NAMES_H_ - -/* - * "Short names" for hardware counters used by Streamline. Counters names are - * stored in accordance with their memory layout in the binary counter block - * emitted by the Mali GPU. Each "master" in the GPU emits a fixed-size block - * of 64 counters, and each GPU implements the same set of "masters" although - * the counters each master exposes within its block of 64 may vary. - * - * Counters which are an empty string are simply "holes" in the counter memory - * where no counter exists. 
- */ - -static const char * const hardware_counters_mali_t60x[] = { - /* Job Manager */ - "", - "", - "", - "", - "T60x_MESSAGES_SENT", - "T60x_MESSAGES_RECEIVED", - "T60x_GPU_ACTIVE", - "T60x_IRQ_ACTIVE", - "T60x_JS0_JOBS", - "T60x_JS0_TASKS", - "T60x_JS0_ACTIVE", - "", - "T60x_JS0_WAIT_READ", - "T60x_JS0_WAIT_ISSUE", - "T60x_JS0_WAIT_DEPEND", - "T60x_JS0_WAIT_FINISH", - "T60x_JS1_JOBS", - "T60x_JS1_TASKS", - "T60x_JS1_ACTIVE", - "", - "T60x_JS1_WAIT_READ", - "T60x_JS1_WAIT_ISSUE", - "T60x_JS1_WAIT_DEPEND", - "T60x_JS1_WAIT_FINISH", - "T60x_JS2_JOBS", - "T60x_JS2_TASKS", - "T60x_JS2_ACTIVE", - "", - "T60x_JS2_WAIT_READ", - "T60x_JS2_WAIT_ISSUE", - "T60x_JS2_WAIT_DEPEND", - "T60x_JS2_WAIT_FINISH", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - - /*Tiler */ - "", - "", - "", - "T60x_TI_JOBS_PROCESSED", - "T60x_TI_TRIANGLES", - "T60x_TI_QUADS", - "T60x_TI_POLYGONS", - "T60x_TI_POINTS", - "T60x_TI_LINES", - "T60x_TI_VCACHE_HIT", - "T60x_TI_VCACHE_MISS", - "T60x_TI_FRONT_FACING", - "T60x_TI_BACK_FACING", - "T60x_TI_PRIM_VISIBLE", - "T60x_TI_PRIM_CULLED", - "T60x_TI_PRIM_CLIPPED", - "T60x_TI_LEVEL0", - "T60x_TI_LEVEL1", - "T60x_TI_LEVEL2", - "T60x_TI_LEVEL3", - "T60x_TI_LEVEL4", - "T60x_TI_LEVEL5", - "T60x_TI_LEVEL6", - "T60x_TI_LEVEL7", - "T60x_TI_COMMAND_1", - "T60x_TI_COMMAND_2", - "T60x_TI_COMMAND_3", - "T60x_TI_COMMAND_4", - "T60x_TI_COMMAND_4_7", - "T60x_TI_COMMAND_8_15", - "T60x_TI_COMMAND_16_63", - "T60x_TI_COMMAND_64", - "T60x_TI_COMPRESS_IN", - "T60x_TI_COMPRESS_OUT", - "T60x_TI_COMPRESS_FLUSH", - "T60x_TI_TIMESTAMPS", - "T60x_TI_PCACHE_HIT", - "T60x_TI_PCACHE_MISS", - "T60x_TI_PCACHE_LINE", - "T60x_TI_PCACHE_STALL", - "T60x_TI_WRBUF_HIT", - "T60x_TI_WRBUF_MISS", - "T60x_TI_WRBUF_LINE", - "T60x_TI_WRBUF_PARTIAL", - "T60x_TI_WRBUF_STALL", - "T60x_TI_ACTIVE", - "T60x_TI_LOADING_DESC", - "T60x_TI_INDEX_WAIT", - 
"T60x_TI_INDEX_RANGE_WAIT", - "T60x_TI_VERTEX_WAIT", - "T60x_TI_PCACHE_WAIT", - "T60x_TI_WRBUF_WAIT", - "T60x_TI_BUS_READ", - "T60x_TI_BUS_WRITE", - "", - "", - "", - "", - "", - "T60x_TI_UTLB_STALL", - "T60x_TI_UTLB_REPLAY_MISS", - "T60x_TI_UTLB_REPLAY_FULL", - "T60x_TI_UTLB_NEW_MISS", - "T60x_TI_UTLB_HIT", - - /* Shader Core */ - "", - "", - "", - "", - "T60x_FRAG_ACTIVE", - "T60x_FRAG_PRIMITIVES", - "T60x_FRAG_PRIMITIVES_DROPPED", - "T60x_FRAG_CYCLES_DESC", - "T60x_FRAG_CYCLES_PLR", - "T60x_FRAG_CYCLES_VERT", - "T60x_FRAG_CYCLES_TRISETUP", - "T60x_FRAG_CYCLES_RAST", - "T60x_FRAG_THREADS", - "T60x_FRAG_DUMMY_THREADS", - "T60x_FRAG_QUADS_RAST", - "T60x_FRAG_QUADS_EZS_TEST", - "T60x_FRAG_QUADS_EZS_KILLED", - "T60x_FRAG_THREADS_LZS_TEST", - "T60x_FRAG_THREADS_LZS_KILLED", - "T60x_FRAG_CYCLES_NO_TILE", - "T60x_FRAG_NUM_TILES", - "T60x_FRAG_TRANS_ELIM", - "T60x_COMPUTE_ACTIVE", - "T60x_COMPUTE_TASKS", - "T60x_COMPUTE_THREADS", - "T60x_COMPUTE_CYCLES_DESC", - "T60x_TRIPIPE_ACTIVE", - "T60x_ARITH_WORDS", - "T60x_ARITH_CYCLES_REG", - "T60x_ARITH_CYCLES_L0", - "T60x_ARITH_FRAG_DEPEND", - "T60x_LS_WORDS", - "T60x_LS_ISSUES", - "T60x_LS_RESTARTS", - "T60x_LS_REISSUES_MISS", - "T60x_LS_REISSUES_VD", - "T60x_LS_REISSUE_ATTRIB_MISS", - "T60x_LS_NO_WB", - "T60x_TEX_WORDS", - "T60x_TEX_BUBBLES", - "T60x_TEX_WORDS_L0", - "T60x_TEX_WORDS_DESC", - "T60x_TEX_ISSUES", - "T60x_TEX_RECIRC_FMISS", - "T60x_TEX_RECIRC_DESC", - "T60x_TEX_RECIRC_MULTI", - "T60x_TEX_RECIRC_PMISS", - "T60x_TEX_RECIRC_CONF", - "T60x_LSC_READ_HITS", - "T60x_LSC_READ_MISSES", - "T60x_LSC_WRITE_HITS", - "T60x_LSC_WRITE_MISSES", - "T60x_LSC_ATOMIC_HITS", - "T60x_LSC_ATOMIC_MISSES", - "T60x_LSC_LINE_FETCHES", - "T60x_LSC_DIRTY_LINE", - "T60x_LSC_SNOOPS", - "T60x_AXI_TLB_STALL", - "T60x_AXI_TLB_MIESS", - "T60x_AXI_TLB_TRANSACTION", - "T60x_LS_TLB_MISS", - "T60x_LS_TLB_HIT", - "T60x_AXI_BEATS_READ", - "T60x_AXI_BEATS_WRITTEN", - - /*L2 and MMU */ - "", - "", - "", - "", - "T60x_MMU_HIT", - "T60x_MMU_NEW_MISS", - 
"T60x_MMU_REPLAY_FULL", - "T60x_MMU_REPLAY_MISS", - "T60x_MMU_TABLE_WALK", - "", - "", - "", - "", - "", - "", - "", - "T60x_UTLB_HIT", - "T60x_UTLB_NEW_MISS", - "T60x_UTLB_REPLAY_FULL", - "T60x_UTLB_REPLAY_MISS", - "T60x_UTLB_STALL", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "T60x_L2_EXT_WRITE_BEATS", - "T60x_L2_EXT_READ_BEATS", - "T60x_L2_ANY_LOOKUP", - "T60x_L2_READ_LOOKUP", - "T60x_L2_SREAD_LOOKUP", - "T60x_L2_READ_REPLAY", - "T60x_L2_READ_SNOOP", - "T60x_L2_READ_HIT", - "T60x_L2_CLEAN_MISS", - "T60x_L2_WRITE_LOOKUP", - "T60x_L2_SWRITE_LOOKUP", - "T60x_L2_WRITE_REPLAY", - "T60x_L2_WRITE_SNOOP", - "T60x_L2_WRITE_HIT", - "T60x_L2_EXT_READ_FULL", - "T60x_L2_EXT_READ_HALF", - "T60x_L2_EXT_WRITE_FULL", - "T60x_L2_EXT_WRITE_HALF", - "T60x_L2_EXT_READ", - "T60x_L2_EXT_READ_LINE", - "T60x_L2_EXT_WRITE", - "T60x_L2_EXT_WRITE_LINE", - "T60x_L2_EXT_WRITE_SMALL", - "T60x_L2_EXT_BARRIER", - "T60x_L2_EXT_AR_STALL", - "T60x_L2_EXT_R_BUF_FULL", - "T60x_L2_EXT_RD_BUF_FULL", - "T60x_L2_EXT_R_RAW", - "T60x_L2_EXT_W_STALL", - "T60x_L2_EXT_W_BUF_FULL", - "T60x_L2_EXT_R_W_HAZARD", - "T60x_L2_TAG_HAZARD", - "T60x_L2_SNOOP_FULL", - "T60x_L2_REPLAY_FULL" -}; -static const char * const hardware_counters_mali_t62x[] = { - /* Job Manager */ - "", - "", - "", - "", - "T62x_MESSAGES_SENT", - "T62x_MESSAGES_RECEIVED", - "T62x_GPU_ACTIVE", - "T62x_IRQ_ACTIVE", - "T62x_JS0_JOBS", - "T62x_JS0_TASKS", - "T62x_JS0_ACTIVE", - "", - "T62x_JS0_WAIT_READ", - "T62x_JS0_WAIT_ISSUE", - "T62x_JS0_WAIT_DEPEND", - "T62x_JS0_WAIT_FINISH", - "T62x_JS1_JOBS", - "T62x_JS1_TASKS", - "T62x_JS1_ACTIVE", - "", - "T62x_JS1_WAIT_READ", - "T62x_JS1_WAIT_ISSUE", - "T62x_JS1_WAIT_DEPEND", - "T62x_JS1_WAIT_FINISH", - "T62x_JS2_JOBS", - "T62x_JS2_TASKS", - "T62x_JS2_ACTIVE", - "", - "T62x_JS2_WAIT_READ", - "T62x_JS2_WAIT_ISSUE", - "T62x_JS2_WAIT_DEPEND", - "T62x_JS2_WAIT_FINISH", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - 
"", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - - /*Tiler */ - "", - "", - "", - "T62x_TI_JOBS_PROCESSED", - "T62x_TI_TRIANGLES", - "T62x_TI_QUADS", - "T62x_TI_POLYGONS", - "T62x_TI_POINTS", - "T62x_TI_LINES", - "T62x_TI_VCACHE_HIT", - "T62x_TI_VCACHE_MISS", - "T62x_TI_FRONT_FACING", - "T62x_TI_BACK_FACING", - "T62x_TI_PRIM_VISIBLE", - "T62x_TI_PRIM_CULLED", - "T62x_TI_PRIM_CLIPPED", - "T62x_TI_LEVEL0", - "T62x_TI_LEVEL1", - "T62x_TI_LEVEL2", - "T62x_TI_LEVEL3", - "T62x_TI_LEVEL4", - "T62x_TI_LEVEL5", - "T62x_TI_LEVEL6", - "T62x_TI_LEVEL7", - "T62x_TI_COMMAND_1", - "T62x_TI_COMMAND_2", - "T62x_TI_COMMAND_3", - "T62x_TI_COMMAND_4", - "T62x_TI_COMMAND_5_7", - "T62x_TI_COMMAND_8_15", - "T62x_TI_COMMAND_16_63", - "T62x_TI_COMMAND_64", - "T62x_TI_COMPRESS_IN", - "T62x_TI_COMPRESS_OUT", - "T62x_TI_COMPRESS_FLUSH", - "T62x_TI_TIMESTAMPS", - "T62x_TI_PCACHE_HIT", - "T62x_TI_PCACHE_MISS", - "T62x_TI_PCACHE_LINE", - "T62x_TI_PCACHE_STALL", - "T62x_TI_WRBUF_HIT", - "T62x_TI_WRBUF_MISS", - "T62x_TI_WRBUF_LINE", - "T62x_TI_WRBUF_PARTIAL", - "T62x_TI_WRBUF_STALL", - "T62x_TI_ACTIVE", - "T62x_TI_LOADING_DESC", - "T62x_TI_INDEX_WAIT", - "T62x_TI_INDEX_RANGE_WAIT", - "T62x_TI_VERTEX_WAIT", - "T62x_TI_PCACHE_WAIT", - "T62x_TI_WRBUF_WAIT", - "T62x_TI_BUS_READ", - "T62x_TI_BUS_WRITE", - "", - "", - "", - "", - "", - "T62x_TI_UTLB_STALL", - "T62x_TI_UTLB_REPLAY_MISS", - "T62x_TI_UTLB_REPLAY_FULL", - "T62x_TI_UTLB_NEW_MISS", - "T62x_TI_UTLB_HIT", - - /* Shader Core */ - "", - "", - "", - "T62x_SHADER_CORE_ACTIVE", - "T62x_FRAG_ACTIVE", - "T62x_FRAG_PRIMITIVES", - "T62x_FRAG_PRIMITIVES_DROPPED", - "T62x_FRAG_CYCLES_DESC", - "T62x_FRAG_CYCLES_FPKQ_ACTIVE", - "T62x_FRAG_CYCLES_VERT", - "T62x_FRAG_CYCLES_TRISETUP", - "T62x_FRAG_CYCLES_EZS_ACTIVE", - "T62x_FRAG_THREADS", - "T62x_FRAG_DUMMY_THREADS", - "T62x_FRAG_QUADS_RAST", - "T62x_FRAG_QUADS_EZS_TEST", - "T62x_FRAG_QUADS_EZS_KILLED", - "T62x_FRAG_THREADS_LZS_TEST", - "T62x_FRAG_THREADS_LZS_KILLED", - 
"T62x_FRAG_CYCLES_NO_TILE", - "T62x_FRAG_NUM_TILES", - "T62x_FRAG_TRANS_ELIM", - "T62x_COMPUTE_ACTIVE", - "T62x_COMPUTE_TASKS", - "T62x_COMPUTE_THREADS", - "T62x_COMPUTE_CYCLES_DESC", - "T62x_TRIPIPE_ACTIVE", - "T62x_ARITH_WORDS", - "T62x_ARITH_CYCLES_REG", - "T62x_ARITH_CYCLES_L0", - "T62x_ARITH_FRAG_DEPEND", - "T62x_LS_WORDS", - "T62x_LS_ISSUES", - "T62x_LS_RESTARTS", - "T62x_LS_REISSUES_MISS", - "T62x_LS_REISSUES_VD", - "T62x_LS_REISSUE_ATTRIB_MISS", - "T62x_LS_NO_WB", - "T62x_TEX_WORDS", - "T62x_TEX_BUBBLES", - "T62x_TEX_WORDS_L0", - "T62x_TEX_WORDS_DESC", - "T62x_TEX_ISSUES", - "T62x_TEX_RECIRC_FMISS", - "T62x_TEX_RECIRC_DESC", - "T62x_TEX_RECIRC_MULTI", - "T62x_TEX_RECIRC_PMISS", - "T62x_TEX_RECIRC_CONF", - "T62x_LSC_READ_HITS", - "T62x_LSC_READ_MISSES", - "T62x_LSC_WRITE_HITS", - "T62x_LSC_WRITE_MISSES", - "T62x_LSC_ATOMIC_HITS", - "T62x_LSC_ATOMIC_MISSES", - "T62x_LSC_LINE_FETCHES", - "T62x_LSC_DIRTY_LINE", - "T62x_LSC_SNOOPS", - "T62x_AXI_TLB_STALL", - "T62x_AXI_TLB_MIESS", - "T62x_AXI_TLB_TRANSACTION", - "T62x_LS_TLB_MISS", - "T62x_LS_TLB_HIT", - "T62x_AXI_BEATS_READ", - "T62x_AXI_BEATS_WRITTEN", - - /*L2 and MMU */ - "", - "", - "", - "", - "T62x_MMU_HIT", - "T62x_MMU_NEW_MISS", - "T62x_MMU_REPLAY_FULL", - "T62x_MMU_REPLAY_MISS", - "T62x_MMU_TABLE_WALK", - "", - "", - "", - "", - "", - "", - "", - "T62x_UTLB_HIT", - "T62x_UTLB_NEW_MISS", - "T62x_UTLB_REPLAY_FULL", - "T62x_UTLB_REPLAY_MISS", - "T62x_UTLB_STALL", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "T62x_L2_EXT_WRITE_BEATS", - "T62x_L2_EXT_READ_BEATS", - "T62x_L2_ANY_LOOKUP", - "T62x_L2_READ_LOOKUP", - "T62x_L2_SREAD_LOOKUP", - "T62x_L2_READ_REPLAY", - "T62x_L2_READ_SNOOP", - "T62x_L2_READ_HIT", - "T62x_L2_CLEAN_MISS", - "T62x_L2_WRITE_LOOKUP", - "T62x_L2_SWRITE_LOOKUP", - "T62x_L2_WRITE_REPLAY", - "T62x_L2_WRITE_SNOOP", - "T62x_L2_WRITE_HIT", - "T62x_L2_EXT_READ_FULL", - "T62x_L2_EXT_READ_HALF", - "T62x_L2_EXT_WRITE_FULL", - "T62x_L2_EXT_WRITE_HALF", - "T62x_L2_EXT_READ", - 
"T62x_L2_EXT_READ_LINE", - "T62x_L2_EXT_WRITE", - "T62x_L2_EXT_WRITE_LINE", - "T62x_L2_EXT_WRITE_SMALL", - "T62x_L2_EXT_BARRIER", - "T62x_L2_EXT_AR_STALL", - "T62x_L2_EXT_R_BUF_FULL", - "T62x_L2_EXT_RD_BUF_FULL", - "T62x_L2_EXT_R_RAW", - "T62x_L2_EXT_W_STALL", - "T62x_L2_EXT_W_BUF_FULL", - "T62x_L2_EXT_R_W_HAZARD", - "T62x_L2_TAG_HAZARD", - "T62x_L2_SNOOP_FULL", - "T62x_L2_REPLAY_FULL" -}; - -static const char * const hardware_counters_mali_t72x[] = { - /* Job Manager */ - "", - "", - "", - "", - "T72x_GPU_ACTIVE", - "T72x_IRQ_ACTIVE", - "T72x_JS0_JOBS", - "T72x_JS0_TASKS", - "T72x_JS0_ACTIVE", - "T72x_JS1_JOBS", - "T72x_JS1_TASKS", - "T72x_JS1_ACTIVE", - "T72x_JS2_JOBS", - "T72x_JS2_TASKS", - "T72x_JS2_ACTIVE", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - - /*Tiler */ - "", - "", - "", - "T72x_TI_JOBS_PROCESSED", - "T72x_TI_TRIANGLES", - "T72x_TI_QUADS", - "T72x_TI_POLYGONS", - "T72x_TI_POINTS", - "T72x_TI_LINES", - "T72x_TI_FRONT_FACING", - "T72x_TI_BACK_FACING", - "T72x_TI_PRIM_VISIBLE", - "T72x_TI_PRIM_CULLED", - "T72x_TI_PRIM_CLIPPED", - "", - "", - "", - "", - "", - "", - "", - "", - "T72x_TI_ACTIVE", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - - /* Shader Core */ - "", - "", - "", - "", - "T72x_FRAG_ACTIVE", - "T72x_FRAG_PRIMITIVES", - "T72x_FRAG_PRIMITIVES_DROPPED", - "T72x_FRAG_THREADS", - "T72x_FRAG_DUMMY_THREADS", - "T72x_FRAG_QUADS_RAST", - "T72x_FRAG_QUADS_EZS_TEST", - "T72x_FRAG_QUADS_EZS_KILLED", - "T72x_FRAG_THREADS_LZS_TEST", - "T72x_FRAG_THREADS_LZS_KILLED", - "T72x_FRAG_CYCLES_NO_TILE", - 
"T72x_FRAG_NUM_TILES", - "T72x_FRAG_TRANS_ELIM", - "T72x_COMPUTE_ACTIVE", - "T72x_COMPUTE_TASKS", - "T72x_COMPUTE_THREADS", - "T72x_TRIPIPE_ACTIVE", - "T72x_ARITH_WORDS", - "T72x_ARITH_CYCLES_REG", - "T72x_LS_WORDS", - "T72x_LS_ISSUES", - "T72x_LS_RESTARTS", - "T72x_LS_REISSUES_MISS", - "T72x_TEX_WORDS", - "T72x_TEX_BUBBLES", - "T72x_TEX_ISSUES", - "T72x_LSC_READ_HITS", - "T72x_LSC_READ_MISSES", - "T72x_LSC_WRITE_HITS", - "T72x_LSC_WRITE_MISSES", - "T72x_LSC_ATOMIC_HITS", - "T72x_LSC_ATOMIC_MISSES", - "T72x_LSC_LINE_FETCHES", - "T72x_LSC_DIRTY_LINE", - "T72x_LSC_SNOOPS", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - - /*L2 and MMU */ - "", - "", - "", - "", - "T72x_L2_EXT_WRITE_BEAT", - "T72x_L2_EXT_READ_BEAT", - "T72x_L2_READ_SNOOP", - "T72x_L2_READ_HIT", - "T72x_L2_WRITE_SNOOP", - "T72x_L2_WRITE_HIT", - "T72x_L2_EXT_WRITE_SMALL", - "T72x_L2_EXT_BARRIER", - "T72x_L2_EXT_AR_STALL", - "T72x_L2_EXT_W_STALL", - "T72x_L2_SNOOP_FULL", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" -}; - -static const char * const hardware_counters_mali_t76x[] = { - /* Job Manager */ - "", - "", - "", - "", - "T76x_MESSAGES_SENT", - "T76x_MESSAGES_RECEIVED", - "T76x_GPU_ACTIVE", - "T76x_IRQ_ACTIVE", - "T76x_JS0_JOBS", - "T76x_JS0_TASKS", - "T76x_JS0_ACTIVE", - "", - "T76x_JS0_WAIT_READ", - "T76x_JS0_WAIT_ISSUE", - "T76x_JS0_WAIT_DEPEND", - "T76x_JS0_WAIT_FINISH", - "T76x_JS1_JOBS", - "T76x_JS1_TASKS", - "T76x_JS1_ACTIVE", - "", - "T76x_JS1_WAIT_READ", - "T76x_JS1_WAIT_ISSUE", - "T76x_JS1_WAIT_DEPEND", - "T76x_JS1_WAIT_FINISH", - "T76x_JS2_JOBS", - "T76x_JS2_TASKS", - "T76x_JS2_ACTIVE", - "", - "T76x_JS2_WAIT_READ", - "T76x_JS2_WAIT_ISSUE", - 
"T76x_JS2_WAIT_DEPEND", - "T76x_JS2_WAIT_FINISH", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - - /*Tiler */ - "", - "", - "", - "T76x_TI_JOBS_PROCESSED", - "T76x_TI_TRIANGLES", - "T76x_TI_QUADS", - "T76x_TI_POLYGONS", - "T76x_TI_POINTS", - "T76x_TI_LINES", - "T76x_TI_VCACHE_HIT", - "T76x_TI_VCACHE_MISS", - "T76x_TI_FRONT_FACING", - "T76x_TI_BACK_FACING", - "T76x_TI_PRIM_VISIBLE", - "T76x_TI_PRIM_CULLED", - "T76x_TI_PRIM_CLIPPED", - "T76x_TI_LEVEL0", - "T76x_TI_LEVEL1", - "T76x_TI_LEVEL2", - "T76x_TI_LEVEL3", - "T76x_TI_LEVEL4", - "T76x_TI_LEVEL5", - "T76x_TI_LEVEL6", - "T76x_TI_LEVEL7", - "T76x_TI_COMMAND_1", - "T76x_TI_COMMAND_2", - "T76x_TI_COMMAND_3", - "T76x_TI_COMMAND_4", - "T76x_TI_COMMAND_5_7", - "T76x_TI_COMMAND_8_15", - "T76x_TI_COMMAND_16_63", - "T76x_TI_COMMAND_64", - "T76x_TI_COMPRESS_IN", - "T76x_TI_COMPRESS_OUT", - "T76x_TI_COMPRESS_FLUSH", - "T76x_TI_TIMESTAMPS", - "T76x_TI_PCACHE_HIT", - "T76x_TI_PCACHE_MISS", - "T76x_TI_PCACHE_LINE", - "T76x_TI_PCACHE_STALL", - "T76x_TI_WRBUF_HIT", - "T76x_TI_WRBUF_MISS", - "T76x_TI_WRBUF_LINE", - "T76x_TI_WRBUF_PARTIAL", - "T76x_TI_WRBUF_STALL", - "T76x_TI_ACTIVE", - "T76x_TI_LOADING_DESC", - "T76x_TI_INDEX_WAIT", - "T76x_TI_INDEX_RANGE_WAIT", - "T76x_TI_VERTEX_WAIT", - "T76x_TI_PCACHE_WAIT", - "T76x_TI_WRBUF_WAIT", - "T76x_TI_BUS_READ", - "T76x_TI_BUS_WRITE", - "", - "", - "", - "", - "", - "T76x_TI_UTLB_HIT", - "T76x_TI_UTLB_NEW_MISS", - "T76x_TI_UTLB_REPLAY_FULL", - "T76x_TI_UTLB_REPLAY_MISS", - "T76x_TI_UTLB_STALL", - - /* Shader Core */ - "", - "", - "", - "", - "T76x_FRAG_ACTIVE", - "T76x_FRAG_PRIMITIVES", - "T76x_FRAG_PRIMITIVES_DROPPED", - "T76x_FRAG_CYCLES_DESC", - "T76x_FRAG_CYCLES_FPKQ_ACTIVE", - "T76x_FRAG_CYCLES_VERT", - "T76x_FRAG_CYCLES_TRISETUP", - "T76x_FRAG_CYCLES_EZS_ACTIVE", - "T76x_FRAG_THREADS", - "T76x_FRAG_DUMMY_THREADS", - 
"T76x_FRAG_QUADS_RAST", - "T76x_FRAG_QUADS_EZS_TEST", - "T76x_FRAG_QUADS_EZS_KILLED", - "T76x_FRAG_THREADS_LZS_TEST", - "T76x_FRAG_THREADS_LZS_KILLED", - "T76x_FRAG_CYCLES_NO_TILE", - "T76x_FRAG_NUM_TILES", - "T76x_FRAG_TRANS_ELIM", - "T76x_COMPUTE_ACTIVE", - "T76x_COMPUTE_TASKS", - "T76x_COMPUTE_THREADS", - "T76x_COMPUTE_CYCLES_DESC", - "T76x_TRIPIPE_ACTIVE", - "T76x_ARITH_WORDS", - "T76x_ARITH_CYCLES_REG", - "T76x_ARITH_CYCLES_L0", - "T76x_ARITH_FRAG_DEPEND", - "T76x_LS_WORDS", - "T76x_LS_ISSUES", - "T76x_LS_REISSUE_ATTR", - "T76x_LS_REISSUES_VARY", - "T76x_LS_VARY_RV_MISS", - "T76x_LS_VARY_RV_HIT", - "T76x_LS_NO_UNPARK", - "T76x_TEX_WORDS", - "T76x_TEX_BUBBLES", - "T76x_TEX_WORDS_L0", - "T76x_TEX_WORDS_DESC", - "T76x_TEX_ISSUES", - "T76x_TEX_RECIRC_FMISS", - "T76x_TEX_RECIRC_DESC", - "T76x_TEX_RECIRC_MULTI", - "T76x_TEX_RECIRC_PMISS", - "T76x_TEX_RECIRC_CONF", - "T76x_LSC_READ_HITS", - "T76x_LSC_READ_OP", - "T76x_LSC_WRITE_HITS", - "T76x_LSC_WRITE_OP", - "T76x_LSC_ATOMIC_HITS", - "T76x_LSC_ATOMIC_OP", - "T76x_LSC_LINE_FETCHES", - "T76x_LSC_DIRTY_LINE", - "T76x_LSC_SNOOPS", - "T76x_AXI_TLB_STALL", - "T76x_AXI_TLB_MIESS", - "T76x_AXI_TLB_TRANSACTION", - "T76x_LS_TLB_MISS", - "T76x_LS_TLB_HIT", - "T76x_AXI_BEATS_READ", - "T76x_AXI_BEATS_WRITTEN", - - /*L2 and MMU */ - "", - "", - "", - "", - "T76x_MMU_HIT", - "T76x_MMU_NEW_MISS", - "T76x_MMU_REPLAY_FULL", - "T76x_MMU_REPLAY_MISS", - "T76x_MMU_TABLE_WALK", - "T76x_MMU_REQUESTS", - "", - "", - "T76x_UTLB_HIT", - "T76x_UTLB_NEW_MISS", - "T76x_UTLB_REPLAY_FULL", - "T76x_UTLB_REPLAY_MISS", - "T76x_UTLB_STALL", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "T76x_L2_EXT_WRITE_BEATS", - "T76x_L2_EXT_READ_BEATS", - "T76x_L2_ANY_LOOKUP", - "T76x_L2_READ_LOOKUP", - "T76x_L2_SREAD_LOOKUP", - "T76x_L2_READ_REPLAY", - "T76x_L2_READ_SNOOP", - "T76x_L2_READ_HIT", - "T76x_L2_CLEAN_MISS", - "T76x_L2_WRITE_LOOKUP", - "T76x_L2_SWRITE_LOOKUP", - "T76x_L2_WRITE_REPLAY", - "T76x_L2_WRITE_SNOOP", - 
"T76x_L2_WRITE_HIT", - "T76x_L2_EXT_READ_FULL", - "", - "T76x_L2_EXT_WRITE_FULL", - "T76x_L2_EXT_R_W_HAZARD", - "T76x_L2_EXT_READ", - "T76x_L2_EXT_READ_LINE", - "T76x_L2_EXT_WRITE", - "T76x_L2_EXT_WRITE_LINE", - "T76x_L2_EXT_WRITE_SMALL", - "T76x_L2_EXT_BARRIER", - "T76x_L2_EXT_AR_STALL", - "T76x_L2_EXT_R_BUF_FULL", - "T76x_L2_EXT_RD_BUF_FULL", - "T76x_L2_EXT_R_RAW", - "T76x_L2_EXT_W_STALL", - "T76x_L2_EXT_W_BUF_FULL", - "T76x_L2_EXT_R_BUF_FULL", - "T76x_L2_TAG_HAZARD", - "T76x_L2_SNOOP_FULL", - "T76x_L2_REPLAY_FULL" -}; - -static const char * const hardware_counters_mali_t82x[] = { - /* Job Manager */ - "", - "", - "", - "", - "T82x_MESSAGES_SENT", - "T82x_MESSAGES_RECEIVED", - "T82x_GPU_ACTIVE", - "T82x_IRQ_ACTIVE", - "T82x_JS0_JOBS", - "T82x_JS0_TASKS", - "T82x_JS0_ACTIVE", - "", - "T82x_JS0_WAIT_READ", - "T82x_JS0_WAIT_ISSUE", - "T82x_JS0_WAIT_DEPEND", - "T82x_JS0_WAIT_FINISH", - "T82x_JS1_JOBS", - "T82x_JS1_TASKS", - "T82x_JS1_ACTIVE", - "", - "T82x_JS1_WAIT_READ", - "T82x_JS1_WAIT_ISSUE", - "T82x_JS1_WAIT_DEPEND", - "T82x_JS1_WAIT_FINISH", - "T82x_JS2_JOBS", - "T82x_JS2_TASKS", - "T82x_JS2_ACTIVE", - "", - "T82x_JS2_WAIT_READ", - "T82x_JS2_WAIT_ISSUE", - "T82x_JS2_WAIT_DEPEND", - "T82x_JS2_WAIT_FINISH", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - - /*Tiler */ - "", - "", - "", - "T82x_TI_JOBS_PROCESSED", - "T82x_TI_TRIANGLES", - "T82x_TI_QUADS", - "T82x_TI_POLYGONS", - "T82x_TI_POINTS", - "T82x_TI_LINES", - "T82x_TI_FRONT_FACING", - "T82x_TI_BACK_FACING", - "T82x_TI_PRIM_VISIBLE", - "T82x_TI_PRIM_CULLED", - "T82x_TI_PRIM_CLIPPED", - "", - "", - "", - "", - "", - "", - "", - "", - "T82x_TI_ACTIVE", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - 
"", - "", - "", - "", - "", - - /* Shader Core */ - "", - "", - "", - "", - "T82x_FRAG_ACTIVE", - "T82x_FRAG_PRIMITIVES", - "T82x_FRAG_PRIMITIVES_DROPPED", - "T82x_FRAG_CYCLES_DESC", - "T82x_FRAG_CYCLES_FPKQ_ACTIVE", - "T82x_FRAG_CYCLES_VERT", - "T82x_FRAG_CYCLES_TRISETUP", - "T82x_FRAG_CYCLES_EZS_ACTIVE", - "T82x_FRAG_THREADS", - "T82x_FRAG_DUMMY_THREADS", - "T82x_FRAG_QUADS_RAST", - "T82x_FRAG_QUADS_EZS_TEST", - "T82x_FRAG_QUADS_EZS_KILLED", - "T82x_FRAG_THREADS_LZS_TEST", - "T82x_FRAG_THREADS_LZS_KILLED", - "T82x_FRAG_CYCLES_NO_TILE", - "T82x_FRAG_NUM_TILES", - "T82x_FRAG_TRANS_ELIM", - "T82x_COMPUTE_ACTIVE", - "T82x_COMPUTE_TASKS", - "T82x_COMPUTE_THREADS", - "T82x_COMPUTE_CYCLES_DESC", - "T82x_TRIPIPE_ACTIVE", - "T82x_ARITH_WORDS", - "T82x_ARITH_CYCLES_REG", - "T82x_ARITH_CYCLES_L0", - "T82x_ARITH_FRAG_DEPEND", - "T82x_LS_WORDS", - "T82x_LS_ISSUES", - "T82x_LS_REISSUE_ATTR", - "T82x_LS_REISSUES_VARY", - "T82x_LS_VARY_RV_MISS", - "T82x_LS_VARY_RV_HIT", - "T82x_LS_NO_UNPARK", - "T82x_TEX_WORDS", - "T82x_TEX_BUBBLES", - "T82x_TEX_WORDS_L0", - "T82x_TEX_WORDS_DESC", - "T82x_TEX_ISSUES", - "T82x_TEX_RECIRC_FMISS", - "T82x_TEX_RECIRC_DESC", - "T82x_TEX_RECIRC_MULTI", - "T82x_TEX_RECIRC_PMISS", - "T82x_TEX_RECIRC_CONF", - "T82x_LSC_READ_HITS", - "T82x_LSC_READ_OP", - "T82x_LSC_WRITE_HITS", - "T82x_LSC_WRITE_OP", - "T82x_LSC_ATOMIC_HITS", - "T82x_LSC_ATOMIC_OP", - "T82x_LSC_LINE_FETCHES", - "T82x_LSC_DIRTY_LINE", - "T82x_LSC_SNOOPS", - "T82x_AXI_TLB_STALL", - "T82x_AXI_TLB_MIESS", - "T82x_AXI_TLB_TRANSACTION", - "T82x_LS_TLB_MISS", - "T82x_LS_TLB_HIT", - "T82x_AXI_BEATS_READ", - "T82x_AXI_BEATS_WRITTEN", - - /*L2 and MMU */ - "", - "", - "", - "", - "T82x_MMU_HIT", - "T82x_MMU_NEW_MISS", - "T82x_MMU_REPLAY_FULL", - "T82x_MMU_REPLAY_MISS", - "T82x_MMU_TABLE_WALK", - "T82x_MMU_REQUESTS", - "", - "", - "T82x_UTLB_HIT", - "T82x_UTLB_NEW_MISS", - "T82x_UTLB_REPLAY_FULL", - "T82x_UTLB_REPLAY_MISS", - "T82x_UTLB_STALL", - "", - "", - "", - "", - "", - "", - "", - "", - "", - 
"", - "", - "", - "", - "T82x_L2_EXT_WRITE_BEATS", - "T82x_L2_EXT_READ_BEATS", - "T82x_L2_ANY_LOOKUP", - "T82x_L2_READ_LOOKUP", - "T82x_L2_SREAD_LOOKUP", - "T82x_L2_READ_REPLAY", - "T82x_L2_READ_SNOOP", - "T82x_L2_READ_HIT", - "T82x_L2_CLEAN_MISS", - "T82x_L2_WRITE_LOOKUP", - "T82x_L2_SWRITE_LOOKUP", - "T82x_L2_WRITE_REPLAY", - "T82x_L2_WRITE_SNOOP", - "T82x_L2_WRITE_HIT", - "T82x_L2_EXT_READ_FULL", - "", - "T82x_L2_EXT_WRITE_FULL", - "T82x_L2_EXT_R_W_HAZARD", - "T82x_L2_EXT_READ", - "T82x_L2_EXT_READ_LINE", - "T82x_L2_EXT_WRITE", - "T82x_L2_EXT_WRITE_LINE", - "T82x_L2_EXT_WRITE_SMALL", - "T82x_L2_EXT_BARRIER", - "T82x_L2_EXT_AR_STALL", - "T82x_L2_EXT_R_BUF_FULL", - "T82x_L2_EXT_RD_BUF_FULL", - "T82x_L2_EXT_R_RAW", - "T82x_L2_EXT_W_STALL", - "T82x_L2_EXT_W_BUF_FULL", - "T82x_L2_EXT_R_BUF_FULL", - "T82x_L2_TAG_HAZARD", - "T82x_L2_SNOOP_FULL", - "T82x_L2_REPLAY_FULL" -}; - -static const char * const hardware_counters_mali_t83x[] = { - /* Job Manager */ - "", - "", - "", - "", - "T83x_MESSAGES_SENT", - "T83x_MESSAGES_RECEIVED", - "T83x_GPU_ACTIVE", - "T83x_IRQ_ACTIVE", - "T83x_JS0_JOBS", - "T83x_JS0_TASKS", - "T83x_JS0_ACTIVE", - "", - "T83x_JS0_WAIT_READ", - "T83x_JS0_WAIT_ISSUE", - "T83x_JS0_WAIT_DEPEND", - "T83x_JS0_WAIT_FINISH", - "T83x_JS1_JOBS", - "T83x_JS1_TASKS", - "T83x_JS1_ACTIVE", - "", - "T83x_JS1_WAIT_READ", - "T83x_JS1_WAIT_ISSUE", - "T83x_JS1_WAIT_DEPEND", - "T83x_JS1_WAIT_FINISH", - "T83x_JS2_JOBS", - "T83x_JS2_TASKS", - "T83x_JS2_ACTIVE", - "", - "T83x_JS2_WAIT_READ", - "T83x_JS2_WAIT_ISSUE", - "T83x_JS2_WAIT_DEPEND", - "T83x_JS2_WAIT_FINISH", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - - /*Tiler */ - "", - "", - "", - "T83x_TI_JOBS_PROCESSED", - "T83x_TI_TRIANGLES", - "T83x_TI_QUADS", - "T83x_TI_POLYGONS", - "T83x_TI_POINTS", - "T83x_TI_LINES", - "T83x_TI_FRONT_FACING", - "T83x_TI_BACK_FACING", - 
"T83x_TI_PRIM_VISIBLE", - "T83x_TI_PRIM_CULLED", - "T83x_TI_PRIM_CLIPPED", - "", - "", - "", - "", - "", - "", - "", - "", - "T83x_TI_ACTIVE", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - - /* Shader Core */ - "", - "", - "", - "", - "T83x_FRAG_ACTIVE", - "T83x_FRAG_PRIMITIVES", - "T83x_FRAG_PRIMITIVES_DROPPED", - "T83x_FRAG_CYCLES_DESC", - "T83x_FRAG_CYCLES_FPKQ_ACTIVE", - "T83x_FRAG_CYCLES_VERT", - "T83x_FRAG_CYCLES_TRISETUP", - "T83x_FRAG_CYCLES_EZS_ACTIVE", - "T83x_FRAG_THREADS", - "T83x_FRAG_DUMMY_THREADS", - "T83x_FRAG_QUADS_RAST", - "T83x_FRAG_QUADS_EZS_TEST", - "T83x_FRAG_QUADS_EZS_KILLED", - "T83x_FRAG_THREADS_LZS_TEST", - "T83x_FRAG_THREADS_LZS_KILLED", - "T83x_FRAG_CYCLES_NO_TILE", - "T83x_FRAG_NUM_TILES", - "T83x_FRAG_TRANS_ELIM", - "T83x_COMPUTE_ACTIVE", - "T83x_COMPUTE_TASKS", - "T83x_COMPUTE_THREADS", - "T83x_COMPUTE_CYCLES_DESC", - "T83x_TRIPIPE_ACTIVE", - "T83x_ARITH_WORDS", - "T83x_ARITH_CYCLES_REG", - "T83x_ARITH_CYCLES_L0", - "T83x_ARITH_FRAG_DEPEND", - "T83x_LS_WORDS", - "T83x_LS_ISSUES", - "T83x_LS_REISSUE_ATTR", - "T83x_LS_REISSUES_VARY", - "T83x_LS_VARY_RV_MISS", - "T83x_LS_VARY_RV_HIT", - "T83x_LS_NO_UNPARK", - "T83x_TEX_WORDS", - "T83x_TEX_BUBBLES", - "T83x_TEX_WORDS_L0", - "T83x_TEX_WORDS_DESC", - "T83x_TEX_ISSUES", - "T83x_TEX_RECIRC_FMISS", - "T83x_TEX_RECIRC_DESC", - "T83x_TEX_RECIRC_MULTI", - "T83x_TEX_RECIRC_PMISS", - "T83x_TEX_RECIRC_CONF", - "T83x_LSC_READ_HITS", - "T83x_LSC_READ_OP", - "T83x_LSC_WRITE_HITS", - "T83x_LSC_WRITE_OP", - "T83x_LSC_ATOMIC_HITS", - "T83x_LSC_ATOMIC_OP", - "T83x_LSC_LINE_FETCHES", - "T83x_LSC_DIRTY_LINE", - "T83x_LSC_SNOOPS", - "T83x_AXI_TLB_STALL", - "T83x_AXI_TLB_MIESS", - "T83x_AXI_TLB_TRANSACTION", - "T83x_LS_TLB_MISS", - "T83x_LS_TLB_HIT", - "T83x_AXI_BEATS_READ", - "T83x_AXI_BEATS_WRITTEN", - - /*L2 
and MMU */ - "", - "", - "", - "", - "T83x_MMU_HIT", - "T83x_MMU_NEW_MISS", - "T83x_MMU_REPLAY_FULL", - "T83x_MMU_REPLAY_MISS", - "T83x_MMU_TABLE_WALK", - "T83x_MMU_REQUESTS", - "", - "", - "T83x_UTLB_HIT", - "T83x_UTLB_NEW_MISS", - "T83x_UTLB_REPLAY_FULL", - "T83x_UTLB_REPLAY_MISS", - "T83x_UTLB_STALL", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "T83x_L2_EXT_WRITE_BEATS", - "T83x_L2_EXT_READ_BEATS", - "T83x_L2_ANY_LOOKUP", - "T83x_L2_READ_LOOKUP", - "T83x_L2_SREAD_LOOKUP", - "T83x_L2_READ_REPLAY", - "T83x_L2_READ_SNOOP", - "T83x_L2_READ_HIT", - "T83x_L2_CLEAN_MISS", - "T83x_L2_WRITE_LOOKUP", - "T83x_L2_SWRITE_LOOKUP", - "T83x_L2_WRITE_REPLAY", - "T83x_L2_WRITE_SNOOP", - "T83x_L2_WRITE_HIT", - "T83x_L2_EXT_READ_FULL", - "", - "T83x_L2_EXT_WRITE_FULL", - "T83x_L2_EXT_R_W_HAZARD", - "T83x_L2_EXT_READ", - "T83x_L2_EXT_READ_LINE", - "T83x_L2_EXT_WRITE", - "T83x_L2_EXT_WRITE_LINE", - "T83x_L2_EXT_WRITE_SMALL", - "T83x_L2_EXT_BARRIER", - "T83x_L2_EXT_AR_STALL", - "T83x_L2_EXT_R_BUF_FULL", - "T83x_L2_EXT_RD_BUF_FULL", - "T83x_L2_EXT_R_RAW", - "T83x_L2_EXT_W_STALL", - "T83x_L2_EXT_W_BUF_FULL", - "T83x_L2_EXT_R_BUF_FULL", - "T83x_L2_TAG_HAZARD", - "T83x_L2_SNOOP_FULL", - "T83x_L2_REPLAY_FULL" -}; - -static const char * const hardware_counters_mali_t86x[] = { - /* Job Manager */ - "", - "", - "", - "", - "T86x_MESSAGES_SENT", - "T86x_MESSAGES_RECEIVED", - "T86x_GPU_ACTIVE", - "T86x_IRQ_ACTIVE", - "T86x_JS0_JOBS", - "T86x_JS0_TASKS", - "T86x_JS0_ACTIVE", - "", - "T86x_JS0_WAIT_READ", - "T86x_JS0_WAIT_ISSUE", - "T86x_JS0_WAIT_DEPEND", - "T86x_JS0_WAIT_FINISH", - "T86x_JS1_JOBS", - "T86x_JS1_TASKS", - "T86x_JS1_ACTIVE", - "", - "T86x_JS1_WAIT_READ", - "T86x_JS1_WAIT_ISSUE", - "T86x_JS1_WAIT_DEPEND", - "T86x_JS1_WAIT_FINISH", - "T86x_JS2_JOBS", - "T86x_JS2_TASKS", - "T86x_JS2_ACTIVE", - "", - "T86x_JS2_WAIT_READ", - "T86x_JS2_WAIT_ISSUE", - "T86x_JS2_WAIT_DEPEND", - "T86x_JS2_WAIT_FINISH", - "", - "", - "", - "", - "", - "", - "", - "", - "", 
- "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - - /*Tiler */ - "", - "", - "", - "T86x_TI_JOBS_PROCESSED", - "T86x_TI_TRIANGLES", - "T86x_TI_QUADS", - "T86x_TI_POLYGONS", - "T86x_TI_POINTS", - "T86x_TI_LINES", - "T86x_TI_VCACHE_HIT", - "T86x_TI_VCACHE_MISS", - "T86x_TI_FRONT_FACING", - "T86x_TI_BACK_FACING", - "T86x_TI_PRIM_VISIBLE", - "T86x_TI_PRIM_CULLED", - "T86x_TI_PRIM_CLIPPED", - "T86x_TI_LEVEL0", - "T86x_TI_LEVEL1", - "T86x_TI_LEVEL2", - "T86x_TI_LEVEL3", - "T86x_TI_LEVEL4", - "T86x_TI_LEVEL5", - "T86x_TI_LEVEL6", - "T86x_TI_LEVEL7", - "T86x_TI_COMMAND_1", - "T86x_TI_COMMAND_2", - "T86x_TI_COMMAND_3", - "T86x_TI_COMMAND_4", - "T86x_TI_COMMAND_5_7", - "T86x_TI_COMMAND_8_15", - "T86x_TI_COMMAND_16_63", - "T86x_TI_COMMAND_64", - "T86x_TI_COMPRESS_IN", - "T86x_TI_COMPRESS_OUT", - "T86x_TI_COMPRESS_FLUSH", - "T86x_TI_TIMESTAMPS", - "T86x_TI_PCACHE_HIT", - "T86x_TI_PCACHE_MISS", - "T86x_TI_PCACHE_LINE", - "T86x_TI_PCACHE_STALL", - "T86x_TI_WRBUF_HIT", - "T86x_TI_WRBUF_MISS", - "T86x_TI_WRBUF_LINE", - "T86x_TI_WRBUF_PARTIAL", - "T86x_TI_WRBUF_STALL", - "T86x_TI_ACTIVE", - "T86x_TI_LOADING_DESC", - "T86x_TI_INDEX_WAIT", - "T86x_TI_INDEX_RANGE_WAIT", - "T86x_TI_VERTEX_WAIT", - "T86x_TI_PCACHE_WAIT", - "T86x_TI_WRBUF_WAIT", - "T86x_TI_BUS_READ", - "T86x_TI_BUS_WRITE", - "", - "", - "", - "", - "", - "T86x_TI_UTLB_HIT", - "T86x_TI_UTLB_NEW_MISS", - "T86x_TI_UTLB_REPLAY_FULL", - "T86x_TI_UTLB_REPLAY_MISS", - "T86x_TI_UTLB_STALL", - - /* Shader Core */ - "", - "", - "", - "", - "T86x_FRAG_ACTIVE", - "T86x_FRAG_PRIMITIVES", - "T86x_FRAG_PRIMITIVES_DROPPED", - "T86x_FRAG_CYCLES_DESC", - "T86x_FRAG_CYCLES_FPKQ_ACTIVE", - "T86x_FRAG_CYCLES_VERT", - "T86x_FRAG_CYCLES_TRISETUP", - "T86x_FRAG_CYCLES_EZS_ACTIVE", - "T86x_FRAG_THREADS", - "T86x_FRAG_DUMMY_THREADS", - "T86x_FRAG_QUADS_RAST", - "T86x_FRAG_QUADS_EZS_TEST", - "T86x_FRAG_QUADS_EZS_KILLED", - "T86x_FRAG_THREADS_LZS_TEST", - 
"T86x_FRAG_THREADS_LZS_KILLED", - "T86x_FRAG_CYCLES_NO_TILE", - "T86x_FRAG_NUM_TILES", - "T86x_FRAG_TRANS_ELIM", - "T86x_COMPUTE_ACTIVE", - "T86x_COMPUTE_TASKS", - "T86x_COMPUTE_THREADS", - "T86x_COMPUTE_CYCLES_DESC", - "T86x_TRIPIPE_ACTIVE", - "T86x_ARITH_WORDS", - "T86x_ARITH_CYCLES_REG", - "T86x_ARITH_CYCLES_L0", - "T86x_ARITH_FRAG_DEPEND", - "T86x_LS_WORDS", - "T86x_LS_ISSUES", - "T86x_LS_REISSUE_ATTR", - "T86x_LS_REISSUES_VARY", - "T86x_LS_VARY_RV_MISS", - "T86x_LS_VARY_RV_HIT", - "T86x_LS_NO_UNPARK", - "T86x_TEX_WORDS", - "T86x_TEX_BUBBLES", - "T86x_TEX_WORDS_L0", - "T86x_TEX_WORDS_DESC", - "T86x_TEX_ISSUES", - "T86x_TEX_RECIRC_FMISS", - "T86x_TEX_RECIRC_DESC", - "T86x_TEX_RECIRC_MULTI", - "T86x_TEX_RECIRC_PMISS", - "T86x_TEX_RECIRC_CONF", - "T86x_LSC_READ_HITS", - "T86x_LSC_READ_OP", - "T86x_LSC_WRITE_HITS", - "T86x_LSC_WRITE_OP", - "T86x_LSC_ATOMIC_HITS", - "T86x_LSC_ATOMIC_OP", - "T86x_LSC_LINE_FETCHES", - "T86x_LSC_DIRTY_LINE", - "T86x_LSC_SNOOPS", - "T86x_AXI_TLB_STALL", - "T86x_AXI_TLB_MIESS", - "T86x_AXI_TLB_TRANSACTION", - "T86x_LS_TLB_MISS", - "T86x_LS_TLB_HIT", - "T86x_AXI_BEATS_READ", - "T86x_AXI_BEATS_WRITTEN", - - /*L2 and MMU */ - "", - "", - "", - "", - "T86x_MMU_HIT", - "T86x_MMU_NEW_MISS", - "T86x_MMU_REPLAY_FULL", - "T86x_MMU_REPLAY_MISS", - "T86x_MMU_TABLE_WALK", - "T86x_MMU_REQUESTS", - "", - "", - "T86x_UTLB_HIT", - "T86x_UTLB_NEW_MISS", - "T86x_UTLB_REPLAY_FULL", - "T86x_UTLB_REPLAY_MISS", - "T86x_UTLB_STALL", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "T86x_L2_EXT_WRITE_BEATS", - "T86x_L2_EXT_READ_BEATS", - "T86x_L2_ANY_LOOKUP", - "T86x_L2_READ_LOOKUP", - "T86x_L2_SREAD_LOOKUP", - "T86x_L2_READ_REPLAY", - "T86x_L2_READ_SNOOP", - "T86x_L2_READ_HIT", - "T86x_L2_CLEAN_MISS", - "T86x_L2_WRITE_LOOKUP", - "T86x_L2_SWRITE_LOOKUP", - "T86x_L2_WRITE_REPLAY", - "T86x_L2_WRITE_SNOOP", - "T86x_L2_WRITE_HIT", - "T86x_L2_EXT_READ_FULL", - "", - "T86x_L2_EXT_WRITE_FULL", - "T86x_L2_EXT_R_W_HAZARD", - 
"T86x_L2_EXT_READ", - "T86x_L2_EXT_READ_LINE", - "T86x_L2_EXT_WRITE", - "T86x_L2_EXT_WRITE_LINE", - "T86x_L2_EXT_WRITE_SMALL", - "T86x_L2_EXT_BARRIER", - "T86x_L2_EXT_AR_STALL", - "T86x_L2_EXT_R_BUF_FULL", - "T86x_L2_EXT_RD_BUF_FULL", - "T86x_L2_EXT_R_RAW", - "T86x_L2_EXT_W_STALL", - "T86x_L2_EXT_W_BUF_FULL", - "T86x_L2_EXT_R_BUF_FULL", - "T86x_L2_TAG_HAZARD", - "T86x_L2_SNOOP_FULL", - "T86x_L2_REPLAY_FULL" -}; - -static const char * const hardware_counters_mali_t88x[] = { - /* Job Manager */ - "", - "", - "", - "", - "T88x_MESSAGES_SENT", - "T88x_MESSAGES_RECEIVED", - "T88x_GPU_ACTIVE", - "T88x_IRQ_ACTIVE", - "T88x_JS0_JOBS", - "T88x_JS0_TASKS", - "T88x_JS0_ACTIVE", - "", - "T88x_JS0_WAIT_READ", - "T88x_JS0_WAIT_ISSUE", - "T88x_JS0_WAIT_DEPEND", - "T88x_JS0_WAIT_FINISH", - "T88x_JS1_JOBS", - "T88x_JS1_TASKS", - "T88x_JS1_ACTIVE", - "", - "T88x_JS1_WAIT_READ", - "T88x_JS1_WAIT_ISSUE", - "T88x_JS1_WAIT_DEPEND", - "T88x_JS1_WAIT_FINISH", - "T88x_JS2_JOBS", - "T88x_JS2_TASKS", - "T88x_JS2_ACTIVE", - "", - "T88x_JS2_WAIT_READ", - "T88x_JS2_WAIT_ISSUE", - "T88x_JS2_WAIT_DEPEND", - "T88x_JS2_WAIT_FINISH", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - - /*Tiler */ - "", - "", - "", - "T88x_TI_JOBS_PROCESSED", - "T88x_TI_TRIANGLES", - "T88x_TI_QUADS", - "T88x_TI_POLYGONS", - "T88x_TI_POINTS", - "T88x_TI_LINES", - "T88x_TI_VCACHE_HIT", - "T88x_TI_VCACHE_MISS", - "T88x_TI_FRONT_FACING", - "T88x_TI_BACK_FACING", - "T88x_TI_PRIM_VISIBLE", - "T88x_TI_PRIM_CULLED", - "T88x_TI_PRIM_CLIPPED", - "T88x_TI_LEVEL0", - "T88x_TI_LEVEL1", - "T88x_TI_LEVEL2", - "T88x_TI_LEVEL3", - "T88x_TI_LEVEL4", - "T88x_TI_LEVEL5", - "T88x_TI_LEVEL6", - "T88x_TI_LEVEL7", - "T88x_TI_COMMAND_1", - "T88x_TI_COMMAND_2", - "T88x_TI_COMMAND_3", - "T88x_TI_COMMAND_4", - "T88x_TI_COMMAND_5_7", - "T88x_TI_COMMAND_8_15", - "T88x_TI_COMMAND_16_63", - 
"T88x_TI_COMMAND_64", - "T88x_TI_COMPRESS_IN", - "T88x_TI_COMPRESS_OUT", - "T88x_TI_COMPRESS_FLUSH", - "T88x_TI_TIMESTAMPS", - "T88x_TI_PCACHE_HIT", - "T88x_TI_PCACHE_MISS", - "T88x_TI_PCACHE_LINE", - "T88x_TI_PCACHE_STALL", - "T88x_TI_WRBUF_HIT", - "T88x_TI_WRBUF_MISS", - "T88x_TI_WRBUF_LINE", - "T88x_TI_WRBUF_PARTIAL", - "T88x_TI_WRBUF_STALL", - "T88x_TI_ACTIVE", - "T88x_TI_LOADING_DESC", - "T88x_TI_INDEX_WAIT", - "T88x_TI_INDEX_RANGE_WAIT", - "T88x_TI_VERTEX_WAIT", - "T88x_TI_PCACHE_WAIT", - "T88x_TI_WRBUF_WAIT", - "T88x_TI_BUS_READ", - "T88x_TI_BUS_WRITE", - "", - "", - "", - "", - "", - "T88x_TI_UTLB_HIT", - "T88x_TI_UTLB_NEW_MISS", - "T88x_TI_UTLB_REPLAY_FULL", - "T88x_TI_UTLB_REPLAY_MISS", - "T88x_TI_UTLB_STALL", - - /* Shader Core */ - "", - "", - "", - "", - "T88x_FRAG_ACTIVE", - "T88x_FRAG_PRIMITIVES", - "T88x_FRAG_PRIMITIVES_DROPPED", - "T88x_FRAG_CYCLES_DESC", - "T88x_FRAG_CYCLES_FPKQ_ACTIVE", - "T88x_FRAG_CYCLES_VERT", - "T88x_FRAG_CYCLES_TRISETUP", - "T88x_FRAG_CYCLES_EZS_ACTIVE", - "T88x_FRAG_THREADS", - "T88x_FRAG_DUMMY_THREADS", - "T88x_FRAG_QUADS_RAST", - "T88x_FRAG_QUADS_EZS_TEST", - "T88x_FRAG_QUADS_EZS_KILLED", - "T88x_FRAG_THREADS_LZS_TEST", - "T88x_FRAG_THREADS_LZS_KILLED", - "T88x_FRAG_CYCLES_NO_TILE", - "T88x_FRAG_NUM_TILES", - "T88x_FRAG_TRANS_ELIM", - "T88x_COMPUTE_ACTIVE", - "T88x_COMPUTE_TASKS", - "T88x_COMPUTE_THREADS", - "T88x_COMPUTE_CYCLES_DESC", - "T88x_TRIPIPE_ACTIVE", - "T88x_ARITH_WORDS", - "T88x_ARITH_CYCLES_REG", - "T88x_ARITH_CYCLES_L0", - "T88x_ARITH_FRAG_DEPEND", - "T88x_LS_WORDS", - "T88x_LS_ISSUES", - "T88x_LS_REISSUE_ATTR", - "T88x_LS_REISSUES_VARY", - "T88x_LS_VARY_RV_MISS", - "T88x_LS_VARY_RV_HIT", - "T88x_LS_NO_UNPARK", - "T88x_TEX_WORDS", - "T88x_TEX_BUBBLES", - "T88x_TEX_WORDS_L0", - "T88x_TEX_WORDS_DESC", - "T88x_TEX_ISSUES", - "T88x_TEX_RECIRC_FMISS", - "T88x_TEX_RECIRC_DESC", - "T88x_TEX_RECIRC_MULTI", - "T88x_TEX_RECIRC_PMISS", - "T88x_TEX_RECIRC_CONF", - "T88x_LSC_READ_HITS", - "T88x_LSC_READ_OP", - 
"T88x_LSC_WRITE_HITS", - "T88x_LSC_WRITE_OP", - "T88x_LSC_ATOMIC_HITS", - "T88x_LSC_ATOMIC_OP", - "T88x_LSC_LINE_FETCHES", - "T88x_LSC_DIRTY_LINE", - "T88x_LSC_SNOOPS", - "T88x_AXI_TLB_STALL", - "T88x_AXI_TLB_MIESS", - "T88x_AXI_TLB_TRANSACTION", - "T88x_LS_TLB_MISS", - "T88x_LS_TLB_HIT", - "T88x_AXI_BEATS_READ", - "T88x_AXI_BEATS_WRITTEN", - - /*L2 and MMU */ - "", - "", - "", - "", - "T88x_MMU_HIT", - "T88x_MMU_NEW_MISS", - "T88x_MMU_REPLAY_FULL", - "T88x_MMU_REPLAY_MISS", - "T88x_MMU_TABLE_WALK", - "T88x_MMU_REQUESTS", - "", - "", - "T88x_UTLB_HIT", - "T88x_UTLB_NEW_MISS", - "T88x_UTLB_REPLAY_FULL", - "T88x_UTLB_REPLAY_MISS", - "T88x_UTLB_STALL", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "T88x_L2_EXT_WRITE_BEATS", - "T88x_L2_EXT_READ_BEATS", - "T88x_L2_ANY_LOOKUP", - "T88x_L2_READ_LOOKUP", - "T88x_L2_SREAD_LOOKUP", - "T88x_L2_READ_REPLAY", - "T88x_L2_READ_SNOOP", - "T88x_L2_READ_HIT", - "T88x_L2_CLEAN_MISS", - "T88x_L2_WRITE_LOOKUP", - "T88x_L2_SWRITE_LOOKUP", - "T88x_L2_WRITE_REPLAY", - "T88x_L2_WRITE_SNOOP", - "T88x_L2_WRITE_HIT", - "T88x_L2_EXT_READ_FULL", - "", - "T88x_L2_EXT_WRITE_FULL", - "T88x_L2_EXT_R_W_HAZARD", - "T88x_L2_EXT_READ", - "T88x_L2_EXT_READ_LINE", - "T88x_L2_EXT_WRITE", - "T88x_L2_EXT_WRITE_LINE", - "T88x_L2_EXT_WRITE_SMALL", - "T88x_L2_EXT_BARRIER", - "T88x_L2_EXT_AR_STALL", - "T88x_L2_EXT_R_BUF_FULL", - "T88x_L2_EXT_RD_BUF_FULL", - "T88x_L2_EXT_R_RAW", - "T88x_L2_EXT_W_STALL", - "T88x_L2_EXT_W_BUF_FULL", - "T88x_L2_EXT_R_BUF_FULL", - "T88x_L2_TAG_HAZARD", - "T88x_L2_SNOOP_FULL", - "T88x_L2_REPLAY_FULL" -}; - -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c deleted file mode 100755 index ca264049653c4..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c +++ /dev/null @@ -1,98 +0,0 @@ -/* - * - * (C) COPYRIGHT 
2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#include - -#ifdef CONFIG_DEBUG_FS -/** Show callback for the @c gpu_memory debugfs file. - * - * This function is called to get the contents of the @c gpu_memory debugfs - * file. This is a report of current gpu memory usage. - * - * @param sfile The debugfs entry - * @param data Data associated with the entry - * - * @return 0 if successfully prints data in debugfs entry file - * -1 if it encountered an error - */ - -static int kbasep_gpu_memory_seq_show(struct seq_file *sfile, void *data) -{ - ssize_t ret = 0; - struct list_head *entry; - const struct list_head *kbdev_list; - - kbdev_list = kbase_dev_list_get(); - list_for_each(entry, kbdev_list) { - struct kbase_device *kbdev = NULL; - struct kbasep_kctx_list_element *element; - - kbdev = list_entry(entry, struct kbase_device, entry); - /* output the total memory usage and cap for this device */ - ret = seq_printf(sfile, "%-16s %10u\n", - kbdev->devname, - atomic_read(&(kbdev->memdev.used_pages))); - mutex_lock(&kbdev->kctx_list_lock); - list_for_each_entry(element, &kbdev->kctx_list, link) { - /* output the memory usage and cap for each kctx - * opened on this device */ - ret = seq_printf(sfile, " %s-0x%p %10u\n", - "kctx", - element->kctx, - atomic_read(&(element->kctx->used_pages))); - } - mutex_unlock(&kbdev->kctx_list_lock); - } - kbase_dev_list_put(kbdev_list); - return ret; -} - -/* - * File operations related to debugfs entry for gpu_memory - */ -static int kbasep_gpu_memory_debugfs_open(struct inode 
*in, struct file *file) -{ - return single_open(file, kbasep_gpu_memory_seq_show , NULL); -} - -static const struct file_operations kbasep_gpu_memory_debugfs_fops = { - .open = kbasep_gpu_memory_debugfs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -/* - * Initialize debugfs entry for gpu_memory - */ -void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev) -{ - debugfs_create_file("gpu_memory", S_IRUGO, - kbdev->mali_debugfs_directory, NULL, - &kbasep_gpu_memory_debugfs_fops); - return; -} - -#else -/* - * Stub functions for when debugfs is disabled - */ -void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev) -{ - return; -} -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.h deleted file mode 100755 index 3cf30a4e767ec..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/** - * @file mali_kbase_gpu_memory_debugfs.h - * Header file for gpu_memory entry in debugfs - * - */ - -#ifndef _KBASE_GPU_MEMORY_H -#define _KBASE_GPU_MEMORY_H - -#include -#include -#include - -/** - * @brief Initialize gpu_memory debugfs entry - */ -void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev); - -#endif /*_KBASE_GPU_MEMORY_H*/ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c deleted file mode 100755 index d632a0bbb1bc8..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c +++ /dev/null @@ -1,299 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Base kernel property query APIs - */ - -#include -#include -#include -#include -#include -#include - -/** - * KBASE_UBFX32 - Extracts bits from a 32-bit bitfield. - * @value: The value from which to extract bits. - * @offset: The first bit to extract (0 being the LSB). - * @size: The number of bits to extract. - * - * Context: @offset + @size <= 32. - * - * Return: Bits [@offset, @offset + @size) from @value. 
- */ -/* from mali_cdsb.h */ -#define KBASE_UBFX32(value, offset, size) \ - (((u32)(value) >> (u32)(offset)) & (u32)((1ULL << (u32)(size)) - 1)) - -int kbase_gpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_gpuprops * const kbase_props) -{ - kbase_gpu_clk_speed_func get_gpu_speed_mhz; - u32 gpu_speed_mhz; - int rc = 1; - - KBASE_DEBUG_ASSERT(NULL != kctx); - KBASE_DEBUG_ASSERT(NULL != kbase_props); - - /* Current GPU speed is requested from the system integrator via the GPU_SPEED_FUNC function. - * If that function fails, or the function is not provided by the system integrator, we report the maximum - * GPU speed as specified by GPU_FREQ_KHZ_MAX. - */ - get_gpu_speed_mhz = (kbase_gpu_clk_speed_func) GPU_SPEED_FUNC; - if (get_gpu_speed_mhz != NULL) { - rc = get_gpu_speed_mhz(&gpu_speed_mhz); -#ifdef CONFIG_MALI_DEBUG - /* Issue a warning message when the reported GPU speed falls outside the min/max range */ - if (rc == 0) { - u32 gpu_speed_khz = gpu_speed_mhz * 1000; - - if (gpu_speed_khz < kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min || - gpu_speed_khz > kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max) - dev_warn(kctx->kbdev->dev, "GPU Speed is outside of min/max range (got %lu Khz, min %lu Khz, max %lu Khz)\n", - (unsigned long)gpu_speed_khz, - (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min, - (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max); - } -#endif /* CONFIG_MALI_DEBUG */ - } - if (kctx->kbdev->clock) { - gpu_speed_mhz = clk_get_rate(kctx->kbdev->clock) / 1000000; - rc = 0; - } - if (rc != 0) - gpu_speed_mhz = kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max / 1000; - - kctx->kbdev->gpu_props.props.core_props.gpu_speed_mhz = gpu_speed_mhz; - - memcpy(&kbase_props->props, &kctx->kbdev->gpu_props.props, sizeof(kbase_props->props)); - - /* Before API 8.2 they expect L3 cache info here, which was always 0 */ - if (kctx->api_version < KBASE_API_VERSION(8, 2)) - 
kbase_props->props.raw_props.suspend_size = 0; - - - return 0; -} - -static void kbase_gpuprops_construct_coherent_groups(base_gpu_props * const props) -{ - struct mali_base_gpu_coherent_group *current_group; - u64 group_present; - u64 group_mask; - u64 first_set, first_set_prev; - u32 num_groups = 0; - - KBASE_DEBUG_ASSERT(NULL != props); - - props->coherency_info.coherency = props->raw_props.mem_features; - props->coherency_info.num_core_groups = hweight64(props->raw_props.l2_present); - - if (props->coherency_info.coherency & GROUPS_L2_COHERENT) { - /* Group is l2 coherent */ - group_present = props->raw_props.l2_present; - } else { - /* Group is l1 coherent */ - group_present = props->raw_props.shader_present; - } - - /* - * The coherent group mask can be computed from the l2 present - * register. - * - * For the coherent group n: - * group_mask[n] = (first_set[n] - 1) & ~(first_set[n-1] - 1) - * where first_set is group_present with only its nth set-bit kept - * (i.e. the position from where a new group starts). - * - * For instance if the groups are l2 coherent and l2_present=0x0..01111: - * The first mask is: - * group_mask[1] = (first_set[1] - 1) & ~(first_set[0] - 1) - * = (0x0..010 - 1) & ~(0x0..01 - 1) - * = 0x0..00f - * The second mask is: - * group_mask[2] = (first_set[2] - 1) & ~(first_set[1] - 1) - * = (0x0..100 - 1) & ~(0x0..010 - 1) - * = 0x0..0f0 - * And so on until all the bits from group_present have been cleared - * (i.e. there is no group left). 
- */ - - current_group = props->coherency_info.group; - first_set = group_present & ~(group_present - 1); - - while (group_present != 0 && num_groups < BASE_MAX_COHERENT_GROUPS) { - group_present -= first_set; /* Clear the current group bit */ - first_set_prev = first_set; - - first_set = group_present & ~(group_present - 1); - group_mask = (first_set - 1) & ~(first_set_prev - 1); - - /* Populate the coherent_group structure for each group */ - current_group->core_mask = group_mask & props->raw_props.shader_present; - current_group->num_cores = hweight64(current_group->core_mask); - - num_groups++; - current_group++; - } - - if (group_present != 0) - pr_warn("Too many coherent groups (keeping only %d groups).\n", BASE_MAX_COHERENT_GROUPS); - - props->coherency_info.num_groups = num_groups; -} - -/** - * kbase_gpuprops_get_props - Get the GPU configuration - * @gpu_props: The &base_gpu_props structure - * @kbdev: The &struct kbase_device structure for the device - * - * Fill the &base_gpu_props structure with values from the GPU configuration - * registers. 
Only the raw properties are filled in this function - */ -static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev) -{ - struct kbase_gpuprops_regdump regdump; - int i; - - KBASE_DEBUG_ASSERT(NULL != kbdev); - KBASE_DEBUG_ASSERT(NULL != gpu_props); - - /* Dump relevant registers */ - kbase_backend_gpuprops_get(kbdev, ®dump); - - gpu_props->raw_props.gpu_id = regdump.gpu_id; - gpu_props->raw_props.tiler_features = regdump.tiler_features; - gpu_props->raw_props.mem_features = regdump.mem_features; - gpu_props->raw_props.mmu_features = regdump.mmu_features; - gpu_props->raw_props.l2_features = regdump.l2_features; - gpu_props->raw_props.suspend_size = regdump.suspend_size; - - gpu_props->raw_props.as_present = regdump.as_present; - gpu_props->raw_props.js_present = regdump.js_present; - gpu_props->raw_props.shader_present = ((u64) regdump.shader_present_hi << 32) + regdump.shader_present_lo; - gpu_props->raw_props.tiler_present = ((u64) regdump.tiler_present_hi << 32) + regdump.tiler_present_lo; - gpu_props->raw_props.l2_present = ((u64) regdump.l2_present_hi << 32) + regdump.l2_present_lo; - - for (i = 0; i < GPU_MAX_JOB_SLOTS; i++) - gpu_props->raw_props.js_features[i] = regdump.js_features[i]; - - for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++) - gpu_props->raw_props.texture_features[i] = regdump.texture_features[i]; - - gpu_props->raw_props.thread_max_barrier_size = regdump.thread_max_barrier_size; - gpu_props->raw_props.thread_max_threads = regdump.thread_max_threads; - gpu_props->raw_props.thread_max_workgroup_size = regdump.thread_max_workgroup_size; - gpu_props->raw_props.thread_features = regdump.thread_features; - -} - -/** - * kbase_gpuprops_calculate_props - Calculate the derived properties - * @gpu_props: The &base_gpu_props structure - * @kbdev: The &struct kbase_device structure for the device - * - * Fill the &base_gpu_props structure with values derived from the GPU - * configuration registers - 
*/ -static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev) -{ - int i; - - /* Populate the base_gpu_props structure */ - gpu_props->core_props.version_status = KBASE_UBFX32(gpu_props->raw_props.gpu_id, 0U, 4); - gpu_props->core_props.minor_revision = KBASE_UBFX32(gpu_props->raw_props.gpu_id, 4U, 8); - gpu_props->core_props.major_revision = KBASE_UBFX32(gpu_props->raw_props.gpu_id, 12U, 4); - gpu_props->core_props.product_id = KBASE_UBFX32(gpu_props->raw_props.gpu_id, 16U, 16); - gpu_props->core_props.log2_program_counter_size = KBASE_GPU_PC_SIZE_LOG2; - gpu_props->core_props.gpu_available_memory_size = totalram_pages << PAGE_SHIFT; - - for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++) - gpu_props->core_props.texture_features[i] = gpu_props->raw_props.texture_features[i]; - - gpu_props->l2_props.log2_line_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 0U, 8); - gpu_props->l2_props.log2_cache_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 16U, 8); - - /* Field with number of l2 slices is added to MEM_FEATURES register - * since t76x. Below code assumes that for older GPU reserved bits will - * be read as zero. 
*/ - gpu_props->l2_props.num_l2_slices = - KBASE_UBFX32(gpu_props->raw_props.mem_features, 8U, 4) + 1; - - gpu_props->tiler_props.bin_size_bytes = 1 << KBASE_UBFX32(gpu_props->raw_props.tiler_features, 0U, 6); - gpu_props->tiler_props.max_active_levels = KBASE_UBFX32(gpu_props->raw_props.tiler_features, 8U, 4); - - if (gpu_props->raw_props.thread_max_threads == 0) - gpu_props->thread_props.max_threads = THREAD_MT_DEFAULT; - else - gpu_props->thread_props.max_threads = gpu_props->raw_props.thread_max_threads; - - if (gpu_props->raw_props.thread_max_workgroup_size == 0) - gpu_props->thread_props.max_workgroup_size = THREAD_MWS_DEFAULT; - else - gpu_props->thread_props.max_workgroup_size = gpu_props->raw_props.thread_max_workgroup_size; - - if (gpu_props->raw_props.thread_max_barrier_size == 0) - gpu_props->thread_props.max_barrier_size = THREAD_MBS_DEFAULT; - else - gpu_props->thread_props.max_barrier_size = gpu_props->raw_props.thread_max_barrier_size; - - gpu_props->thread_props.max_registers = KBASE_UBFX32(gpu_props->raw_props.thread_features, 0U, 16); - gpu_props->thread_props.max_task_queue = KBASE_UBFX32(gpu_props->raw_props.thread_features, 16U, 8); - gpu_props->thread_props.max_thread_group_split = KBASE_UBFX32(gpu_props->raw_props.thread_features, 24U, 6); - gpu_props->thread_props.impl_tech = KBASE_UBFX32(gpu_props->raw_props.thread_features, 30U, 2); - - /* If values are not specified, then use defaults */ - if (gpu_props->thread_props.max_registers == 0) { - gpu_props->thread_props.max_registers = THREAD_MR_DEFAULT; - gpu_props->thread_props.max_task_queue = THREAD_MTQ_DEFAULT; - gpu_props->thread_props.max_thread_group_split = THREAD_MTGS_DEFAULT; - } - /* Initialize the coherent_group structure for each group */ - kbase_gpuprops_construct_coherent_groups(gpu_props); -} - -void kbase_gpuprops_set(struct kbase_device *kbdev) -{ - struct kbase_gpu_props *gpu_props; - struct gpu_raw_gpu_props *raw; - - KBASE_DEBUG_ASSERT(NULL != kbdev); - gpu_props = 
&kbdev->gpu_props; - raw = &gpu_props->props.raw_props; - - /* Initialize the base_gpu_props structure from the hardware */ - kbase_gpuprops_get_props(&gpu_props->props, kbdev); - - /* Populate the derived properties */ - kbase_gpuprops_calculate_props(&gpu_props->props, kbdev); - - /* Populate kbase-only fields */ - gpu_props->l2_props.associativity = KBASE_UBFX32(raw->l2_features, 8U, 8); - gpu_props->l2_props.external_bus_width = KBASE_UBFX32(raw->l2_features, 24U, 8); - - gpu_props->mem.core_group = KBASE_UBFX32(raw->mem_features, 0U, 1); - - gpu_props->mmu.va_bits = KBASE_UBFX32(raw->mmu_features, 0U, 8); - gpu_props->mmu.pa_bits = KBASE_UBFX32(raw->mmu_features, 8U, 8); - - gpu_props->num_cores = hweight64(raw->shader_present); - gpu_props->num_core_groups = hweight64(raw->l2_present); - gpu_props->num_address_spaces = hweight32(raw->as_present); - gpu_props->num_job_slots = hweight32(raw->js_present); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpuprops.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpuprops.h deleted file mode 100755 index af97d97bf9452..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpuprops.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/** - * @file mali_kbase_gpuprops.h - * Base kernel property query APIs - */ - -#ifndef _KBASE_GPUPROPS_H_ -#define _KBASE_GPUPROPS_H_ - -#include "mali_kbase_gpuprops_types.h" - -/* Forward definition - see mali_kbase.h */ -struct kbase_device; - -/** - * @brief Set up Kbase GPU properties. - * - * Set up Kbase GPU properties with information from the GPU registers - * - * @param kbdev The struct kbase_device structure for the device - */ -void kbase_gpuprops_set(struct kbase_device *kbdev); - -/** - * @brief Provide GPU properties to userside through UKU call. - * - * Fill the struct kbase_uk_gpuprops with values from GPU configuration registers. - * - * @param kctx The struct kbase_context structure - * @param kbase_props A copy of the struct kbase_uk_gpuprops structure from userspace - * - * @return 0 on success. Any other value indicates failure. - */ -int kbase_gpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_gpuprops * const kbase_props); - -#endif /* _KBASE_GPUPROPS_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h deleted file mode 100755 index 463fead4b05d9..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/** - * @file mali_kbase_gpuprops_types.h - * Base kernel property query APIs - */ - -#ifndef _KBASE_GPUPROPS_TYPES_H_ -#define _KBASE_GPUPROPS_TYPES_H_ - -#include "mali_base_kernel.h" - -#define KBASE_GPU_SPEED_MHZ 123 -#define KBASE_GPU_PC_SIZE_LOG2 24U - -struct kbase_gpuprops_regdump { - u32 gpu_id; - u32 l2_features; - u32 suspend_size; /* API 8.2+ */ - u32 tiler_features; - u32 mem_features; - u32 mmu_features; - u32 as_present; - u32 js_present; - u32 thread_max_threads; - u32 thread_max_workgroup_size; - u32 thread_max_barrier_size; - u32 thread_features; - u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS]; - u32 js_features[GPU_MAX_JOB_SLOTS]; - u32 shader_present_lo; - u32 shader_present_hi; - u32 tiler_present_lo; - u32 tiler_present_hi; - u32 l2_present_lo; - u32 l2_present_hi; -}; - -struct kbase_gpu_cache_props { - u8 associativity; - u8 external_bus_width; -}; - -struct kbase_gpu_mem_props { - u8 core_group; -}; - -struct kbase_gpu_mmu_props { - u8 va_bits; - u8 pa_bits; -}; - -struct kbase_gpu_props { - /* kernel-only properties */ - u8 num_cores; - u8 num_core_groups; - u8 num_address_spaces; - u8 num_job_slots; - - struct kbase_gpu_cache_props l2_props; - - struct kbase_gpu_mem_props mem; - struct kbase_gpu_mmu_props mmu; - - /** - * Implementation specific irq throttle value (us), should be adjusted during integration. - */ - int irq_throttle_time_us; - - /* Properties shared with userspace */ - base_gpu_props props; -}; - -#endif /* _KBASE_GPUPROPS_TYPES_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hw.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hw.c deleted file mode 100755 index fac65d4f22865..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hw.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Run-time work-arounds helpers - */ - -#include -#include -#include -#include "mali_kbase.h" -#include "mali_kbase_hw.h" - -void kbase_hw_set_features_mask(struct kbase_device *kbdev) -{ - const enum base_hw_feature *features; - u32 gpu_id; - - gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; - gpu_id &= GPU_ID_VERSION_PRODUCT_ID; - gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT; - - switch (gpu_id) { - case GPU_ID_PI_TFRX: - /* FALLTHROUGH */ - case GPU_ID_PI_T86X: - features = base_hw_features_tFxx; - break; - case GPU_ID_PI_T83X: - features = base_hw_features_t83x; - break; - case GPU_ID_PI_T82X: - features = base_hw_features_t82x; - break; - case GPU_ID_PI_T76X: - features = base_hw_features_t76x; - break; - case GPU_ID_PI_T72X: - features = base_hw_features_t72x; - break; - case GPU_ID_PI_T62X: - features = base_hw_features_t62x; - break; - case GPU_ID_PI_T60X: - features = base_hw_features_t60x; - break; - default: - features = base_hw_features_generic; - break; - } - - for (; *features != BASE_HW_FEATURE_END; features++) - set_bit(*features, &kbdev->hw_features_mask[0]); -} - -int kbase_hw_set_issues_mask(struct kbase_device *kbdev) -{ - const enum base_hw_issue *issues; - u32 gpu_id; - u32 impl_tech; - - gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; - impl_tech = kbdev->gpu_props.props.thread_props.impl_tech; - - if (impl_tech != IMPLEMENTATION_MODEL) { - switch (gpu_id) { - case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_15DEV0): - issues = base_hw_issues_t60x_r0p0_15dev0; - 
break; - case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_EAC): - issues = base_hw_issues_t60x_r0p0_eac; - break; - case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 1, 0): - issues = base_hw_issues_t60x_r0p1; - break; - case GPU_ID_MAKE(GPU_ID_PI_T62X, 0, 1, 0): - issues = base_hw_issues_t62x_r0p1; - break; - case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 0): - case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 1): - issues = base_hw_issues_t62x_r1p0; - break; - case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 1, 0): - issues = base_hw_issues_t62x_r1p1; - break; - case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 0, 1): - issues = base_hw_issues_t76x_r0p0; - break; - case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 1): - issues = base_hw_issues_t76x_r0p1; - break; - case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 9): - issues = base_hw_issues_t76x_r0p1_50rel0; - break; - case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 2, 1): - issues = base_hw_issues_t76x_r0p2; - break; - case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 3, 1): - issues = base_hw_issues_t76x_r0p3; - break; - case GPU_ID_MAKE(GPU_ID_PI_T76X, 1, 0, 0): - issues = base_hw_issues_t76x_r1p0; - break; - case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 0): - case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 1): - case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 2): - issues = base_hw_issues_t72x_r0p0; - break; - case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 0, 0): - issues = base_hw_issues_t72x_r1p0; - break; - case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 1, 0): - issues = base_hw_issues_t72x_r1p1; - break; - case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 1, 2): - issues = base_hw_issues_tFRx_r0p1; - break; - case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 2, 0): - issues = base_hw_issues_tFRx_r0p2; - break; - case GPU_ID_MAKE(GPU_ID_PI_TFRX, 1, 0, 0): - case GPU_ID_MAKE(GPU_ID_PI_TFRX, 1, 0, 8): - issues = base_hw_issues_tFRx_r1p0; - break; - case GPU_ID_MAKE(GPU_ID_PI_TFRX, 2, 0, 0): - issues = base_hw_issues_tFRx_r2p0; - break; - case GPU_ID_MAKE(GPU_ID_PI_T86X, 0, 2, 0): - issues = base_hw_issues_t86x_r0p2; - break; - case GPU_ID_MAKE(GPU_ID_PI_T86X, 1, 0, 0): - case 
GPU_ID_MAKE(GPU_ID_PI_T86X, 1, 0, 8): - issues = base_hw_issues_t86x_r1p0; - break; - case GPU_ID_MAKE(GPU_ID_PI_T86X, 2, 0, 0): - issues = base_hw_issues_t86x_r2p0; - break; - case GPU_ID_MAKE(GPU_ID_PI_T83X, 0, 1, 0): - issues = base_hw_issues_t83x_r0p1; - break; - case GPU_ID_MAKE(GPU_ID_PI_T83X, 1, 0, 0): - case GPU_ID_MAKE(GPU_ID_PI_T83X, 1, 0, 8): - issues = base_hw_issues_t83x_r1p0; - break; - case GPU_ID_MAKE(GPU_ID_PI_T82X, 0, 0, 0): - issues = base_hw_issues_t82x_r0p0; - break; - case GPU_ID_MAKE(GPU_ID_PI_T82X, 0, 1, 0): - issues = base_hw_issues_t82x_r0p1; - break; - case GPU_ID_MAKE(GPU_ID_PI_T82X, 1, 0, 0): - case GPU_ID_MAKE(GPU_ID_PI_T82X, 1, 0, 8): - issues = base_hw_issues_t82x_r1p0; - break; - default: - dev_err(kbdev->dev, "Unknown GPU ID %x", gpu_id); - return -EINVAL; - } - } else { - /* Software model */ - switch (gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT) { - case GPU_ID_PI_T60X: - issues = base_hw_issues_model_t60x; - break; - case GPU_ID_PI_T62X: - issues = base_hw_issues_model_t62x; - break; - case GPU_ID_PI_T72X: - issues = base_hw_issues_model_t72x; - break; - case GPU_ID_PI_T76X: - issues = base_hw_issues_model_t76x; - break; - case GPU_ID_PI_TFRX: - issues = base_hw_issues_model_tFRx; - break; - case GPU_ID_PI_T86X: - issues = base_hw_issues_model_t86x; - break; - case GPU_ID_PI_T83X: - issues = base_hw_issues_model_t83x; - break; - case GPU_ID_PI_T82X: - issues = base_hw_issues_model_t82x; - break; - default: - dev_err(kbdev->dev, "Unknown GPU ID %x", gpu_id); - return -EINVAL; - } - } - - dev_info(kbdev->dev, "GPU identified as 0x%04x r%dp%d status %d", (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >> GPU_ID_VERSION_PRODUCT_ID_SHIFT, (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT, (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT, (gpu_id & GPU_ID_VERSION_STATUS) >> GPU_ID_VERSION_STATUS_SHIFT); - - for (; *issues != BASE_HW_ISSUE_END; issues++) - set_bit(*issues, &kbdev->hw_issues_mask[0]); - - return 0; -} diff 
--git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hw.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hw.h deleted file mode 100755 index fce7d29558e5a..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hw.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file - * Run-time work-arounds helpers - */ - -#ifndef _KBASE_HW_H_ -#define _KBASE_HW_H_ - -#include "mali_kbase_defs.h" - -/** - * @brief Tell whether a work-around should be enabled - */ -#define kbase_hw_has_issue(kbdev, issue)\ - test_bit(issue, &(kbdev)->hw_issues_mask[0]) - -/** - * @brief Tell whether a feature is supported - */ -#define kbase_hw_has_feature(kbdev, feature)\ - test_bit(feature, &(kbdev)->hw_features_mask[0]) - -/** - * @brief Set the HW issues mask depending on the GPU ID - */ -int kbase_hw_set_issues_mask(struct kbase_device *kbdev); - -/** - * @brief Set the features mask depending on the GPU ID - */ -void kbase_hw_set_features_mask(struct kbase_device *kbdev); - -#endif /* _KBASE_HW_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_backend.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_backend.h deleted file mode 100755 index b09be99e6b4e1..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_backend.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. 
All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -/* - * HW access backend common APIs - */ - -#ifndef _KBASE_HWACCESS_BACKEND_H_ -#define _KBASE_HWACCESS_BACKEND_H_ - -/** - * kbase_backend_early_init - Perform any backend-specific initialization. - * @kbdev: Device pointer - * - * Return: 0 on success, or an error code on failure. - */ -int kbase_backend_early_init(struct kbase_device *kbdev); - -/** - * kbase_backend_late_init - Perform any backend-specific initialization. - * @kbdev: Device pointer - * - * Return: 0 on success, or an error code on failure. - */ -int kbase_backend_late_init(struct kbase_device *kbdev); - -/** - * kbase_backend_early_term - Perform any backend-specific termination. - * @kbdev: Device pointer - */ -void kbase_backend_early_term(struct kbase_device *kbdev); - -/** - * kbase_backend_late_term - Perform any backend-specific termination. - * @kbdev: Device pointer - */ -void kbase_backend_late_term(struct kbase_device *kbdev); - -#endif /* _KBASE_HWACCESS_BACKEND_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h deleted file mode 100755 index 261453e8f1ac9..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -/** - * @file mali_kbase_hwaccess_gpu_defs.h - * HW access common definitions - */ - -#ifndef _KBASE_HWACCESS_DEFS_H_ -#define _KBASE_HWACCESS_DEFS_H_ - -#include - -/* The kbasep_js_device_data::runpool_irq::lock (a spinlock) must be held when - * accessing this structure */ -struct kbase_hwaccess_data { - struct kbase_context *active_kctx; - - struct kbase_backend_data backend; -}; - -#endif /* _KBASE_HWACCESS_DEFS_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h deleted file mode 100755 index f93ca9d86802b..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - -/** - * Base kernel property query backend APIs - */ - -#ifndef _KBASE_HWACCESS_GPUPROPS_H_ -#define _KBASE_HWACCESS_GPUPROPS_H_ - -/** - * kbase_backend_gpuprops_get() - Fill @regdump with GPU properties read from - * GPU - * @kbdev: Device pointer - * @regdump: Pointer to struct kbase_gpuprops_regdump structure - */ -void kbase_backend_gpuprops_get(struct kbase_device *kbdev, - struct kbase_gpuprops_regdump *regdump); - -#endif /* _KBASE_HWACCESS_GPUPROPS_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h deleted file mode 100755 index 5de2b7535bb47..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h +++ /dev/null @@ -1,116 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/* - * HW Access instrumentation common APIs - */ - -#ifndef _KBASE_HWACCESS_INSTR_H_ -#define _KBASE_HWACCESS_INSTR_H_ - -#include - -/** - * kbase_instr_hwcnt_enable_internal - Enable HW counters collection - * @kbdev: Kbase device - * @kctx: Kbase context - * @setup: HW counter setup parameters - * - * Context: might sleep, waiting for reset to complete - * - * Return: 0 on success - */ -int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev, - struct kbase_context *kctx, - struct kbase_uk_hwcnt_setup *setup); - -/** - * kbase_instr_hwcnt_disable_internal - Disable HW counters collection - * @kctx: Kbase context - * - * Context: might sleep, waiting for an ongoing dump to complete - * - * Return: 0 on success - */ -int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx); - -/** - * kbase_instr_hwcnt_request_dump() - Request HW counter dump from GPU - * @kctx: Kbase context - * - * Caller must either wait for kbase_instr_hwcnt_dump_complete() to return true, - * of call kbase_instr_hwcnt_wait_for_dump(). - * - * Return: 0 on success - */ -int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx); - -/** - * kbase_instr_hwcnt_wait_for_dump() - Wait until pending HW counter dump has - * completed. - * @kctx: Kbase context - * - * Context: will sleep, waiting for dump to complete - * - * Return: 0 on success - */ -int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx); - -/** - * kbase_instr_hwcnt_dump_complete - Tell whether the HW counters dump has - * completed - * @kctx: Kbase context - * @success: Set to true if successful - * - * Context: does not sleep. 
- * - * Return: true if the dump is complete - */ -bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx, - bool * const success); - -/** - * kbase_instr_hwcnt_clear() - Clear HW counters - * @kctx: Kbase context - * - * Context: might sleep, waiting for reset to complete - * - * Return: 0 on success - */ -int kbase_instr_hwcnt_clear(struct kbase_context *kctx); - -/** - * kbase_instr_backend_init() - Initialise the instrumentation backend - * @kbdev: Kbase device - * - * This function should be called during driver initialization. - * - * Return: 0 on success - */ -int kbase_instr_backend_init(struct kbase_device *kbdev); - -/** - * kbase_instr_backend_init() - Terminate the instrumentation backend - * @kbdev: Kbase device - * - * This function should be called during driver termination. - */ -void kbase_instr_backend_term(struct kbase_device *kbdev); - -#endif /* _KBASE_HWACCESS_INSTR_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h deleted file mode 100755 index 6bddaa81073b7..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - -/* - * HW access job manager common APIs - */ - -#ifndef _KBASE_HWACCESS_JM_H_ -#define _KBASE_HWACCESS_JM_H_ - -/** - * kbase_backend_run_atom() - Run an atom on the GPU - * @kbdev: Device pointer - * @atom: Atom to run - * - * Caller must hold the HW access lock - */ -void kbase_backend_run_atom(struct kbase_device *kbdev, - struct kbase_jd_atom *katom); - -/** - * kbase_backend_find_free_address_space() - Find a free address space. - * @kbdev: Device pointer - * @kctx: Context pointer - * - * If no address spaces are currently free, then this function can evict an - * idle context from the runpool, freeing up the address space it was using. - * - * The address space is marked as in use. The caller must either assign a - * context using kbase_gpu_use_ctx(), or release it using - * kbase_gpu_release_free_address_space() - * - * Return: Number of free address space, or KBASEP_AS_NR_INVALID if none - * available - */ -int kbase_backend_find_free_address_space(struct kbase_device *kbdev, - struct kbase_context *kctx); - -/** - * kbase_backend_release_free_address_space() - Release an address space. - * @kbdev: Device pointer - * @as_nr: Address space to release - * - * The address space must have been returned by - * kbase_gpu_find_free_address_space(). - */ -void kbase_backend_release_free_address_space(struct kbase_device *kbdev, - int as_nr); - -/** - * kbase_backend_use_ctx() - Activate a currently unscheduled context, using the - * provided address space. - * @kbdev: Device pointer - * @kctx: Context pointer. May be NULL - * @as_nr: Free address space to use - * - * kbase_gpu_next_job() will pull atoms from the active context. - * - * Return: true if successful, false if ASID not assigned. 
If kctx->as_pending - * is true then ASID assignment will complete at some point in the - * future and will re-start scheduling, otherwise no ASIDs are available - */ -bool kbase_backend_use_ctx(struct kbase_device *kbdev, - struct kbase_context *kctx, - int as_nr); - -/** - * kbase_backend_use_ctx_sched() - Activate a context. - * @kbdev: Device pointer - * @kctx: Context pointer - * - * kbase_gpu_next_job() will pull atoms from the active context. - * - * The context must already be scheduled and assigned to an address space. If - * the context is not scheduled, then kbase_gpu_use_ctx() should be used - * instead. - * - * Caller must hold runpool_irq.lock - * - * Return: true if context is now active, false otherwise (ie if context does - * not have an address space assigned) - */ -bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev, - struct kbase_context *kctx); - -/** - * kbase_backend_release_ctx_irq - Release a context from the GPU. This will - * de-assign the assigned address space. - * @kbdev: Device pointer - * @kctx: Context pointer - * - * Caller must hold as->transaction_mutex and runpool_irq.lock - */ -void kbase_backend_release_ctx_irq(struct kbase_device *kbdev, - struct kbase_context *kctx); - -/** - * kbase_backend_release_ctx_noirq - Release a context from the GPU. This will - * de-assign the assigned address space. - * @kbdev: Device pointer - * @kctx: Context pointer - * - * Caller must hold as->transaction_mutex - * - * This function must perform any operations that could not be performed in IRQ - * context by kbase_backend_release_ctx_irq(). - */ -void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev, - struct kbase_context *kctx); - -/** - * kbase_backend_complete_wq() - Perform backend-specific actions required on - * completing an atom. - * @kbdev: Device pointer - * @katom: Pointer to the atom to complete - * - * This function should only be called from kbase_jd_done_worker() or - * js_return_worker(). 
- * - * Return: true if atom has completed, false if atom should be re-submitted - */ -void kbase_backend_complete_wq(struct kbase_device *kbdev, - struct kbase_jd_atom *katom); - -/** - * kbase_backend_complete_wq_post_sched - Perform backend-specific actions - * required on completing an atom, after - * any scheduling has taken place. - * @kbdev: Device pointer - * @core_req: Core requirements of atom - * @affinity: Affinity of atom - * @coreref_state: Coreref state of atom - * - * This function should only be called from kbase_jd_done_worker() or - * js_return_worker(). - */ -void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev, - base_jd_core_req core_req, u64 affinity, - enum kbase_atom_coreref_state coreref_state); - -/** - * kbase_backend_reset() - The GPU is being reset. Cancel all jobs on the GPU - * and remove any others from the ringbuffers. - * @kbdev: Device pointer - * @end_timestamp: Timestamp of reset - */ -void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp); - -/** - * kbase_backend_inspect_head() - Return the atom currently at the head of slot - * @js - * @kbdev: Device pointer - * @js: Job slot to inspect - * - * Return : Atom currently at the head of slot @js, or NULL - */ -struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev, - int js); - -/** - * kbase_backend_inspect_tail - Return the atom currently at the tail of slot - * @js - * @kbdev: Device pointer - * @js: Job slot to inspect - * - * Return : Atom currently at the head of slot @js, or NULL - */ -struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev, - int js); - -/** - * kbase_backend_nr_atoms_on_slot() - Return the number of atoms currently on a - * slot. 
- * @kbdev: Device pointer - * @js: Job slot to inspect - * - * Return : Number of atoms currently on slot - */ -int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js); - -/** - * kbase_backend_nr_atoms_submitted() - Return the number of atoms on a slot - * that are currently on the GPU. - * @kbdev: Device pointer - * @js: Job slot to inspect - * - * Return : Number of atoms currently on slot @js that are currently on the GPU. - */ -int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js); - -/** - * kbase_backend_ctx_count_changed() - Number of contexts ready to submit jobs - * has changed. - * @kbdev: Device pointer - * - * Perform any required backend-specific actions (eg starting/stopping - * scheduling timers). - */ -void kbase_backend_ctx_count_changed(struct kbase_device *kbdev); - -/** - * kbase_backend_slot_free() - Return the number of jobs that can be currently - * submitted to slot @js. - * @kbdev: Device pointer - * @js: Job slot to inspect - * - * Return : Number of jobs that can be submitted. - */ -int kbase_backend_slot_free(struct kbase_device *kbdev, int js); - -/** - * kbase_job_check_enter_disjoint - potentially leave disjoint state - * @kbdev: kbase device - * @target_katom: atom which is finishing - * - * Work out whether to leave disjoint state when finishing an atom that was - * originated by kbase_job_check_enter_disjoint(). - */ -void kbase_job_check_leave_disjoint(struct kbase_device *kbdev, - struct kbase_jd_atom *target_katom); - -/** - * kbase_backend_jm_kill_jobs_from_kctx - Kill all jobs that are currently - * running from a context - * @kctx: Context pointer - * - * This is used in response to a page fault to remove all jobs from the faulting - * context from the hardware. - */ -void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx); - -/** - * kbase_jm_wait_for_zero_jobs - Wait for context to have zero jobs running, and - * to be descheduled. 
- * @kctx: Context pointer - * - * This should be called following kbase_js_zap_context(), to ensure the context - * can be safely destroyed. - */ -void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx); - -#if KBASE_GPU_RESET_EN -/** - * kbase_prepare_to_reset_gpu - Prepare for resetting the GPU. - * @kbdev: Device pointer - * - * This function just soft-stops all the slots to ensure that as many jobs as - * possible are saved. - * - * Return: a boolean which should be interpreted as follows: - * - true - Prepared for reset, kbase_reset_gpu should be called. - * - false - Another thread is performing a reset, kbase_reset_gpu should - * not be called. - */ -bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev); - -/** - * kbase_reset_gpu - Reset the GPU - * @kbdev: Device pointer - * - * This function should be called after kbase_prepare_to_reset_gpu if it returns - * true. It should never be called without a corresponding call to - * kbase_prepare_to_reset_gpu. - * - * After this function is called (or not called if kbase_prepare_to_reset_gpu - * returned false), the caller should wait for kbdev->reset_waitq to be - * signalled to know when the reset has completed. - */ -void kbase_reset_gpu(struct kbase_device *kbdev); - -/** - * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU. - * @kbdev: Device pointer - * - * This function just soft-stops all the slots to ensure that as many jobs as - * possible are saved. - * - * Return: a boolean which should be interpreted as follows: - * - true - Prepared for reset, kbase_reset_gpu should be called. - * - false - Another thread is performing a reset, kbase_reset_gpu should - * not be called. - */ -bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev); - -/** - * kbase_reset_gpu_locked - Reset the GPU - * @kbdev: Device pointer - * - * This function should be called after kbase_prepare_to_reset_gpu if it - * returns true. 
It should never be called without a corresponding call to - * kbase_prepare_to_reset_gpu. - * - * After this function is called (or not called if kbase_prepare_to_reset_gpu - * returned false), the caller should wait for kbdev->reset_waitq to be - * signalled to know when the reset has completed. - */ -void kbase_reset_gpu_locked(struct kbase_device *kbdev); -#endif - -/** - * kbase_job_slot_hardstop - Hard-stop the specified job slot - * @kctx: The kbase context that contains the job(s) that should - * be hard-stopped - * @js: The job slot to hard-stop - * @target_katom: The job that should be hard-stopped (or NULL for all - * jobs from the context) - * Context: - * The job slot lock must be held when calling this function. - */ -void kbase_job_slot_hardstop(struct kbase_context *kctx, int js, - struct kbase_jd_atom *target_katom); - -#endif /* _KBASE_HWACCESS_JM_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_pm.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_pm.h deleted file mode 100755 index dbdcd3def220d..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_pm.h +++ /dev/null @@ -1,206 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - -/** - * @file mali_kbase_hwaccess_pm.h - * HW access power manager common APIs - */ - -#ifndef _KBASE_HWACCESS_PM_H_ -#define _KBASE_HWACCESS_PM_H_ - -#include -#include - -#include - -/* Forward definition - see mali_kbase.h */ -struct kbase_device; - -/* Functions common to all HW access backends */ - -/** - * Initialize the power management framework. - * - * Must be called before any other power management function - * - * @param kbdev The kbase device structure for the device (must be a valid - * pointer) - * - * @return 0 if the power management framework was successfully - * initialized. - */ -int kbase_hwaccess_pm_init(struct kbase_device *kbdev); - -/** - * Terminate the power management framework. - * - * No power management functions may be called after this (except - * @ref kbase_pm_init) - * - * @param kbdev The kbase device structure for the device (must be a valid - * pointer) - */ -void kbase_hwaccess_pm_term(struct kbase_device *kbdev); - -/** - * kbase_hwaccess_pm_powerup - Power up the GPU. - * @kbdev: The kbase device structure for the device (must be a valid pointer) - * @flags: Flags to pass on to kbase_pm_init_hw - * - * Power up GPU after all modules have been initialized and interrupt handlers - * installed. - * - * Return: 0 if powerup was successful. - */ -int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev, - unsigned int flags); - -/** - * Halt the power management framework. - * - * Should ensure that no new interrupts are generated, but allow any currently - * running interrupt handlers to complete successfully. The GPU is forced off by - * the time this function returns, regardless of whether or not the active power - * policy asks for the GPU to be powered off. 
- * - * @param kbdev The kbase device structure for the device (must be a valid - * pointer) - */ -void kbase_hwaccess_pm_halt(struct kbase_device *kbdev); - -/** - * Perform any backend-specific actions to suspend the GPU - * - * @param kbdev The kbase device structure for the device (must be a valid - * pointer) - */ -void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev); - -/** - * Perform any backend-specific actions to resume the GPU from a suspend - * - * @param kbdev The kbase device structure for the device (must be a valid - * pointer) - */ -void kbase_hwaccess_pm_resume(struct kbase_device *kbdev); - -/** - * Perform any required actions for activating the GPU. Called when the first - * context goes active. - * - * @param kbdev The kbase device structure for the device (must be a valid - * pointer) - */ -void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev); - -/** - * Perform any required actions for idling the GPU. Called when the last - * context goes idle. - * - * @param kbdev The kbase device structure for the device (must be a valid - * pointer) - */ -void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev); - - -/** - * Set the debug core mask. - * - * This determines which cores the power manager is allowed to use. - * - * @param kbdev The kbase device structure for the device (must be a - * valid pointer) - * @param new_core_mask The core mask to use - */ -void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, - u64 new_core_mask); - - -/** - * Get the current policy. - * - * Returns the policy that is currently active. - * - * @param kbdev The kbase device structure for the device (must be a valid - * pointer) - * - * @return The current policy - */ -const struct kbase_pm_ca_policy -*kbase_pm_ca_get_policy(struct kbase_device *kbdev); - -/** - * Change the policy to the one specified. 
- * - * @param kbdev The kbase device structure for the device (must be a valid - * pointer) - * @param policy The policy to change to (valid pointer returned from - * @ref kbase_pm_ca_list_policies) - */ -void kbase_pm_ca_set_policy(struct kbase_device *kbdev, - const struct kbase_pm_ca_policy *policy); - -/** - * Retrieve a static list of the available policies. - * - * @param[out] policies An array pointer to take the list of policies. This may - * be NULL. The contents of this array must not be - * modified. - * - * @return The number of policies - */ -int -kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **policies); - - -/** - * Get the current policy. - * - * Returns the policy that is currently active. - * - * @param kbdev The kbase device structure for the device (must be a valid - * pointer) - * - * @return The current policy - */ -const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev); - -/** - * Change the policy to the one specified. - * - * @param kbdev The kbase device structure for the device (must be a valid - * pointer) - * @param policy The policy to change to (valid pointer returned from - * @ref kbase_pm_list_policies) - */ -void kbase_pm_set_policy(struct kbase_device *kbdev, - const struct kbase_pm_policy *policy); - -/** - * Retrieve a static list of the available policies. - * - * @param[out] policies An array pointer to take the list of policies. This may - * be NULL. The contents of this array must not be - * modified. 
- * - * @return The number of policies - */ -int kbase_pm_list_policies(const struct kbase_pm_policy * const **policies); - -#endif /* _KBASE_HWACCESS_PM_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h deleted file mode 100755 index 89d26eaf09a4d..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -/** - * - */ - -#ifndef _KBASE_BACKEND_TIME_H_ -#define _KBASE_BACKEND_TIME_H_ - -/** - * kbase_backend_get_gpu_time() - Get current GPU time - * @kbdev: Device pointer - * @cycle_counter: Pointer to u64 to store cycle counter in - * @system_time: Pointer to u64 to store system time in - * @ts: Pointer to struct timespec to store current monotonic - * time in - */ -void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter, - u64 *system_time, struct timespec *ts); - -/** - * kbase_wait_write_flush() - Wait for GPU write flush - * @kctx: Context pointer - * - * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush - * its write buffer. - * - * If GPU resets occur then the counters are reset to zero, the delay may not be - * as expected. 
- * - * This function is only in use for BASE_HW_ISSUE_6367 - */ -#ifndef CONFIG_MALI_NO_MALI -void kbase_wait_write_flush(struct kbase_context *kctx); -#endif - -#endif /* _KBASE_BACKEND_TIME_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwcnt_reader.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwcnt_reader.h deleted file mode 100755 index cf7bf1b35dc59..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_hwcnt_reader.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifndef _KBASE_HWCNT_READER_H_ -#define _KBASE_HWCNT_READER_H_ - -/* The ids of ioctl commands. 
*/ -#define KBASE_HWCNT_READER 0xBE -#define KBASE_HWCNT_READER_GET_HWVER _IOR(KBASE_HWCNT_READER, 0x00, u32) -#define KBASE_HWCNT_READER_GET_BUFFER_SIZE _IOR(KBASE_HWCNT_READER, 0x01, u32) -#define KBASE_HWCNT_READER_DUMP _IOW(KBASE_HWCNT_READER, 0x10, u32) -#define KBASE_HWCNT_READER_CLEAR _IOW(KBASE_HWCNT_READER, 0x11, u32) -#define KBASE_HWCNT_READER_GET_BUFFER _IOR(KBASE_HWCNT_READER, 0x20,\ - struct kbase_hwcnt_reader_metadata) -#define KBASE_HWCNT_READER_PUT_BUFFER _IOW(KBASE_HWCNT_READER, 0x21,\ - struct kbase_hwcnt_reader_metadata) -#define KBASE_HWCNT_READER_SET_INTERVAL _IOW(KBASE_HWCNT_READER, 0x30, u32) -#define KBASE_HWCNT_READER_ENABLE_EVENT _IOW(KBASE_HWCNT_READER, 0x40, u32) -#define KBASE_HWCNT_READER_DISABLE_EVENT _IOW(KBASE_HWCNT_READER, 0x41, u32) -#define KBASE_HWCNT_READER_GET_API_VERSION _IOW(KBASE_HWCNT_READER, 0xFF, u32) - -/** - * struct kbase_hwcnt_reader_metadata - hwcnt reader sample buffer metadata - * @timestamp: time when sample was collected - * @event_id: id of an event that triggered sample collection - * @buffer_idx: position in sampling area where sample buffer was stored - */ -struct kbase_hwcnt_reader_metadata { - u64 timestamp; - u32 event_id; - u32 buffer_idx; -}; - -/** - * enum base_hwcnt_reader_event - hwcnt dumping events - * @BASE_HWCNT_READER_EVENT_MANUAL: manual request for dump - * @BASE_HWCNT_READER_EVENT_PERIODIC: periodic dump - * @BASE_HWCNT_READER_EVENT_PREJOB: prejob dump request - * @BASE_HWCNT_READER_EVENT_POSTJOB: postjob dump request - * @BASE_HWCNT_READER_EVENT_COUNT: number of supported events - */ -enum base_hwcnt_reader_event { - BASE_HWCNT_READER_EVENT_MANUAL, - BASE_HWCNT_READER_EVENT_PERIODIC, - BASE_HWCNT_READER_EVENT_PREJOB, - BASE_HWCNT_READER_EVENT_POSTJOB, - - BASE_HWCNT_READER_EVENT_COUNT -}; - -#endif /* _KBASE_HWCNT_READER_H_ */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_instr.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_instr.c 
deleted file mode 100755 index 314ae0819d501..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_instr.c +++ /dev/null @@ -1,137 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Base kernel instrumentation APIs. - */ - -#include -#include - -void kbase_instr_hwcnt_suspend(struct kbase_device *kbdev) -{ - struct kbase_context *kctx; - - KBASE_DEBUG_ASSERT(kbdev); - KBASE_DEBUG_ASSERT(!kbdev->hwcnt.suspended_kctx); - - kctx = kbdev->hwcnt.kctx; - kbdev->hwcnt.suspended_kctx = kctx; - - /* Relevant state was saved into hwcnt.suspended_state when enabling the - * counters */ - - if (kctx) { - KBASE_DEBUG_ASSERT(kctx->jctx.sched_info.ctx.flags & - KBASE_CTX_FLAG_PRIVILEGED); - kbase_instr_hwcnt_disable(kctx); - } -} - -void kbase_instr_hwcnt_resume(struct kbase_device *kbdev) -{ - struct kbase_context *kctx; - - KBASE_DEBUG_ASSERT(kbdev); - - kctx = kbdev->hwcnt.suspended_kctx; - kbdev->hwcnt.suspended_kctx = NULL; - - if (kctx) { - int err; - - err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, - &kbdev->hwcnt.suspended_state); - WARN(err, "Failed to restore instrumented hardware counters on resume\n"); - } -} - -int kbase_instr_hwcnt_enable(struct kbase_context *kctx, - struct kbase_uk_hwcnt_setup *setup) -{ - struct kbase_device *kbdev; - bool access_allowed; - int err; - - kbdev = kctx->kbdev; - - /* Determine if the calling task has access to this capability */ - access_allowed = kbase_security_has_capability(kctx, - 
KBASE_SEC_INSTR_HW_COUNTERS_COLLECT, - KBASE_SEC_FLAG_NOAUDIT); - if (!access_allowed) - return -EINVAL; - - /* Mark the context as active so the GPU is kept turned on */ - /* A suspend won't happen here, because we're in a syscall from a - * userspace thread. */ - kbase_pm_context_active(kbdev); - - /* Schedule the context in */ - kbasep_js_schedule_privileged_ctx(kbdev, kctx); - err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, setup); - if (err) { - /* Release the context. This had its own Power Manager Active - * reference */ - kbasep_js_release_privileged_ctx(kbdev, kctx); - - /* Also release our Power Manager Active reference */ - kbase_pm_context_idle(kbdev); - } - - return err; -} -KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_enable); - -int kbase_instr_hwcnt_disable(struct kbase_context *kctx) -{ - int err = -EINVAL; - struct kbase_device *kbdev = kctx->kbdev; - - err = kbase_instr_hwcnt_disable_internal(kctx); - if (err) - goto out; - - /* Release the context. This had its own Power Manager Active reference - */ - kbasep_js_release_privileged_ctx(kbdev, kctx); - - /* Also release our Power Manager Active reference */ - kbase_pm_context_idle(kbdev); - - dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p", - kctx); -out: - return err; -} -KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_disable); - -int kbase_instr_hwcnt_dump(struct kbase_context *kctx) -{ - int err; - - err = kbase_instr_hwcnt_request_dump(kctx); - if (err) - return err; - - err = kbase_instr_hwcnt_wait_for_dump(kctx); - return err; -} -KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump); - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_instr.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_instr.h deleted file mode 100755 index ac3355e53634d..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_instr.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Instrumentation API definitions - */ - -#ifndef _KBASE_INSTR_H_ -#define _KBASE_INSTR_H_ - -#include - -/** - * kbase_instr_hwcnt_enable() - Enable HW counters collection - * @kctx: Kbase context - * @setup: &struct kbase_uk_hwcnt_setup containing configuration - * - * Return: 0 on success - */ -int kbase_instr_hwcnt_enable(struct kbase_context *kctx, - struct kbase_uk_hwcnt_setup *setup); - -/** - * kbase_instr_hwcnt_disable() - Disable HW counters collection - * @kctx: Kbase context - * - * Return: 0 on success - */ -int kbase_instr_hwcnt_disable(struct kbase_context *kctx); - -/** - * kbase_instr_hwcnt_dump() - Trigger dump of HW counters and wait for - * completion - * @kctx: Kbase context - * - * Context: might sleep, waiting for dump to complete - * - * Return: 0 on success - */ -int kbase_instr_hwcnt_dump(struct kbase_context *kctx); - -/** - * kbase_instr_hwcnt_suspend() - GPU is suspending, stop HW counter collection - * @kbdev: Kbase device - * - * It's assumed that there's only one privileged context. 
- * - * Safe to do this without lock when doing an OS suspend, because it only - * changes in response to user-space IOCTLs - */ -void kbase_instr_hwcnt_suspend(struct kbase_device *kbdev); - -/** - * kbase_instr_hwcnt_resume() - GPU is resuming, resume HW counter collection - * @kbdev: Kbase device - */ -void kbase_instr_hwcnt_resume(struct kbase_device *kbdev); - -#endif /* _KBASE_INSTR_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_ipa.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_ipa.c deleted file mode 100755 index 433103c0d331d..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_ipa.c +++ /dev/null @@ -1,264 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - -#include -#include - -#include - -#define NR_IPA_GROUPS 8 - -/** - * struct ipa_group - represents a single IPA group - * @name: name of the IPA group - * @capacitance: capacitance constant for IPA group - */ -struct ipa_group { - const char *name; - u32 capacitance; -}; - -/** - * struct kbase_ipa_context - IPA context per device - * @kbdev: pointer to kbase device - * @groups: array of IPA groups for this context - * @ipa_lock: protects the entire IPA context - */ -struct kbase_ipa_context { - struct kbase_device *kbdev; - struct ipa_group groups[NR_IPA_GROUPS]; - struct mutex ipa_lock; -}; - -static struct ipa_group ipa_groups_def_v4[] = { - { .name = "group0", .capacitance = 0 }, - { .name = "group1", .capacitance = 0 }, - { .name = "group2", .capacitance = 0 }, - { .name = "group3", .capacitance = 0 }, - { .name = "group4", .capacitance = 0 }, - { .name = "group5", .capacitance = 0 }, - { .name = "group6", .capacitance = 0 }, - { .name = "group7", .capacitance = 0 }, -}; - -static struct ipa_group ipa_groups_def_v5[] = { - { .name = "group0", .capacitance = 0 }, - { .name = "group1", .capacitance = 0 }, - { .name = "group2", .capacitance = 0 }, - { .name = "group3", .capacitance = 0 }, - { .name = "group4", .capacitance = 0 }, - { .name = "group5", .capacitance = 0 }, - { .name = "group6", .capacitance = 0 }, - { .name = "group7", .capacitance = 0 }, -}; - -static ssize_t show_ipa_group(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct kbase_device *kbdev = dev_get_drvdata(dev); - struct kbase_ipa_context *ctx = kbdev->ipa_ctx; - ssize_t count = -EINVAL; - size_t i; - - mutex_lock(&ctx->ipa_lock); - for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) { - if (!strcmp(ctx->groups[i].name, attr->attr.name)) { - count = snprintf(buf, PAGE_SIZE, "%lu\n", - (unsigned long)ctx->groups[i].capacitance); - break; - } - } - mutex_unlock(&ctx->ipa_lock); - return count; -} - -static ssize_t set_ipa_group(struct device *dev, - struct 
device_attribute *attr, - const char *buf, - size_t count) -{ - struct kbase_device *kbdev = dev_get_drvdata(dev); - struct kbase_ipa_context *ctx = kbdev->ipa_ctx; - unsigned long capacitance; - size_t i; - int err; - - err = kstrtoul(buf, 0, &capacitance); - if (err < 0) - return err; - if (capacitance > U32_MAX) - return -ERANGE; - - mutex_lock(&ctx->ipa_lock); - for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) { - if (!strcmp(ctx->groups[i].name, attr->attr.name)) { - ctx->groups[i].capacitance = capacitance; - mutex_unlock(&ctx->ipa_lock); - return count; - } - } - mutex_unlock(&ctx->ipa_lock); - return -EINVAL; -} - -static DEVICE_ATTR(group0, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group); -static DEVICE_ATTR(group1, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group); -static DEVICE_ATTR(group2, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group); -static DEVICE_ATTR(group3, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group); -static DEVICE_ATTR(group4, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group); -static DEVICE_ATTR(group5, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group); -static DEVICE_ATTR(group6, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group); -static DEVICE_ATTR(group7, S_IRUGO | S_IWUSR, show_ipa_group, set_ipa_group); - -static struct attribute *kbase_ipa_attrs[] = { - &dev_attr_group0.attr, - &dev_attr_group1.attr, - &dev_attr_group2.attr, - &dev_attr_group3.attr, - &dev_attr_group4.attr, - &dev_attr_group5.attr, - &dev_attr_group6.attr, - &dev_attr_group7.attr, - NULL, -}; - -static struct attribute_group kbase_ipa_attr_group = { - .name = "ipa", - .attrs = kbase_ipa_attrs, -}; - -static void init_ipa_groups(struct kbase_ipa_context *ctx) -{ - struct kbase_device *kbdev = ctx->kbdev; - struct ipa_group *defs; - size_t i, len; - - if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_V4)) { - defs = ipa_groups_def_v4; - len = ARRAY_SIZE(ipa_groups_def_v4); - } else { - defs = ipa_groups_def_v5; - len = ARRAY_SIZE(ipa_groups_def_v5); - } - - for (i = 
0; i < len; i++) { - ctx->groups[i].name = defs[i].name; - ctx->groups[i].capacitance = defs[i].capacitance; - } -} - -#if defined(CONFIG_OF) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) -static int update_ipa_groups_from_dt(struct kbase_ipa_context *ctx) -{ - struct kbase_device *kbdev = ctx->kbdev; - struct device_node *np, *child; - struct ipa_group *group; - size_t nr_groups; - size_t i; - int err; - - np = of_find_node_by_name(kbdev->dev->of_node, "ipa-groups"); - if (!np) - return 0; - - nr_groups = 0; - for_each_available_child_of_node(np, child) - nr_groups++; - if (!nr_groups || nr_groups > ARRAY_SIZE(ctx->groups)) { - dev_err(kbdev->dev, "invalid number of IPA groups: %zu", nr_groups); - err = -EINVAL; - goto err0; - } - - for_each_available_child_of_node(np, child) { - const char *name; - u32 capacitance; - - name = of_get_property(child, "label", NULL); - if (!name) { - dev_err(kbdev->dev, "label missing for IPA group"); - err = -EINVAL; - goto err0; - } - err = of_property_read_u32(child, "capacitance", - &capacitance); - if (err < 0) { - dev_err(kbdev->dev, "capacitance missing for IPA group"); - goto err0; - } - - for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) { - group = &ctx->groups[i]; - if (!strcmp(group->name, name)) { - group->capacitance = capacitance; - break; - } - } - } - - of_node_put(np); - return 0; -err0: - of_node_put(np); - return err; -} -#else -static int update_ipa_groups_from_dt(struct kbase_ipa_context *ctx) -{ - return 0; -} -#endif - -static int reset_ipa_groups(struct kbase_ipa_context *ctx) -{ - init_ipa_groups(ctx); - return update_ipa_groups_from_dt(ctx); -} - -struct kbase_ipa_context *kbase_ipa_init(struct kbase_device *kbdev) -{ - struct kbase_ipa_context *ctx; - int err; - - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return NULL; - - mutex_init(&ctx->ipa_lock); - ctx->kbdev = kbdev; - - err = reset_ipa_groups(ctx); - if (err < 0) - goto err0; - - err = sysfs_create_group(&kbdev->dev->kobj, 
&kbase_ipa_attr_group); - if (err < 0) - goto err0; - - return ctx; -err0: - kfree(ctx); - return NULL; -} - -void kbase_ipa_term(struct kbase_ipa_context *ctx) -{ - struct kbase_device *kbdev = ctx->kbdev; - - sysfs_remove_group(&kbdev->dev->kobj, &kbase_ipa_attr_group); - kfree(ctx); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_ipa.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_ipa.h deleted file mode 100755 index ed123759ff3c9..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_ipa.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -struct kbase_ipa_context; - -/** - * kbase_ipa_init - initialize the kbase ipa core - * @kbdev: kbase device - * - * Return: pointer to the IPA context or NULL on failure - */ -struct kbase_ipa_context *kbase_ipa_init(struct kbase_device *kbdev); - -/** - * kbase_ipa_term - terminate the kbase ipa core - * @ctx: pointer to the IPA context - */ -void kbase_ipa_term(struct kbase_ipa_context *ctx); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jd.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jd.c deleted file mode 100755 index dd2d187d5cd92..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jd.c +++ /dev/null @@ -1,1795 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#if defined(CONFIG_DMA_SHARED_BUFFER) -#include -#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */ -#ifdef CONFIG_COMPAT -#include -#endif -#include -#include -#ifdef CONFIG_UMP -#include -#endif /* CONFIG_UMP */ -#include -#include -#include - -#include -#include - -#if defined(CONFIG_MALI_MIPE_ENABLED) -#include -#endif - -#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a) - -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) -/* random32 was renamed to prandom_u32 in 3.8 */ -#define prandom_u32 random32 -#endif - -/* Return whether katom will run on the GPU or not. Currently only soft jobs and - * dependency-only atoms do not run on the GPU */ -#define IS_GPU_ATOM(katom) (!((katom->core_req & BASE_JD_REQ_SOFT_JOB) || \ - ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) == \ - BASE_JD_REQ_DEP))) -/* - * This is the kernel side of the API. Only entry points are: - * - kbase_jd_submit(): Called from userspace to submit a single bag - * - kbase_jd_done(): Called from interrupt context to track the - * completion of a job. - * Callouts: - * - to the job manager (enqueue a job) - * - to the event subsystem (signals the completion/failure of bag/job-chains). 
- */ - -static void __user * -get_compat_pointer(struct kbase_context *kctx, const union kbase_pointer *p) -{ -#ifdef CONFIG_COMPAT - if (kctx->is_compat) - return compat_ptr(p->compat_value); -#endif - return p->value; -} - -/* Runs an atom, either by handing to the JS or by immediately running it in the case of soft-jobs - * - * Returns whether the JS needs a reschedule. - * - * Note that the caller must also check the atom status and - * if it is KBASE_JD_ATOM_STATE_COMPLETED must call jd_done_nolock - */ -static int jd_run_atom(struct kbase_jd_atom *katom) -{ - struct kbase_context *kctx = katom->kctx; - - KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED); - - if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) { - /* Dependency only atom */ - katom->status = KBASE_JD_ATOM_STATE_COMPLETED; - return 0; - } else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) { - /* Soft-job */ - if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) - == BASE_JD_REQ_SOFT_REPLAY) { - if (!kbase_replay_process(katom)) - katom->status = KBASE_JD_ATOM_STATE_COMPLETED; - } else if (kbase_process_soft_job(katom) == 0) { - kbase_finish_soft_job(katom); - katom->status = KBASE_JD_ATOM_STATE_COMPLETED; - } else { - /* The job has not completed */ - list_add_tail(&katom->dep_item[0], &kctx->waiting_soft_jobs); - } - return 0; - } - - katom->status = KBASE_JD_ATOM_STATE_IN_JS; - /* Queue an action about whether we should try scheduling a context */ - return kbasep_js_add_job(kctx, katom); -} - -#ifdef CONFIG_KDS - -/* Add the katom to the kds waiting list. - * Atoms must be added to the waiting list after a successful call to kds_async_waitall. - * The caller must hold the kbase_jd_context.lock */ - -static void kbase_jd_kds_waiters_add(struct kbase_jd_atom *katom) -{ - struct kbase_context *kctx; - - KBASE_DEBUG_ASSERT(katom); - - kctx = katom->kctx; - - list_add_tail(&katom->node, &kctx->waiting_kds_resource); -} - -/* Remove the katom from the kds waiting list. 
- * Atoms must be removed from the waiting list before a call to kds_resource_set_release_sync. - * The supplied katom must first have been added to the list with a call to kbase_jd_kds_waiters_add. - * The caller must hold the kbase_jd_context.lock */ - -static void kbase_jd_kds_waiters_remove(struct kbase_jd_atom *katom) -{ - KBASE_DEBUG_ASSERT(katom); - list_del(&katom->node); -} - -static void kds_dep_clear(void *callback_parameter, void *callback_extra_parameter) -{ - struct kbase_jd_atom *katom; - struct kbase_jd_context *ctx; - struct kbase_device *kbdev; - - katom = (struct kbase_jd_atom *)callback_parameter; - KBASE_DEBUG_ASSERT(katom); - ctx = &katom->kctx->jctx; - kbdev = katom->kctx->kbdev; - KBASE_DEBUG_ASSERT(kbdev); - - mutex_lock(&ctx->lock); - - /* KDS resource has already been satisfied (e.g. due to zapping) */ - if (katom->kds_dep_satisfied) - goto out; - - /* This atom's KDS dependency has now been met */ - katom->kds_dep_satisfied = true; - - /* Check whether the atom's other dependencies were already met. If - * katom is a GPU atom then the job scheduler may be able to represent - * the dependencies, hence we may attempt to submit it before they are - * met. 
Other atoms must have had both dependencies resolved */ - if (IS_GPU_ATOM(katom) || - (!kbase_jd_katom_dep_atom(&katom->dep[0]) && - !kbase_jd_katom_dep_atom(&katom->dep[1]))) { - /* katom dep complete, attempt to run it */ - bool resched = false; - - resched = jd_run_atom(katom); - - if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) { - /* The atom has already finished */ - resched |= jd_done_nolock(katom, NULL); - } - - if (resched) - kbase_js_sched_all(kbdev); - } - out: - mutex_unlock(&ctx->lock); -} - -static void kbase_cancel_kds_wait_job(struct kbase_jd_atom *katom) -{ - KBASE_DEBUG_ASSERT(katom); - - /* Prevent job_done_nolock from being called twice on an atom when - * there is a race between job completion and cancellation */ - - if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) { - /* Wait was cancelled - zap the atom */ - katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; - if (jd_done_nolock(katom, NULL)) - kbase_js_sched_all(katom->kctx->kbdev); - } -} -#endif /* CONFIG_KDS */ - -#ifdef CONFIG_DMA_SHARED_BUFFER -static int kbase_jd_umm_map(struct kbase_context *kctx, struct kbase_va_region *reg) -{ - struct sg_table *sgt; - struct scatterlist *s; - int i; - phys_addr_t *pa; - int err; - size_t count = 0; - struct kbase_mem_phy_alloc *alloc; - - alloc = reg->gpu_alloc; - - KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM); - KBASE_DEBUG_ASSERT(NULL == alloc->imported.umm.sgt); - sgt = dma_buf_map_attachment(alloc->imported.umm.dma_attachment, DMA_BIDIRECTIONAL); - - if (IS_ERR_OR_NULL(sgt)) - return -EINVAL; - - /* save for later */ - alloc->imported.umm.sgt = sgt; - - pa = kbase_get_gpu_phy_pages(reg); - KBASE_DEBUG_ASSERT(pa); - - for_each_sg(sgt->sgl, s, sgt->nents, i) { - int j; - size_t pages = PFN_UP(sg_dma_len(s)); - - WARN_ONCE(sg_dma_len(s) & (PAGE_SIZE-1), - "sg_dma_len(s)=%u is not a multiple of PAGE_SIZE\n", - sg_dma_len(s)); - - WARN_ONCE(sg_dma_address(s) & (PAGE_SIZE-1), - "sg_dma_address(s)=%llx is not aligned to 
PAGE_SIZE\n", - (unsigned long long) sg_dma_address(s)); - - for (j = 0; (j < pages) && (count < reg->nr_pages); j++, count++) - *pa++ = sg_dma_address(s) + (j << PAGE_SHIFT); - WARN_ONCE(j < pages, - "sg list from dma_buf_map_attachment > dma_buf->size=%zu\n", - alloc->imported.umm.dma_buf->size); - } - - if (WARN_ONCE(count < reg->nr_pages, - "sg list from dma_buf_map_attachment < dma_buf->size=%zu\n", - alloc->imported.umm.dma_buf->size)) { - err = -EINVAL; - goto out; - } - - /* Update nents as we now have pages to map */ - alloc->nents = count; - - err = kbase_mmu_insert_pages(kctx, reg->start_pfn, kbase_get_gpu_phy_pages(reg), kbase_reg_current_backed_size(reg), reg->flags | KBASE_REG_GPU_WR | KBASE_REG_GPU_RD); - -out: - if (err) { - dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment, alloc->imported.umm.sgt, DMA_BIDIRECTIONAL); - alloc->imported.umm.sgt = NULL; - } - - return err; -} - -static void kbase_jd_umm_unmap(struct kbase_context *kctx, struct kbase_mem_phy_alloc *alloc) -{ - KBASE_DEBUG_ASSERT(kctx); - KBASE_DEBUG_ASSERT(alloc); - KBASE_DEBUG_ASSERT(alloc->imported.umm.dma_attachment); - KBASE_DEBUG_ASSERT(alloc->imported.umm.sgt); - dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment, - alloc->imported.umm.sgt, DMA_BIDIRECTIONAL); - alloc->imported.umm.sgt = NULL; - alloc->nents = 0; -} -#endif /* CONFIG_DMA_SHARED_BUFFER */ - -void kbase_jd_free_external_resources(struct kbase_jd_atom *katom) -{ -#ifdef CONFIG_KDS - if (katom->kds_rset) { - struct kbase_jd_context *jctx = &katom->kctx->jctx; - - /* - * As the atom is no longer waiting, remove it from - * the waiting list. 
- */ - - mutex_lock(&jctx->lock); - kbase_jd_kds_waiters_remove(katom); - mutex_unlock(&jctx->lock); - - /* Release the kds resource or cancel if zapping */ - kds_resource_set_release_sync(&katom->kds_rset); - } -#endif /* CONFIG_KDS */ -} - -static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom) -{ - KBASE_DEBUG_ASSERT(katom); - KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES); - -#ifdef CONFIG_KDS - /* Prevent the KDS resource from triggering the atom in case of zapping */ - if (katom->kds_rset) - katom->kds_dep_satisfied = true; -#endif /* CONFIG_KDS */ - - kbase_gpu_vm_lock(katom->kctx); - /* only roll back if extres is non-NULL */ - if (katom->extres) { - u32 res_no; - - res_no = katom->nr_extres; - while (res_no-- > 0) { - struct kbase_mem_phy_alloc *alloc; - - alloc = katom->extres[res_no].alloc; -#ifdef CONFIG_DMA_SHARED_BUFFER - if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) { - alloc->imported.umm.current_mapping_usage_count--; - - if (0 == alloc->imported.umm.current_mapping_usage_count) { - struct kbase_va_region *reg; - - reg = kbase_region_tracker_find_region_base_address( - katom->kctx, - katom->extres[res_no].gpu_address); - - if (reg && reg->gpu_alloc == alloc) - kbase_mmu_teardown_pages( - katom->kctx, - reg->start_pfn, - kbase_reg_current_backed_size(reg)); - - kbase_jd_umm_unmap(katom->kctx, alloc); - } - } -#endif /* CONFIG_DMA_SHARED_BUFFER */ - kbase_mem_phy_alloc_put(alloc); - } - kfree(katom->extres); - katom->extres = NULL; - } - kbase_gpu_vm_unlock(katom->kctx); -} - -#if (defined(CONFIG_KDS) && defined(CONFIG_UMP)) || defined(CONFIG_DMA_SHARED_BUFFER_USES_KDS) -static void add_kds_resource(struct kds_resource *kds_res, struct kds_resource **kds_resources, u32 *kds_res_count, unsigned long *kds_access_bitmap, bool exclusive) -{ - u32 i; - - for (i = 0; i < *kds_res_count; i++) { - /* Duplicate resource, ignore */ - if (kds_resources[i] == kds_res) - return; - } - - kds_resources[*kds_res_count] = 
kds_res; - if (exclusive) - set_bit(*kds_res_count, kds_access_bitmap); - (*kds_res_count)++; -} -#endif - -/* - * Set up external resources needed by this job. - * - * jctx.lock must be held when this is called. - */ - -static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const struct base_jd_atom_v2 *user_atom) -{ - int err_ret_val = -EINVAL; - u32 res_no; -#ifdef CONFIG_KDS - u32 kds_res_count = 0; - struct kds_resource **kds_resources = NULL; - unsigned long *kds_access_bitmap = NULL; -#endif /* CONFIG_KDS */ - struct base_external_resource *input_extres; - - KBASE_DEBUG_ASSERT(katom); - KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES); - - /* no resources encoded, early out */ - if (!katom->nr_extres) - return -EINVAL; - - katom->extres = kmalloc_array(katom->nr_extres, sizeof(*katom->extres), GFP_KERNEL); - if (NULL == katom->extres) { - err_ret_val = -ENOMEM; - goto early_err_out; - } - - /* copy user buffer to the end of our real buffer. - * Make sure the struct sizes haven't changed in a way - * we don't support */ - BUILD_BUG_ON(sizeof(*input_extres) > sizeof(*katom->extres)); - input_extres = (struct base_external_resource *) - (((unsigned char *)katom->extres) + - (sizeof(*katom->extres) - sizeof(*input_extres)) * - katom->nr_extres); - - if (copy_from_user(input_extres, - get_compat_pointer(katom->kctx, &user_atom->extres_list), - sizeof(*input_extres) * katom->nr_extres) != 0) { - err_ret_val = -EINVAL; - goto early_err_out; - } -#ifdef CONFIG_KDS - /* assume we have to wait for all */ - KBASE_DEBUG_ASSERT(0 != katom->nr_extres); - kds_resources = kmalloc_array(katom->nr_extres, sizeof(struct kds_resource *), GFP_KERNEL); - - if (NULL == kds_resources) { - err_ret_val = -ENOMEM; - goto early_err_out; - } - - KBASE_DEBUG_ASSERT(0 != katom->nr_extres); - kds_access_bitmap = kzalloc(sizeof(unsigned long) * ((katom->nr_extres + BITS_PER_LONG - 1) / BITS_PER_LONG), GFP_KERNEL); - - if (NULL == kds_access_bitmap) { - 
err_ret_val = -ENOMEM; - goto early_err_out; - } -#endif /* CONFIG_KDS */ - - /* need to keep the GPU VM locked while we set up UMM buffers */ - kbase_gpu_vm_lock(katom->kctx); - for (res_no = 0; res_no < katom->nr_extres; res_no++) { - struct base_external_resource *res; - struct kbase_va_region *reg; - - res = &input_extres[res_no]; - reg = kbase_region_tracker_find_region_enclosing_address(katom->kctx, - res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE); - /* did we find a matching region object? */ - if (NULL == reg || (reg->flags & KBASE_REG_FREE)) { - /* roll back */ - goto failed_loop; - } - - if (!(katom->core_req & BASE_JD_REQ_SOFT_JOB) && - (reg->flags & KBASE_REG_SECURE)) { - katom->atom_flags |= KBASE_KATOM_FLAG_SECURE; - if ((katom->core_req & BASE_JD_REQ_FS) == 0) { - WARN_RATELIMIT(1, "Secure non-fragment jobs not supported"); - goto failed_loop; - } - } - - /* decide what needs to happen for this resource */ - switch (reg->gpu_alloc->type) { - case BASE_MEM_IMPORT_TYPE_UMP: - { -#if defined(CONFIG_KDS) && defined(CONFIG_UMP) - struct kds_resource *kds_res; - - kds_res = ump_dd_kds_resource_get(reg->gpu_alloc->imported.ump_handle); - if (kds_res) - add_kds_resource(kds_res, kds_resources, &kds_res_count, - kds_access_bitmap, - res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE); -#endif /*defined(CONFIG_KDS) && defined(CONFIG_UMP) */ - break; - } -#ifdef CONFIG_DMA_SHARED_BUFFER - case BASE_MEM_IMPORT_TYPE_UMM: - { -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - struct kds_resource *kds_res; - - kds_res = get_dma_buf_kds_resource(reg->gpu_alloc->imported.umm.dma_buf); - if (kds_res) - add_kds_resource(kds_res, kds_resources, &kds_res_count, kds_access_bitmap, res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE); -#endif - reg->gpu_alloc->imported.umm.current_mapping_usage_count++; - if (1 == reg->gpu_alloc->imported.umm.current_mapping_usage_count) { - /* use a local variable to not pollute err_ret_val - * with a potential success value as some other 
gotos depend - * on the default error code stored in err_ret_val */ - int tmp; - - tmp = kbase_jd_umm_map(katom->kctx, reg); - if (tmp) { - /* failed to map this buffer, roll back */ - err_ret_val = tmp; - reg->gpu_alloc->imported.umm.current_mapping_usage_count--; - goto failed_loop; - } - } - break; - } -#endif - default: - goto failed_loop; - } - - /* finish with updating out array with the data we found */ - /* NOTE: It is important that this is the last thing we do (or - * at least not before the first write) as we overwrite elements - * as we loop and could be overwriting ourself, so no writes - * until the last read for an element. - * */ - katom->extres[res_no].gpu_address = reg->start_pfn << PAGE_SHIFT; /* save the start_pfn (as an address, not pfn) to use fast lookup later */ - katom->extres[res_no].alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc); - } - /* successfully parsed the extres array */ - /* drop the vm lock before we call into kds */ - kbase_gpu_vm_unlock(katom->kctx); - -#ifdef CONFIG_KDS - if (kds_res_count) { - int wait_failed; - - /* We have resources to wait for with kds */ - katom->kds_dep_satisfied = false; - - wait_failed = kds_async_waitall(&katom->kds_rset, - &katom->kctx->jctx.kds_cb, katom, NULL, - kds_res_count, kds_access_bitmap, - kds_resources); - - if (wait_failed) - goto failed_kds_setup; - else - kbase_jd_kds_waiters_add(katom); - } else { - /* Nothing to wait for, so kds dep met */ - katom->kds_dep_satisfied = true; - } - kfree(kds_resources); - kfree(kds_access_bitmap); -#endif /* CONFIG_KDS */ - - /* all done OK */ - return 0; - -/* error handling section */ - -#ifdef CONFIG_KDS - failed_kds_setup: - - /* lock before we unmap */ - kbase_gpu_vm_lock(katom->kctx); -#endif /* CONFIG_KDS */ - - failed_loop: - /* undo the loop work */ - while (res_no-- > 0) { - struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc; -#ifdef CONFIG_DMA_SHARED_BUFFER - if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) { - 
alloc->imported.umm.current_mapping_usage_count--; - - if (0 == alloc->imported.umm.current_mapping_usage_count) { - struct kbase_va_region *reg; - - reg = kbase_region_tracker_find_region_base_address( - katom->kctx, - katom->extres[res_no].gpu_address); - - if (reg && reg->gpu_alloc == alloc) - kbase_mmu_teardown_pages(katom->kctx, - reg->start_pfn, - kbase_reg_current_backed_size(reg)); - - kbase_jd_umm_unmap(katom->kctx, alloc); - } - } -#endif /* CONFIG_DMA_SHARED_BUFFER */ - kbase_mem_phy_alloc_put(alloc); - } - kbase_gpu_vm_unlock(katom->kctx); - - early_err_out: - kfree(katom->extres); - katom->extres = NULL; -#ifdef CONFIG_KDS - kfree(kds_resources); - kfree(kds_access_bitmap); -#endif /* CONFIG_KDS */ - return err_ret_val; -} - -static inline void jd_resolve_dep(struct list_head *out_list, - struct kbase_jd_atom *katom, - u8 d, - bool ctx_is_dying) -{ - u8 other_d = !d; - - while (!list_empty(&katom->dep_head[d])) { - struct kbase_jd_atom *dep_atom; - u8 dep_type; - - dep_atom = list_entry(katom->dep_head[d].next, - struct kbase_jd_atom, dep_item[d]); - - list_del(katom->dep_head[d].next); - - dep_type = kbase_jd_katom_dep_type(&dep_atom->dep[d]); - kbase_jd_katom_dep_clear(&dep_atom->dep[d]); - - if (katom->event_code != BASE_JD_EVENT_DONE && - (dep_type != BASE_JD_DEP_TYPE_ORDER || ctx_is_dying)) { - /* Atom failed, so remove the other dependencies and immediately fail the atom */ - if (kbase_jd_katom_dep_atom(&dep_atom->dep[other_d])) { - list_del(&dep_atom->dep_item[other_d]); - kbase_jd_katom_dep_clear(&dep_atom->dep[other_d]); - } -#ifdef CONFIG_KDS - if (!dep_atom->kds_dep_satisfied) { - /* Just set kds_dep_satisfied to true. If the callback happens after this then it will early out and - * do nothing. 
If the callback doesn't happen then kbase_jd_post_external_resources will clean up - */ - dep_atom->kds_dep_satisfied = true; - } -#endif - - dep_atom->event_code = katom->event_code; - KBASE_DEBUG_ASSERT(dep_atom->status != - KBASE_JD_ATOM_STATE_UNUSED); - dep_atom->status = KBASE_JD_ATOM_STATE_COMPLETED; - - list_add_tail(&dep_atom->dep_item[0], out_list); - } else if (!kbase_jd_katom_dep_atom(&dep_atom->dep[other_d])) { -#ifdef CONFIG_KDS - if (dep_atom->kds_dep_satisfied) -#endif - list_add_tail(&dep_atom->dep_item[0], out_list); - } - } -} - -KBASE_EXPORT_TEST_API(jd_resolve_dep); - -#if MALI_CUSTOMER_RELEASE == 0 -static void jd_force_failure(struct kbase_device *kbdev, struct kbase_jd_atom *katom) -{ - kbdev->force_replay_count++; - - if (kbdev->force_replay_count >= kbdev->force_replay_limit) { - kbdev->force_replay_count = 0; - katom->event_code = BASE_JD_EVENT_FORCE_REPLAY; - - if (kbdev->force_replay_random) - kbdev->force_replay_limit = - (prandom_u32() % KBASEP_FORCE_REPLAY_RANDOM_LIMIT) + 1; - - dev_info(kbdev->dev, "force_replay : promoting to error\n"); - } -} - -/** Test to see if atom should be forced to fail. - * - * This function will check if an atom has a replay job as a dependent. If so - * then it will be considered for forced failure. 
*/ -static void jd_check_force_failure(struct kbase_jd_atom *katom) -{ - struct kbase_context *kctx = katom->kctx; - struct kbase_device *kbdev = kctx->kbdev; - int i; - - if ((kbdev->force_replay_limit == KBASEP_FORCE_REPLAY_DISABLED) || - (katom->core_req & BASEP_JD_REQ_EVENT_NEVER)) - return; - - for (i = 1; i < BASE_JD_ATOM_COUNT; i++) { - if (kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[0]) == katom || - kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[1]) == katom) { - struct kbase_jd_atom *dep_atom = &kctx->jctx.atoms[i]; - - if ((dep_atom->core_req & BASEP_JD_REQ_ATOM_TYPE) == - BASE_JD_REQ_SOFT_REPLAY && - (dep_atom->core_req & kbdev->force_replay_core_req) - == kbdev->force_replay_core_req) { - jd_force_failure(kbdev, katom); - return; - } - } - } -} -#endif - -/* - * Perform the necessary handling of an atom that has finished running - * on the GPU. - * - * Note that if this is a soft-job that has had kbase_prepare_soft_job called on it then the caller - * is responsible for calling kbase_finish_soft_job *before* calling this function. - * - * The caller must hold the kbase_jd_context.lock. 
- */ -bool jd_done_nolock(struct kbase_jd_atom *katom, - struct list_head *completed_jobs_ctx) -{ - struct kbase_context *kctx = katom->kctx; - struct kbase_device *kbdev = kctx->kbdev; - struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info; - struct list_head completed_jobs; - struct list_head runnable_jobs; - bool need_to_try_schedule_context = false; - int i; - - INIT_LIST_HEAD(&completed_jobs); - INIT_LIST_HEAD(&runnable_jobs); - - KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED); - -#if MALI_CUSTOMER_RELEASE == 0 - jd_check_force_failure(katom); -#endif - - - /* This is needed in case an atom is failed due to being invalid, this - * can happen *before* the jobs that the atom depends on have completed */ - for (i = 0; i < 2; i++) { - if (kbase_jd_katom_dep_atom(&katom->dep[i])) { - list_del(&katom->dep_item[i]); - kbase_jd_katom_dep_clear(&katom->dep[i]); - } - } - - /* With PRLAM-10817 or PRLAM-10959 the last tile of a fragment job being soft-stopped can fail with - * BASE_JD_EVENT_TILE_RANGE_FAULT. 
- * - * So here if the fragment job failed with TILE_RANGE_FAULT and it has been soft-stopped, then we promote the - * error code to BASE_JD_EVENT_DONE - */ - - if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10817) || kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10959)) && - katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT) { - if ((katom->core_req & BASE_JD_REQ_FS) && (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED)) { - /* Promote the failure to job done */ - katom->event_code = BASE_JD_EVENT_DONE; - katom->atom_flags = katom->atom_flags & (~KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED); - } - } - - katom->status = KBASE_JD_ATOM_STATE_COMPLETED; - list_add_tail(&katom->dep_item[0], &completed_jobs); - - while (!list_empty(&completed_jobs)) { - katom = list_entry(completed_jobs.prev, struct kbase_jd_atom, dep_item[0]); - list_del(completed_jobs.prev); - - KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED); - - for (i = 0; i < 2; i++) - jd_resolve_dep(&runnable_jobs, katom, i, - js_kctx_info->ctx.is_dying); - - if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) - kbase_jd_post_external_resources(katom); - - while (!list_empty(&runnable_jobs)) { - struct kbase_jd_atom *node; - - node = list_entry(runnable_jobs.next, - struct kbase_jd_atom, dep_item[0]); - - list_del(runnable_jobs.next); - - KBASE_DEBUG_ASSERT(node->status != KBASE_JD_ATOM_STATE_UNUSED); - - if (node->status != KBASE_JD_ATOM_STATE_COMPLETED) { - need_to_try_schedule_context |= jd_run_atom(node); - } else { - node->event_code = katom->event_code; - - if ((node->core_req & BASEP_JD_REQ_ATOM_TYPE) - == BASE_JD_REQ_SOFT_REPLAY) { - if (kbase_replay_process(node)) - /* Don't complete this atom */ - continue; - } else if (node->core_req & - BASE_JD_REQ_SOFT_JOB) { - /* If this is a fence wait then remove it from the list of sync waiters. 
*/ - if (BASE_JD_REQ_SOFT_FENCE_WAIT == node->core_req) - list_del(&node->dep_item[0]); - - kbase_finish_soft_job(node); - } - node->status = KBASE_JD_ATOM_STATE_COMPLETED; - } - - if (node->status == KBASE_JD_ATOM_STATE_COMPLETED) - list_add_tail(&node->dep_item[0], &completed_jobs); - } - - /* Register a completed job as a disjoint event when the GPU - * is in a disjoint state (ie. being reset or replaying jobs). - */ - kbase_disjoint_event_potential(kctx->kbdev); - if (completed_jobs_ctx) - list_add_tail(&katom->dep_item[0], completed_jobs_ctx); - else - kbase_event_post(kctx, katom); - - /* Decrement and check the TOTAL number of jobs. This includes - * those not tracked by the scheduler: 'not ready to run' and - * 'dependency-only' jobs. */ - if (--kctx->jctx.job_nr == 0) - wake_up(&kctx->jctx.zero_jobs_wait); /* All events are safely queued now, and we can signal any waiter - * that we've got no more jobs (so we can be safely terminated) */ - } - - return need_to_try_schedule_context; -} - -KBASE_EXPORT_TEST_API(jd_done_nolock); - -#ifdef CONFIG_GPU_TRACEPOINTS -enum { - CORE_REQ_DEP_ONLY, - CORE_REQ_SOFT, - CORE_REQ_COMPUTE, - CORE_REQ_FRAGMENT, - CORE_REQ_VERTEX, - CORE_REQ_TILER, - CORE_REQ_FRAGMENT_VERTEX, - CORE_REQ_FRAGMENT_VERTEX_TILER, - CORE_REQ_FRAGMENT_TILER, - CORE_REQ_VERTEX_TILER, - CORE_REQ_UNKNOWN -}; -static const char * const core_req_strings[] = { - "Dependency Only Job", - "Soft Job", - "Compute Shader Job", - "Fragment Shader Job", - "Vertex/Geometry Shader Job", - "Tiler Job", - "Fragment Shader + Vertex/Geometry Shader Job", - "Fragment Shader + Vertex/Geometry Shader Job + Tiler Job", - "Fragment Shader + Tiler Job", - "Vertex/Geometry Shader Job + Tiler Job", - "Unknown Job" -}; -static const char *kbasep_map_core_reqs_to_string(base_jd_core_req core_req) -{ - if (core_req & BASE_JD_REQ_SOFT_JOB) - return core_req_strings[CORE_REQ_SOFT]; - if (core_req & BASE_JD_REQ_ONLY_COMPUTE) - return core_req_strings[CORE_REQ_COMPUTE]; - switch 
(core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) { - case BASE_JD_REQ_DEP: - return core_req_strings[CORE_REQ_DEP_ONLY]; - case BASE_JD_REQ_FS: - return core_req_strings[CORE_REQ_FRAGMENT]; - case BASE_JD_REQ_CS: - return core_req_strings[CORE_REQ_VERTEX]; - case BASE_JD_REQ_T: - return core_req_strings[CORE_REQ_TILER]; - case (BASE_JD_REQ_FS | BASE_JD_REQ_CS): - return core_req_strings[CORE_REQ_FRAGMENT_VERTEX]; - case (BASE_JD_REQ_FS | BASE_JD_REQ_T): - return core_req_strings[CORE_REQ_FRAGMENT_TILER]; - case (BASE_JD_REQ_CS | BASE_JD_REQ_T): - return core_req_strings[CORE_REQ_VERTEX_TILER]; - case (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T): - return core_req_strings[CORE_REQ_FRAGMENT_VERTEX_TILER]; - } - return core_req_strings[CORE_REQ_UNKNOWN]; -} -#endif - -bool jd_submit_atom(struct kbase_context *kctx, - const struct base_jd_atom_v2 *user_atom, - struct kbase_jd_atom *katom) -{ - struct kbase_jd_context *jctx = &kctx->jctx; - base_jd_core_req core_req; - int queued = 0; - int i; - int sched_prio; - bool ret; - - /* Update the TOTAL number of jobs. This includes those not tracked by - * the scheduler: 'not ready to run' and 'dependency-only' jobs. 
*/ - jctx->job_nr++; - - core_req = user_atom->core_req; - - katom->start_timestamp.tv64 = 0; - katom->time_spent_us = 0; - katom->udata = user_atom->udata; - katom->kctx = kctx; - katom->nr_extres = user_atom->nr_extres; - katom->extres = NULL; - katom->device_nr = user_atom->device_nr; - katom->affinity = 0; - katom->jc = user_atom->jc; - katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED; - katom->core_req = core_req; - katom->atom_flags = 0; - katom->retry_count = 0; - katom->need_cache_flush_cores_retained = 0; - katom->x_pre_dep = NULL; - katom->x_post_dep = NULL; -#ifdef CONFIG_KDS - /* Start by assuming that the KDS dependencies are satisfied, - * kbase_jd_pre_external_resources will correct this if there are dependencies */ - katom->kds_dep_satisfied = true; - katom->kds_rset = NULL; -#endif /* CONFIG_KDS */ - - /* Don't do anything if there is a mess up with dependencies. - This is done in a separate cycle to check both the dependencies at ones, otherwise - it will be extra complexity to deal with 1st dependency ( just added to the list ) - if only the 2nd one has invalid config. 
- */ - for (i = 0; i < 2; i++) { - int dep_atom_number = user_atom->pre_dep[i].atom_id; - base_jd_dep_type dep_atom_type = user_atom->pre_dep[i].dependency_type; - - if (dep_atom_number) { - if (dep_atom_type != BASE_JD_DEP_TYPE_ORDER && - dep_atom_type != BASE_JD_DEP_TYPE_DATA) { - katom->event_code = BASE_JD_EVENT_JOB_CONFIG_FAULT; - katom->status = KBASE_JD_ATOM_STATE_COMPLETED; -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_tl_new_atom( - katom, - kbase_jd_atom_id(kctx, katom)); - kbase_tlstream_tl_ret_atom_ctx( - katom, kctx); -#endif - ret = jd_done_nolock(katom, NULL); - goto out; - } - } - } - - /* Add dependencies */ - for (i = 0; i < 2; i++) { - int dep_atom_number = user_atom->pre_dep[i].atom_id; - base_jd_dep_type dep_atom_type; - struct kbase_jd_atom *dep_atom = &jctx->atoms[dep_atom_number]; - - dep_atom_type = user_atom->pre_dep[i].dependency_type; - kbase_jd_katom_dep_clear(&katom->dep[i]); - - if (!dep_atom_number) - continue; - - if (dep_atom->status == KBASE_JD_ATOM_STATE_UNUSED || - dep_atom->status == KBASE_JD_ATOM_STATE_COMPLETED) { - - if (dep_atom->event_code == BASE_JD_EVENT_DONE) - continue; - /* don't stop this atom if it has an order dependency - * only to the failed one, try to submit it throught - * the normal path - */ - if (dep_atom_type == BASE_JD_DEP_TYPE_ORDER && - dep_atom->event_code > BASE_JD_EVENT_ACTIVE) { - continue; - } - - if (i == 1 && kbase_jd_katom_dep_atom(&katom->dep[0])) { - /* Remove the previous dependency */ - list_del(&katom->dep_item[0]); - kbase_jd_katom_dep_clear(&katom->dep[0]); - } - - /* Atom has completed, propagate the error code if any */ - katom->event_code = dep_atom->event_code; - katom->status = KBASE_JD_ATOM_STATE_QUEUED; -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_tl_new_atom( - katom, - kbase_jd_atom_id(kctx, katom)); - kbase_tlstream_tl_ret_atom_ctx(katom, kctx); -#endif - if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) - == BASE_JD_REQ_SOFT_REPLAY) { - if 
(kbase_replay_process(katom)) { - ret = false; - goto out; - } - } - ret = jd_done_nolock(katom, NULL); - - goto out; - } else { - /* Atom is in progress, add this atom to the list */ - list_add_tail(&katom->dep_item[i], &dep_atom->dep_head[i]); - kbase_jd_katom_dep_set(&katom->dep[i], dep_atom, dep_atom_type); - queued = 1; - } - } - - /* These must occur after the above loop to ensure that an atom that - * depends on a previous atom with the same number behaves as expected */ - katom->event_code = BASE_JD_EVENT_DONE; - katom->status = KBASE_JD_ATOM_STATE_QUEUED; - -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_tl_new_atom( - katom, - kbase_jd_atom_id(kctx, katom)); - kbase_tlstream_tl_ret_atom_ctx(katom, kctx); -#endif - - /* Reject atoms with job chain = NULL, as these cause issues with soft-stop */ - if (!katom->jc && (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) { - dev_warn(kctx->kbdev->dev, "Rejecting atom with jc = NULL"); - katom->event_code = BASE_JD_EVENT_JOB_INVALID; - ret = jd_done_nolock(katom, NULL); - goto out; - } - - /* Reject atoms with an invalid device_nr */ - if ((katom->core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) && - (katom->device_nr >= kctx->kbdev->gpu_props.num_core_groups)) { - dev_warn(kctx->kbdev->dev, - "Rejecting atom with invalid device_nr %d", - katom->device_nr); - katom->event_code = BASE_JD_EVENT_JOB_INVALID; - ret = jd_done_nolock(katom, NULL); - goto out; - } - - /* For invalid priority, be most lenient and choose the default */ - sched_prio = kbasep_js_atom_prio_to_sched_prio(user_atom->prio); - if (sched_prio == KBASE_JS_ATOM_SCHED_PRIO_INVALID) - sched_prio = KBASE_JS_ATOM_SCHED_PRIO_DEFAULT; - katom->sched_priority = sched_prio; - - if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) { - /* handle what we need to do to access the external resources */ - if (kbase_jd_pre_external_resources(katom, user_atom) != 0) { - /* setup failed (no access, bad resource, unknown resource types, etc.) 
*/ - katom->event_code = BASE_JD_EVENT_JOB_INVALID; - ret = jd_done_nolock(katom, NULL); - goto out; - } - } - - /* Validate the atom. Function will return error if the atom is - * malformed. - * - * Soft-jobs never enter the job scheduler but have their own initialize method. - * - * If either fail then we immediately complete the atom with an error. - */ - if ((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0) { - if (!kbase_js_is_atom_valid(kctx->kbdev, katom)) { - katom->event_code = BASE_JD_EVENT_JOB_INVALID; - ret = jd_done_nolock(katom, NULL); - goto out; - } - } else { - /* Soft-job */ - if (kbase_prepare_soft_job(katom) != 0) { - katom->event_code = BASE_JD_EVENT_JOB_INVALID; - ret = jd_done_nolock(katom, NULL); - goto out; - } - } - -#ifdef CONFIG_GPU_TRACEPOINTS - katom->work_id = atomic_inc_return(&jctx->work_id); - trace_gpu_job_enqueue((u32)kctx, katom->work_id, kbasep_map_core_reqs_to_string(katom->core_req)); -#endif - - if (queued && !IS_GPU_ATOM(katom)) { - ret = false; - goto out; - } -#ifdef CONFIG_KDS - if (!katom->kds_dep_satisfied) { - /* Queue atom due to KDS dependency */ - ret = false; - goto out; - } -#endif /* CONFIG_KDS */ - - if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) - == BASE_JD_REQ_SOFT_REPLAY) { - if (kbase_replay_process(katom)) - ret = false; - else - ret = jd_done_nolock(katom, NULL); - - goto out; - } else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) { - if (kbase_process_soft_job(katom) == 0) { - kbase_finish_soft_job(katom); - ret = jd_done_nolock(katom, NULL); - goto out; - } - /* The job has not yet completed */ - list_add_tail(&katom->dep_item[0], &kctx->waiting_soft_jobs); - ret = false; - } else if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) { - katom->status = KBASE_JD_ATOM_STATE_IN_JS; - ret = kbasep_js_add_job(kctx, katom); - /* If job was cancelled then resolve immediately */ - if (katom->event_code == BASE_JD_EVENT_JOB_CANCELLED) - ret = jd_done_nolock(katom, NULL); - } else { - /* This is a pure 
dependency. Resolve it immediately */ - ret = jd_done_nolock(katom, NULL); - } - - out: - return ret; -} - -#ifdef BASE_LEGACY_UK6_SUPPORT -int kbase_jd_submit(struct kbase_context *kctx, - const struct kbase_uk_job_submit *submit_data, - int uk6_atom) -#else -int kbase_jd_submit(struct kbase_context *kctx, - const struct kbase_uk_job_submit *submit_data) -#endif /* BASE_LEGACY_UK6_SUPPORT */ -{ - struct kbase_jd_context *jctx = &kctx->jctx; - int err = 0; - int i; - bool need_to_try_schedule_context = false; - struct kbase_device *kbdev; - void __user *user_addr; - - /* - * kbase_jd_submit isn't expected to fail and so all errors with the jobs - * are reported by immediately falling them (through event system) - */ - kbdev = kctx->kbdev; - - beenthere(kctx, "%s", "Enter"); - - if ((kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) != 0) { - dev_err(kbdev->dev, "Attempt to submit to a context that has SUBMIT_DISABLED set on it"); - return -EINVAL; - } - -#ifdef BASE_LEGACY_UK6_SUPPORT - if ((uk6_atom && submit_data->stride != - sizeof(struct base_jd_atom_v2_uk6)) || - submit_data->stride != sizeof(base_jd_atom_v2)) { -#else - if (submit_data->stride != sizeof(base_jd_atom_v2)) { -#endif /* BASE_LEGACY_UK6_SUPPORT */ - dev_err(kbdev->dev, "Stride passed to job_submit doesn't match kernel"); - return -EINVAL; - } - - user_addr = get_compat_pointer(kctx, &submit_data->addr); - - KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_add_return(submit_data->nr_atoms, &kctx->timeline.jd_atoms_in_flight)); - - for (i = 0; i < submit_data->nr_atoms; i++) { - struct base_jd_atom_v2 user_atom; - struct kbase_jd_atom *katom; - -#ifdef BASE_LEGACY_UK6_SUPPORT - if (uk6_atom) { - struct base_jd_atom_v2_uk6 user_atom_v6; - base_jd_dep_type dep_types[2] = {BASE_JD_DEP_TYPE_DATA, BASE_JD_DEP_TYPE_DATA}; - - if (copy_from_user(&user_atom_v6, user_addr, - sizeof(user_atom_v6))) { - err = -EINVAL; - KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, - atomic_sub_return( - 
submit_data->nr_atoms - i, - &kctx->timeline.jd_atoms_in_flight)); - break; - } - /* Convert from UK6 atom format to UK7 format */ - user_atom.jc = user_atom_v6.jc; - user_atom.udata = user_atom_v6.udata; - user_atom.extres_list = user_atom_v6.extres_list; - user_atom.nr_extres = user_atom_v6.nr_extres; - user_atom.core_req = user_atom_v6.core_req; - - /* atom number 0 is used for no dependency atoms */ - if (!user_atom_v6.pre_dep[0]) - dep_types[0] = BASE_JD_DEP_TYPE_INVALID; - - base_jd_atom_dep_set(&user_atom.pre_dep[0], - user_atom_v6.pre_dep[0], - dep_types[0]); - - /* atom number 0 is used for no dependency atoms */ - if (!user_atom_v6.pre_dep[1]) - dep_types[1] = BASE_JD_DEP_TYPE_INVALID; - - base_jd_atom_dep_set(&user_atom.pre_dep[1], - user_atom_v6.pre_dep[1], - dep_types[1]); - - user_atom.atom_number = user_atom_v6.atom_number; - user_atom.prio = user_atom_v6.prio; - user_atom.device_nr = user_atom_v6.device_nr; - } else { -#endif /* BASE_LEGACY_UK6_SUPPORT */ - if (copy_from_user(&user_atom, user_addr, sizeof(user_atom)) != 0) { - err = -EINVAL; - KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(submit_data->nr_atoms - i, &kctx->timeline.jd_atoms_in_flight)); - break; - } -#ifdef BASE_LEGACY_UK6_SUPPORT - } -#endif /* BASE_LEGACY_UK6_SUPPORT */ - - user_addr = (void __user *)((uintptr_t) user_addr + submit_data->stride); - - mutex_lock(&jctx->lock); -#ifndef compiletime_assert -#define compiletime_assert_defined -#define compiletime_assert(x, msg) do { switch (0) { case 0: case (x):; } } \ -while (false) -#endif - compiletime_assert((1 << (8*sizeof(user_atom.atom_number))) == - BASE_JD_ATOM_COUNT, - "BASE_JD_ATOM_COUNT and base_atom_id type out of sync"); - compiletime_assert(sizeof(user_atom.pre_dep[0].atom_id) == - sizeof(user_atom.atom_number), - "BASE_JD_ATOM_COUNT and base_atom_id type out of sync"); -#ifdef compiletime_assert_defined -#undef compiletime_assert -#undef compiletime_assert_defined -#endif - katom = 
&jctx->atoms[user_atom.atom_number]; - - while (katom->status != KBASE_JD_ATOM_STATE_UNUSED) { - /* Atom number is already in use, wait for the atom to - * complete - */ - mutex_unlock(&jctx->lock); - - /* This thread will wait for the atom to complete. Due - * to thread scheduling we are not sure that the other - * thread that owns the atom will also schedule the - * context, so we force the scheduler to be active and - * hence eventually schedule this context at some point - * later. - */ - kbase_js_sched_all(kbdev); - - if (wait_event_killable(katom->completed, - katom->status == - KBASE_JD_ATOM_STATE_UNUSED) != 0) { - /* We're being killed so the result code - * doesn't really matter - */ - return 0; - } - mutex_lock(&jctx->lock); - } - - need_to_try_schedule_context |= - jd_submit_atom(kctx, &user_atom, katom); - - /* Register a completed job as a disjoint event when the GPU is in a disjoint state - * (ie. being reset or replaying jobs). - */ - kbase_disjoint_event_potential(kbdev); - - mutex_unlock(&jctx->lock); - } - - if (need_to_try_schedule_context) - kbase_js_sched_all(kbdev); - - return err; -} - -KBASE_EXPORT_TEST_API(kbase_jd_submit); - -void kbase_jd_done_worker(struct work_struct *data) -{ - struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work); - struct kbase_jd_context *jctx; - struct kbase_context *kctx; - struct kbasep_js_kctx_info *js_kctx_info; - union kbasep_js_policy *js_policy; - struct kbase_device *kbdev; - struct kbasep_js_device_data *js_devdata; - u64 cache_jc = katom->jc; - struct kbasep_js_atom_retained_state katom_retained_state; - bool schedule = false; - bool context_idle; - base_jd_core_req core_req = katom->core_req; - u64 affinity = katom->affinity; - enum kbase_atom_coreref_state coreref_state = katom->coreref_state; - - /* Soft jobs should never reach this function */ - KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0); - - kctx = katom->kctx; - jctx = &kctx->jctx; - kbdev = kctx->kbdev; 
- js_kctx_info = &kctx->jctx.sched_info; - js_devdata = &kbdev->js_data; - js_policy = &kbdev->js_data.policy; - - KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER, kctx, katom, katom->jc, 0); - - kbase_backend_complete_wq(kbdev, katom); - - /* - * Begin transaction on JD context and JS context - */ - mutex_lock(&jctx->lock); - mutex_lock(&js_devdata->queue_mutex); - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - - /* This worker only gets called on contexts that are scheduled *in*. This is - * because it only happens in response to an IRQ from a job that was - * running. - */ - KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled); - - if (katom->event_code == BASE_JD_EVENT_STOPPED) { - /* Atom has been promoted to stopped */ - unsigned long flags; - - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - mutex_unlock(&js_devdata->queue_mutex); - mutex_unlock(&jctx->lock); - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - katom->status = KBASE_JD_ATOM_STATE_IN_JS; - kbase_js_unpull(kctx, katom); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - return; - } - - if (katom->event_code != BASE_JD_EVENT_DONE) - dev_err(kbdev->dev, - "t6xx: GPU fault 0x%02lx from job slot %d\n", - (unsigned long)katom->event_code, - katom->slot_nr); - - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) - kbase_as_poking_timer_release_atom(kbdev, kctx, katom); - - /* Retain state before the katom disappears */ - kbasep_js_atom_retained_state_copy(&katom_retained_state, katom); - - if (!kbasep_js_has_atom_finished(&katom_retained_state)) { - mutex_lock(&js_devdata->runpool_mutex); - kbasep_js_clear_job_retry_submit(katom); - /* An atom that has been hard-stopped might have previously - * been soft-stopped and has just finished before the hard-stop - * occurred. 
For this reason, clear the hard-stopped flag */ - katom->atom_flags &= ~(KBASE_KATOM_FLAG_BEEN_HARD_STOPPED); - mutex_unlock(&js_devdata->runpool_mutex); - } - - if (kbasep_js_has_atom_finished(&katom_retained_state)) - schedule = true; - - context_idle = kbase_js_complete_atom_wq(kctx, katom); - - KBASE_DEBUG_ASSERT(kbasep_js_has_atom_finished(&katom_retained_state)); - - kbasep_js_remove_job(kbdev, kctx, katom); - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - mutex_unlock(&js_devdata->queue_mutex); - katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF; - /* jd_done_nolock() requires the jsctx_mutex lock to be dropped */ - schedule |= jd_done_nolock(katom, &kctx->completed_jobs); - - /* katom may have been freed now, do not use! */ - - if (context_idle) { - unsigned long flags; - - mutex_lock(&js_devdata->queue_mutex); - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - /* If kbase_sched() has scheduled this context back in then - * ctx_active will have been set after we marked it as inactive, - * and another pm reference will have been taken, so drop our - * reference. But do not call kbase_jm_idle_ctx(), as the - * context is active and fast-starting is allowed. - * - * If an atom has been fast-started then kctx->atoms_pulled will - * be non-zero but ctx_active will still be false (as the - * previous pm reference has been inherited). Do NOT drop our - * reference, as it has been re-used, and leave the context as - * active. - * - * If no new atoms have been started then ctx_active will still - * be false and atoms_pulled will be zero, so drop the reference - * and call kbase_jm_idle_ctx(). - * - * As the checks are done under both the queue_mutex and - * runpool_irq.lock is should be impossible for this to race - * with the scheduler code. - */ - if (kctx->ctx_active || !atomic_read(&kctx->atoms_pulled)) { - /* Calling kbase_jm_idle_ctx() here will ensure that - * atoms are not fast-started when we drop the - * runpool_irq.lock. 
This is not performed if ctx_active - * is set as in that case another pm reference has been - * taken and a fast-start would be valid. - */ - if (!kctx->ctx_active) - kbase_jm_idle_ctx(kbdev, kctx); - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, - flags); - - kbase_pm_context_idle(kbdev); - } else { - kctx->ctx_active = true; - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, - flags); - } - mutex_unlock(&js_devdata->queue_mutex); - } - - /* - * Transaction complete - */ - mutex_unlock(&jctx->lock); - - /* Job is now no longer running, so can now safely release the context - * reference, and handle any actions that were logged against the atom's retained state */ - - kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, &katom_retained_state); - - if (schedule) - kbase_js_sched_all(kbdev); - - if (!atomic_dec_return(&kctx->work_count)) { - /* If worker now idle then post all events that jd_done_nolock() - * has queued */ - mutex_lock(&jctx->lock); - while (!list_empty(&kctx->completed_jobs)) { - struct kbase_jd_atom *atom = list_entry( - kctx->completed_jobs.next, - struct kbase_jd_atom, dep_item[0]); - list_del(kctx->completed_jobs.next); - - kbase_event_post(kctx, atom); - } - mutex_unlock(&jctx->lock); - } - - kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity, - coreref_state); - - KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER_END, kctx, NULL, cache_jc, 0); -} - -/** - * jd_cancel_worker - Work queue job cancel function. - * @data: a &struct work_struct - * - * Only called as part of 'Zapping' a context (which occurs on termination). - * Operates serially with the kbase_jd_done_worker() on the work queue. - * - * This can only be called on contexts that aren't scheduled. - * - * We don't need to release most of the resources that would occur on - * kbase_jd_done() or kbase_jd_done_worker(), because the atoms here must not be - * running (by virtue of only being called on contexts that aren't - * scheduled). 
- */ -static void jd_cancel_worker(struct work_struct *data) -{ - struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work); - struct kbase_jd_context *jctx; - struct kbase_context *kctx; - struct kbasep_js_kctx_info *js_kctx_info; - bool need_to_try_schedule_context; - bool attr_state_changed; - struct kbase_device *kbdev; - - /* Soft jobs should never reach this function */ - KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0); - - kctx = katom->kctx; - kbdev = kctx->kbdev; - jctx = &kctx->jctx; - js_kctx_info = &kctx->jctx.sched_info; - - KBASE_TRACE_ADD(kbdev, JD_CANCEL_WORKER, kctx, katom, katom->jc, 0); - - /* This only gets called on contexts that are scheduled out. Hence, we must - * make sure we don't de-ref the number of running jobs (there aren't - * any), nor must we try to schedule out the context (it's already - * scheduled out). - */ - KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled); - - /* Scheduler: Remove the job from the system */ - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - attr_state_changed = kbasep_js_remove_cancelled_job(kbdev, kctx, katom); - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - - mutex_lock(&jctx->lock); - - need_to_try_schedule_context = jd_done_nolock(katom, NULL); - /* Because we're zapping, we're not adding any more jobs to this ctx, so no need to - * schedule the context. There's also no need for the jsctx_mutex to have been taken - * around this too. */ - KBASE_DEBUG_ASSERT(!need_to_try_schedule_context); - - /* katom may have been freed now, do not use! */ - mutex_unlock(&jctx->lock); - - if (attr_state_changed) - kbase_js_sched_all(kbdev); -} - -/** - * jd_evict_worker - Work queue job evict function - * @data: a &struct work_struct - * - * Only called as part of evicting failed jobs. This is only called on jobs that - * were never submitted to HW Access. Jobs that were submitted are handled - * through kbase_jd_done_worker(). 
- * Operates serially with the kbase_jd_done_worker() on the work queue. - * - * We don't need to release most of the resources that would occur on - * kbase_jd_done() or kbase_jd_done_worker(), because the atoms here must not be - * running (by virtue of having not been submitted to HW Access). - */ -static void jd_evict_worker(struct work_struct *data) -{ - struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, - work); - struct kbase_jd_context *jctx; - struct kbase_context *kctx; - struct kbasep_js_kctx_info *js_kctx_info; - struct kbase_device *kbdev; - - /* Soft jobs should never reach this function */ - KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0); - - kctx = katom->kctx; - kbdev = kctx->kbdev; - jctx = &kctx->jctx; - js_kctx_info = &kctx->jctx.sched_info; - - KBASE_TRACE_ADD(kbdev, JD_CANCEL_WORKER, kctx, katom, katom->jc, 0); - - /* Scheduler: Remove the job from the system */ - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - kbasep_js_remove_cancelled_job(kbdev, kctx, katom); - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - - mutex_lock(&jctx->lock); - jd_done_nolock(katom, NULL); - /* katom may have been freed now, do not use! */ - mutex_unlock(&jctx->lock); - - kbase_js_sched_all(kbdev); -} - -/** - * kbase_jd_done - Complete a job that has been removed from the Hardware - * @katom: atom which has been completed - * @slot_nr: slot the atom was on - * @end_timestamp: completion time - * @done_code: completion code - * - * This must be used whenever a job has been removed from the Hardware, e.g.: - * An IRQ indicates that the job finished (for both error and 'done' codes), or - * the job was evicted from the JS_HEAD_NEXT registers during a Soft/Hard stop. - * - * Some work is carried out immediately, and the rest is deferred onto a - * workqueue - * - * Context: - * This can be called safely from atomic context. 
- * The caller must hold kbasep_js_device_data.runpool_irq.lock - */ -void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, - ktime_t *end_timestamp, kbasep_js_atom_done_code done_code) -{ - struct kbase_context *kctx; - struct kbase_device *kbdev; - - KBASE_DEBUG_ASSERT(katom); - kctx = katom->kctx; - KBASE_DEBUG_ASSERT(kctx); - kbdev = kctx->kbdev; - KBASE_DEBUG_ASSERT(kbdev); - - if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT) - katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT; - - KBASE_TRACE_ADD(kbdev, JD_DONE, kctx, katom, katom->jc, 0); - - kbase_job_check_leave_disjoint(kbdev, katom); - - katom->slot_nr = slot_nr; - - atomic_inc(&kctx->work_count); - -#ifdef CONFIG_DEBUG_FS - /* a failed job happened and is waiting for dumping*/ - if (kbase_debug_job_fault_process(katom, katom->event_code)) - return; -#endif - - WARN_ON(work_pending(&katom->work)); - KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work)); - INIT_WORK(&katom->work, kbase_jd_done_worker); - queue_work(kctx->jctx.job_done_wq, &katom->work); -} - -KBASE_EXPORT_TEST_API(kbase_jd_done); - -void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom) -{ - struct kbase_context *kctx; - struct kbasep_js_kctx_info *js_kctx_info; - - KBASE_DEBUG_ASSERT(NULL != kbdev); - KBASE_DEBUG_ASSERT(NULL != katom); - kctx = katom->kctx; - KBASE_DEBUG_ASSERT(NULL != kctx); - - js_kctx_info = &kctx->jctx.sched_info; - - KBASE_TRACE_ADD(kbdev, JD_CANCEL, kctx, katom, katom->jc, 0); - - /* This should only be done from a context that is not scheduled */ - KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled); - - WARN_ON(work_pending(&katom->work)); - - katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; - - KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work)); - INIT_WORK(&katom->work, jd_cancel_worker); - queue_work(kctx->jctx.job_done_wq, &katom->work); -} - -void kbase_jd_evict(struct kbase_device *kbdev, struct kbase_jd_atom *katom) -{ - struct kbase_context *kctx; - struct 
kbasep_js_kctx_info *js_kctx_info; - - KBASE_DEBUG_ASSERT(NULL != kbdev); - KBASE_DEBUG_ASSERT(NULL != katom); - kctx = katom->kctx; - KBASE_DEBUG_ASSERT(NULL != kctx); - - js_kctx_info = &kctx->jctx.sched_info; - - KBASE_TRACE_ADD(kbdev, JD_CANCEL, kctx, katom, katom->jc, 0); - - /* This should only be done from a context that is currently scheduled - */ - KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled); - - WARN_ON(work_pending(&katom->work)); - - KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work)); - INIT_WORK(&katom->work, jd_evict_worker); - queue_work(kctx->jctx.job_done_wq, &katom->work); -} - -void kbase_jd_zap_context(struct kbase_context *kctx) -{ - struct kbase_jd_atom *katom; - struct list_head *entry, *tmp; - struct kbase_device *kbdev; - - KBASE_DEBUG_ASSERT(kctx); - - kbdev = kctx->kbdev; - - KBASE_TRACE_ADD(kbdev, JD_ZAP_CONTEXT, kctx, NULL, 0u, 0u); - - kbase_js_zap_context(kctx); - - mutex_lock(&kctx->jctx.lock); - - /* - * While holding the struct kbase_jd_context lock clean up jobs which are known to kbase but are - * queued outside the job scheduler. - */ - - list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) { - katom = list_entry(entry, struct kbase_jd_atom, dep_item[0]); - kbase_cancel_soft_job(katom); - } - - -#ifdef CONFIG_KDS - - /* For each job waiting on a kds resource, cancel the wait and force the job to - * complete early, this is done so that we don't leave jobs outstanding waiting - * on kds resources which may never be released when contexts are zapped, resulting - * in a hang. - * - * Note that we can safely iterate over the list as the struct kbase_jd_context lock is held, - * this prevents items being removed when calling job_done_nolock in kbase_cancel_kds_wait_job. 
- */ - - list_for_each(entry, &kctx->waiting_kds_resource) { - katom = list_entry(entry, struct kbase_jd_atom, node); - - kbase_cancel_kds_wait_job(katom); - } -#endif - - mutex_unlock(&kctx->jctx.lock); - - kbase_jm_wait_for_zero_jobs(kctx); -} - -KBASE_EXPORT_TEST_API(kbase_jd_zap_context); - -int kbase_jd_init(struct kbase_context *kctx) -{ - int i; - int mali_err = 0; -#ifdef CONFIG_KDS - int err; -#endif /* CONFIG_KDS */ - - KBASE_DEBUG_ASSERT(kctx); - - kctx->jctx.job_done_wq = alloc_workqueue("mali_jd", 0, 1); - if (NULL == kctx->jctx.job_done_wq) { - mali_err = -ENOMEM; - goto out1; - } - - for (i = 0; i < BASE_JD_ATOM_COUNT; i++) { - init_waitqueue_head(&kctx->jctx.atoms[i].completed); - - INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[0]); - INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[1]); - - /* Catch userspace attempting to use an atom which doesn't exist as a pre-dependency */ - kctx->jctx.atoms[i].event_code = BASE_JD_EVENT_JOB_INVALID; - kctx->jctx.atoms[i].status = KBASE_JD_ATOM_STATE_UNUSED; - } - - mutex_init(&kctx->jctx.lock); - - init_waitqueue_head(&kctx->jctx.zero_jobs_wait); - - spin_lock_init(&kctx->jctx.tb_lock); - -#ifdef CONFIG_KDS - err = kds_callback_init(&kctx->jctx.kds_cb, 0, kds_dep_clear); - if (0 != err) { - mali_err = -EINVAL; - goto out2; - } -#endif /* CONFIG_KDS */ - - kctx->jctx.job_nr = 0; - INIT_LIST_HEAD(&kctx->completed_jobs); - atomic_set(&kctx->work_count, 0); - - return 0; - -#ifdef CONFIG_KDS - out2: - destroy_workqueue(kctx->jctx.job_done_wq); -#endif /* CONFIG_KDS */ - out1: - return mali_err; -} - -KBASE_EXPORT_TEST_API(kbase_jd_init); - -void kbase_jd_exit(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(kctx); - -#ifdef CONFIG_KDS - kds_callback_term(&kctx->jctx.kds_cb); -#endif /* CONFIG_KDS */ - /* Work queue is emptied by this */ - destroy_workqueue(kctx->jctx.job_done_wq); -} - -KBASE_EXPORT_TEST_API(kbase_jd_exit); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c 
b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c deleted file mode 100755 index b37f280a6475a..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c +++ /dev/null @@ -1,114 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#include - -#include - -#ifdef CONFIG_DEBUG_FS - -/** - * kbasep_jd_debugfs_atoms_show - Show callback for the JD atoms debugfs file. - * @sfile: The debugfs entry - * @data: Data associated with the entry - * - * This function is called to get the contents of the JD atoms debugfs file. 
- * This is a report of all atoms managed by kbase_jd_context.atoms - * - * Return: 0 if successfully prints data in debugfs entry file, failure - * otherwise - */ -static int kbasep_jd_debugfs_atoms_show(struct seq_file *sfile, void *data) -{ - struct kbase_context *kctx = sfile->private; - struct kbase_jd_atom *atoms; - unsigned long irq_flags; - int i; - - KBASE_DEBUG_ASSERT(kctx != NULL); - - /* Print table heading */ - seq_puts(sfile, "atom id,core reqs,status,coreref status,predeps,start time,time on gpu\n"); - - atoms = kctx->jctx.atoms; - /* General atom states */ - mutex_lock(&kctx->jctx.lock); - /* JS-related states */ - spin_lock_irqsave(&kctx->kbdev->js_data.runpool_irq.lock, irq_flags); - for (i = 0; i != BASE_JD_ATOM_COUNT; ++i) { - struct kbase_jd_atom *atom = &atoms[i]; - s64 start_timestamp = 0; - - if (atom->status == KBASE_JD_ATOM_STATE_UNUSED) - continue; - - /* start_timestamp is cleared as soon as the atom leaves UNUSED state - * and set before a job is submitted to the h/w, a non-zero value means - * it is valid */ - if (ktime_to_ns(atom->start_timestamp)) - start_timestamp = ktime_to_ns( - ktime_sub(ktime_get(), atom->start_timestamp)); - - seq_printf(sfile, - "%i,%u,%u,%u,%u %u,%lli,%llu\n", - i, atom->core_req, atom->status, atom->coreref_state, - (unsigned)(atom->dep[0].atom ? - atom->dep[0].atom - atoms : 0), - (unsigned)(atom->dep[1].atom ? - atom->dep[1].atom - atoms : 0), - (signed long long)start_timestamp, - (unsigned long long)(atom->time_spent_us ? 
- atom->time_spent_us * 1000 : start_timestamp) - ); - } - spin_unlock_irqrestore(&kctx->kbdev->js_data.runpool_irq.lock, irq_flags); - mutex_unlock(&kctx->jctx.lock); - - return 0; -} - - -/** - * kbasep_jd_debugfs_atoms_open - open operation for atom debugfs file - * @in: &struct inode pointer - * @file: &struct file pointer - * - * Return: file descriptor - */ -static int kbasep_jd_debugfs_atoms_open(struct inode *in, struct file *file) -{ - return single_open(file, kbasep_jd_debugfs_atoms_show, in->i_private); -} - -static const struct file_operations kbasep_jd_debugfs_atoms_fops = { - .open = kbasep_jd_debugfs_atoms_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -void kbasep_jd_debugfs_ctx_add(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(kctx != NULL); - - /* Expose all atoms */ - debugfs_create_file("atoms", S_IRUGO, kctx->kctx_dentry, kctx, - &kbasep_jd_debugfs_atoms_fops); - -} - -#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h deleted file mode 100755 index 703e4cf6a5f4e..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/** - * @file mali_kbase_jd_debugfs.h - * Header file for job dispatcher-related entries in debugfs - */ - -#ifndef _KBASE_JD_DEBUGFS_H -#define _KBASE_JD_DEBUGFS_H - -#include - -#include - -/** - * kbasep_jd_debugfs_ctx_add() - Add debugfs entries for JD system - * - * @kctx Pointer to kbase_context - */ -void kbasep_jd_debugfs_ctx_add(struct kbase_context *kctx); - -#endif /*_KBASE_JD_DEBUGFS_H*/ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jm.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jm.c deleted file mode 100755 index 63425322452b6..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jm.c +++ /dev/null @@ -1,130 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -/* - * HW access job manager common APIs - */ - -#include -#include "mali_kbase_hwaccess_jm.h" -#include "mali_kbase_jm.h" - -/** - * kbase_jm_next_job() - Attempt to run the next @nr_jobs_to_submit jobs on slot - * @js on the active context. - * @kbdev: Device pointer - * @js: Job slot to run on - * @nr_jobs_to_submit: Number of jobs to attempt to submit - * - * Return: true if slot can still be submitted on, false if slot is now full. 
- */ -static bool kbase_jm_next_job(struct kbase_device *kbdev, int js, - int nr_jobs_to_submit) -{ - struct kbase_context *kctx; - int i; - - kctx = kbdev->hwaccess.active_kctx; - - if (!kctx) - return true; - - for (i = 0; i < nr_jobs_to_submit; i++) { - struct kbase_jd_atom *katom = kbase_js_pull(kctx, js); - - if (!katom) - return true; /* Context has no jobs on this slot */ - - kbase_backend_run_atom(kbdev, katom); - } - - return false; /* Slot ringbuffer should now be full */ -} - -u32 kbase_jm_kick(struct kbase_device *kbdev, u32 js_mask) -{ - u32 ret_mask = 0; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - while (js_mask) { - int js = ffs(js_mask) - 1; - int nr_jobs_to_submit = kbase_backend_slot_free(kbdev, js); - - if (kbase_jm_next_job(kbdev, js, nr_jobs_to_submit)) - ret_mask |= (1 << js); - - js_mask &= ~(1 << js); - } - - return ret_mask; -} - -void kbase_jm_try_kick(struct kbase_device *kbdev, u32 js_mask) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - - lockdep_assert_held(&js_devdata->runpool_irq.lock); - - if (!down_trylock(&js_devdata->schedule_sem)) { - kbase_jm_kick(kbdev, js_mask); - up(&js_devdata->schedule_sem); - } -} - -void kbase_jm_try_kick_all(struct kbase_device *kbdev) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - - lockdep_assert_held(&js_devdata->runpool_irq.lock); - - if (!down_trylock(&js_devdata->schedule_sem)) { - kbase_jm_kick_all(kbdev); - up(&js_devdata->schedule_sem); - } -} - -void kbase_jm_idle_ctx(struct kbase_device *kbdev, struct kbase_context *kctx) -{ - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - if (kbdev->hwaccess.active_kctx == kctx) - kbdev->hwaccess.active_kctx = NULL; -} - -void kbase_jm_return_atom_to_js(struct kbase_device *kbdev, - struct kbase_jd_atom *katom) -{ - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - if (katom->event_code != BASE_JD_EVENT_STOPPED && - katom->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT) { - 
kbase_js_complete_atom(katom, NULL); - } else { - kbase_js_unpull(katom->kctx, katom); - } -} - -void kbase_jm_complete(struct kbase_device *kbdev, struct kbase_jd_atom *katom, - ktime_t *end_timestamp) -{ - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - kbase_js_complete_atom(katom, end_timestamp); -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jm.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jm.h deleted file mode 100755 index 27aca3a699f47..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_jm.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -/* - * Job manager common APIs - */ - -#ifndef _KBASE_JM_H_ -#define _KBASE_JM_H_ - -/** - * kbase_jm_kick() - Indicate that there are jobs ready to run. - * @kbdev: Device pointer - * @js_mask: Mask of the job slots that can be pulled from. - * - * Caller must hold the runpool_irq lock and schedule_sem semaphore - * - * Return: Mask of the job slots that can still be submitted to. - */ -u32 kbase_jm_kick(struct kbase_device *kbdev, u32 js_mask); - -/** - * kbase_jm_kick_all() - Indicate that there are jobs ready to run on all job - * slots. - * @kbdev: Device pointer - * - * Caller must hold the runpool_irq lock and schedule_sem semaphore - * - * Return: Mask of the job slots that can still be submitted to. 
- */ -static inline u32 kbase_jm_kick_all(struct kbase_device *kbdev) -{ - return kbase_jm_kick(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1); -} - -/** - * kbase_jm_try_kick - Attempt to call kbase_jm_kick - * @kbdev: Device pointer - * @js_mask: Mask of the job slots that can be pulled from - * Context: Caller must hold runpool_irq lock - * - * If schedule_sem can be immediately obtained then this function will call - * kbase_jm_kick() otherwise it will do nothing. - */ -void kbase_jm_try_kick(struct kbase_device *kbdev, u32 js_mask); - -/** - * kbase_jm_try_kick_all() - Attempt to call kbase_jm_kick_all - * @kbdev: Device pointer - * Context: Caller must hold runpool_irq lock - * - * If schedule_sem can be immediately obtained then this function will call - * kbase_jm_kick_all() otherwise it will do nothing. - */ -void kbase_jm_try_kick_all(struct kbase_device *kbdev); - -/** - * kbase_jm_idle_ctx() - Mark a context as idle. - * @kbdev: Device pointer - * @kctx: Context to mark as idle - * - * No more atoms will be pulled from this context until it is marked as active - * by kbase_js_use_ctx(). - * - * The context should have no atoms currently pulled from it - * (kctx->atoms_pulled == 0). 
- * - * Caller must hold the runpool_irq lock - */ -void kbase_jm_idle_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); - -/** - * kbase_jm_return_atom_to_js() - Return an atom to the job scheduler that has - * been soft-stopped or will fail due to a - * dependency - * @kbdev: Device pointer - * @katom: Atom that has been stopped or will be failed - */ -void kbase_jm_return_atom_to_js(struct kbase_device *kbdev, - struct kbase_jd_atom *katom); - -/** - * kbase_jm_complete() - Complete an atom - * @kbdev: Device pointer - * @katom: Atom that has completed - * @end_timestamp: Timestamp of atom completion - */ -void kbase_jm_complete(struct kbase_device *kbdev, struct kbase_jd_atom *katom, - ktime_t *end_timestamp); - -#endif /* _KBASE_JM_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js.c deleted file mode 100755 index 54b8d9bcd1e19..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js.c +++ /dev/null @@ -1,3164 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/* - * Job Scheduler Implementation - */ -#include -#include -#if defined(CONFIG_MALI_GATOR_SUPPORT) -#include -#endif -#if defined(CONFIG_MALI_MIPE_ENABLED) -#include -#endif -#include - -#include -#include - -#include "mali_kbase_jm.h" -#include "mali_kbase_hwaccess_jm.h" - -/* - * Private types - */ - -/* Bitpattern indicating the result of releasing a context */ -enum { - /* The context was descheduled - caller should try scheduling in a new - * one to keep the runpool full */ - KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED = (1u << 0), - /* Ctx attributes were changed - caller should try scheduling all - * contexts */ - KBASEP_JS_RELEASE_RESULT_SCHED_ALL = (1u << 1) -}; - -typedef u32 kbasep_js_release_result; - -const int kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS] = { - KBASE_JS_ATOM_SCHED_PRIO_MED, /* BASE_JD_PRIO_MEDIUM */ - KBASE_JS_ATOM_SCHED_PRIO_HIGH, /* BASE_JD_PRIO_HIGH */ - KBASE_JS_ATOM_SCHED_PRIO_LOW /* BASE_JD_PRIO_LOW */ -}; - -const base_jd_prio -kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT] = { - BASE_JD_PRIO_HIGH, /* KBASE_JS_ATOM_SCHED_PRIO_HIGH */ - BASE_JD_PRIO_MEDIUM, /* KBASE_JS_ATOM_SCHED_PRIO_MED */ - BASE_JD_PRIO_LOW /* KBASE_JS_ATOM_SCHED_PRIO_LOW */ -}; - - -/* - * Private function prototypes - */ -static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal( - struct kbase_device *kbdev, struct kbase_context *kctx, - struct kbasep_js_atom_retained_state *katom_retained_state); - -static int kbase_js_get_slot(struct kbase_device *kbdev, - struct kbase_jd_atom *katom); - -static void kbase_js_foreach_ctx_job(struct kbase_context *kctx, - kbasep_js_policy_ctx_job_cb callback); - -static bool kbase_js_evict_atom(struct kbase_context *kctx, - struct kbase_jd_atom *katom_evict, - struct kbase_jd_atom *start_katom, - struct kbase_jd_atom *head_katom, - struct list_head *evict_list, - struct jsctx_rb *rb, int idx); - -/* Helper for trace subcodes */ -#if KBASE_TRACE_ENABLE 
-static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - unsigned long flags; - struct kbasep_js_device_data *js_devdata; - int as_nr; - int refcnt = 0; - - js_devdata = &kbdev->js_data; - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - as_nr = kctx->as_nr; - if (as_nr != KBASEP_AS_NR_INVALID) { - struct kbasep_js_per_as_data *js_per_as_data; - - js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr]; - - refcnt = js_per_as_data->as_busy_refcount; - } - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - return refcnt; -} - -static int kbasep_js_trace_get_refcnt_nolock(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_device_data *js_devdata; - int as_nr; - int refcnt = 0; - - js_devdata = &kbdev->js_data; - - as_nr = kctx->as_nr; - if (as_nr != KBASEP_AS_NR_INVALID) { - struct kbasep_js_per_as_data *js_per_as_data; - - js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr]; - - refcnt = js_per_as_data->as_busy_refcount; - } - - return refcnt; -} -#else /* KBASE_TRACE_ENABLE */ -static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - CSTD_UNUSED(kbdev); - CSTD_UNUSED(kctx); - return 0; -} -static int kbasep_js_trace_get_refcnt_nolock(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - CSTD_UNUSED(kbdev); - CSTD_UNUSED(kctx); - return 0; -} -#endif /* KBASE_TRACE_ENABLE */ - -/* - * Private types - */ -enum { - JS_DEVDATA_INIT_NONE = 0, - JS_DEVDATA_INIT_CONSTANTS = (1 << 0), - JS_DEVDATA_INIT_POLICY = (1 << 1), - JS_DEVDATA_INIT_ALL = ((1 << 2) - 1) -}; - -enum { - JS_KCTX_INIT_NONE = 0, - JS_KCTX_INIT_CONSTANTS = (1 << 0), - JS_KCTX_INIT_POLICY = (1 << 1), - JS_KCTX_INIT_ALL = ((1 << 2) - 1) -}; - -/* - * Private functions - */ - -/** - * core_reqs_from_jsn_features - Convert JSn_FEATURES to core requirements - * @features: JSn_FEATURE register value - * - * Given a JSn_FEATURE register value 
returns the core requirements that match - * - * Return: Core requirement bit mask - */ -static base_jd_core_req core_reqs_from_jsn_features(u16 features) -{ - base_jd_core_req core_req = 0u; - - if ((features & JS_FEATURE_SET_VALUE_JOB) != 0) - core_req |= BASE_JD_REQ_V; - - if ((features & JS_FEATURE_CACHE_FLUSH_JOB) != 0) - core_req |= BASE_JD_REQ_CF; - - if ((features & JS_FEATURE_COMPUTE_JOB) != 0) - core_req |= BASE_JD_REQ_CS; - - if ((features & JS_FEATURE_TILER_JOB) != 0) - core_req |= BASE_JD_REQ_T; - - if ((features & JS_FEATURE_FRAGMENT_JOB) != 0) - core_req |= BASE_JD_REQ_FS; - - return core_req; -} - -static void kbase_js_sync_timers(struct kbase_device *kbdev) -{ - mutex_lock(&kbdev->js_data.runpool_mutex); - kbase_backend_ctx_count_changed(kbdev); - mutex_unlock(&kbdev->js_data.runpool_mutex); -} - -/* Hold the kbasep_js_device_data::runpool_irq::lock for this */ -bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_device_data *js_devdata; - struct kbasep_js_per_as_data *js_per_as_data; - bool result = false; - int as_nr; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - js_devdata = &kbdev->js_data; - - as_nr = kctx->as_nr; - if (as_nr != KBASEP_AS_NR_INVALID) { - int new_refcnt; - - KBASE_DEBUG_ASSERT(as_nr >= 0); - js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr]; - - KBASE_DEBUG_ASSERT(js_per_as_data->kctx != NULL); - - new_refcnt = ++(js_per_as_data->as_busy_refcount); - - KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RETAIN_CTX_NOLOCK, kctx, - NULL, 0u, new_refcnt); - result = true; - } - - return result; -} - -/** - * jsctx_rb_is_empty_prio(): - Check if ring buffer is empty - * @kctx: Pointer to kbase context with ring buffer. - * @js: Job slot id to check. - * @prio: Priority to check. - * - * Caller must hold runpool_irq.lock - * - * Return: true if the ring buffer is empty, false otherwise. 
- */ -static inline bool -jsctx_rb_is_empty_prio(struct kbase_context *kctx, int js, int prio) -{ - struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js]; - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - - return rb->running_idx == rb->write_idx; -} - -/** - * jsctx_rb_none_to_pull_prio(): - Check if there are no pullable atoms - * @kctx: Pointer to kbase context with ring buffer. - * @js: Job slot id to check. - * @prio: Priority to check. - * - * Return true if there are no atoms to pull. There may be running atoms in the - * ring buffer even if there are no atoms to pull. It is also possible for the - * ring buffer to be full (with running atoms) when this functions returns - * true. - * - * Caller must hold runpool_irq.lock - * - * Return: true if there are no atoms to pull, false otherwise. - */ -static inline bool -jsctx_rb_none_to_pull_prio(struct kbase_context *kctx, int js, int prio) -{ - struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js]; - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - - return rb->read_idx == rb->write_idx; -} - -/** - * jsctx_rb_none_to_pull(): - Check if all priority ring buffers have no - * pullable atoms - * @kctx: Pointer to kbase context with ring buffer. - * @js: Job slot id to check. - * - * Caller must hold runpool_irq.lock - * - * Return: true if the ring buffers for all priorities have no pullable atoms, - * false otherwise. - */ -static inline bool -jsctx_rb_none_to_pull(struct kbase_context *kctx, int js) -{ - int prio; - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - - for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) { - if (!jsctx_rb_none_to_pull_prio(kctx, js, prio)) - return false; - } - - return true; -} - -/** - * jsctx_rb_compact_prio(): - Compact a ring buffer - * @kctx: Pointer to kbase context with ring buffer. - * @js: Job slot id to compact. - * @prio: Priority id to compact. 
- */ -static inline void -jsctx_rb_compact_prio(struct kbase_context *kctx, int js, int prio) -{ - struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js]; - u16 compact_idx = rb->write_idx - 1; - u16 end_idx = rb->running_idx - 1; - u16 i; - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - lockdep_assert_held(&kctx->jctx.lock); - - for (i = compact_idx; i != end_idx; i--) { - if (rb->entries[i & JSCTX_RB_MASK].atom_id != - KBASEP_ATOM_ID_INVALID) { - WARN_ON(compact_idx < rb->running_idx); - rb->entries[compact_idx & JSCTX_RB_MASK].atom_id = - rb->entries[i & JSCTX_RB_MASK].atom_id; - - compact_idx--; - } - if (rb->read_idx == i) - rb->read_idx = compact_idx + 1; - } - - rb->running_idx = compact_idx + 1; -} - -/** - * jsctx_rb_compact(): - Compact all priority ring buffers - * @kctx: Pointer to kbase context with ring buffer. - * @js: Job slot id to compact. - */ -static inline void -jsctx_rb_compact(struct kbase_context *kctx, int js) -{ - int prio; - - for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) - jsctx_rb_compact_prio(kctx, js, prio); -} - -/** - * jsctx_rb_foreach_prio(): - Execute callback for each entry in ring buffer - * @kctx: Pointer to kbase context with ring buffer. - * @js: Job slot id to iterate. - * @prio: Priority id to iterate. - * @callback: Function pointer to callback. - * - * Iterate over a ring buffer and invoke @callback for each entry in buffer, and - * remove the entry from the buffer. - * - * If entries are added to the ring buffer while this is running those entries - * may, or may not be covered. To ensure that all entries in the buffer have - * been enumerated when this function returns jsctx->lock must be held when - * calling this function. - * - * The HW access lock, js_data.runpool_irq.lock, must always be held when - * calling this function. 
- */ -static void -jsctx_rb_foreach_prio(struct kbase_context *kctx, int js, int prio, - kbasep_js_policy_ctx_job_cb callback) -{ - struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js]; - struct kbase_jd_atom *katom; - u16 write_idx = ACCESS_ONCE(rb->write_idx); - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - - /* There must be no jobs currently in HW access */ - WARN_ON(rb->read_idx != rb->running_idx); - - /* Invoke callback on all kbase_jd_atoms in the ring buffer, and - * removes them from the buffer */ - while (rb->read_idx != write_idx) { - int id = rb->entries[rb->read_idx & JSCTX_RB_MASK].atom_id; - - katom = kbase_jd_atom_from_id(kctx, id); - - rb->read_idx++; - rb->running_idx++; - - callback(kctx->kbdev, katom); - } -} - -/** - * jsctx_rb_foreach(): - Execute callback for each entry in all priority rb - * @kctx: Pointer to kbase context with ring buffer. - * @js: Job slot id to iterate. - * @callback: Function pointer to callback. - * - * Iterate over all the different priorities, and for each call - * jsctx_rb_foreach_prio() to iterate over the ring buffer and invoke @callback - * for each entry in buffer, and remove the entry from the buffer. - */ -static inline void -jsctx_rb_foreach(struct kbase_context *kctx, int js, - kbasep_js_policy_ctx_job_cb callback) -{ - int prio; - - for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) - jsctx_rb_foreach_prio(kctx, js, prio, callback); -} - -/** - * jsctx_rb_peek_prio(): - Check buffer and get next atom - * @kctx: Pointer to kbase context with ring buffer. - * @js: Job slot id to check. - * @prio: Priority id to check. - * - * Check the ring buffer for the specified @js and @prio and return a pointer to - * the next atom, unless the ring buffer is empty. - * - * Return: Pointer to next atom in buffer, or NULL if there is no atom. 
- */ -static inline struct kbase_jd_atom * -jsctx_rb_peek_prio(struct kbase_context *kctx, int js, int prio) -{ - struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js]; - int id; - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - - if (jsctx_rb_none_to_pull_prio(kctx, js, prio)) - return NULL; - - id = rb->entries[rb->read_idx & JSCTX_RB_MASK].atom_id; - return kbase_jd_atom_from_id(kctx, id); -} - -/** - * jsctx_rb_peek(): - Check all priority buffers and get next atom - * @kctx: Pointer to kbase context with ring buffer. - * @js: Job slot id to check. - * - * Check the ring buffers for all priorities, starting from - * KBASE_JS_ATOM_SCHED_PRIO_HIGH, for the specified @js and @prio and return a - * pointer to the next atom, unless all the priority's ring buffers are empty. - * - * Return: Pointer to next atom in buffer, or NULL if there is no atom. - */ -static inline struct kbase_jd_atom * -jsctx_rb_peek(struct kbase_context *kctx, int js) -{ - int prio; - - for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) { - struct kbase_jd_atom *katom; - - katom = jsctx_rb_peek_prio(kctx, js, prio); - if (katom) - return katom; - } - - return NULL; -} - -/** - * jsctx_rb_peek_last(): - Check a ring buffer and get the last atom - * @kctx: Pointer to kbase context with ring buffer. - * @js: Job slot id to check. - * @prio: Priority id to check. - * - * Check the ring buffer for the specified @js and @prio and return a - * pointer to the last atom, unless all the priority's ring buffers are empty. - * - * The last atom is the atom that was added using jsctx_rb_add() most recently. - * - * Return: Pointer to last atom in buffer, or NULL if there is no atom. 
- */ -static inline struct kbase_jd_atom * -jsctx_rb_peek_last(struct kbase_context *kctx, int js, int prio) -{ - struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js]; - int id; - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - lockdep_assert_held(&kctx->jctx.lock); - - if (jsctx_rb_is_empty_prio(kctx, js, prio)) - return NULL; - - id = rb->entries[(rb->write_idx - 1) & JSCTX_RB_MASK].atom_id; - return kbase_jd_atom_from_id(kctx, id); -} - -/** - * jsctx_rb_pull(): - Mark atom in list as running - * @kctx: Pointer to kbase context with ring buffer. - * @katom: Pointer to katom to pull. - * - * Mark an atom previously obtained from jsctx_rb_peek() as running. - * - * @katom must currently be at the head of the ring buffer. - */ -static inline void -jsctx_rb_pull(struct kbase_context *kctx, struct kbase_jd_atom *katom) -{ - int prio = katom->sched_priority; - int js = katom->slot_nr; - struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js]; - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - - /* Atoms must be pulled in the correct order. */ - WARN_ON(katom != jsctx_rb_peek_prio(kctx, js, prio)); - - rb->read_idx++; -} - -/** - * jsctx_rb_unpull(): - Undo marking of atom in list as running - * @kctx: Pointer to kbase context with ring buffer. - * @katom: Pointer to katom to unpull. - * - * Undo jsctx_rb_pull() and put @katom back in the queue. - * - * jsctx_rb_unpull() must be called on atoms in the same order the atoms were - * pulled. - */ -static inline void -jsctx_rb_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom) -{ - int prio = katom->sched_priority; - int js = katom->slot_nr; - struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js]; - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - - /* Atoms must be unpulled in correct order. 
*/ - WARN_ON(rb->entries[(rb->read_idx - 1) & JSCTX_RB_MASK].atom_id != - kbase_jd_atom_id(kctx, katom)); - - rb->read_idx--; -} - -/** - * jsctx_rb_add(): - Add atom to ring buffer - * @kctx: Pointer to kbase context with ring buffer. - * @katom: Pointer to katom to add. - * - * Add @katom to the ring buffer determined by the atom's priority and job slot - * number. - * - * If the ring buffer is full -EBUSY will be returned. - * - * Return: On success 0 is returned, on failure a negative error code. - */ -static int -jsctx_rb_add_atom(struct kbase_context *kctx, struct kbase_jd_atom *katom) -{ - int prio = katom->sched_priority; - int js = katom->slot_nr; - struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js]; - - lockdep_assert_held(&kctx->jctx.lock); - - /* Check if the ring buffer is full */ - if ((rb->write_idx - rb->running_idx) >= JSCTX_RB_SIZE) - return -EBUSY; - - rb->entries[rb->write_idx & JSCTX_RB_MASK].atom_id = - kbase_jd_atom_id(kctx, katom); - rb->write_idx++; - - return 0; -} - -/** - * jsctx_rb_remove(): - Remove atom from ring buffer - * @kctx: Pointer to kbase context with ring buffer. - * @katom: Pointer to katom to remove. - * - * Remove @katom from the ring buffer. - * - * @katom must have been pulled from the buffer earlier by jsctx_rb_pull(), and - * atoms must be removed in the same order they were pulled from the ring - * buffer. - */ -static inline void -jsctx_rb_remove(struct kbase_context *kctx, struct kbase_jd_atom *katom) -{ - int prio = katom->sched_priority; - int js = katom->slot_nr; - struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js]; - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - - /* Atoms must be completed in order. */ - WARN_ON(rb->entries[rb->running_idx & JSCTX_RB_MASK].atom_id != - kbase_jd_atom_id(kctx, katom)); - - rb->running_idx++; -} - -/** - * jsctx_rb_evict(): - Evict atom, and dependents, from ring buffer - * @kctx: Pointer to kbase context with ring buffer. 
- * @start_katom: Pointer to the first katom to evict. - * @head_katom: Pointer to head katom. - * @evict_list: Pointer to head of list where evicted atoms are added. - * - * Iterate over the ring buffer starting at @start_katom and evict @start_atom - * and dependent atoms in ring buffer. - * - * @evict_list and @head_katom is passed on to kbase_js_evict_atom() which will - * examine the atom dependencies. - * - * jsctx_rb_evict() is only called by kbase_js_evict_deps(). - */ -static void -jsctx_rb_evict(struct kbase_context *kctx, - struct kbase_jd_atom *start_katom, - struct kbase_jd_atom *head_katom, - struct list_head *evict_list) -{ - int prio = start_katom->sched_priority; - int js = start_katom->slot_nr; - struct jsctx_rb *rb = &kctx->jsctx_rb[prio][js]; - bool atom_in_rb = false; - u16 i, start_idx; - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - lockdep_assert_held(&kctx->jctx.lock); - - for (i = rb->running_idx; i != rb->write_idx; i++) { - if (rb->entries[i & JSCTX_RB_MASK].atom_id == - kbase_jd_atom_id(kctx, start_katom)) { - start_idx = i; - atom_in_rb = true; - break; - } - } - - /* start_katom must still be in ring buffer. */ - if (i == rb->write_idx || !atom_in_rb) - return; - - /* Evict all dependencies on same slot. 
*/ - for (i = start_idx; i != rb->write_idx; i++) { - u8 katom_evict; - - katom_evict = rb->entries[i & JSCTX_RB_MASK].atom_id; - if (katom_evict != KBASEP_ATOM_ID_INVALID) { - if (!kbase_js_evict_atom(kctx, - &kctx->jctx.atoms[katom_evict], - start_katom, head_katom, - evict_list, rb, i)) - break; - } - } -} - -/* - * Functions private to KBase ('Protected' functions) - */ -int kbasep_js_devdata_init(struct kbase_device * const kbdev) -{ - struct kbasep_js_device_data *jsdd; - int err; - int i; - u16 as_present; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - jsdd = &kbdev->js_data; - - KBASE_DEBUG_ASSERT(jsdd->init_status == JS_DEVDATA_INIT_NONE); - - /* These two must be recalculated if nr_hw_address_spaces changes - * (e.g. for HW workarounds) */ - as_present = (1U << kbdev->nr_hw_address_spaces) - 1; - kbdev->nr_user_address_spaces = kbdev->nr_hw_address_spaces; - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) { - bool use_workaround; - - use_workaround = DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE; - if (use_workaround) { - dev_dbg(kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only"); - kbdev->nr_user_address_spaces = 1; - } - } -#ifdef CONFIG_MALI_DEBUG - /* Soft-stop will be disabled on a single context by default unless - * softstop_always is set */ - jsdd->softstop_always = false; -#endif /* CONFIG_MALI_DEBUG */ - jsdd->nr_all_contexts_running = 0; - jsdd->nr_user_contexts_running = 0; - jsdd->nr_contexts_pullable = 0; - atomic_set(&jsdd->nr_contexts_runnable, 0); - /* All ASs initially free */ - jsdd->as_free = as_present; - /* No ctx allowed to submit */ - jsdd->runpool_irq.submit_allowed = 0u; - memset(jsdd->runpool_irq.ctx_attr_ref_count, 0, - sizeof(jsdd->runpool_irq.ctx_attr_ref_count)); - memset(jsdd->runpool_irq.slot_affinities, 0, - sizeof(jsdd->runpool_irq.slot_affinities)); - memset(jsdd->runpool_irq.slot_affinity_refcount, 0, - sizeof(jsdd->runpool_irq.slot_affinity_refcount)); - 
INIT_LIST_HEAD(&jsdd->suspended_soft_jobs_list); - - /* Config attributes */ - jsdd->scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS; - jsdd->soft_stop_ticks = DEFAULT_JS_SOFT_STOP_TICKS; - jsdd->soft_stop_ticks_cl = DEFAULT_JS_SOFT_STOP_TICKS_CL; - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) - jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS_8408; - else - jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS; - jsdd->hard_stop_ticks_cl = DEFAULT_JS_HARD_STOP_TICKS_CL; - jsdd->hard_stop_ticks_dumping = DEFAULT_JS_HARD_STOP_TICKS_DUMPING; - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) - jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS_8408; - else - jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS; - jsdd->gpu_reset_ticks_cl = DEFAULT_JS_RESET_TICKS_CL; - jsdd->gpu_reset_ticks_dumping = DEFAULT_JS_RESET_TICKS_DUMPING; - jsdd->ctx_timeslice_ns = DEFAULT_JS_CTX_TIMESLICE_NS; - jsdd->cfs_ctx_runtime_init_slices = - DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES; - jsdd->cfs_ctx_runtime_min_slices = - DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES; - - dev_dbg(kbdev->dev, "JS Config Attribs: "); - dev_dbg(kbdev->dev, "\tscheduling_period_ns:%u", - jsdd->scheduling_period_ns); - dev_dbg(kbdev->dev, "\tsoft_stop_ticks:%u", - jsdd->soft_stop_ticks); - dev_dbg(kbdev->dev, "\tsoft_stop_ticks_cl:%u", - jsdd->soft_stop_ticks_cl); - dev_dbg(kbdev->dev, "\thard_stop_ticks_ss:%u", - jsdd->hard_stop_ticks_ss); - dev_dbg(kbdev->dev, "\thard_stop_ticks_cl:%u", - jsdd->hard_stop_ticks_cl); - dev_dbg(kbdev->dev, "\thard_stop_ticks_dumping:%u", - jsdd->hard_stop_ticks_dumping); - dev_dbg(kbdev->dev, "\tgpu_reset_ticks_ss:%u", - jsdd->gpu_reset_ticks_ss); - dev_dbg(kbdev->dev, "\tgpu_reset_ticks_cl:%u", - jsdd->gpu_reset_ticks_cl); - dev_dbg(kbdev->dev, "\tgpu_reset_ticks_dumping:%u", - jsdd->gpu_reset_ticks_dumping); - dev_dbg(kbdev->dev, "\tctx_timeslice_ns:%u", - jsdd->ctx_timeslice_ns); - dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_init_slices:%u", - 
jsdd->cfs_ctx_runtime_init_slices); - dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_min_slices:%u", - jsdd->cfs_ctx_runtime_min_slices); - - if (!(jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_ss && - jsdd->hard_stop_ticks_ss < jsdd->gpu_reset_ticks_ss && - jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_dumping && - jsdd->hard_stop_ticks_dumping < - jsdd->gpu_reset_ticks_dumping)) { - dev_err(kbdev->dev, "Job scheduler timeouts invalid; soft/hard/reset tick counts should be in increasing order\n"); - return -EINVAL; - } - -#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS - dev_dbg(kbdev->dev, "Job Scheduling Policy Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.", - jsdd->soft_stop_ticks, - jsdd->scheduling_period_ns); -#endif -#if KBASE_DISABLE_SCHEDULING_HARD_STOPS - dev_dbg(kbdev->dev, "Job Scheduling Policy Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_dumping==%u at %uns per tick. Other hard-stops may still occur.", - jsdd->hard_stop_ticks_ss, - jsdd->hard_stop_ticks_dumping, - jsdd->scheduling_period_ns); -#endif -#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS && KBASE_DISABLE_SCHEDULING_HARD_STOPS - dev_dbg(kbdev->dev, "Note: The JS policy's tick timer (if coded) will still be run, but do nothing."); -#endif - - /* setup the number of irq throttle cycles base on given time */ - { - int time_us = kbdev->gpu_props.irq_throttle_time_us; - int cycles = kbasep_js_convert_us_to_gpu_ticks_max_freq(kbdev, - time_us); - - atomic_set(&kbdev->irq_throttle_cycles, cycles); - } - - /* Clear the AS data, including setting NULL pointers */ - memset(&jsdd->runpool_irq.per_as_data[0], 0, - sizeof(jsdd->runpool_irq.per_as_data)); - - for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) - jsdd->js_reqs[i] = core_reqs_from_jsn_features( - kbdev->gpu_props.props.raw_props.js_features[i]); - - jsdd->init_status |= JS_DEVDATA_INIT_CONSTANTS; - - /* On error, we could continue on: providing none of the 
below resources - * rely on the ones above */ - - mutex_init(&jsdd->runpool_mutex); - mutex_init(&jsdd->queue_mutex); - spin_lock_init(&jsdd->runpool_irq.lock); - sema_init(&jsdd->schedule_sem, 1); - - err = kbasep_js_policy_init(kbdev); - if (!err) - jsdd->init_status |= JS_DEVDATA_INIT_POLICY; - - for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) { - INIT_LIST_HEAD(&jsdd->ctx_list_pullable[i]); - INIT_LIST_HEAD(&jsdd->ctx_list_unpullable[i]); - } - - /* On error, do no cleanup; this will be handled by the caller(s), since - * we've designed this resource to be safe to terminate on init-fail */ - if (jsdd->init_status != JS_DEVDATA_INIT_ALL) - return -EINVAL; - - return 0; -} - -void kbasep_js_devdata_halt(struct kbase_device *kbdev) -{ - CSTD_UNUSED(kbdev); -} - -void kbasep_js_devdata_term(struct kbase_device *kbdev) -{ - struct kbasep_js_device_data *js_devdata; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - js_devdata = &kbdev->js_data; - - if ((js_devdata->init_status & JS_DEVDATA_INIT_CONSTANTS)) { - s8 zero_ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT] = { 0, }; - /* The caller must de-register all contexts before calling this - */ - KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running == 0); - KBASE_DEBUG_ASSERT(memcmp( - js_devdata->runpool_irq.ctx_attr_ref_count, - zero_ctx_attr_ref_count, - sizeof(zero_ctx_attr_ref_count)) == 0); - CSTD_UNUSED(zero_ctx_attr_ref_count); - } - if ((js_devdata->init_status & JS_DEVDATA_INIT_POLICY)) - kbasep_js_policy_term(&js_devdata->policy); - - js_devdata->init_status = JS_DEVDATA_INIT_NONE; -} - -int kbasep_js_kctx_init(struct kbase_context * const kctx) -{ - struct kbase_device *kbdev; - struct kbasep_js_kctx_info *js_kctx_info; - int err; - int i; - - KBASE_DEBUG_ASSERT(kctx != NULL); - - kbdev = kctx->kbdev; - KBASE_DEBUG_ASSERT(kbdev != NULL); - - for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i) - INIT_LIST_HEAD(&kctx->jctx.sched_info.ctx.ctx_list_entry[i]); - - js_kctx_info = &kctx->jctx.sched_info; - 
KBASE_DEBUG_ASSERT(js_kctx_info->init_status == JS_KCTX_INIT_NONE); - - js_kctx_info->ctx.nr_jobs = 0; - js_kctx_info->ctx.is_scheduled = false; - js_kctx_info->ctx.is_dying = false; - memset(js_kctx_info->ctx.ctx_attr_ref_count, 0, - sizeof(js_kctx_info->ctx.ctx_attr_ref_count)); - - /* Initially, the context is disabled from submission until the create - * flags are set */ - js_kctx_info->ctx.flags = KBASE_CTX_FLAG_SUBMIT_DISABLED; - - js_kctx_info->init_status |= JS_KCTX_INIT_CONSTANTS; - - /* On error, we could continue on: providing none of the below resources - * rely on the ones above */ - mutex_init(&js_kctx_info->ctx.jsctx_mutex); - - init_waitqueue_head(&js_kctx_info->ctx.is_scheduled_wait); - - err = kbasep_js_policy_init_ctx(kbdev, kctx); - if (!err) - js_kctx_info->init_status |= JS_KCTX_INIT_POLICY; - - /* On error, do no cleanup; this will be handled by the caller(s), since - * we've designed this resource to be safe to terminate on init-fail */ - if (js_kctx_info->init_status != JS_KCTX_INIT_ALL) - return -EINVAL; - - return 0; -} - -void kbasep_js_kctx_term(struct kbase_context *kctx) -{ - struct kbase_device *kbdev; - struct kbasep_js_kctx_info *js_kctx_info; - union kbasep_js_policy *js_policy; - int js; - - KBASE_DEBUG_ASSERT(kctx != NULL); - - kbdev = kctx->kbdev; - KBASE_DEBUG_ASSERT(kbdev != NULL); - - js_policy = &kbdev->js_data.policy; - js_kctx_info = &kctx->jctx.sched_info; - - if ((js_kctx_info->init_status & JS_KCTX_INIT_CONSTANTS)) { - /* The caller must de-register all jobs before calling this */ - KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled); - KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs == 0); - } - - mutex_lock(&kbdev->js_data.queue_mutex); - for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) - list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]); - mutex_unlock(&kbdev->js_data.queue_mutex); - - if ((js_kctx_info->init_status & JS_KCTX_INIT_POLICY)) - kbasep_js_policy_term_ctx(js_policy, kctx); - - 
js_kctx_info->init_status = JS_KCTX_INIT_NONE; -} - -/** - * kbase_js_ctx_list_add_pullable - Add context to the tail of the per-slot - * pullable context queue - * @kbdev: Device pointer - * @kctx: Context to add to queue - * @js: Job slot to use - * - * If the context is on either the pullable or unpullable queues, then it is - * removed before being added to the tail. - * - * This function should be used when queueing a context for the first time, or - * re-queueing a context that has been pulled from. - * - * Caller must hold kbasep_jd_device_data.queue_mutex - * - * Return: true if caller should call kbase_backend_ctx_count_changed() - */ -static bool kbase_js_ctx_list_add_pullable(struct kbase_device *kbdev, - struct kbase_context *kctx, - int js) -{ - bool ret = false; - - lockdep_assert_held(&kbdev->js_data.queue_mutex); - lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex); - - if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js])) - list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]); - - list_add_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js], - &kbdev->js_data.ctx_list_pullable[js]); - - if (!kctx->slots_pullable) { - kbdev->js_data.nr_contexts_pullable++; - ret = true; - if (!atomic_read(&kctx->atoms_pulled)) - atomic_inc(&kbdev->js_data.nr_contexts_runnable); - } - kctx->slots_pullable |= (1 << js); - - return ret; -} - -/** - * kbase_js_ctx_list_add_pullable_head - Add context to the head of the - * per-slot pullable context queue - * @kbdev: Device pointer - * @kctx: Context to add to queue - * @js: Job slot to use - * - * If the context is on either the pullable or unpullable queues, then it is - * removed before being added to the head. - * - * This function should be used when a context has been scheduled, but no jobs - * can currently be pulled from it. 
- * - * Caller must hold kbasep_jd_device_data.queue_mutex - * - * Return: true if caller should call kbase_backend_ctx_count_changed() - */ -static bool kbase_js_ctx_list_add_pullable_head(struct kbase_device *kbdev, - struct kbase_context *kctx, - int js) -{ - bool ret = false; - - lockdep_assert_held(&kbdev->js_data.queue_mutex); - lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex); - - if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js])) - list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]); - - list_add(&kctx->jctx.sched_info.ctx.ctx_list_entry[js], - &kbdev->js_data.ctx_list_pullable[js]); - - if (!kctx->slots_pullable) { - kbdev->js_data.nr_contexts_pullable++; - ret = true; - if (!atomic_read(&kctx->atoms_pulled)) - atomic_inc(&kbdev->js_data.nr_contexts_runnable); - } - kctx->slots_pullable |= (1 << js); - - return ret; -} - -/** - * kbase_js_ctx_list_add_unpullable - Add context to the tail of the per-slot - * unpullable context queue - * @kbdev: Device pointer - * @kctx: Context to add to queue - * @js: Job slot to use - * - * The context must already be on the per-slot pullable queue. It will be - * removed from the pullable queue before being added to the unpullable queue. - * - * This function should be used when a context has been pulled from, and there - * are no jobs remaining on the specified slot. 
- * - * Caller must hold kbasep_jd_device_data.queue_mutex - * - * Return: true if caller should call kbase_backend_ctx_count_changed() - */ -static bool kbase_js_ctx_list_add_unpullable(struct kbase_device *kbdev, - struct kbase_context *kctx, - int js) -{ - bool ret = false; - - lockdep_assert_held(&kbdev->js_data.queue_mutex); - lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex); - - list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js], - &kbdev->js_data.ctx_list_unpullable[js]); - - if (kctx->slots_pullable == (1 << js)) { - kbdev->js_data.nr_contexts_pullable--; - ret = true; - if (!atomic_read(&kctx->atoms_pulled)) - atomic_dec(&kbdev->js_data.nr_contexts_runnable); - } - kctx->slots_pullable &= ~(1 << js); - - return ret; -} - -/** - * kbase_js_ctx_list_remove - Remove context from the per-slot pullable or - * unpullable context queues - * @kbdev: Device pointer - * @kctx: Context to remove from queue - * @js: Job slot to use - * - * The context must already be on one of the queues. - * - * This function should be used when a context has no jobs on the GPU, and no - * jobs remaining for the specified slot. 
- * - * Caller must hold kbasep_jd_device_data.queue_mutex - * - * Return: true if caller should call kbase_backend_ctx_count_changed() - */ -static bool kbase_js_ctx_list_remove(struct kbase_device *kbdev, - struct kbase_context *kctx, - int js) -{ - bool ret = false; - - lockdep_assert_held(&kbdev->js_data.queue_mutex); - lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex); - - WARN_ON(list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js])); - - list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]); - - if (kctx->slots_pullable == (1 << js)) { - kbdev->js_data.nr_contexts_pullable--; - ret = true; - if (!atomic_read(&kctx->atoms_pulled)) - atomic_dec(&kbdev->js_data.nr_contexts_runnable); - } - kctx->slots_pullable &= ~(1 << js); - - return ret; -} - -/** - * kbase_js_ctx_list_pop_head - Pop the head context off the per-slot pullable - * queue. - * @kbdev: Device pointer - * @js: Job slot to use - * - * Caller must hold kbasep_jd_device_data::queue_mutex - * - * Return: Context to use for specified slot. 
- * NULL if no contexts present for specified slot - */ -static struct kbase_context *kbase_js_ctx_list_pop_head( - struct kbase_device *kbdev, - int js) -{ - struct kbase_context *kctx; - - lockdep_assert_held(&kbdev->js_data.queue_mutex); - - if (list_empty(&kbdev->js_data.ctx_list_pullable[js])) - return NULL; - - kctx = list_entry(kbdev->js_data.ctx_list_pullable[js].next, - struct kbase_context, - jctx.sched_info.ctx.ctx_list_entry[js]); - - list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]); - - return kctx; -} - -/** - * kbase_js_ctx_pullable - Return if a context can be pulled from on the - * specified slot - * @kctx: Context pointer - * @js: Job slot to use - * @is_scheduled: true if the context is currently scheduled - * - * Caller must hold runpool_irq.lock - * - * Return: true if context can be pulled from on specified slot - * false otherwise - */ -static bool kbase_js_ctx_pullable(struct kbase_context *kctx, int js, - bool is_scheduled) -{ - struct kbasep_js_device_data *js_devdata; - struct kbase_jd_atom *katom; - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - - js_devdata = &kctx->kbdev->js_data; - - if (is_scheduled) { - if (!kbasep_js_is_submit_allowed(js_devdata, kctx)) - return false; - } - katom = jsctx_rb_peek(kctx, js); - if (!katom) - return false; /* No pullable atoms */ - if (atomic_read(&katom->blocked)) - return false; /* next atom blocked */ - if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) { - if (katom->x_pre_dep->gpu_rb_state == - KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) - return false; - if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) && - kbase_backend_nr_atoms_on_slot(kctx->kbdev, js)) - return false; - } - - return true; -} - -static bool kbase_js_dep_validate(struct kbase_context *kctx, - struct kbase_jd_atom *katom) -{ - struct kbase_device *kbdev = kctx->kbdev; - bool ret = true; - bool has_dep = false, has_x_dep = false; - int js = kbase_js_get_slot(kbdev, katom); - int prio = 
katom->sched_priority; - int i; - - for (i = 0; i < 2; i++) { - struct kbase_jd_atom *dep_atom = katom->dep[i].atom; - - if (dep_atom) { - int dep_js = kbase_js_get_slot(kbdev, dep_atom); - int dep_prio = dep_atom->sched_priority; - - /* Dependent atom must already have been submitted */ - if (!(dep_atom->atom_flags & - KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED)) { - ret = false; - break; - } - - /* Dependencies with different priorities can't - be represented in the ringbuffer */ - if (prio != dep_prio) { - ret = false; - break; - } - - if (js == dep_js) { - /* Only one same-slot dependency can be - * represented in the ringbuffer */ - if (has_dep) { - ret = false; - break; - } - has_dep = true; - } else { - /* Only one cross-slot dependency can be - * represented in the ringbuffer */ - if (has_x_dep) { - ret = false; - break; - } - /* Each dependee atom can only have one - * cross-slot dependency */ - if (dep_atom->x_post_dep) { - ret = false; - break; - } - /* The dependee atom can not already be in the - * HW access ringbuffer */ - if (dep_atom->gpu_rb_state != - KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) { - ret = false; - break; - } - /* The dependee atom can not already have - * completed */ - if (dep_atom->status != - KBASE_JD_ATOM_STATE_IN_JS) { - ret = false; - break; - } - /* Cross-slot dependencies must not violate - * PRLAM-8987 affinity restrictions */ - if (kbase_hw_has_issue(kbdev, - BASE_HW_ISSUE_8987) && - (js == 2 || dep_js == 2)) { - ret = false; - break; - } - has_x_dep = true; - } - - if (kbase_jd_katom_dep_type(&katom->dep[i]) == - BASE_JD_DEP_TYPE_DATA && - js == dep_js) { - struct kbase_jd_atom *last_atom = - jsctx_rb_peek_last(kctx, js, - prio); - - /* Last atom on slot must be pre-dep for this - * atom */ - if (last_atom != dep_atom) { - ret = false; - break; - } - } - - /* Dependency can be represented in ringbuffers */ - } - } - - /* If dependencies can be represented by ringbuffer then clear them from - * atom structure */ - if (ret) { - for (i = 0; 
i < 2; i++) { - struct kbase_jd_atom *dep_atom = katom->dep[i].atom; - - if (dep_atom) { - int dep_js = kbase_js_get_slot(kbdev, dep_atom); - - if ((js != dep_js) && - (dep_atom->status != - KBASE_JD_ATOM_STATE_COMPLETED) - && (dep_atom->status != - KBASE_JD_ATOM_STATE_HW_COMPLETED) - && (dep_atom->status != - KBASE_JD_ATOM_STATE_UNUSED)) { - - katom->atom_flags |= - KBASE_KATOM_FLAG_X_DEP_BLOCKED; - katom->x_pre_dep = dep_atom; - dep_atom->x_post_dep = katom; - if (kbase_jd_katom_dep_type( - &katom->dep[i]) == - BASE_JD_DEP_TYPE_DATA) - katom->atom_flags |= - KBASE_KATOM_FLAG_FAIL_BLOCKER; - } - if ((kbase_jd_katom_dep_type(&katom->dep[i]) - == BASE_JD_DEP_TYPE_DATA) && - (js == dep_js)) - katom->atom_flags |= - KBASE_KATOM_FLAG_FAIL_PREV; - - list_del(&katom->dep_item[i]); - kbase_jd_katom_dep_clear(&katom->dep[i]); - } - } - } - - return ret; -} - -bool kbasep_js_add_job(struct kbase_context *kctx, - struct kbase_jd_atom *atom) -{ - unsigned long flags; - struct kbasep_js_kctx_info *js_kctx_info; - struct kbase_device *kbdev; - struct kbasep_js_device_data *js_devdata; - union kbasep_js_policy *js_policy; - - bool enqueue_required = false; - bool timer_sync = false; - - KBASE_DEBUG_ASSERT(kctx != NULL); - KBASE_DEBUG_ASSERT(atom != NULL); - lockdep_assert_held(&kctx->jctx.lock); - - kbdev = kctx->kbdev; - js_devdata = &kbdev->js_data; - js_policy = &kbdev->js_data.policy; - js_kctx_info = &kctx->jctx.sched_info; - - mutex_lock(&js_devdata->queue_mutex); - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - - /* - * Begin Runpool transaction - */ - mutex_lock(&js_devdata->runpool_mutex); - - /* Refcount ctx.nr_jobs */ - KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs < U32_MAX); - ++(js_kctx_info->ctx.nr_jobs); - - /* Setup any scheduling information */ - kbasep_js_clear_job_retry_submit(atom); - - /* Lock for state available during IRQ */ - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - if (!kbase_js_dep_validate(kctx, atom)) { - /* Dependencies could not 
be represented */ - --(js_kctx_info->ctx.nr_jobs); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - mutex_unlock(&js_devdata->runpool_mutex); - - goto out_unlock; - } - - KBASE_TIMELINE_ATOM_READY(kctx, kbase_jd_atom_id(kctx, atom)); - - if (kbase_js_dep_resolved_submit(kctx, atom, &enqueue_required) != 0) { - /* Ringbuffer was full (should be impossible) - fail the job */ - --(js_kctx_info->ctx.nr_jobs); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - mutex_unlock(&js_devdata->runpool_mutex); - - atom->event_code = BASE_JD_EVENT_JOB_CANCELLED; - - goto out_unlock; - } - - KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_ADD_JOB, kctx, atom, atom->jc, - kbasep_js_trace_get_refcnt_nolock(kbdev, kctx)); - - /* Context Attribute Refcounting */ - kbasep_js_ctx_attr_ctx_retain_atom(kbdev, kctx, atom); - - if (enqueue_required) { - if (kbase_js_ctx_pullable(kctx, atom->slot_nr, false)) - timer_sync = kbase_js_ctx_list_add_pullable(kbdev, kctx, - atom->slot_nr); - else - timer_sync = kbase_js_ctx_list_add_unpullable(kbdev, - kctx, atom->slot_nr); - } - /* If this context is active and the atom is the first on its slot, - * kick the job manager to attempt to fast-start the atom */ - if (enqueue_required && kctx == kbdev->hwaccess.active_kctx) - kbase_jm_try_kick(kbdev, 1 << atom->slot_nr); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - if (timer_sync) - kbase_backend_ctx_count_changed(kbdev); - mutex_unlock(&js_devdata->runpool_mutex); - /* End runpool transaction */ - - if (!js_kctx_info->ctx.is_scheduled) { - if (js_kctx_info->ctx.is_dying) { - /* A job got added while/after kbase_job_zap_context() - * was called on a non-scheduled context (e.g. KDS - * dependency resolved). Kill that job by killing the - * context. 
*/ - kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, - false); - } else if (js_kctx_info->ctx.nr_jobs == 1) { - /* Handle Refcount going from 0 to 1: schedule the - * context on the Policy Queue */ - KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled); - dev_dbg(kbdev->dev, "JS: Enqueue Context %p", kctx); - - /* Policy Queue was updated - caller must try to - * schedule the head context */ - WARN_ON(!enqueue_required); - } - } -out_unlock: - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - - mutex_unlock(&js_devdata->queue_mutex); - - return enqueue_required; -} - -void kbasep_js_remove_job(struct kbase_device *kbdev, - struct kbase_context *kctx, struct kbase_jd_atom *atom) -{ - struct kbasep_js_kctx_info *js_kctx_info; - struct kbasep_js_device_data *js_devdata; - union kbasep_js_policy *js_policy; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - KBASE_DEBUG_ASSERT(atom != NULL); - - js_devdata = &kbdev->js_data; - js_policy = &kbdev->js_data.policy; - js_kctx_info = &kctx->jctx.sched_info; - - KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_REMOVE_JOB, kctx, atom, atom->jc, - kbasep_js_trace_get_refcnt(kbdev, kctx)); - - /* De-refcount ctx.nr_jobs */ - KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs > 0); - --(js_kctx_info->ctx.nr_jobs); -} - -bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev, - struct kbase_context *kctx, struct kbase_jd_atom *katom) -{ - unsigned long flags; - struct kbasep_js_atom_retained_state katom_retained_state; - struct kbasep_js_device_data *js_devdata; - bool attr_state_changed; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - KBASE_DEBUG_ASSERT(katom != NULL); - - js_devdata = &kbdev->js_data; - - kbasep_js_atom_retained_state_copy(&katom_retained_state, katom); - kbasep_js_remove_job(kbdev, kctx, katom); - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - /* The atom has 'finished' (will not be re-run), so no need to call - * kbasep_js_has_atom_finished(). 
- * - * This is because it returns false for soft-stopped atoms, but we - * want to override that, because we're cancelling an atom regardless of - * whether it was soft-stopped or not */ - attr_state_changed = kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx, - &katom_retained_state); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - return attr_state_changed; -} - -bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - unsigned long flags; - struct kbasep_js_device_data *js_devdata; - bool result; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - js_devdata = &kbdev->js_data; - - /* KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_RETAIN_CTX, kctx, NULL, 0, - kbasep_js_trace_get_refcnt(kbdev, kctx)); */ - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - result = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx); - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - return result; -} - -struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, - int as_nr) -{ - unsigned long flags; - struct kbasep_js_device_data *js_devdata; - struct kbase_context *found_kctx = NULL; - struct kbasep_js_per_as_data *js_per_as_data; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS); - js_devdata = &kbdev->js_data; - js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr]; - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - found_kctx = js_per_as_data->kctx; - - if (found_kctx != NULL) - ++(js_per_as_data->as_busy_refcount); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - return found_kctx; -} - -struct kbase_context *kbasep_js_runpool_lookup_ctx_nolock( - struct kbase_device *kbdev, int as_nr) -{ - struct kbasep_js_device_data *js_devdata; - struct kbase_context *found_kctx = NULL; - struct kbasep_js_per_as_data *js_per_as_data; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < 
BASE_MAX_NR_AS); - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - js_devdata = &kbdev->js_data; - js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr]; - - found_kctx = js_per_as_data->kctx; - - if (found_kctx != NULL) - ++(js_per_as_data->as_busy_refcount); - - return found_kctx; -} - -/** - * kbasep_js_release_result - Try running more jobs after releasing a context - * and/or atom - * - * @kbdev: The kbase_device to operate on - * @kctx: The kbase_context to operate on - * @katom_retained_state: Retained state from the atom - * @runpool_ctx_attr_change: True if the runpool context attributes have changed - * - * This collates a set of actions that must happen whilst - * kbasep_js_device_data.runpool_irq.lock is held. - * - * This includes running more jobs when: - * - The previously released kctx caused a ctx attribute change, - * - The released atom caused a ctx attribute change, - * - Slots were previously blocked due to affinity restrictions, - * - Submission during IRQ handling failed. - * - * Return: %KBASEP_JS_RELEASE_RESULT_SCHED_ALL if context attributes were - * changed. 
The caller should try scheduling all contexts - */ -static kbasep_js_release_result kbasep_js_run_jobs_after_ctx_and_atom_release( - struct kbase_device *kbdev, - struct kbase_context *kctx, - struct kbasep_js_atom_retained_state *katom_retained_state, - bool runpool_ctx_attr_change) -{ - struct kbasep_js_device_data *js_devdata; - kbasep_js_release_result result = 0; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - KBASE_DEBUG_ASSERT(katom_retained_state != NULL); - js_devdata = &kbdev->js_data; - - lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex); - lockdep_assert_held(&js_devdata->runpool_mutex); - lockdep_assert_held(&js_devdata->runpool_irq.lock); - - if (js_devdata->nr_user_contexts_running != 0) { - bool retry_submit = false; - int retry_jobslot = 0; - - if (katom_retained_state) - retry_submit = kbasep_js_get_atom_retry_submit_slot( - katom_retained_state, &retry_jobslot); - - if (runpool_ctx_attr_change || retry_submit) { - /* A change in runpool ctx attributes might mean we can - * run more jobs than before */ - result = KBASEP_JS_RELEASE_RESULT_SCHED_ALL; - - KBASE_TRACE_ADD_SLOT(kbdev, JD_DONE_TRY_RUN_NEXT_JOB, - kctx, NULL, 0u, retry_jobslot); - } - } - return result; -} - -/* - * Internal function to release the reference on a ctx and an atom's "retained - * state", only taking the runpool and as transaction mutexes - * - * This also starts more jobs running in the case of an ctx-attribute state - * change - * - * This does none of the followup actions for scheduling: - * - It does not schedule in a new context - * - It does not requeue or handle dying contexts - * - * For those tasks, just call kbasep_js_runpool_release_ctx() instead - * - * Requires: - * - Context is scheduled in, and kctx->as_nr matches kctx_as_nr - * - Context has a non-zero refcount - * - Caller holds js_kctx_info->ctx.jsctx_mutex - * - Caller holds js_devdata->runpool_mutex - */ -static kbasep_js_release_result 
kbasep_js_runpool_release_ctx_internal( - struct kbase_device *kbdev, - struct kbase_context *kctx, - struct kbasep_js_atom_retained_state *katom_retained_state) -{ - unsigned long flags; - struct kbasep_js_device_data *js_devdata; - struct kbasep_js_kctx_info *js_kctx_info; - union kbasep_js_policy *js_policy; - struct kbasep_js_per_as_data *js_per_as_data; - - kbasep_js_release_result release_result = 0u; - bool runpool_ctx_attr_change = false; - int kctx_as_nr; - struct kbase_as *current_as; - int new_ref_count; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - js_kctx_info = &kctx->jctx.sched_info; - js_devdata = &kbdev->js_data; - js_policy = &kbdev->js_data.policy; - - /* Ensure context really is scheduled in */ - KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled); - - /* kctx->as_nr and js_per_as_data are only read from here. The caller's - * js_ctx_mutex provides a barrier that ensures they are up-to-date. - * - * They will not change whilst we're reading them, because the refcount - * is non-zero (and we ASSERT on that last fact). - */ - kctx_as_nr = kctx->as_nr; - KBASE_DEBUG_ASSERT(kctx_as_nr != KBASEP_AS_NR_INVALID); - js_per_as_data = &js_devdata->runpool_irq.per_as_data[kctx_as_nr]; - KBASE_DEBUG_ASSERT(js_per_as_data->as_busy_refcount > 0); - - /* - * Transaction begins on AS and runpool_irq - * - * Assert about out calling contract - */ - current_as = &kbdev->as[kctx_as_nr]; - mutex_lock(&kbdev->pm.lock); - mutex_lock(¤t_as->transaction_mutex); - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - KBASE_DEBUG_ASSERT(kctx_as_nr == kctx->as_nr); - KBASE_DEBUG_ASSERT(js_per_as_data->as_busy_refcount > 0); - - /* Update refcount */ - new_ref_count = --(js_per_as_data->as_busy_refcount); - - /* Release the atom if it finished (i.e. 
wasn't soft-stopped) */ - if (kbasep_js_has_atom_finished(katom_retained_state)) - runpool_ctx_attr_change |= kbasep_js_ctx_attr_ctx_release_atom( - kbdev, kctx, katom_retained_state); - - KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RELEASE_CTX, kctx, NULL, 0u, - new_ref_count); - - if (new_ref_count == 1 && kctx->jctx.sched_info.ctx.flags & - KBASE_CTX_FLAG_PRIVILEGED && - !kbase_pm_is_suspending(kbdev)) { - /* Context is kept scheduled into an address space even when - * there are no jobs, in this case we have to handle the - * situation where all jobs have been evicted from the GPU and - * submission is disabled. - * - * At this point we re-enable submission to allow further jobs - * to be executed - */ - kbasep_js_set_submit_allowed(js_devdata, kctx); - } - - /* Make a set of checks to see if the context should be scheduled out */ - if (new_ref_count == 0 && - (!kbasep_js_is_submit_allowed(js_devdata, kctx) || - kbdev->pm.suspending)) { - /* Last reference, and we've been told to remove this context - * from the Run Pool */ - dev_dbg(kbdev->dev, "JS: RunPool Remove Context %p because as_busy_refcount=%d, jobs=%d, allowed=%d", - kctx, new_ref_count, js_kctx_info->ctx.nr_jobs, - kbasep_js_is_submit_allowed(js_devdata, kctx)); - -#if defined(CONFIG_MALI_GATOR_SUPPORT) - kbase_trace_mali_mmu_as_released(kctx->as_nr); -#endif -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_tl_nret_as_ctx(&kbdev->as[kctx->as_nr], kctx); - kbase_tlstream_tl_nret_gpu_ctx(kbdev, kctx); -#endif - - kbase_backend_release_ctx_irq(kbdev, kctx); - - if (kbdev->hwaccess.active_kctx == kctx) - kbdev->hwaccess.active_kctx = NULL; - - /* Ctx Attribute handling - * - * Releasing atoms attributes must either happen before this, or - * after 'is_scheduled' is changed, otherwise we double-decount - * the attributes */ - runpool_ctx_attr_change |= - kbasep_js_ctx_attr_runpool_release_ctx(kbdev, kctx); - - /* Releasing the context and katom retained state can allow - * more jobs to run */ - 
release_result |= - kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, - kctx, katom_retained_state, - runpool_ctx_attr_change); - - /* - * Transaction ends on AS and runpool_irq: - * - * By this point, the AS-related data is now clear and ready - * for re-use. - * - * Since releases only occur once for each previous successful - * retain, and no more retains are allowed on this context, no - * other thread will be operating in this - * code whilst we are - */ - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - kbase_backend_release_ctx_noirq(kbdev, kctx); - - mutex_unlock(¤t_as->transaction_mutex); - mutex_unlock(&kbdev->pm.lock); - - /* Note: Don't reuse kctx_as_nr now */ - - /* Synchronize with any policy timers */ - kbase_backend_ctx_count_changed(kbdev); - - /* update book-keeping info */ - js_kctx_info->ctx.is_scheduled = false; - /* Signal any waiter that the context is not scheduled, so is - * safe for termination - once the jsctx_mutex is also dropped, - * and jobs have finished. 
*/ - wake_up(&js_kctx_info->ctx.is_scheduled_wait); - - /* Queue an action to occur after we've dropped the lock */ - release_result |= KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED; - } else { - kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, kctx, - katom_retained_state, runpool_ctx_attr_change); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - mutex_unlock(¤t_as->transaction_mutex); - mutex_unlock(&kbdev->pm.lock); - } - - return release_result; -} - -void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_atom_retained_state katom_retained_state; - - /* Setup a dummy katom_retained_state */ - kbasep_js_atom_retained_state_init_invalid(&katom_retained_state); - - kbasep_js_runpool_release_ctx_internal(kbdev, kctx, - &katom_retained_state); -} - -void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev, - struct kbase_context *kctx, bool has_pm_ref) -{ - struct kbasep_js_device_data *js_devdata; - union kbasep_js_policy *js_policy; - struct kbasep_js_kctx_info *js_kctx_info; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - js_kctx_info = &kctx->jctx.sched_info; - js_policy = &kbdev->js_data.policy; - js_devdata = &kbdev->js_data; - - /* This is called if and only if you've you've detached the context from - * the Runpool or the Policy Queue, and not added it back to the Runpool - */ - KBASE_DEBUG_ASSERT(!js_kctx_info->ctx.is_scheduled); - - if (js_kctx_info->ctx.is_dying) { - /* Dying: don't requeue, but kill all jobs on the context. 
This - * happens asynchronously */ - dev_dbg(kbdev->dev, - "JS: ** Killing Context %p on RunPool Remove **", kctx); - kbase_js_foreach_ctx_job(kctx, &kbase_jd_cancel); - } -} - -void kbasep_js_runpool_release_ctx_and_katom_retained_state( - struct kbase_device *kbdev, struct kbase_context *kctx, - struct kbasep_js_atom_retained_state *katom_retained_state) -{ - struct kbasep_js_device_data *js_devdata; - struct kbasep_js_kctx_info *js_kctx_info; - base_jd_event_code event_code; - kbasep_js_release_result release_result; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - js_kctx_info = &kctx->jctx.sched_info; - js_devdata = &kbdev->js_data; - event_code = katom_retained_state->event_code; - - mutex_lock(&js_devdata->queue_mutex); - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - mutex_lock(&js_devdata->runpool_mutex); - - release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx, - katom_retained_state); - - /* Drop the runpool mutex to allow requeing kctx */ - mutex_unlock(&js_devdata->runpool_mutex); - - if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u) - kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true); - - /* Drop the jsctx_mutex to allow scheduling in a new context */ - - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - mutex_unlock(&js_devdata->queue_mutex); - - if (release_result & KBASEP_JS_RELEASE_RESULT_SCHED_ALL) - kbase_js_sched_all(kbdev); -} - -void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_atom_retained_state katom_retained_state; - - kbasep_js_atom_retained_state_init_invalid(&katom_retained_state); - - kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, - &katom_retained_state); -} - -/* Variant of kbasep_js_runpool_release_ctx() that doesn't call into - * kbase_js_sched_all() */ -static void kbasep_js_runpool_release_ctx_no_schedule( - struct kbase_device *kbdev, struct kbase_context *kctx) -{ - struct 
kbasep_js_device_data *js_devdata; - struct kbasep_js_kctx_info *js_kctx_info; - kbasep_js_release_result release_result; - struct kbasep_js_atom_retained_state katom_retained_state_struct; - struct kbasep_js_atom_retained_state *katom_retained_state = - &katom_retained_state_struct; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - js_kctx_info = &kctx->jctx.sched_info; - js_devdata = &kbdev->js_data; - kbasep_js_atom_retained_state_init_invalid(katom_retained_state); - - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - mutex_lock(&js_devdata->runpool_mutex); - - release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx, - katom_retained_state); - - /* Drop the runpool mutex to allow requeing kctx */ - mutex_unlock(&js_devdata->runpool_mutex); - if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u) - kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true); - - /* Drop the jsctx_mutex to allow scheduling in a new context */ - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - - /* NOTE: could return release_result if the caller would like to know - * whether it should schedule a new context, but currently no callers do - */ -} - -/** - * kbase_js_set_timeouts - update all JS timeouts with user specified data - * @kbdev: Device pointer - * - * Timeouts are specified through the 'js_timeouts' sysfs file. If a timeout is - * set to a positive number then that becomes the new value used, if a timeout - * is negative then the default is set. 
- */ -static void kbase_js_set_timeouts(struct kbase_device *kbdev) -{ - struct kbasep_js_device_data *js_data = &kbdev->js_data; - - if (kbdev->js_scheduling_period_ns < 0) - js_data->scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS; - else if (kbdev->js_scheduling_period_ns > 0) - js_data->scheduling_period_ns = kbdev->js_scheduling_period_ns; - - if (kbdev->js_soft_stop_ticks < 0) - js_data->soft_stop_ticks = DEFAULT_JS_SOFT_STOP_TICKS; - else if (kbdev->js_soft_stop_ticks > 0) - js_data->soft_stop_ticks = kbdev->js_soft_stop_ticks; - - if (kbdev->js_soft_stop_ticks_cl < 0) - js_data->soft_stop_ticks_cl = DEFAULT_JS_SOFT_STOP_TICKS_CL; - else if (kbdev->js_soft_stop_ticks_cl > 0) - js_data->soft_stop_ticks_cl = kbdev->js_soft_stop_ticks_cl; - - if (kbdev->js_hard_stop_ticks_ss < 0) { - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) - js_data->hard_stop_ticks_ss = - DEFAULT_JS_HARD_STOP_TICKS_SS_8408; - else - js_data->hard_stop_ticks_ss = - DEFAULT_JS_HARD_STOP_TICKS_SS; - } else if (kbdev->js_hard_stop_ticks_ss > 0) { - js_data->hard_stop_ticks_ss = kbdev->js_hard_stop_ticks_ss; - } - - if (kbdev->js_hard_stop_ticks_cl < 0) - js_data->hard_stop_ticks_cl = DEFAULT_JS_HARD_STOP_TICKS_CL; - else if (kbdev->js_hard_stop_ticks_cl > 0) - js_data->hard_stop_ticks_cl = kbdev->js_hard_stop_ticks_cl; - - if (kbdev->js_hard_stop_ticks_dumping < 0) - js_data->hard_stop_ticks_dumping = - DEFAULT_JS_HARD_STOP_TICKS_DUMPING; - else if (kbdev->js_hard_stop_ticks_dumping > 0) - js_data->hard_stop_ticks_dumping = - kbdev->js_hard_stop_ticks_dumping; - - if (kbdev->js_reset_ticks_ss < 0) { - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) - js_data->gpu_reset_ticks_ss = - DEFAULT_JS_RESET_TICKS_SS_8408; - else - js_data->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS; - } else if (kbdev->js_reset_ticks_ss > 0) { - js_data->gpu_reset_ticks_ss = kbdev->js_reset_ticks_ss; - } - - if (kbdev->js_reset_ticks_cl < 0) - js_data->gpu_reset_ticks_cl = 
DEFAULT_JS_RESET_TICKS_CL; - else if (kbdev->js_reset_ticks_cl > 0) - js_data->gpu_reset_ticks_cl = kbdev->js_reset_ticks_cl; - - if (kbdev->js_reset_ticks_dumping < 0) - js_data->gpu_reset_ticks_dumping = - DEFAULT_JS_RESET_TICKS_DUMPING; - else if (kbdev->js_reset_ticks_dumping > 0) - js_data->gpu_reset_ticks_dumping = - kbdev->js_reset_ticks_dumping; -} - -static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_device_data *js_devdata; - struct kbasep_js_kctx_info *js_kctx_info; - union kbasep_js_policy *js_policy; - struct kbase_as *new_address_space = NULL; - unsigned long flags; - bool kctx_suspended = false; - int as_nr; - - js_devdata = &kbdev->js_data; - js_policy = &kbdev->js_data.policy; - js_kctx_info = &kctx->jctx.sched_info; - - /* Pick available address space for this context */ - as_nr = kbase_backend_find_free_address_space(kbdev, kctx); - - if (as_nr == KBASEP_AS_NR_INVALID) - return false; /* No address spaces currently available */ - - new_address_space = &kbdev->as[as_nr]; - - /* - * Atomic transaction on the Context and Run Pool begins - */ - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - mutex_lock(&js_devdata->runpool_mutex); - - /* Check to see if context is dying due to kbase_job_zap_context() */ - if (js_kctx_info->ctx.is_dying) { - /* Roll back the transaction so far and return */ - kbase_backend_release_free_address_space(kbdev, as_nr); - - mutex_unlock(&js_devdata->runpool_mutex); - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - - return false; - } - - KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_TRY_SCHEDULE_HEAD_CTX, kctx, NULL, - 0u, - kbasep_js_trace_get_refcnt(kbdev, kctx)); - - if (js_devdata->nr_user_contexts_running == 0 && - kbdev->js_timeouts_updated) { - /* Only when there are no other contexts submitting jobs: - * Latch in run-time job scheduler timeouts that were set - * through js_timeouts sysfs file */ - kbase_js_set_timeouts(kbdev); - - kbdev->js_timeouts_updated = false; 
- } - - js_kctx_info->ctx.is_scheduled = true; - - mutex_lock(&new_address_space->transaction_mutex); - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - /* Assign context to previously chosen address space */ - if (!kbase_backend_use_ctx(kbdev, kctx, as_nr)) { - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - mutex_unlock(&new_address_space->transaction_mutex); - /* If address space is not pending, then kbase_backend_use_ctx() - * failed. Roll back the transaction so far and return */ - if (!kctx->as_pending) { - js_kctx_info->ctx.is_scheduled = false; - - kbase_backend_release_free_address_space(kbdev, as_nr); - } - - mutex_unlock(&js_devdata->runpool_mutex); - - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - return false; - } - - kbdev->hwaccess.active_kctx = kctx; - -#if defined(CONFIG_MALI_GATOR_SUPPORT) - kbase_trace_mali_mmu_as_in_use(kctx->as_nr); -#endif -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_tl_ret_gpu_ctx(kbdev, kctx); - kbase_tlstream_tl_ret_as_ctx(&kbdev->as[kctx->as_nr], kctx); -#endif - - /* Cause any future waiter-on-termination to wait until the context is - * descheduled */ - wake_up(&js_kctx_info->ctx.is_scheduled_wait); - - /* Re-check for suspending: a suspend could've occurred, and all the - * contexts could've been removed from the runpool before we took this - * lock. In this case, we don't want to allow this context to run jobs, - * we just want it out immediately. - * - * The DMB required to read the suspend flag was issued recently as part - * of the runpool_irq locking. If a suspend occurs *after* that lock was - * taken (i.e. 
this condition doesn't execute), then the - * kbasep_js_suspend() code will cleanup this context instead (by virtue - * of it being called strictly after the suspend flag is set, and will - * wait for this lock to drop) */ - if (kbase_pm_is_suspending(kbdev)) { - /* Cause it to leave at some later point */ - bool retained; - - retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx); - KBASE_DEBUG_ASSERT(retained); - - kbasep_js_clear_submit_allowed(js_devdata, kctx); - kctx_suspended = true; - } - - /* Transaction complete */ - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - mutex_unlock(&new_address_space->transaction_mutex); - - /* Synchronize with any policy timers */ - kbase_backend_ctx_count_changed(kbdev); - - mutex_unlock(&js_devdata->runpool_mutex); - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - /* Note: after this point, the context could potentially get scheduled - * out immediately */ - - if (kctx_suspended) { - /* Finishing forcing out the context due to a suspend. 
Use a - * variant of kbasep_js_runpool_release_ctx() that doesn't - * schedule a new context, to prevent a risk of recursion back - * into this function */ - kbasep_js_runpool_release_ctx_no_schedule(kbdev, kctx); - return false; - } - return true; -} - -static bool kbase_js_use_ctx(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - unsigned long flags; - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - if (kctx->as_pending) { - /* Context waiting for AS to be assigned */ - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - return false; - } - if (kbase_backend_use_ctx_sched(kbdev, kctx)) { - /* Context already has ASID - mark as active */ - kbdev->hwaccess.active_kctx = kctx; - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - return true; /* Context already scheduled */ - } - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - return kbasep_js_schedule_ctx(kbdev, kctx); -} - -void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_kctx_info *js_kctx_info; - struct kbasep_js_device_data *js_devdata; - bool is_scheduled; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - - js_devdata = &kbdev->js_data; - js_kctx_info = &kctx->jctx.sched_info; - - /* This must never be attempted whilst suspending - i.e. 
it should only - * happen in response to a syscall from a user-space thread */ - BUG_ON(kbase_pm_is_suspending(kbdev)); - - mutex_lock(&js_devdata->queue_mutex); - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - - /* Mark the context as privileged */ - js_kctx_info->ctx.flags |= KBASE_CTX_FLAG_PRIVILEGED; - - is_scheduled = js_kctx_info->ctx.is_scheduled; - if (!is_scheduled) { - /* Add the context to the pullable list */ - if (kbase_js_ctx_list_add_pullable(kbdev, kctx, 0)) - kbase_js_sync_timers(kbdev); - - /* Fast-starting requires the jsctx_mutex to be dropped, - * because it works on multiple ctxs */ - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - mutex_unlock(&js_devdata->queue_mutex); - - /* Try to schedule the context in */ - kbase_js_sched_all(kbdev); - - /* Wait for the context to be scheduled in */ - wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait, - kctx->jctx.sched_info.ctx.is_scheduled); - } else { - /* Already scheduled in - We need to retain it to keep the - * corresponding address space */ - kbasep_js_runpool_retain_ctx(kbdev, kctx); - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - mutex_unlock(&js_devdata->queue_mutex); - } -} -KBASE_EXPORT_TEST_API(kbasep_js_schedule_privileged_ctx); - -void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_kctx_info *js_kctx_info; - bool pending; - - KBASE_DEBUG_ASSERT(kctx != NULL); - js_kctx_info = &kctx->jctx.sched_info; - - /* We don't need to use the address space anymore */ - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - js_kctx_info->ctx.flags &= (~KBASE_CTX_FLAG_PRIVILEGED); - pending = kctx->as_pending; - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - - /* Release the context - it will be scheduled out if there is no - * pending job */ - if (!pending) - kbasep_js_runpool_release_ctx(kbdev, kctx); - - kbase_js_sched_all(kbdev); -} -KBASE_EXPORT_TEST_API(kbasep_js_release_privileged_ctx); - -void kbasep_js_suspend(struct kbase_device 
*kbdev) -{ - unsigned long flags; - struct kbasep_js_device_data *js_devdata; - int i; - u16 retained = 0u; - int nr_privileged_ctx = 0; - - KBASE_DEBUG_ASSERT(kbdev); - KBASE_DEBUG_ASSERT(kbase_pm_is_suspending(kbdev)); - js_devdata = &kbdev->js_data; - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - /* Prevent all contexts from submitting */ - js_devdata->runpool_irq.submit_allowed = 0; - - /* Retain each of the contexts, so we can cause it to leave even if it - * had no refcount to begin with */ - for (i = BASE_MAX_NR_AS - 1; i >= 0; --i) { - struct kbasep_js_per_as_data *js_per_as_data = - &js_devdata->runpool_irq.per_as_data[i]; - struct kbase_context *kctx = js_per_as_data->kctx; - - retained = retained << 1; - - if (kctx) { - ++(js_per_as_data->as_busy_refcount); - retained |= 1u; - /* We can only cope with up to 1 privileged context - - * the instrumented context. It'll be suspended by - * disabling instrumentation */ - if (kctx->jctx.sched_info.ctx.flags & - KBASE_CTX_FLAG_PRIVILEGED) - KBASE_DEBUG_ASSERT(++nr_privileged_ctx == 1); - } - } - CSTD_UNUSED(nr_privileged_ctx); - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - /* De-ref the previous retain to ensure each context gets pulled out - * sometime later. 
*/ - for (i = 0; - i < BASE_MAX_NR_AS; - ++i, retained = retained >> 1) { - struct kbasep_js_per_as_data *js_per_as_data = - &js_devdata->runpool_irq.per_as_data[i]; - struct kbase_context *kctx = js_per_as_data->kctx; - - if (retained & 1u) - kbasep_js_runpool_release_ctx(kbdev, kctx); - } - - /* Caller must wait for all Power Manager active references to be - * dropped */ -} - -void kbasep_js_resume(struct kbase_device *kbdev) -{ - struct kbasep_js_device_data *js_devdata; - int js; - - KBASE_DEBUG_ASSERT(kbdev); - js_devdata = &kbdev->js_data; - KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev)); - - mutex_lock(&js_devdata->queue_mutex); - for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { - struct kbase_context *kctx, *n; - - list_for_each_entry_safe(kctx, n, - &kbdev->js_data.ctx_list_unpullable[js], - jctx.sched_info.ctx.ctx_list_entry[js]) { - struct kbasep_js_kctx_info *js_kctx_info; - unsigned long flags; - bool timer_sync = false; - - js_kctx_info = &kctx->jctx.sched_info; - - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - mutex_lock(&js_devdata->runpool_mutex); - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - if (!js_kctx_info->ctx.is_scheduled && - kbase_js_ctx_pullable(kctx, js, false)) - timer_sync = kbase_js_ctx_list_add_pullable( - kbdev, kctx, js); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, - flags); - if (timer_sync) - kbase_backend_ctx_count_changed(kbdev); - mutex_unlock(&js_devdata->runpool_mutex); - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - } - } - mutex_unlock(&js_devdata->queue_mutex); - - /* Restart atom processing */ - kbase_js_sched_all(kbdev); - - /* JS Resume complete */ -} - -bool kbase_js_is_atom_valid(struct kbase_device *kbdev, - struct kbase_jd_atom *katom) -{ - if ((katom->core_req & BASE_JD_REQ_FS) && - (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | - BASE_JD_REQ_T))) - return false; - - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987) && - (katom->core_req & 
BASE_JD_REQ_ONLY_COMPUTE) && - (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_T))) - return false; - - return true; -} - -static int kbase_js_get_slot(struct kbase_device *kbdev, - struct kbase_jd_atom *katom) -{ - if (katom->core_req & BASE_JD_REQ_FS) - return 0; - - if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) { - if (katom->device_nr == 1 && - kbdev->gpu_props.num_core_groups == 2) - return 2; - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) - return 2; - } - - return 1; -} - -int kbase_js_dep_resolved_submit(struct kbase_context *kctx, - struct kbase_jd_atom *katom, - bool *enqueue_required) -{ - katom->slot_nr = kbase_js_get_slot(kctx->kbdev, katom); - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - - /* If slot will transition from unpullable to pullable then add to - * pullable list */ - if (jsctx_rb_none_to_pull(kctx, katom->slot_nr)) { - *enqueue_required = true; - } else { - *enqueue_required = false; - } - /* Check if there are lower priority jobs to soft stop */ - kbase_job_slot_ctx_priority_check_locked(kctx, katom); - - /* Add atom to ring buffer. */ - if (unlikely(jsctx_rb_add_atom(kctx, katom))) { - /* The ring buffer is full. This should be impossible as the - * job dispatcher can not submit enough atoms to exceed the - * ring buffer size. Fail the job. 
- */ - WARN(1, "Job submit while JSCTX ringbuffer already full\n"); - return -EINVAL; - } - - katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED; - - return 0; -} - -struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js) -{ - struct kbase_jd_atom *katom; - struct kbasep_js_device_data *js_devdata; - int pulled; - - KBASE_DEBUG_ASSERT(kctx); - - js_devdata = &kctx->kbdev->js_data; - lockdep_assert_held(&js_devdata->runpool_irq.lock); - - if (!kbasep_js_is_submit_allowed(js_devdata, kctx)) - return NULL; - if (kbase_pm_is_suspending(kctx->kbdev)) - return NULL; - - katom = jsctx_rb_peek(kctx, js); - if (!katom) - return NULL; - - if (atomic_read(&katom->blocked)) - return NULL; - - /* Due to ordering restrictions when unpulling atoms on failure, we do - * not allow multiple runs of fail-dep atoms from the same context to be - * present on the same slot */ - if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_PREV) && - atomic_read(&kctx->atoms_pulled_slot[js])) { - struct kbase_jd_atom *prev_atom = - kbase_backend_inspect_tail(kctx->kbdev, js); - - if (prev_atom && prev_atom->kctx != kctx) - return NULL; - } - - if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) { - if (katom->x_pre_dep->gpu_rb_state == - KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) - return NULL; - if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) && - kbase_backend_nr_atoms_on_slot(kctx->kbdev, js)) - return NULL; - } - - kctx->pulled = true; - pulled = atomic_inc_return(&kctx->atoms_pulled); - if (pulled == 1 && !kctx->slots_pullable) - atomic_inc(&kctx->kbdev->js_data.nr_contexts_runnable); - atomic_inc(&kctx->atoms_pulled_slot[katom->slot_nr]); - jsctx_rb_pull(kctx, katom); - - kbasep_js_runpool_retain_ctx_nolock(kctx->kbdev, kctx); - katom->atom_flags |= KBASE_KATOM_FLAG_HOLDING_CTX_REF; - - katom->sched_info.cfs.ticks = 0; - - return katom; -} - - -static void js_return_worker(struct work_struct *data) -{ - struct kbase_jd_atom *katom = container_of(data, struct 
kbase_jd_atom, - work); - struct kbase_context *kctx = katom->kctx; - struct kbase_device *kbdev = kctx->kbdev; - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info; - struct kbasep_js_atom_retained_state retained_state; - int js = katom->slot_nr; - bool timer_sync = false; - bool context_idle = false; - unsigned long flags; - base_jd_core_req core_req = katom->core_req; - u64 affinity = katom->affinity; - enum kbase_atom_coreref_state coreref_state = katom->coreref_state; - - kbase_backend_complete_wq(kbdev, katom); - - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) - kbase_as_poking_timer_release_atom(kbdev, kctx, katom); - - kbasep_js_atom_retained_state_copy(&retained_state, katom); - - mutex_lock(&js_devdata->queue_mutex); - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - - atomic_dec(&kctx->atoms_pulled); - atomic_dec(&kctx->atoms_pulled_slot[js]); - - atomic_dec(&katom->blocked); - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - if (!atomic_read(&kctx->atoms_pulled_slot[js]) && - jsctx_rb_none_to_pull(kctx, js)) - timer_sync |= kbase_js_ctx_list_remove(kbdev, kctx, js); - - if (!atomic_read(&kctx->atoms_pulled)) { - if (!kctx->slots_pullable) - atomic_dec(&kbdev->js_data.nr_contexts_runnable); - - if (kctx->as_nr != KBASEP_AS_NR_INVALID && - !js_kctx_info->ctx.is_dying) { - int num_slots = kbdev->gpu_props.num_job_slots; - int slot; - - if (!kbasep_js_is_submit_allowed(js_devdata, kctx)) - kbasep_js_set_submit_allowed(js_devdata, kctx); - - for (slot = 0; slot < num_slots; slot++) { - if (kbase_js_ctx_pullable(kctx, slot, true)) - timer_sync |= - kbase_js_ctx_list_add_pullable( - kbdev, kctx, slot); - } - } - - kbase_jm_idle_ctx(kbdev, kctx); - - context_idle = true; - } - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - if (context_idle) { - WARN_ON(!kctx->ctx_active); - kctx->ctx_active = false; - kbase_pm_context_idle(kbdev); - } - - if 
(timer_sync) - kbase_js_sync_timers(kbdev); - - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - mutex_unlock(&js_devdata->queue_mutex); - - katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF; - kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, - &retained_state); - - kbase_js_sched_all(kbdev); - - kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity, - coreref_state); -} - -void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom) -{ - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - - jsctx_rb_unpull(kctx, katom); - - WARN_ON(work_pending(&katom->work)); - - /* Block re-submission until workqueue has run */ - atomic_inc(&katom->blocked); - - kbase_job_check_leave_disjoint(kctx->kbdev, katom); - - KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work)); - INIT_WORK(&katom->work, js_return_worker); - queue_work(kctx->jctx.job_done_wq, &katom->work); -} - -static bool kbase_js_evict_atom(struct kbase_context *kctx, - struct kbase_jd_atom *katom_evict, - struct kbase_jd_atom *start_katom, - struct kbase_jd_atom *head_katom, - struct list_head *evict_list, - struct jsctx_rb *rb, int idx) -{ - struct kbase_jd_atom *x_dep = katom_evict->x_post_dep; - - if (!(katom_evict->atom_flags & KBASE_KATOM_FLAG_FAIL_PREV) && - katom_evict != start_katom) - return false; - - if (katom_evict->gpu_rb_state != KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) { - WARN_ON(katom_evict->event_code != head_katom->event_code); - - return false; - } - - if (katom_evict->status == KBASE_JD_ATOM_STATE_HW_COMPLETED && - katom_evict != head_katom) - return false; - - /* Evict cross dependency if present */ - if (x_dep && (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED) - && (x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER)) - list_add_tail(&x_dep->dep_item[0], evict_list); - - /* If cross dependency is present and does not have a data dependency - * then unblock */ - if (x_dep && (x_dep->atom_flags & 
KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED) - && !(x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER)) - x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED; - - if (katom_evict != head_katom) { - rb->entries[idx & JSCTX_RB_MASK].atom_id = - KBASEP_ATOM_ID_INVALID; - - katom_evict->event_code = head_katom->event_code; - katom_evict->atom_flags &= - ~KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED; - - if (katom_evict->atom_flags & KBASE_KATOM_FLAG_HOLDING_CTX_REF) - kbase_jd_done(katom_evict, katom_evict->slot_nr, NULL, - 0); - else - kbase_jd_evict(kctx->kbdev, katom_evict); - } - - return true; -} - -/** - * kbase_js_evict_deps - Evict dependencies - * @kctx: Context pointer - * @head_katom: Pointer to the atom to evict - * - * Remove all post dependencies of an atom from the context ringbuffers. - * - * The original atom's event_code will be propogated to all dependent atoms. - * - * Context: Caller must hold both jctx and HW access locks - */ -static void kbase_js_evict_deps(struct kbase_context *kctx, - struct kbase_jd_atom *head_katom) -{ - struct list_head evict_list; - - lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex); - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - - INIT_LIST_HEAD(&evict_list); - - list_add_tail(&head_katom->dep_item[0], &evict_list); - - while (!list_empty(&evict_list)) { - struct kbase_jd_atom *start_katom; - - start_katom = list_entry(evict_list.prev, struct kbase_jd_atom, - dep_item[0]); - list_del(evict_list.prev); - - jsctx_rb_evict(kctx, start_katom, head_katom, &evict_list); - } -} - -/** - * kbase_js_compact - Compact JSCTX ringbuffers - * @kctx: Context pointer - * - * Compact the JSCTX ringbuffers, removing any NULL entries - * - * Context: Caller must hold both jctx and HW access locks - */ -static void kbase_js_compact(struct kbase_context *kctx) -{ - struct kbase_device *kbdev = kctx->kbdev; - int js; - - lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex); - 
lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) - jsctx_rb_compact(kctx, js); -} - -bool kbase_js_complete_atom_wq(struct kbase_context *kctx, - struct kbase_jd_atom *katom) -{ - struct kbasep_js_kctx_info *js_kctx_info; - struct kbasep_js_device_data *js_devdata; - struct kbase_device *kbdev; - unsigned long flags; - bool timer_sync = false; - int atom_slot; - bool context_idle = false; - - kbdev = kctx->kbdev; - atom_slot = katom->slot_nr; - - js_kctx_info = &kctx->jctx.sched_info; - js_devdata = &kbdev->js_data; - - lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); - - mutex_lock(&js_devdata->runpool_mutex); - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - if (katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_RB_SUBMITTED) { - if (katom->event_code != BASE_JD_EVENT_DONE) - kbase_js_evict_deps(kctx, katom); - - jsctx_rb_remove(kctx, katom); - - context_idle = !atomic_dec_return(&kctx->atoms_pulled); - atomic_dec(&kctx->atoms_pulled_slot[atom_slot]); - - if (!atomic_read(&kctx->atoms_pulled) && !kctx->slots_pullable) - atomic_dec(&kbdev->js_data.nr_contexts_runnable); - - if (katom->event_code != BASE_JD_EVENT_DONE) - kbase_js_compact(kctx); - } - - if (!atomic_read(&kctx->atoms_pulled_slot[atom_slot]) && - jsctx_rb_none_to_pull(kctx, atom_slot)) - timer_sync |= kbase_js_ctx_list_remove(kctx->kbdev, kctx, - atom_slot); - - /* - * If submission is disabled on this context (most likely due to an - * atom failure) and there are now no atoms left in the system then - * re-enable submission so that context can be scheduled again. 
- */ - if (!kbasep_js_is_submit_allowed(js_devdata, kctx) && - !atomic_read(&kctx->atoms_pulled) && - !js_kctx_info->ctx.is_dying) { - int js; - - kbasep_js_set_submit_allowed(js_devdata, kctx); - - for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { - if (kbase_js_ctx_pullable(kctx, js, true)) - timer_sync |= kbase_js_ctx_list_add_pullable( - kbdev, kctx, js); - } - } else if (katom->x_post_dep && - kbasep_js_is_submit_allowed(js_devdata, kctx)) { - int js; - - for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { - if (kbase_js_ctx_pullable(kctx, js, true)) - timer_sync |= kbase_js_ctx_list_add_pullable( - kbdev, kctx, js); - } - } - - /* Mark context as inactive. The pm reference will be dropped later in - * jd_done_worker(). - */ - if (context_idle) - kctx->ctx_active = false; - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - if (timer_sync) - kbase_backend_ctx_count_changed(kbdev); - mutex_unlock(&js_devdata->runpool_mutex); - - return context_idle; -} - -void kbase_js_complete_atom(struct kbase_jd_atom *katom, ktime_t *end_timestamp) -{ - u64 microseconds_spent = 0; - struct kbase_device *kbdev; - struct kbase_context *kctx = katom->kctx; - union kbasep_js_policy *js_policy; - struct kbasep_js_device_data *js_devdata; - - kbdev = kctx->kbdev; - - js_policy = &kbdev->js_data.policy; - js_devdata = &kbdev->js_data; - - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - - katom->status = KBASE_JD_ATOM_STATE_HW_COMPLETED; - -#if defined(CONFIG_MALI_GATOR_SUPPORT) - kbase_trace_mali_job_slots_event(GATOR_MAKE_EVENT(GATOR_JOB_SLOT_STOP, - katom->slot_nr), NULL, 0); -#endif -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_tl_nret_atom_lpu( - katom, - &kbdev->gpu_props.props.raw_props.js_features[ - katom->slot_nr]); - kbase_tlstream_tl_nret_atom_as(katom, &kbdev->as[kctx->as_nr]); -#endif - /* Calculate the job's time used */ - if (end_timestamp != NULL) { - /* Only calculating it for jobs that really run on the HW 
(e.g. - * removed from next jobs never actually ran, so really did take - * zero time) */ - ktime_t tick_diff = ktime_sub(*end_timestamp, - katom->start_timestamp); - - microseconds_spent = ktime_to_ns(tick_diff); - - do_div(microseconds_spent, 1000); - - /* Round up time spent to the minimum timer resolution */ - if (microseconds_spent < KBASEP_JS_TICK_RESOLUTION_US) - microseconds_spent = KBASEP_JS_TICK_RESOLUTION_US; - } - - /* Log the result of the job (completion status, and time spent). */ - kbasep_js_policy_log_job_result(js_policy, katom, microseconds_spent); - - kbase_jd_done(katom, katom->slot_nr, end_timestamp, 0); - - /* Unblock cross dependency if present */ - if (katom->x_post_dep && (katom->event_code == BASE_JD_EVENT_DONE || - !(katom->x_post_dep->atom_flags & - KBASE_KATOM_FLAG_FAIL_BLOCKER))) - katom->x_post_dep->atom_flags &= - ~KBASE_KATOM_FLAG_X_DEP_BLOCKED; -} - -void kbase_js_sched(struct kbase_device *kbdev, int js_mask) -{ - struct kbasep_js_device_data *js_devdata; - union kbasep_js_policy *js_policy; - bool timer_sync = false; - - js_devdata = &kbdev->js_data; - js_policy = &js_devdata->policy; - - down(&js_devdata->schedule_sem); - mutex_lock(&js_devdata->queue_mutex); - - while (js_mask) { - int js; - - js = ffs(js_mask) - 1; - - while (1) { - struct kbase_context *kctx; - unsigned long flags; - bool context_idle = false; - - kctx = kbase_js_ctx_list_pop_head(kbdev, js); - - if (!kctx) { - js_mask &= ~(1 << js); - break; /* No contexts on pullable list */ - } - - if (!kctx->ctx_active) { - context_idle = true; - - if (kbase_pm_context_active_handle_suspend( - kbdev, - KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) { - /* Suspend pending - return context to - * queue and stop scheduling */ - mutex_lock( - &kctx->jctx.sched_info.ctx.jsctx_mutex); - if (kbase_js_ctx_list_add_pullable_head( - kctx->kbdev, kctx, js)) - kbase_js_sync_timers(kbdev); - mutex_unlock( - &kctx->jctx.sched_info.ctx.jsctx_mutex); - mutex_unlock(&js_devdata->queue_mutex); 
- up(&js_devdata->schedule_sem); - return; - } - kctx->ctx_active = true; - } - - if (!kbase_js_use_ctx(kbdev, kctx)) { - mutex_lock( - &kctx->jctx.sched_info.ctx.jsctx_mutex); - /* Context can not be used at this time */ - spin_lock_irqsave(&js_devdata->runpool_irq.lock, - flags); - if (kctx->as_pending || - kbase_js_ctx_pullable(kctx, js, false) - || (kctx->jctx.sched_info.ctx.flags & - KBASE_CTX_FLAG_PRIVILEGED)) - timer_sync |= - kbase_js_ctx_list_add_pullable_head( - kctx->kbdev, kctx, js); - else - timer_sync |= - kbase_js_ctx_list_add_unpullable( - kctx->kbdev, kctx, js); - spin_unlock_irqrestore( - &js_devdata->runpool_irq.lock, flags); - mutex_unlock( - &kctx->jctx.sched_info.ctx.jsctx_mutex); - if (context_idle) { - WARN_ON(!kctx->ctx_active); - kctx->ctx_active = false; - kbase_pm_context_idle(kbdev); - } - - /* No more jobs can be submitted on this slot */ - js_mask &= ~(1 << js); - break; - } - mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex); - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - kctx->pulled = false; - - if (!kbase_jm_kick(kbdev, 1 << js)) - /* No more jobs can be submitted on this slot */ - js_mask &= ~(1 << js); - - if (!kctx->pulled) { - /* Failed to pull jobs - push to head of list */ - if (kbase_js_ctx_pullable(kctx, js, true)) - timer_sync |= - kbase_js_ctx_list_add_pullable_head( - kctx->kbdev, - kctx, js); - else - timer_sync |= - kbase_js_ctx_list_add_unpullable( - kctx->kbdev, - kctx, js); - - if (context_idle) { - kbase_jm_idle_ctx(kbdev, kctx); - spin_unlock_irqrestore( - &js_devdata->runpool_irq.lock, - flags); - WARN_ON(!kctx->ctx_active); - kctx->ctx_active = false; - kbase_pm_context_idle(kbdev); - } else { - spin_unlock_irqrestore( - &js_devdata->runpool_irq.lock, - flags); - } - mutex_unlock( - &kctx->jctx.sched_info.ctx.jsctx_mutex); - - js_mask &= ~(1 << js); - break; /* Could not run atoms on this slot */ - } - - /* Push to back of list */ - if (kbase_js_ctx_pullable(kctx, js, true)) - timer_sync |= 
kbase_js_ctx_list_add_pullable( - kctx->kbdev, kctx, js); - else - timer_sync |= kbase_js_ctx_list_add_unpullable( - kctx->kbdev, kctx, js); - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, - flags); - mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex); - } - } - - if (timer_sync) - kbase_js_sync_timers(kbdev); - - mutex_unlock(&js_devdata->queue_mutex); - up(&js_devdata->schedule_sem); -} - -void kbase_js_zap_context(struct kbase_context *kctx) -{ - struct kbase_device *kbdev = kctx->kbdev; - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info; - int js; - - /* - * Critical assumption: No more submission is possible outside of the - * workqueue. This is because the OS *must* prevent U/K calls (IOCTLs) - * whilst the struct kbase_context is terminating. - */ - - /* First, atomically do the following: - * - mark the context as dying - * - try to evict it from the policy queue */ - mutex_lock(&js_devdata->queue_mutex); - mutex_lock(&js_kctx_info->ctx.jsctx_mutex); - js_kctx_info->ctx.is_dying = true; - - dev_dbg(kbdev->dev, "Zap: Try Evict Ctx %p", kctx); - - /* - * At this point we know: - * - If eviction succeeded, it was in the policy queue, but now no - * longer is - * - We must cancel the jobs here. No Power Manager active reference to - * release. - * - This happens asynchronously - kbase_jd_zap_context() will wait for - * those jobs to be killed. - * - If eviction failed, then it wasn't in the policy queue. It is one - * of the following: - * - a. it didn't have any jobs, and so is not in the Policy Queue or - * the Run Pool (not scheduled) - * - Hence, no more work required to cancel jobs. No Power Manager - * active reference to release. - * - b. it was in the middle of a scheduling transaction (and thus must - * have at least 1 job). This can happen from a syscall or a - * kernel thread. 
We still hold the jsctx_mutex, and so the thread - * must be waiting inside kbasep_js_try_schedule_head_ctx(), - * before checking whether the runpool is full. That thread will - * continue after we drop the mutex, and will notice the context - * is dying. It will rollback the transaction, killing all jobs at - * the same time. kbase_jd_zap_context() will wait for those jobs - * to be killed. - * - Hence, no more work required to cancel jobs, or to release the - * Power Manager active reference. - * - c. it is scheduled, and may or may not be running jobs - * - We must cause it to leave the runpool by stopping it from - * submitting any more jobs. When it finally does leave, - * kbasep_js_runpool_requeue_or_kill_ctx() will kill all remaining jobs - * (because it is dying), release the Power Manager active reference, - * and will not requeue the context in the policy queue. - * kbase_jd_zap_context() will wait for those jobs to be killed. - * - Hence, work required just to make it leave the runpool. Cancelling - * jobs and releasing the Power manager active reference will be - * handled when it leaves the runpool. 
- */ - if (!js_kctx_info->ctx.is_scheduled) { - for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { - if (!list_empty( - &kctx->jctx.sched_info.ctx.ctx_list_entry[js])) - list_del_init( - &kctx->jctx.sched_info.ctx.ctx_list_entry[js]); - } - - /* The following events require us to kill off remaining jobs - * and update PM book-keeping: - * - we evicted it correctly (it must have jobs to be in the - * Policy Queue) - * - * These events need no action, but take this path anyway: - * - Case a: it didn't have any jobs, and was never in the Queue - * - Case b: scheduling transaction will be partially rolled- - * back (this already cancels the jobs) - */ - - KBASE_TRACE_ADD(kbdev, JM_ZAP_NON_SCHEDULED, kctx, NULL, 0u, - js_kctx_info->ctx.is_scheduled); - - dev_dbg(kbdev->dev, "Zap: Ctx %p scheduled=0", kctx); - - /* Only cancel jobs when we evicted from the policy - * queue. No Power Manager active reference was held. - * - * Having is_dying set ensures that this kills, and - * doesn't requeue */ - kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, false); - - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - mutex_unlock(&js_devdata->queue_mutex); - } else { - unsigned long flags; - bool was_retained; - - /* Case c: didn't evict, but it is scheduled - it's in the Run - * Pool */ - KBASE_TRACE_ADD(kbdev, JM_ZAP_SCHEDULED, kctx, NULL, 0u, - js_kctx_info->ctx.is_scheduled); - dev_dbg(kbdev->dev, "Zap: Ctx %p is in RunPool", kctx); - - /* Disable the ctx from submitting any more jobs */ - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - kbasep_js_clear_submit_allowed(js_devdata, kctx); - - /* Retain and (later) release the context whilst it is is now - * disallowed from submitting jobs - ensures that someone - * somewhere will be removing the context later on */ - was_retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx); - - /* Since it's scheduled and we have the jsctx_mutex, it must be - * retained successfully */ - KBASE_DEBUG_ASSERT(was_retained); 
- - dev_dbg(kbdev->dev, "Zap: Ctx %p Kill Any Running jobs", kctx); - - /* Cancel any remaining running jobs for this kctx - if any. - * Submit is disallowed which takes effect immediately, so no - * more new jobs will appear after we do this. */ - for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) - kbase_job_slot_hardstop(kctx, js, NULL); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); - mutex_unlock(&js_devdata->queue_mutex); - - dev_dbg(kbdev->dev, "Zap: Ctx %p Release (may or may not schedule out immediately)", - kctx); - - kbasep_js_runpool_release_ctx(kbdev, kctx); - } - - KBASE_TRACE_ADD(kbdev, JM_ZAP_DONE, kctx, NULL, 0u, 0u); - - /* After this, you must wait on both the - * kbase_jd_context::zero_jobs_wait and the - * kbasep_js_kctx_info::ctx::is_scheduled_waitq - to wait for the jobs - * to be destroyed, and the context to be de-scheduled (if it was on the - * runpool). - * - * kbase_jd_zap_context() will do this. */ -} - -static inline int trace_get_refcnt(struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_device_data *js_devdata; - int as_nr; - int refcnt = 0; - - js_devdata = &kbdev->js_data; - - as_nr = kctx->as_nr; - if (as_nr != KBASEP_AS_NR_INVALID) { - struct kbasep_js_per_as_data *js_per_as_data; - - js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr]; - - refcnt = js_per_as_data->as_busy_refcount; - } - - return refcnt; -} - -/** - * kbase_js_foreach_ctx_job(): - Call a function on all jobs in context - * @kctx: Pointer to context. - * @callback: Pointer to function to call for each job. - * - * Call a function on all jobs belonging to a non-queued, non-running - * context, and detach the jobs from the context as it goes. - * - * Due to the locks that might be held at the time of the call, the callback - * may need to defer work on a workqueue to complete its actions (e.g. 
when - * cancelling jobs) - * - * Atoms will be removed from the queue, so this must only be called when - * cancelling jobs (which occurs as part of context destruction). - * - * The locking conditions on the caller are as follows: - * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex. - */ -static void kbase_js_foreach_ctx_job(struct kbase_context *kctx, - kbasep_js_policy_ctx_job_cb callback) -{ - struct kbase_device *kbdev; - struct kbasep_js_device_data *js_devdata; - unsigned long flags; - u32 js; - - kbdev = kctx->kbdev; - - js_devdata = &kbdev->js_data; - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_FOREACH_CTX_JOBS, kctx, NULL, - 0u, trace_get_refcnt(kbdev, kctx)); - - /* Invoke callback on jobs on each slot in turn */ - for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) - jsctx_rb_foreach(kctx, js, callback); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js.h deleted file mode 100755 index 868c6808d628c..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js.h +++ /dev/null @@ -1,1041 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_kbase_js.h - * Job Scheduler APIs. 
- */ - -#ifndef _KBASE_JS_H_ -#define _KBASE_JS_H_ - -#include "mali_kbase_js_defs.h" -#include "mali_kbase_js_policy.h" -#include "mali_kbase_defs.h" -#include "mali_kbase_debug.h" - -#include "mali_kbase_js_ctx_attr.h" - -/** - * @addtogroup base_api - * @{ - */ - -/** - * @addtogroup base_kbase_api - * @{ - */ - -/** - * @addtogroup kbase_js Job Scheduler Internal APIs - * @{ - * - * These APIs are Internal to KBase and are available for use by the - * @ref kbase_js_policy "Job Scheduler Policy APIs" - */ - -/** - * @brief Initialize the Job Scheduler - * - * The struct kbasep_js_device_data sub-structure of \a kbdev must be zero - * initialized before passing to the kbasep_js_devdata_init() function. This is - * to give efficient error path code. - */ -int kbasep_js_devdata_init(struct kbase_device * const kbdev); - -/** - * @brief Halt the Job Scheduler. - * - * It is safe to call this on \a kbdev even if it the kbasep_js_device_data - * sub-structure was never initialized/failed initialization, to give efficient - * error-path code. - * - * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must - * be zero initialized before passing to the kbasep_js_devdata_init() - * function. This is to give efficient error path code. - * - * It is a Programming Error to call this whilst there are still kbase_context - * structures registered with this scheduler. - * - */ -void kbasep_js_devdata_halt(struct kbase_device *kbdev); - -/** - * @brief Terminate the Job Scheduler - * - * It is safe to call this on \a kbdev even if it the kbasep_js_device_data - * sub-structure was never initialized/failed initialization, to give efficient - * error-path code. - * - * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must - * be zero initialized before passing to the kbasep_js_devdata_init() - * function. This is to give efficient error path code. 
- * - * It is a Programming Error to call this whilst there are still kbase_context - * structures registered with this scheduler. - */ -void kbasep_js_devdata_term(struct kbase_device *kbdev); - -/** - * @brief Initialize the Scheduling Component of a struct kbase_context on the Job Scheduler. - * - * This effectively registers a struct kbase_context with a Job Scheduler. - * - * It does not register any jobs owned by the struct kbase_context with the scheduler. - * Those must be separately registered by kbasep_js_add_job(). - * - * The struct kbase_context must be zero intitialized before passing to the - * kbase_js_init() function. This is to give efficient error path code. - */ -int kbasep_js_kctx_init(struct kbase_context * const kctx); - -/** - * @brief Terminate the Scheduling Component of a struct kbase_context on the Job Scheduler - * - * This effectively de-registers a struct kbase_context from its Job Scheduler - * - * It is safe to call this on a struct kbase_context that has never had or failed - * initialization of its jctx.sched_info member, to give efficient error-path - * code. - * - * For this to work, the struct kbase_context must be zero intitialized before passing - * to the kbase_js_init() function. - * - * It is a Programming Error to call this whilst there are still jobs - * registered with this context. - */ -void kbasep_js_kctx_term(struct kbase_context *kctx); - -/** - * @brief Add a job chain to the Job Scheduler, and take necessary actions to - * schedule the context/run the job. 
- * - * This atomically does the following: - * - Update the numbers of jobs information - * - Add the job to the run pool if necessary (part of init_job) - * - * Once this is done, then an appropriate action is taken: - * - If the ctx is scheduled, it attempts to start the next job (which might be - * this added job) - * - Otherwise, and if this is the first job on the context, it enqueues it on - * the Policy Queue - * - * The Policy's Queue can be updated by this in the following ways: - * - In the above case that this is the first job on the context - * - If the context is high priority and the context is not scheduled, then it - * could cause the Policy to schedule out a low-priority context, allowing - * this context to be scheduled in. - * - * If the context is already scheduled on the RunPool, then adding a job to it - * is guarenteed not to update the Policy Queue. And so, the caller is - * guarenteed to not need to try scheduling a context from the Run Pool - it - * can safely assert that the result is false. - * - * It is a programming error to have more than U32_MAX jobs in flight at a time. - * - * The following locking conditions are made on the caller: - * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex. - * - it must \em not hold kbasep_js_device_data::runpool_irq::lock (as this will be - * obtained internally) - * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be - * obtained internally) - * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally). - * - * @return true indicates that the Policy Queue was updated, and so the - * caller will need to try scheduling a context onto the Run Pool. - * @return false indicates that no updates were made to the Policy Queue, - * so no further action is required from the caller. This is \b always returned - * when the context is currently scheduled. 
- */ -bool kbasep_js_add_job(struct kbase_context *kctx, struct kbase_jd_atom *atom); - -/** - * @brief Remove a job chain from the Job Scheduler, except for its 'retained state'. - * - * Completely removing a job requires several calls: - * - kbasep_js_copy_atom_retained_state(), to capture the 'retained state' of - * the atom - * - kbasep_js_remove_job(), to partially remove the atom from the Job Scheduler - * - kbasep_js_runpool_release_ctx_and_katom_retained_state(), to release the - * remaining state held as part of the job having been run. - * - * In the common case of atoms completing normally, this set of actions is more optimal for spinlock purposes than having kbasep_js_remove_job() handle all of the actions. - * - * In the case of cancelling atoms, it is easier to call kbasep_js_remove_cancelled_job(), which handles all the necessary actions. - * - * It is a programming error to call this when: - * - \a atom is not a job belonging to kctx. - * - \a atom has already been removed from the Job Scheduler. - * - \a atom is still in the runpool: - * - it has not been removed with kbasep_js_policy_dequeue_job() - * - or, it has not been removed with kbasep_js_policy_dequeue_job_irq() - * - * Do not use this for removing jobs being killed by kbase_jd_cancel() - use - * kbasep_js_remove_cancelled_job() instead. - * - * The following locking conditions are made on the caller: - * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex. - * - */ -void kbasep_js_remove_job(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *atom); - -/** - * @brief Completely remove a job chain from the Job Scheduler, in the case - * where the job chain was cancelled. - * - * This is a variant of kbasep_js_remove_job() that takes care of removing all - * of the retained state too. This is generally useful for cancelled atoms, - * which need not be handled in an optimal way. 
- * - * It is a programming error to call this when: - * - \a atom is not a job belonging to kctx. - * - \a atom has already been removed from the Job Scheduler. - * - \a atom is still in the runpool: - * - it is not being killed with kbasep_jd_cancel() - * - or, it has not been removed with kbasep_js_policy_dequeue_job() - * - or, it has not been removed with kbasep_js_policy_dequeue_job_irq() - * - * The following locking conditions are made on the caller: - * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex. - * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, (as this will be - * obtained internally) - * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this could be - * obtained internally) - * - * @return true indicates that ctx attributes have changed and the caller - * should call kbase_js_sched_all() to try to run more jobs - * @return false otherwise - */ -bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev, - struct kbase_context *kctx, - struct kbase_jd_atom *katom); - -/** - * @brief Refcount a context as being busy, preventing it from being scheduled - * out. - * - * @note This function can safely be called from IRQ context. - * - * The following locking conditions are made on the caller: - * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because - * it will be used internally. - * - * @return value != false if the retain succeeded, and the context will not be scheduled out. - * @return false if the retain failed (because the context is being/has been scheduled out). - */ -bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); - -/** - * @brief Refcount a context as being busy, preventing it from being scheduled - * out. - * - * @note This function can safely be called from IRQ context. 
- * - * The following locks must be held by the caller: - * - kbasep_js_device_data::runpool_irq::lock - * - * @return value != false if the retain succeeded, and the context will not be scheduled out. - * @return false if the retain failed (because the context is being/has been scheduled out). - */ -bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, struct kbase_context *kctx); - -/** - * @brief Lookup a context in the Run Pool based upon its current address space - * and ensure that is stays scheduled in. - * - * The context is refcounted as being busy to prevent it from scheduling - * out. It must be released with kbasep_js_runpool_release_ctx() when it is no - * longer required to stay scheduled in. - * - * @note This function can safely be called from IRQ context. - * - * The following locking conditions are made on the caller: - * - it must \em not hold the kbasep_js_device_data::runpoool_irq::lock, because - * it will be used internally. If the runpool_irq::lock is already held, then - * the caller should use kbasep_js_runpool_lookup_ctx_nolock() instead. - * - * @return a valid struct kbase_context on success, which has been refcounted as being busy. - * @return NULL on failure, indicating that no context was found in \a as_nr - */ -struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, int as_nr); - -/** - * kbasep_js_runpool_lookup_ctx_nolock - Lookup a context in the Run Pool based - * upon its current address space and ensure that is stays scheduled in. - * @kbdev: Device pointer - * @as_nr: Address space to lookup - * - * The context is refcounted as being busy to prevent it from scheduling - * out. It must be released with kbasep_js_runpool_release_ctx() when it is no - * longer required to stay scheduled in. - * - * Note: This function can safely be called from IRQ context. - * - * The following locking conditions are made on the caller: - * - it must the kbasep_js_device_data::runpoool_irq::lock. 
- * - * Return: a valid struct kbase_context on success, which has been refcounted as - * being busy. - * NULL on failure, indicating that no context was found in \a as_nr - */ -struct kbase_context *kbasep_js_runpool_lookup_ctx_nolock( - struct kbase_device *kbdev, int as_nr); - -/** - * @brief Handling the requeuing/killing of a context that was evicted from the - * policy queue or runpool. - * - * This should be used whenever handing off a context that has been evicted - * from the policy queue or the runpool: - * - If the context is not dying and has jobs, it gets re-added to the policy - * queue - * - Otherwise, it is not added - * - * In addition, if the context is dying the jobs are killed asynchronously. - * - * In all cases, the Power Manager active reference is released - * (kbase_pm_context_idle()) whenever the has_pm_ref parameter is true. \a - * has_pm_ref must be set to false whenever the context was not previously in - * the runpool and does not hold a Power Manager active refcount. Note that - * contexts in a rollback of kbasep_js_try_schedule_head_ctx() might have an - * active refcount even though they weren't in the runpool. - * - * The following locking conditions are made on the caller: - * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex. - * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be - * obtained internally) - */ -void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev, struct kbase_context *kctx, bool has_pm_ref); - -/** - * @brief Release a refcount of a context being busy, allowing it to be - * scheduled out. - * - * When the refcount reaches zero and the context \em might be scheduled out - * (depending on whether the Scheudling Policy has deemed it so, or if it has run - * out of jobs). 
- * - * If the context does get scheduled out, then The following actions will be - * taken as part of deschduling a context: - * - For the context being descheduled: - * - If the context is in the processing of dying (all the jobs are being - * removed from it), then descheduling also kills off any jobs remaining in the - * context. - * - If the context is not dying, and any jobs remain after descheduling the - * context then it is re-enqueued to the Policy's Queue. - * - Otherwise, the context is still known to the scheduler, but remains absent - * from the Policy Queue until a job is next added to it. - * - In all descheduling cases, the Power Manager active reference (obtained - * during kbasep_js_try_schedule_head_ctx()) is released (kbase_pm_context_idle()). - * - * Whilst the context is being descheduled, this also handles actions that - * cause more atoms to be run: - * - Attempt submitting atoms when the Context Attributes on the Runpool have - * changed. This is because the context being scheduled out could mean that - * there are more opportunities to run atoms. - * - Attempt submitting to a slot that was previously blocked due to affinity - * restrictions. This is usually only necessary when releasing a context - * happens as part of completing a previous job, but is harmless nonetheless. - * - Attempt scheduling in a new context (if one is available), and if necessary, - * running a job from that new context. - * - * Unlike retaining a context in the runpool, this function \b cannot be called - * from IRQ context. - * - * It is a programming error to call this on a \a kctx that is not currently - * scheduled, or that already has a zero refcount. - * - * The following locking conditions are made on the caller: - * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because - * it will be used internally. - * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex. 
- * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be - * obtained internally) - * - it must \em not hold the kbase_device::as[n].transaction_mutex (as this will be obtained internally) - * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be - * obtained internally) - * - */ -void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); - -/** - * @brief Variant of kbasep_js_runpool_release_ctx() that handles additional - * actions from completing an atom. - * - * This is usually called as part of completing an atom and releasing the - * refcount on the context held by the atom. - * - * Therefore, the extra actions carried out are part of handling actions queued - * on a completed atom, namely: - * - Releasing the atom's context attributes - * - Retrying the submission on a particular slot, because we couldn't submit - * on that slot from an IRQ handler. - * - * The locking conditions of this function are the same as those for - * kbasep_js_runpool_release_ctx() - */ -void kbasep_js_runpool_release_ctx_and_katom_retained_state(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state); - -/** - * @brief Variant of kbase_js_runpool_release_ctx() that assumes that - * kbasep_js_device_data::runpool_mutex and - * kbasep_js_kctx_info::ctx::jsctx_mutex are held by the caller, and does not - * attempt to schedule new contexts. - */ -void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev, - struct kbase_context *kctx); - -/** - * @brief Schedule in a privileged context - * - * This schedules a context in regardless of the context priority. - * If the runpool is full, a context will be forced out of the runpool and the function will wait - * for the new context to be scheduled in. - * The context will be kept scheduled in (and the corresponding address space reserved) until - * kbasep_js_release_privileged_ctx is called). 
- * - * The following locking conditions are made on the caller: - * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because - * it will be used internally. - * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be - * obtained internally) - * - it must \em not hold the kbase_device::as[n].transaction_mutex (as this will be obtained internally) - * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally). - * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex, because it will - * be used internally. - * - */ -void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); - -/** - * @brief Release a privileged context, allowing it to be scheduled out. - * - * See kbasep_js_runpool_release_ctx for potential side effects. - * - * The following locking conditions are made on the caller: - * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because - * it will be used internally. - * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex. - * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be - * obtained internally) - * - it must \em not hold the kbase_device::as[n].transaction_mutex (as this will be obtained internally) - * - */ -void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); - -/** - * @brief Try to submit the next job on each slot - * - * The following locks may be used: - * - kbasep_js_device_data::runpool_mutex - * - kbasep_js_device_data::runpool_irq::lock - */ -void kbase_js_try_run_jobs(struct kbase_device *kbdev); - -/** - * @brief Suspend the job scheduler during a Power Management Suspend event. - * - * Causes all contexts to be removed from the runpool, and prevents any - * contexts from (re)entering the runpool. 
- * - * This does not handle suspending the one privileged context: the caller must - * instead do this by by suspending the GPU HW Counter Instrumentation. - * - * This will eventually cause all Power Management active references held by - * contexts on the runpool to be released, without running any more atoms. - * - * The caller must then wait for all Power Mangement active refcount to become - * zero before completing the suspend. - * - * The emptying mechanism may take some time to complete, since it can wait for - * jobs to complete naturally instead of forcing them to end quickly. However, - * this is bounded by the Job Scheduling Policy's Job Timeouts. Hence, this - * function is guaranteed to complete in a finite time whenever the Job - * Scheduling Policy implements Job Timeouts (such as those done by CFS). - */ -void kbasep_js_suspend(struct kbase_device *kbdev); - -/** - * @brief Resume the Job Scheduler after a Power Management Resume event. - * - * This restores the actions from kbasep_js_suspend(): - * - Schedules contexts back into the runpool - * - Resumes running atoms on the GPU - */ -void kbasep_js_resume(struct kbase_device *kbdev); - -/** - * @brief Submit an atom to the job scheduler. - * - * The atom is enqueued on the context's ringbuffer. The caller must have - * ensured that all dependencies can be represented in the ringbuffer. - * - * Caller must hold jctx->lock - * - * @param[in] kctx Context pointer - * @param[in] atom Pointer to the atom to submit - * - * @return 0 if submit succeeded - * error code if the atom can not be submitted at this - * time, due to insufficient space in the ringbuffer, or dependencies - * that can not be represented. - */ -int kbase_js_dep_resolved_submit(struct kbase_context *kctx, - struct kbase_jd_atom *katom, - bool *enqueue_required); - -/** - * @brief Pull an atom from a context in the job scheduler for execution. - * - * The atom will not be removed from the ringbuffer at this stage. 
- * - * The HW access lock must be held when calling this function. - * - * @param[in] kctx Context to pull from - * @param[in] js Job slot to pull from - * @return Pointer to an atom, or NULL if there are no atoms for this - * slot that can be currently run. - */ -struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js); - -/** - * @brief Return an atom to the job scheduler ringbuffer. - * - * An atom is 'unpulled' if execution is stopped but intended to be returned to - * later. The most common reason for this is that the atom has been - * soft-stopped. - * - * Note that if multiple atoms are to be 'unpulled', they must be returned in - * the reverse order to which they were originally pulled. It is a programming - * error to return atoms in any other order. - * - * The HW access lock must be held when calling this function. - * - * @param[in] kctx Context pointer - * @param[in] atom Pointer to the atom to unpull - */ -void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom); - -/** - * @brief Complete an atom from jd_done_worker(), removing it from the job - * scheduler ringbuffer. - * - * If the atom failed then all dependee atoms marked for failure propagation - * will also fail. - * - * @param[in] kctx Context pointer - * @param[in] katom Pointer to the atom to complete - * @return true if the context is now idle (no jobs pulled) - * false otherwise - */ -bool kbase_js_complete_atom_wq(struct kbase_context *kctx, - struct kbase_jd_atom *katom); - -/** - * @brief Complete an atom. - * - * Most of the work required to complete an atom will be performed by - * jd_done_worker(). - * - * The HW access lock must be held when calling this function. - * - * @param[in] katom Pointer to the atom to complete - * @param[in] end_timestamp The time that the atom completed (may be NULL) - */ -void kbase_js_complete_atom(struct kbase_jd_atom *katom, - ktime_t *end_timestamp); - -/** - * @brief Submit atoms from all available contexts. 
- * - * This will attempt to submit as many jobs as possible to the provided job - * slots. It will exit when either all job slots are full, or all contexts have - * been used. - * - * @param[in] kbdev Device pointer - * @param[in] js_mask Mask of job slots to submit to - */ -void kbase_js_sched(struct kbase_device *kbdev, int js_mask); - -/** - * kbase_jd_zap_context - Attempt to deschedule a context that is being - * destroyed - * @kctx: Context pointer - * - * This will attempt to remove a context from any internal job scheduler queues - * and perform any other actions to ensure a context will not be submitted - * from. - * - * If the context is currently scheduled, then the caller must wait for all - * pending jobs to complete before taking any further action. - */ -void kbase_js_zap_context(struct kbase_context *kctx); - -/** - * @brief Validate an atom - * - * This will determine whether the atom can be scheduled onto the GPU. Atoms - * with invalid combinations of core requirements will be rejected. - * - * @param[in] kbdev Device pointer - * @param[in] katom Atom to validate - * @return true if atom is valid - * false otherwise - */ -bool kbase_js_is_atom_valid(struct kbase_device *kbdev, - struct kbase_jd_atom *katom); - -/* - * Helpers follow - */ - -/** - * @brief Check that a context is allowed to submit jobs on this policy - * - * The purpose of this abstraction is to hide the underlying data size, and wrap up - * the long repeated line of code. - * - * As with any bool, never test the return value with true. - * - * The caller must hold kbasep_js_device_data::runpool_irq::lock. 
- */ -static inline bool kbasep_js_is_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx) -{ - u16 test_bit; - - /* Ensure context really is scheduled in */ - KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); - KBASE_DEBUG_ASSERT(kctx->jctx.sched_info.ctx.is_scheduled); - - test_bit = (u16) (1u << kctx->as_nr); - - return (bool) (js_devdata->runpool_irq.submit_allowed & test_bit); -} - -/** - * @brief Allow a context to submit jobs on this policy - * - * The purpose of this abstraction is to hide the underlying data size, and wrap up - * the long repeated line of code. - * - * The caller must hold kbasep_js_device_data::runpool_irq::lock. - */ -static inline void kbasep_js_set_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx) -{ - u16 set_bit; - - /* Ensure context really is scheduled in */ - KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); - KBASE_DEBUG_ASSERT(kctx->jctx.sched_info.ctx.is_scheduled); - - set_bit = (u16) (1u << kctx->as_nr); - - dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)", kctx, kctx->as_nr); - - js_devdata->runpool_irq.submit_allowed |= set_bit; -} - -/** - * @brief Prevent a context from submitting more jobs on this policy - * - * The purpose of this abstraction is to hide the underlying data size, and wrap up - * the long repeated line of code. - * - * The caller must hold kbasep_js_device_data::runpool_irq::lock. 
- */ -static inline void kbasep_js_clear_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx) -{ - u16 clear_bit; - u16 clear_mask; - - /* Ensure context really is scheduled in */ - KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); - KBASE_DEBUG_ASSERT(kctx->jctx.sched_info.ctx.is_scheduled); - - clear_bit = (u16) (1u << kctx->as_nr); - clear_mask = ~clear_bit; - - dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)", kctx, kctx->as_nr); - - js_devdata->runpool_irq.submit_allowed &= clear_mask; -} - -/** - * @brief Manage the 'retry_submit_on_slot' part of a kbase_jd_atom - */ -static inline void kbasep_js_clear_job_retry_submit(struct kbase_jd_atom *atom) -{ - atom->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID; -} - -/** - * Mark a slot as requiring resubmission by carrying that information on a - * completing atom. - * - * @note This can ASSERT in debug builds if the submit slot has been set to - * something other than the current value for @a js. This is because you might - * be unintentionally stopping more jobs being submitted on the old submit - * slot, and that might cause a scheduling-hang. - * - * @note If you can guarantee that the atoms for the original slot will be - * submitted on some other slot, then call kbasep_js_clear_job_retry_submit() - * first to silence the ASSERT. 
- */ -static inline void kbasep_js_set_job_retry_submit_slot(struct kbase_jd_atom *atom, int js) -{ - KBASE_DEBUG_ASSERT(0 <= js && js <= BASE_JM_MAX_NR_SLOTS); - KBASE_DEBUG_ASSERT((atom->retry_submit_on_slot == - KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID) - || (atom->retry_submit_on_slot == js)); - - atom->retry_submit_on_slot = js; -} - -/** - * Create an initial 'invalid' atom retained state, that requires no - * atom-related work to be done on releasing with - * kbasep_js_runpool_release_ctx_and_katom_retained_state() - */ -static inline void kbasep_js_atom_retained_state_init_invalid(struct kbasep_js_atom_retained_state *retained_state) -{ - retained_state->event_code = BASE_JD_EVENT_NOT_STARTED; - retained_state->core_req = KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID; - retained_state->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID; -} - -/** - * Copy atom state that can be made available after jd_done_nolock() is called - * on that atom. - */ -static inline void kbasep_js_atom_retained_state_copy(struct kbasep_js_atom_retained_state *retained_state, const struct kbase_jd_atom *katom) -{ - retained_state->event_code = katom->event_code; - retained_state->core_req = katom->core_req; - retained_state->retry_submit_on_slot = katom->retry_submit_on_slot; - retained_state->sched_priority = katom->sched_priority; - retained_state->device_nr = katom->device_nr; -} - -/** - * @brief Determine whether an atom has finished (given its retained state), - * and so should be given back to userspace/removed from the system. 
- * - * Reasons for an atom not finishing include: - * - Being soft-stopped (and so, the atom should be resubmitted sometime later) - * - * @param[in] katom_retained_state the retained state of the atom to check - * @return false if the atom has not finished - * @return !=false if the atom has finished - */ -static inline bool kbasep_js_has_atom_finished(const struct kbasep_js_atom_retained_state *katom_retained_state) -{ - return (bool) (katom_retained_state->event_code != BASE_JD_EVENT_STOPPED && katom_retained_state->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT); -} - -/** - * @brief Determine whether a struct kbasep_js_atom_retained_state is valid - * - * An invalid struct kbasep_js_atom_retained_state is allowed, and indicates that the - * code should just ignore it. - * - * @param[in] katom_retained_state the atom's retained state to check - * @return false if the retained state is invalid, and can be ignored - * @return !=false if the retained state is valid - */ -static inline bool kbasep_js_atom_retained_state_is_valid(const struct kbasep_js_atom_retained_state *katom_retained_state) -{ - return (bool) (katom_retained_state->core_req != KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID); -} - -static inline bool kbasep_js_get_atom_retry_submit_slot(const struct kbasep_js_atom_retained_state *katom_retained_state, int *res) -{ - int js = katom_retained_state->retry_submit_on_slot; - - *res = js; - return (bool) (js >= 0); -} - -#if KBASE_DEBUG_DISABLE_ASSERTS == 0 -/** - * Debug Check the refcount of a context. Only use within ASSERTs - * - * Obtains kbasep_js_device_data::runpool_irq::lock - * - * @return negative value if the context is not scheduled in - * @return current refcount of the context if it is scheduled in. The refcount - * is not guarenteed to be kept constant. 
- */ -static inline int kbasep_js_debug_check_ctx_refcount(struct kbase_device *kbdev, struct kbase_context *kctx) -{ - unsigned long flags; - struct kbasep_js_device_data *js_devdata; - int result = -1; - int as_nr; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - js_devdata = &kbdev->js_data; - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - as_nr = kctx->as_nr; - if (as_nr != KBASEP_AS_NR_INVALID) - result = js_devdata->runpool_irq.per_as_data[as_nr].as_busy_refcount; - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - return result; -} -#endif /* KBASE_DEBUG_DISABLE_ASSERTS == 0 */ - -/** - * @brief Variant of kbasep_js_runpool_lookup_ctx() that can be used when the - * context is guarenteed to be already previously retained. - * - * It is a programming error to supply the \a as_nr of a context that has not - * been previously retained/has a busy refcount of zero. The only exception is - * when there is no ctx in \a as_nr (NULL returned). - * - * The following locking conditions are made on the caller: - * - it must \em not hold the kbasep_js_device_data::runpoool_irq::lock, because - * it will be used internally. - * - * @return a valid struct kbase_context on success, with a refcount that is guarenteed - * to be non-zero and unmodified by this function. 
- * @return NULL on failure, indicating that no context was found in \a as_nr - */ -static inline struct kbase_context *kbasep_js_runpool_lookup_ctx_noretain(struct kbase_device *kbdev, int as_nr) -{ - unsigned long flags; - struct kbasep_js_device_data *js_devdata; - struct kbase_context *found_kctx; - struct kbasep_js_per_as_data *js_per_as_data; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS); - js_devdata = &kbdev->js_data; - js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr]; - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - - found_kctx = js_per_as_data->kctx; - KBASE_DEBUG_ASSERT(found_kctx == NULL || js_per_as_data->as_busy_refcount > 0); - - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - return found_kctx; -} - -/** - * This will provide a conversion from time (us) to ticks of the gpu clock - * based on the minimum available gpu frequency. - * This is usually good to compute best/worst case (where the use of current - * frequency is not valid due to DVFS). - * e.g.: when you need the number of cycles to guarantee you won't wait for - * longer than 'us' time (you might have a shorter wait). - */ -static inline u32 kbasep_js_convert_us_to_gpu_ticks_min_freq(struct kbase_device *kbdev, u32 us) -{ - u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_min; - - KBASE_DEBUG_ASSERT(0 != gpu_freq); - return us * (gpu_freq / 1000); -} - -/** - * This will provide a conversion from time (us) to ticks of the gpu clock - * based on the maximum available gpu frequency. - * This is usually good to compute best/worst case (where the use of current - * frequency is not valid due to DVFS). - * e.g.: When you need the number of cycles to guarantee you'll wait at least - * 'us' amount of time (but you might wait longer). 
- */ -static inline u32 kbasep_js_convert_us_to_gpu_ticks_max_freq(struct kbase_device *kbdev, u32 us) -{ - u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_max; - - KBASE_DEBUG_ASSERT(0 != gpu_freq); - return us * (u32) (gpu_freq / 1000); -} - -/** - * This will provide a conversion from ticks of the gpu clock to time (us) - * based on the minimum available gpu frequency. - * This is usually good to compute best/worst case (where the use of current - * frequency is not valid due to DVFS). - * e.g.: When you need to know the worst-case wait that 'ticks' cycles will - * take (you guarantee that you won't wait any longer than this, but it may - * be shorter). - */ -static inline u32 kbasep_js_convert_gpu_ticks_to_us_min_freq(struct kbase_device *kbdev, u32 ticks) -{ - u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_min; - - KBASE_DEBUG_ASSERT(0 != gpu_freq); - return ticks / gpu_freq * 1000; -} - -/** - * This will provide a conversion from ticks of the gpu clock to time (us) - * based on the maximum available gpu frequency. - * This is usually good to compute best/worst case (where the use of current - * frequency is not valid due to DVFS). - * e.g.: When you need to know the best-case wait for 'tick' cycles (you - * guarantee to be waiting for at least this long, but it may be longer). - */ -static inline u32 kbasep_js_convert_gpu_ticks_to_us_max_freq(struct kbase_device *kbdev, u32 ticks) -{ - u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_max; - - KBASE_DEBUG_ASSERT(0 != gpu_freq); - return ticks / gpu_freq * 1000; -} - -/* - * The following locking conditions are made on the caller: - * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex. 
- * - The caller must hold the kbasep_js_device_data::runpool_mutex - */ -static inline void kbase_js_runpool_inc_context_count( - struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_device_data *js_devdata; - struct kbasep_js_kctx_info *js_kctx_info; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - - js_devdata = &kbdev->js_data; - js_kctx_info = &kctx->jctx.sched_info; - - lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); - lockdep_assert_held(&js_devdata->runpool_mutex); - - /* Track total contexts */ - KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running < S8_MAX); - ++(js_devdata->nr_all_contexts_running); - - if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) == 0) { - /* Track contexts that can submit jobs */ - KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running < - S8_MAX); - ++(js_devdata->nr_user_contexts_running); - } -} - -/* - * The following locking conditions are made on the caller: - * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex. 
- * - The caller must hold the kbasep_js_device_data::runpool_mutex - */ -static inline void kbase_js_runpool_dec_context_count( - struct kbase_device *kbdev, - struct kbase_context *kctx) -{ - struct kbasep_js_device_data *js_devdata; - struct kbasep_js_kctx_info *js_kctx_info; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - - js_devdata = &kbdev->js_data; - js_kctx_info = &kctx->jctx.sched_info; - - lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); - lockdep_assert_held(&js_devdata->runpool_mutex); - - /* Track total contexts */ - --(js_devdata->nr_all_contexts_running); - KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running >= 0); - - if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) == 0) { - /* Track contexts that can submit jobs */ - --(js_devdata->nr_user_contexts_running); - KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running >= 0); - } -} - - -/** - * @brief Submit atoms from all available contexts to all job slots. - * - * This will attempt to submit as many jobs as possible. It will exit when - * either all job slots are full, or all contexts have been used. - * - * @param[in] kbdev Device pointer - */ -static inline void kbase_js_sched_all(struct kbase_device *kbdev) -{ - kbase_js_sched(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1); -} - -extern const int -kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS]; - -extern const base_jd_prio -kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT]; - -/** - * kbasep_js_atom_prio_to_sched_prio(): - Convert atom priority (base_jd_prio) - * to relative ordering - * @atom_prio: Priority ID to translate. - * - * Atom priority values for @ref base_jd_prio cannot be compared directly to - * find out which are higher or lower. - * - * This function will convert base_jd_prio values for successively lower - * priorities into a monotonically increasing sequence. 
That is, the lower the - * base_jd_prio priority, the higher the value produced by this function. This - * is in accordance with how the rest of the kernel treates priority. - * - * The mapping is 1:1 and the size of the valid input range is the same as the - * size of the valid output range, i.e. - * KBASE_JS_ATOM_SCHED_PRIO_COUNT == BASE_JD_NR_PRIO_LEVELS - * - * Note This must be kept in sync with BASE_JD_PRIO_<...> definitions - * - * Return: On success: a value in the inclusive range - * 0..KBASE_JS_ATOM_SCHED_PRIO_COUNT-1. On failure: - * KBASE_JS_ATOM_SCHED_PRIO_INVALID - */ -static inline int kbasep_js_atom_prio_to_sched_prio(base_jd_prio atom_prio) -{ - if (atom_prio >= BASE_JD_NR_PRIO_LEVELS) - return KBASE_JS_ATOM_SCHED_PRIO_INVALID; - - return kbasep_js_atom_priority_to_relative[atom_prio]; -} - -static inline base_jd_prio kbasep_js_sched_prio_to_atom_prio(int sched_prio) -{ - unsigned int prio_idx; - - KBASE_DEBUG_ASSERT(0 <= sched_prio - && sched_prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT); - - prio_idx = (unsigned int)sched_prio; - - return kbasep_js_relative_priority_to_atom[prio_idx]; -} - - /** @} *//* end group kbase_js */ - /** @} *//* end group base_kbase_api */ - /** @} *//* end group base_api */ - -#endif /* _KBASE_JS_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.c deleted file mode 100755 index 8891bff70c60d..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.c +++ /dev/null @@ -1,310 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -#include -#include - -/* - * Private functions follow - */ - -/** - * @brief Check whether a ctx has a certain attribute, and if so, retain that - * attribute on the runpool. - * - * Requires: - * - jsctx mutex - * - runpool_irq spinlock - * - ctx is scheduled on the runpool - * - * @return true indicates a change in ctx attributes state of the runpool. - * In this state, the scheduler might be able to submit more jobs than - * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock() - * or similar is called sometime later. - * @return false indicates no change in ctx attributes state of the runpool. - */ -static bool kbasep_js_ctx_attr_runpool_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute) -{ - struct kbasep_js_device_data *js_devdata; - struct kbasep_js_kctx_info *js_kctx_info; - bool runpool_state_changed = false; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT); - js_devdata = &kbdev->js_data; - js_kctx_info = &kctx->jctx.sched_info; - - lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled != false); - - if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) { - KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] < S8_MAX); - ++(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]); - - if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 1) { - /* First refcount indicates a state change */ - runpool_state_changed = true; - KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_RUNPOOL, kctx, NULL, 0u, attribute); - } - } - - return runpool_state_changed; -} - 
-/** - * @brief Check whether a ctx has a certain attribute, and if so, release that - * attribute on the runpool. - * - * Requires: - * - jsctx mutex - * - runpool_irq spinlock - * - ctx is scheduled on the runpool - * - * @return true indicates a change in ctx attributes state of the runpool. - * In this state, the scheduler might be able to submit more jobs than - * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock() - * or similar is called sometime later. - * @return false indicates no change in ctx attributes state of the runpool. - */ -static bool kbasep_js_ctx_attr_runpool_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute) -{ - struct kbasep_js_device_data *js_devdata; - struct kbasep_js_kctx_info *js_kctx_info; - bool runpool_state_changed = false; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT); - js_devdata = &kbdev->js_data; - js_kctx_info = &kctx->jctx.sched_info; - - lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled != false); - - if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) { - KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] > 0); - --(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]); - - if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 0) { - /* Last de-refcount indicates a state change */ - runpool_state_changed = true; - KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_RUNPOOL, kctx, NULL, 0u, attribute); - } - } - - return runpool_state_changed; -} - -/** - * @brief Retain a certain attribute on a ctx, also retaining it on the runpool - * if the context is scheduled. 
- * - * Requires: - * - jsctx mutex - * - If the context is scheduled, then runpool_irq spinlock must also be held - * - * @return true indicates a change in ctx attributes state of the runpool. - * This may allow the scheduler to submit more jobs than previously. - * @return false indicates no change in ctx attributes state of the runpool. - */ -static bool kbasep_js_ctx_attr_ctx_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute) -{ - struct kbasep_js_kctx_info *js_kctx_info; - bool runpool_state_changed = false; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT); - js_kctx_info = &kctx->jctx.sched_info; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); - KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] < U32_MAX); - - ++(js_kctx_info->ctx.ctx_attr_ref_count[attribute]); - - if (js_kctx_info->ctx.is_scheduled != false && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) { - /* Only ref-count the attribute on the runpool for the first time this contexts sees this attribute */ - KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_CTX, kctx, NULL, 0u, attribute); - runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, attribute); - } - - return runpool_state_changed; -} - -/* - * @brief Release a certain attribute on a ctx, also releasing it from the runpool - * if the context is scheduled. - * - * Requires: - * - jsctx mutex - * - If the context is scheduled, then runpool_irq spinlock must also be held - * - * @return true indicates a change in ctx attributes state of the runpool. - * This may allow the scheduler to submit more jobs than previously. - * @return false indicates no change in ctx attributes state of the runpool. 
- */ -static bool kbasep_js_ctx_attr_ctx_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute) -{ - struct kbasep_js_kctx_info *js_kctx_info; - bool runpool_state_changed = false; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT); - js_kctx_info = &kctx->jctx.sched_info; - - lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); - KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] > 0); - - if (js_kctx_info->ctx.is_scheduled != false && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) { - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - /* Only de-ref-count the attribute on the runpool when this is the last ctx-reference to it */ - runpool_state_changed = kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, attribute); - KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_CTX, kctx, NULL, 0u, attribute); - } - - /* De-ref must happen afterwards, because kbasep_js_ctx_attr_runpool_release() needs to check it too */ - --(js_kctx_info->ctx.ctx_attr_ref_count[attribute]); - - return runpool_state_changed; -} - -/* - * More commonly used public functions - */ - -void kbasep_js_ctx_attr_set_initial_attrs(struct kbase_device *kbdev, struct kbase_context *kctx) -{ - struct kbasep_js_kctx_info *js_kctx_info; - bool runpool_state_changed = false; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - js_kctx_info = &kctx->jctx.sched_info; - - if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) != false) { - /* This context never submits, so don't track any scheduling attributes */ - return; - } - - /* Transfer attributes held in the context flags for contexts that have submit enabled */ - - if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_HINT_ONLY_COMPUTE) != false) { - /* Compute context */ - runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE); - 
} - /* NOTE: Whether this is a non-compute context depends on the jobs being - * run, e.g. it might be submitting jobs with BASE_JD_REQ_ONLY_COMPUTE */ - - /* ... More attributes can be added here ... */ - - /* The context should not have been scheduled yet, so ASSERT if this caused - * runpool state changes (note that other threads *can't* affect the value - * of runpool_state_changed, due to how it's calculated) */ - KBASE_DEBUG_ASSERT(runpool_state_changed == false); - CSTD_UNUSED(runpool_state_changed); -} - -void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx) -{ - bool runpool_state_changed; - int i; - - /* Retain any existing attributes */ - for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) { - if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) { - /* The context is being scheduled in, so update the runpool with the new attributes */ - runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i); - - /* We don't need to know about state changed, because retaining a - * context occurs on scheduling it, and that itself will also try - * to run new atoms */ - CSTD_UNUSED(runpool_state_changed); - } - } -} - -bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx) -{ - bool runpool_state_changed = false; - int i; - - /* Release any existing attributes */ - for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) { - if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) { - /* The context is being scheduled out, so update the runpool on the removed attributes */ - runpool_state_changed |= kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i); - } - } - - return runpool_state_changed; -} - -void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom) -{ - bool runpool_state_changed = false; - 
base_jd_core_req core_req; - - KBASE_DEBUG_ASSERT(katom); - core_req = katom->core_req; - - if (core_req & BASE_JD_REQ_ONLY_COMPUTE) - runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE); - else - runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE); - - if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) { - /* Atom that can run on slot1 or slot2, and can use all cores */ - runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES); - } - - /* We don't need to know about state changed, because retaining an - * atom occurs on adding it, and that itself will also try to run - * new atoms */ - CSTD_UNUSED(runpool_state_changed); -} - -bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state) -{ - bool runpool_state_changed = false; - base_jd_core_req core_req; - - KBASE_DEBUG_ASSERT(katom_retained_state); - core_req = katom_retained_state->core_req; - - /* No-op for invalid atoms */ - if (kbasep_js_atom_retained_state_is_valid(katom_retained_state) == false) - return false; - - if (core_req & BASE_JD_REQ_ONLY_COMPUTE) - runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE); - else - runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE); - - if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) { - /* Atom that can run on slot1 or slot2, and can use all cores */ - runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES); - } - - return 
runpool_state_changed; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.h deleted file mode 100755 index ce9183326a576..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.h +++ /dev/null @@ -1,158 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_kbase_js_ctx_attr.h - * Job Scheduler Context Attribute APIs - */ - -#ifndef _KBASE_JS_CTX_ATTR_H_ -#define _KBASE_JS_CTX_ATTR_H_ - -/** - * @addtogroup base_api - * @{ - */ - -/** - * @addtogroup base_kbase_api - * @{ - */ - -/** - * @addtogroup kbase_js - * @{ - */ - -/** - * Set the initial attributes of a context (when context create flags are set) - * - * Requires: - * - Hold the jsctx_mutex - */ -void kbasep_js_ctx_attr_set_initial_attrs(struct kbase_device *kbdev, struct kbase_context *kctx); - -/** - * Retain all attributes of a context - * - * This occurs on scheduling in the context on the runpool (but after - * is_scheduled is set) - * - * Requires: - * - jsctx mutex - * - runpool_irq spinlock - * - ctx->is_scheduled is true - */ -void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); - -/** - * Release all attributes of a context - * - * This occurs on scheduling out the context from the runpool (but before - * is_scheduled is cleared) - * - * Requires: - * - jsctx mutex - * - runpool_irq spinlock 
- * - ctx->is_scheduled is true - * - * @return true indicates a change in ctx attributes state of the runpool. - * In this state, the scheduler might be able to submit more jobs than - * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock() - * or similar is called sometime later. - * @return false indicates no change in ctx attributes state of the runpool. - */ -bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); - -/** - * Retain all attributes of an atom - * - * This occurs on adding an atom to a context - * - * Requires: - * - jsctx mutex - * - If the context is scheduled, then runpool_irq spinlock must also be held - */ -void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom); - -/** - * Release all attributes of an atom, given its retained state. - * - * This occurs after (permanently) removing an atom from a context - * - * Requires: - * - jsctx mutex - * - If the context is scheduled, then runpool_irq spinlock must also be held - * - * This is a no-op when \a katom_retained_state is invalid. - * - * @return true indicates a change in ctx attributes state of the runpool. - * In this state, the scheduler might be able to submit more jobs than - * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock() - * or similar is called sometime later. - * @return false indicates no change in ctx attributes state of the runpool. 
- */ -bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state); - -/** - * Requires: - * - runpool_irq spinlock - */ -static inline s8 kbasep_js_ctx_attr_count_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute) -{ - struct kbasep_js_device_data *js_devdata; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT); - js_devdata = &kbdev->js_data; - - return js_devdata->runpool_irq.ctx_attr_ref_count[attribute]; -} - -/** - * Requires: - * - runpool_irq spinlock - */ -static inline bool kbasep_js_ctx_attr_is_attr_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute) -{ - /* In general, attributes are 'on' when they have a non-zero refcount (note: the refcount will never be < 0) */ - return (bool) kbasep_js_ctx_attr_count_on_runpool(kbdev, attribute); -} - -/** - * Requires: - * - jsctx mutex - */ -static inline bool kbasep_js_ctx_attr_is_attr_on_ctx(struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute) -{ - struct kbasep_js_kctx_info *js_kctx_info; - - KBASE_DEBUG_ASSERT(kctx != NULL); - KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT); - js_kctx_info = &kctx->jctx.sched_info; - - /* In general, attributes are 'on' when they have a refcount (which should never be < 0) */ - return (bool) (js_kctx_info->ctx.ctx_attr_ref_count[attribute]); -} - - /** @} *//* end group kbase_js */ - /** @} *//* end group base_kbase_api */ - /** @} *//* end group base_api */ - -#endif /* _KBASE_JS_DEFS_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_defs.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_defs.h deleted file mode 100755 index d65b494a70c93..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_defs.h +++ /dev/null @@ -1,511 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. 
All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_kbase_js.h - * Job Scheduler Type Definitions - */ - -#ifndef _KBASE_JS_DEFS_H_ -#define _KBASE_JS_DEFS_H_ - -/** - * @addtogroup base_api - * @{ - */ - -/** - * @addtogroup base_kbase_api - * @{ - */ - -/** - * @addtogroup kbase_js - * @{ - */ -/* Forward decls */ -struct kbase_device; -struct kbase_jd_atom; - - -/* Types used by the policies must go here */ -enum { - /** Context will not submit any jobs */ - KBASE_CTX_FLAG_SUBMIT_DISABLED = (1u << 0), - - /** Set if the context uses an address space and should be kept scheduled in */ - KBASE_CTX_FLAG_PRIVILEGED = (1u << 1), - - /** Kernel-side equivalent of BASE_CONTEXT_HINT_ONLY_COMPUTE. 
Non-mutable after creation flags set */ - KBASE_CTX_FLAG_HINT_ONLY_COMPUTE = (1u << 2) - - /* NOTE: Add flags for other things, such as 'is scheduled', and 'is dying' */ -}; - -typedef u32 kbase_context_flags; - -struct kbasep_atom_req { - base_jd_core_req core_req; - kbase_context_flags ctx_req; - u32 device_nr; -}; - -#include "mali_kbase_js_policy_cfs.h" - -/* Wrapper Interface - doxygen is elsewhere */ -union kbasep_js_policy { - struct kbasep_js_policy_cfs cfs; -}; - -/* Wrapper Interface - doxygen is elsewhere */ -union kbasep_js_policy_ctx_info { - struct kbasep_js_policy_cfs_ctx cfs; -}; - -/* Wrapper Interface - doxygen is elsewhere */ -union kbasep_js_policy_job_info { - struct kbasep_js_policy_cfs_job cfs; -}; - - -/** Callback function run on all of a context's jobs registered with the Job - * Scheduler */ -typedef void (*kbasep_js_policy_ctx_job_cb)(struct kbase_device *kbdev, struct kbase_jd_atom *katom); - -/** - * @brief Maximum number of jobs that can be submitted to a job slot whilst - * inside the IRQ handler. - * - * This is important because GPU NULL jobs can complete whilst the IRQ handler - * is running. Otherwise, it potentially allows an unlimited number of GPU NULL - * jobs to be submitted inside the IRQ handler, which increases IRQ latency. - */ -#define KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ 2 - -/** - * @brief the IRQ_THROTTLE time in microseconds - * - * This will be converted via the GPU's clock frequency into a cycle-count. - * - * @note we can make an estimate of the GPU's frequency by periodically - * sampling its CYCLE_COUNT register - */ -#define KBASE_JS_IRQ_THROTTLE_TIME_US 20 - -/** - * @brief Context attributes - * - * Each context attribute can be thought of as a boolean value that caches some - * state information about either the runpool, or the context: - * - In the case of the runpool, it is a cache of "Do any contexts owned by - * the runpool have attribute X?" 
- * - In the case of a context, it is a cache of "Do any atoms owned by the - * context have attribute X?" - * - * The boolean value of the context attributes often affect scheduling - * decisions, such as affinities to use and job slots to use. - * - * To accomodate changes of state in the context, each attribute is refcounted - * in the context, and in the runpool for all running contexts. Specifically: - * - The runpool holds a refcount of how many contexts in the runpool have this - * attribute. - * - The context holds a refcount of how many atoms have this attribute. - * - * Examples of use: - * - Finding out when there are a mix of @ref BASE_CONTEXT_HINT_ONLY_COMPUTE - * and ! @ref BASE_CONTEXT_HINT_ONLY_COMPUTE contexts in the runpool - */ -enum kbasep_js_ctx_attr { - /** Attribute indicating a context that contains Compute jobs. That is, - * @ref BASE_CONTEXT_HINT_ONLY_COMPUTE is \b set and/or the context has jobs of type - * @ref BASE_JD_REQ_ONLY_COMPUTE - * - * @note A context can be both 'Compute' and 'Non Compute' if it contains - * both types of jobs. - */ - KBASEP_JS_CTX_ATTR_COMPUTE, - - /** Attribute indicating a context that contains Non-Compute jobs. That is, - * the context has some jobs that are \b not of type @ref - * BASE_JD_REQ_ONLY_COMPUTE. The context usually has - * BASE_CONTEXT_HINT_COMPUTE \b clear, but this depends on the HW - * workarounds in use in the Job Scheduling Policy. - * - * @note A context can be both 'Compute' and 'Non Compute' if it contains - * both types of jobs. - */ - KBASEP_JS_CTX_ATTR_NON_COMPUTE, - - /** Attribute indicating that a context contains compute-job atoms that - * aren't restricted to a coherent group, and can run on all cores. 
- * - * Specifically, this is when the atom's \a core_req satisfy: - * - (\a core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T) // uses slot 1 or slot 2 - * - && !(\a core_req & BASE_JD_REQ_COHERENT_GROUP) // not restricted to coherent groups - * - * Such atoms could be blocked from running if one of the coherent groups - * is being used by another job slot, so tracking this context attribute - * allows us to prevent such situations. - * - * @note This doesn't take into account the 1-coregroup case, where all - * compute atoms would effectively be able to run on 'all cores', but - * contexts will still not always get marked with this attribute. Instead, - * it is the caller's responsibility to take into account the number of - * coregroups when interpreting this attribute. - * - * @note Whilst Tiler atoms are normally combined with - * BASE_JD_REQ_COHERENT_GROUP, it is possible to send such atoms without - * BASE_JD_REQ_COHERENT_GROUP set. This is an unlikely case, but it's easy - * enough to handle anyway. - */ - KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES, - - /** Must be the last in the enum */ - KBASEP_JS_CTX_ATTR_COUNT -}; - -enum { - /** Bit indicating that new atom should be started because this atom completed */ - KBASE_JS_ATOM_DONE_START_NEW_ATOMS = (1u << 0), - /** Bit indicating that the atom was evicted from the JS_NEXT registers */ - KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT = (1u << 1) -}; - -/** Combination of KBASE_JS_ATOM_DONE_<...> bits */ -typedef u32 kbasep_js_atom_done_code; - -/** - * Data used by the scheduler that is unique for each Address Space. - * - * This is used in IRQ context and kbasep_js_device_data::runpoool_irq::lock - * must be held whilst accessing this data (inculding reads and atomic - * decisions based on the read). - */ -struct kbasep_js_per_as_data { - /** - * Ref count of whether this AS is busy, and must not be scheduled out - * - * When jobs are running this is always positive. 
However, it can still be - * positive when no jobs are running. If all you need is a heuristic to - * tell you whether jobs might be running, this should be sufficient. - */ - int as_busy_refcount; - - /** Pointer to the current context on this address space, or NULL for no context */ - struct kbase_context *kctx; -}; - -/** - * @brief KBase Device Data Job Scheduler sub-structure - * - * This encapsulates the current context of the Job Scheduler on a particular - * device. This context is global to the device, and is not tied to any - * particular struct kbase_context running on the device. - * - * nr_contexts_running and as_free are optimized for packing together (by making - * them smaller types than u32). The operations on them should rarely involve - * masking. The use of signed types for arithmetic indicates to the compiler that - * the value will not rollover (which would be undefined behavior), and so under - * the Total License model, it is free to make optimizations based on that (i.e. - * to remove masking). - */ -struct kbasep_js_device_data { - /** Sub-structure to collect together Job Scheduling data used in IRQ context */ - struct runpool_irq { - /** - * Lock for accessing Job Scheduling data used in IRQ context - * - * This lock must be held whenever this data is accessed (read, or - * write). Even for read-only access, memory barriers would be needed. - * In any case, it is likely that decisions based on only reading must - * also be atomic with respect to data held here and elsewhere in the - * Job Scheduler. 
- * - * This lock must also be held for accessing: - * - kbase_context::as_nr - * - kbase_device::jm_slots - * - Parts of the kbasep_js_policy, dependent on the policy (refer to - * the policy in question for more information) - * - Parts of kbasep_js_policy_ctx_info, dependent on the policy (refer to - * the policy in question for more information) - */ - spinlock_t lock; - - /** Bitvector indicating whether a currently scheduled context is allowed to submit jobs. - * When bit 'N' is set in this, it indicates whether the context bound to address space - * 'N' (per_as_data[N].kctx) is allowed to submit jobs. - * - * It is placed here because it's much more memory efficient than having a u8 in - * struct kbasep_js_per_as_data to store this flag */ - u16 submit_allowed; - - /** Context Attributes: - * Each is large enough to hold a refcount of the number of contexts - * that can fit into the runpool. This is currently BASE_MAX_NR_AS - * - * Note that when BASE_MAX_NR_AS==16 we need 5 bits (not 4) to store - * the refcount. Hence, it's not worthwhile reducing this to - * bit-manipulation on u32s to save space (where in contrast, 4 bit - * sub-fields would be easy to do and would save space). - * - * Whilst this must not become negative, the sign bit is used for: - * - error detection in debug builds - * - Optimization: it is undefined for a signed int to overflow, and so - * the compiler can optimize for that never happening (thus, no masking - * is required on updating the variable) */ - s8 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT]; - - /** Data that is unique for each AS */ - struct kbasep_js_per_as_data per_as_data[BASE_MAX_NR_AS]; - - /* - * Affinity management and tracking - */ - /** Bitvector to aid affinity checking. Element 'n' bit 'i' indicates - * that slot 'n' is using core i (i.e. slot_affinity_refcount[n][i] > 0) */ - u64 slot_affinities[BASE_JM_MAX_NR_SLOTS]; - /** Refcount for each core owned by each slot. 
Used to generate the - * slot_affinities array of bitvectors - * - * The value of the refcount will not exceed BASE_JM_SUBMIT_SLOTS, - * because it is refcounted only when a job is definitely about to be - * submitted to a slot, and is de-refcounted immediately after a job - * finishes */ - s8 slot_affinity_refcount[BASE_JM_MAX_NR_SLOTS][64]; - } runpool_irq; - - /** - * Run Pool mutex, for managing contexts within the runpool. - * Unless otherwise specified, you must hold this lock whilst accessing any - * members that follow - * - * In addition, this is used to access: - * - the kbasep_js_kctx_info::runpool substructure - */ - struct mutex runpool_mutex; - - /** - * Queue Lock, used to access the Policy's queue of contexts independently - * of the Run Pool. - * - * Of course, you don't need the Run Pool lock to access this. - */ - struct mutex queue_mutex; - - /** - * Scheduling semaphore. This must be held when calling - * kbase_jm_kick() - */ - struct semaphore schedule_sem; - - /** - * List of contexts that can currently be pulled from - */ - struct list_head ctx_list_pullable[BASE_JM_MAX_NR_SLOTS]; - /** - * List of contexts that can not currently be pulled from, but have - * jobs currently running. - */ - struct list_head ctx_list_unpullable[BASE_JM_MAX_NR_SLOTS]; - - u16 as_free; /**< Bitpattern of free Address Spaces */ - - /** Number of currently scheduled user contexts (excluding ones that are not submitting jobs) */ - s8 nr_user_contexts_running; - /** Number of currently scheduled contexts (including ones that are not submitting jobs) */ - s8 nr_all_contexts_running; - - /** - * Policy-specific information. - * - * Refer to the structure defined by the current policy to determine which - * locks must be held when accessing this. 
- */ - union kbasep_js_policy policy; - - /** Core Requirements to match up with base_js_atom's core_req memeber - * @note This is a write-once member, and so no locking is required to read */ - base_jd_core_req js_reqs[BASE_JM_MAX_NR_SLOTS]; - - u32 scheduling_period_ns; /*< Value for JS_SCHEDULING_PERIOD_NS */ - u32 soft_stop_ticks; /*< Value for JS_SOFT_STOP_TICKS */ - u32 soft_stop_ticks_cl; /*< Value for JS_SOFT_STOP_TICKS_CL */ - u32 hard_stop_ticks_ss; /*< Value for JS_HARD_STOP_TICKS_SS */ - u32 hard_stop_ticks_cl; /*< Value for JS_HARD_STOP_TICKS_CL */ - u32 hard_stop_ticks_dumping; /*< Value for JS_HARD_STOP_TICKS_DUMPING */ - u32 gpu_reset_ticks_ss; /*< Value for JS_RESET_TICKS_SS */ - u32 gpu_reset_ticks_cl; /*< Value for JS_RESET_TICKS_CL */ - u32 gpu_reset_ticks_dumping; /*< Value for JS_RESET_TICKS_DUMPING */ - u32 ctx_timeslice_ns; /**< Value for JS_CTX_TIMESLICE_NS */ - u32 cfs_ctx_runtime_init_slices; /**< Value for DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES */ - u32 cfs_ctx_runtime_min_slices; /**< Value for DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES */ - - /** List of suspended soft jobs */ - struct list_head suspended_soft_jobs_list; - -#ifdef CONFIG_MALI_DEBUG - /* Support soft-stop on a single context */ - bool softstop_always; -#endif /* CONFIG_MALI_DEBUG */ - - /** The initalized-flag is placed at the end, to avoid cache-pollution (we should - * only be using this during init/term paths). - * @note This is a write-once member, and so no locking is required to read */ - int init_status; - - /* Number of contexts that can currently be pulled from */ - u32 nr_contexts_pullable; - - /* Number of contexts that can either be pulled from or are currently - * running */ - atomic_t nr_contexts_runnable; -}; - -/** - * @brief KBase Context Job Scheduling information structure - * - * This is a substructure in the struct kbase_context that encapsulates all the - * scheduling information. - */ -struct kbasep_js_kctx_info { - /** - * Runpool substructure. 
This must only be accessed whilst the Run Pool - * mutex ( kbasep_js_device_data::runpool_mutex ) is held. - * - * In addition, the kbasep_js_device_data::runpool_irq::lock may need to be - * held for certain sub-members. - * - * @note some of the members could be moved into struct kbasep_js_device_data for - * improved d-cache/tlb efficiency. - */ - struct { - union kbasep_js_policy_ctx_info policy_ctx; /**< Policy-specific context */ - } runpool; - - /** - * Job Scheduler Context information sub-structure. These members are - * accessed regardless of whether the context is: - * - In the Policy's Run Pool - * - In the Policy's Queue - * - Not queued nor in the Run Pool. - * - * You must obtain the jsctx_mutex before accessing any other members of - * this substructure. - * - * You may not access any of these members from IRQ context. - */ - struct { - struct mutex jsctx_mutex; /**< Job Scheduler Context lock */ - - /** Number of jobs ready to run - does \em not include the jobs waiting in - * the dispatcher, and dependency-only jobs. See kbase_jd_context::job_nr - * for such jobs*/ - u32 nr_jobs; - - /** Context Attributes: - * Each is large enough to hold a refcount of the number of atoms on - * the context. **/ - u32 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT]; - - kbase_context_flags flags; - /* NOTE: Unify the following flags into kbase_context_flags */ - /** - * Is the context scheduled on the Run Pool? - * - * This is only ever updated whilst the jsctx_mutex is held. - */ - bool is_scheduled; - /** - * Wait queue to wait for is_scheduled state changes. - * */ - wait_queue_head_t is_scheduled_wait; - - bool is_dying; /**< Is the context in the process of being evicted? */ - - /** Link implementing JS queues. 
Context can be present on one - * list per job slot - */ - struct list_head ctx_list_entry[BASE_JM_MAX_NR_SLOTS]; - } ctx; - - /* The initalized-flag is placed at the end, to avoid cache-pollution (we should - * only be using this during init/term paths) */ - int init_status; -}; - -/** Subset of atom state that can be available after jd_done_nolock() is called - * on that atom. A copy must be taken via kbasep_js_atom_retained_state_copy(), - * because the original atom could disappear. */ -struct kbasep_js_atom_retained_state { - /** Event code - to determine whether the atom has finished */ - enum base_jd_event_code event_code; - /** core requirements */ - base_jd_core_req core_req; - /* priority */ - int sched_priority; - /** Job Slot to retry submitting to if submission from IRQ handler failed */ - int retry_submit_on_slot; - /* Core group atom was executed on */ - u32 device_nr; - -}; - -/** - * Value signifying 'no retry on a slot required' for: - * - kbase_js_atom_retained_state::retry_submit_on_slot - * - kbase_jd_atom::retry_submit_on_slot - */ -#define KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID (-1) - -/** - * base_jd_core_req value signifying 'invalid' for a kbase_jd_atom_retained_state. - * - * @see kbase_atom_retained_state_is_valid() - */ -#define KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID BASE_JD_REQ_DEP - -/** - * @brief The JS timer resolution, in microseconds - * - * Any non-zero difference in time will be at least this size. 
- */ -#define KBASEP_JS_TICK_RESOLUTION_US 1 - -/* - * Internal atom priority defines for kbase_jd_atom::sched_prio - */ -enum { - KBASE_JS_ATOM_SCHED_PRIO_HIGH = 0, - KBASE_JS_ATOM_SCHED_PRIO_MED, - KBASE_JS_ATOM_SCHED_PRIO_LOW, - KBASE_JS_ATOM_SCHED_PRIO_COUNT, -}; - -/* Invalid priority for kbase_jd_atom::sched_prio */ -#define KBASE_JS_ATOM_SCHED_PRIO_INVALID -1 - -/* Default priority in the case of contexts with no atoms, or being lenient - * about invalid priorities from userspace */ -#define KBASE_JS_ATOM_SCHED_PRIO_DEFAULT KBASE_JS_ATOM_SCHED_PRIO_MED - - /** @} *//* end group kbase_js */ - /** @} *//* end group base_kbase_api */ - /** @} *//* end group base_api */ - -#endif /* _KBASE_JS_DEFS_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_policy.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_policy.h deleted file mode 100755 index 2094586ff2d39..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_policy.h +++ /dev/null @@ -1,763 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_kbase_js_policy.h - * Job Scheduler Policy APIs. 
- */ - -#ifndef _KBASE_JS_POLICY_H_ -#define _KBASE_JS_POLICY_H_ - -/** - * @page page_kbase_js_policy Job Scheduling Policies - * The Job Scheduling system is described in the following: - * - @subpage page_kbase_js_policy_overview - * - @subpage page_kbase_js_policy_operation - * - * The API details are as follows: - * - @ref kbase_jm - * - @ref kbase_js - * - @ref kbase_js_policy - */ - -/** - * @page page_kbase_js_policy_overview Overview of the Policy System - * - * The Job Scheduler Policy manages: - * - The assigning of KBase Contexts to GPU Address Spaces (\em ASs) - * - The choosing of Job Chains (\em Jobs) from a KBase context, to run on the - * GPU's Job Slots (\em JSs). - * - The amount of \em time a context is assigned to (scheduled on) an - * Address Space - * - The amount of \em time a Job spends running on the GPU - * - * The Policy implements this management via 2 components: - * - A Policy Queue, which manages a set of contexts that are ready to run, - * but not currently running. - * - A Policy Run Pool, which manages the currently running contexts (one per Address - * Space) and the jobs to run on the Job Slots. - * - * Each Graphics Process in the system has at least one KBase Context. Therefore, - * the Policy Queue can be seen as a queue of Processes waiting to run Jobs on - * the GPU. - * - * - * @dotfile policy_overview.dot "Diagram showing a very simplified overview of the Policy System. IRQ handling, soft/hard-stopping, contexts re-entering the system and Policy details are omitted" - * - * The main operations on the queue are: - * - Enqueuing a Context to it - * - Dequeuing a Context from it, to run it. - * - Note: requeuing a context is much the same as enqueuing a context, but - * occurs when a context is scheduled out of the system to allow other contexts - * to run. - * - * These operations have much the same meaning for the Run Pool - Jobs are - * dequeued to run on a Jobslot, and requeued when they are scheduled out of - * the GPU. 
- * - * @note This is an over-simplification of the Policy APIs - there are more - * operations than 'Enqueue'/'Dequeue', and a Dequeue from the Policy Queue - * takes at least two function calls: one to Dequeue from the Queue, one to add - * to the Run Pool. - * - * As indicated on the diagram, Jobs permanently leave the scheduling system - * when they are completed, otherwise they get dequeued/requeued until this - * happens. Similarly, Contexts leave the scheduling system when their jobs - * have all completed. However, Contexts may later return to the scheduling - * system (not shown on the diagram) if more Bags of Jobs are submitted to - * them. - */ - -/** - * @page page_kbase_js_policy_operation Policy Operation - * - * We describe the actions that the Job Scheduler Core takes on the Policy in - * the following cases: - * - The IRQ Path - * - The Job Submission Path - * - The High Priority Job Submission Path - * - * This shows how the Policy APIs will be used by the Job Scheduler core. - * - * The following diagram shows an example Policy that contains a Low Priority - * queue, and a Real-time (High Priority) Queue. The RT queue is examined - * before the LowP one on dequeuing from the head. The Low Priority Queue is - * ordered by time, and the RT queue is ordered by time weighted by - * RT-priority. In addition, it shows that the Job Scheduler Core will start a - * Soft-Stop Timer (SS-Timer) when it dequeue's and submits a job. The - * Soft-Stop time is set by a global configuration value, and must be a value - * appropriate for the policy. For example, this could include "don't run a - * soft-stop timer" for a First-Come-First-Served (FCFS) policy. - * - * - * @dotfile policy_operation_diagram.dot "Diagram showing the objects managed by an Example Policy, and the operations made upon these objects by the Job Scheduler Core." 
- * - * @section sec_kbase_js_policy_operation_prio Dealing with Priority - * - * Priority applies separately to a context as a whole, and to the jobs within - * a context. The jobs specify a priority in the base_jd_atom::prio member, but - * it is independent of the context priority. That is, it only affects - * scheduling of atoms within a context. Refer to @ref base_jd_prio for more - * details. The meaning of the context's priority value is up to the policy - * itself, and could be a logarithmic scale instead of a linear scale (e.g. the - * policy could implement an increase/decrease in priority by 1 results in an - * increase/decrease in \em proportion of time spent scheduled in by 25%, an - * effective change in timeslice by 11%). - * - * It is up to the policy whether a boost in priority boosts the priority of - * the entire context (e.g. to such an extent where it may pre-empt other - * running contexts). If it chooses to do this, the Policy must make sure that - * only jobs from high-priority contexts are run, and that the context is - * scheduled out once only jobs from low priority contexts remain. This ensures - * that the low priority contexts do not gain from the priority boost, yet they - * still get scheduled correctly with respect to other low priority contexts. - * - * - * @section sec_kbase_js_policy_operation_irq IRQ Path - * - * The following happens on the IRQ path from the Job Scheduler Core: - * - Note the slot that completed (for later) - * - Log the time spent by the job (and implicitly, the time spent by the - * context) - * - call kbasep_js_policy_log_job_result() in the context of the irq - * handler. - * - This must happen regardless of whether the job completed successfully or - * not (otherwise the context gets away with DoS'ing the system with faulty jobs) - * - What was the result of the job? 
- * - If Completed: job is just removed from the system - * - If Hard-stop or failure: job is removed from the system - * - If Soft-stop: queue the book-keeping work onto a work-queue: have a - * work-queue call kbasep_js_policy_enqueue_job() - * - Check the timeslice used by the owning context - * - call kbasep_js_policy_should_remove_ctx() in the context of the irq - * handler. - * - If this returns true, clear the "allowed" flag. - * - Check the ctx's flags for "allowed", "has jobs to run" and "is running - * jobs" - * - And so, should the context stay scheduled in? - * - If No, push onto a work-queue the work of scheduling out the old context, - * and getting a new one. That is: - * - kbasep_js_policy_runpool_remove_ctx() on old_ctx - * - kbasep_js_policy_enqueue_ctx() on old_ctx - * - kbasep_js_policy_dequeue_head_ctx() to get new_ctx - * - kbasep_js_policy_runpool_add_ctx() on new_ctx - * - (all of this work is deferred on a work-queue to keep the IRQ handler quick) - * - If there is space in the completed job slots' HEAD/NEXT registers, run the next job: - * - kbasep_js_policy_dequeue_job() in the context of the irq - * handler with core_req set to that of the completing slot - * - if this returned true, submit the job to the completed slot. - * - This is repeated until kbasep_js_policy_dequeue_job() returns - * false, or the job slot has a job queued on both the HEAD and NEXT registers. - * - If kbasep_js_policy_dequeue_job() returned false, submit some work to - * the work-queue to retry from outside of IRQ context (calling - * kbasep_js_policy_dequeue_job() from a work-queue). - * - * Since the IRQ handler submits new jobs \em and re-checks the IRQ_RAWSTAT, - * this sequence could loop a large number of times: this could happen if - * the jobs submitted completed on the GPU very quickly (in a few cycles), such - * as GPU NULL jobs. Then, the HEAD/NEXT registers will always be free to take - * more jobs, causing us to loop until we run out of jobs. 
- * - * To mitigate this, we must limit the number of jobs submitted per slot during - * the IRQ handler - for example, no more than 2 jobs per slot per IRQ should - * be sufficient (to fill up the HEAD + NEXT registers in normal cases). For - * Mali-T600 with 3 job slots, this means that up to 6 jobs could be submitted per - * slot. Note that IRQ Throttling can make this situation commonplace: 6 jobs - * could complete but the IRQ for each of them is delayed by the throttling. By - * the time you get the IRQ, all 6 jobs could've completed, meaning you can - * submit jobs to fill all 6 HEAD+NEXT registers again. - * - * @note As much work is deferred as possible, which includes the scheduling - * out of a context and scheduling in a new context. However, we can still make - * starting a single high-priorty context quick despite this: - * - On Mali-T600 family, there is one more AS than JSs. - * - This means we can very quickly schedule out one AS, no matter what the - * situation (because there will always be one AS that's not currently running - * on the job slot - it can only have a job in the NEXT register). - * - Even with this scheduling out, fair-share can still be guaranteed e.g. by - * a timeline-based Completely Fair Scheduler. - * - When our high-priority context comes in, we can do this quick-scheduling - * out immediately, and then schedule in the high-priority context without having to block. - * - This all assumes that the context to schedule out is of lower - * priority. Otherwise, we will have to block waiting for some other low - * priority context to finish its jobs. Note that it's likely (but not - * impossible) that the high-priority context \b is running jobs, by virtue of - * it being high priority. - * - Therefore, we can give a high liklihood that on Mali-T600 at least one - * high-priority context can be started very quickly. For the general case, we - * can guarantee starting (no. ASs) - (no. JSs) high priority contexts - * quickly. 
In any case, there is a high likelihood that we're able to start - * more than one high priority context quickly. - * - * In terms of the functions used in the IRQ handler directly, these are the - * perfomance considerations: - * - kbase_js_policy_log_job_result(): - * - This is just adding to a 64-bit value (possibly even a 32-bit value if we - * only store the time the job's recently spent - see below on 'priority weighting') - * - For priority weighting, a divide operation ('div') could happen, but - * this can happen in a deferred context (outside of IRQ) when scheduling out - * the ctx; as per our Engineering Specification, the contexts of different - * priority still stay scheduled in for the same timeslice, but higher priority - * ones scheduled back in more often. - * - That is, the weighted and unweighted times must be stored separately, and - * the weighted time is only updated \em outside of IRQ context. - * - Of course, this divide is more likely to be a 'multiply by inverse of the - * weight', assuming that the weight (priority) doesn't change. - * - kbasep_js_policy_should_remove_ctx(): - * - This is usually just a comparison of the stored time value against some - * maximum value. - * - * @note all deferred work can be wrapped up into one call - we usually need to - * indicate that a job/bag is done outside of IRQ context anyway. - * - * - * - * @section sec_kbase_js_policy_operation_submit Submission path - * - * Start with a Context with no jobs present, and assume equal priority of all - * contexts in the system. The following work all happens outside of IRQ - * Context : - * - As soon as job is made 'ready to 'run', then is must be registerd with the Job - * Scheduler Policy: - * - 'Ready to run' means they've satisified their dependencies in the - * Kernel-side Job Dispatch system. - * - Call kbasep_js_policy_enqueue_job() - * - This indicates that the job should be scheduled (it is ready to run). 
- * - As soon as a ctx changes from having 0 jobs 'ready to run' to >0 jobs - * 'ready to run', we enqueue the context on the policy queue: - * - Call kbasep_js_policy_enqueue_ctx() - * - This indicates that the \em ctx should be scheduled (it is ready to run) - * - * Next, we need to handle adding a context to the Run Pool - if it's sensible - * to do so. This can happen due to two reasons: - * -# A context is enqueued as above, and there are ASs free for it to run on - * (e.g. it is the first context to be run, in which case it can be added to - * the Run Pool immediately after enqueuing on the Policy Queue) - * -# A previous IRQ caused another ctx to be scheduled out, requiring that the - * context at the head of the queue be scheduled in. Such steps would happen in - * a work queue (work deferred from the IRQ context). - * - * In both cases, we'd handle it as follows: - * - Get the context at the Head of the Policy Queue: - * - Call kbasep_js_policy_dequeue_head_ctx() - * - Assign the Context an Address Space (Assert that there will be one free, - * given the above two reasons) - * - Add this context to the Run Pool: - * - Call kbasep_js_policy_runpool_add_ctx() - * - Now see if a job should be run: - * - Mostly, this will be done in the IRQ handler at the completion of a - * previous job. - * - However, there are two cases where this cannot be done: a) The first job - * enqueued to the system (there is no previous IRQ to act upon) b) When jobs - * are submitted at a low enough rate to not fill up all Job Slots (or, not to - * fill both the 'HEAD' and 'NEXT' registers in the job-slots) - * - Hence, on each ctx and job submission we should try to see if we - * can run a job: - * - For each job slot that has free space (in NEXT or HEAD+NEXT registers): - * - Call kbasep_js_policy_dequeue_job() with core_req set to that of the - * slot - * - if we got one, submit it to the job slot. 
- * - This is repeated until kbasep_js_policy_dequeue_job() returns - * false, or the job slot has a job queued on both the HEAD and NEXT registers. - * - * The above case shows that we should attempt to run jobs in cases where a) a ctx - * has been added to the Run Pool, and b) new jobs have been added to a context - * in the Run Pool: - * - In the latter case, the context is in the runpool because it's got a job - * ready to run, or is already running a job - * - We could just wait until the IRQ handler fires, but for certain types of - * jobs this can take comparatively a long time to complete, e.g. GLES FS jobs - * generally take much longer to run that GLES CS jobs, which are vertex shader - * jobs. - * - Therefore, when a new job appears in the ctx, we must check the job-slots - * to see if they're free, and run the jobs as before. - * - * - * - * @section sec_kbase_js_policy_operation_submit_hipri Submission path for High Priority Contexts - * - * For High Priority Contexts on Mali-T600, we can make sure that at least 1 of - * them can be scheduled in immediately to start high prioriy jobs. In general, - * (no. ASs) - (no JSs) high priority contexts may be started immediately. The - * following describes how this happens: - * - * Similar to the previous section, consider what happens with a high-priority - * context (a context with a priority higher than that of any in the Run Pool) - * that starts out with no jobs: - * - A job becomes ready to run on the context, and so we enqueue the context - * on the Policy's Queue. 
- * - However, we'd like to schedule in this context immediately, instead of - * waiting for one of the Run Pool contexts' timeslice to expire - * - The policy's Enqueue function must detect this (because it is the policy - * that embodies the concept of priority), and take appropriate action - * - That is, kbasep_js_policy_enqueue_ctx() should check the Policy's Run - * Pool to see if a lower priority context should be scheduled out, and then - * schedule in the High Priority context. - * - For Mali-T600, we can always pick a context to schedule out immediately - * (because there are more ASs than JSs), and so scheduling out a victim context - * and scheduling in the high priority context can happen immediately. - * - If a policy implements fair-sharing, then this can still ensure the - * victim later on gets a fair share of the GPU. - * - As a note, consider whether the victim can be of equal/higher priority - * than the incoming context: - * - Usually, higher priority contexts will be the ones currently running - * jobs, and so the context with the lowest priority is usually not running - * jobs. - * - This makes it likely that the victim context is low priority, but - * it's not impossible for it to be a high priority one: - * - Suppose 3 high priority contexts are submitting only FS jobs, and one low - * priority context submitting CS jobs. Then, the context not running jobs will - * be one of the hi priority contexts (because only 2 FS jobs can be - * queued/running on the GPU HW for Mali-T600). - * - The problem can be mitigated by extra action, but it's questionable - * whether we need to: we already have a high likelihood that there's at least - * one high priority context - that should be good enough. - * - And so, this method makes sure that at least one high priority context - * can be started very quickly, but more than one high priority contexts could be - * delayed (up to one timeslice). 
- * - To improve this, use a GPU with a higher number of Address Spaces vs Job - * Slots. - * - At this point, let's assume this high priority context has been scheduled - * in immediately. The next step is to ensure it can start some jobs quickly. - * - It must do this by Soft-Stopping jobs on any of the Job Slots that it can - * submit to. - * - The rest of the logic for starting the jobs is taken care of by the IRQ - * handler. All the policy needs to do is ensure that - * kbasep_js_policy_dequeue_job() will return the jobs from the high priority - * context. - * - * @note in SS state, we currently only use 2 job-slots (even for T608, but - * this might change in future). In this case, it's always possible to schedule - * out 2 ASs quickly (their jobs won't be in the HEAD registers). At the same - * time, this maximizes usage of the job-slots (only 2 are in use), because you - * can guarantee starting of the jobs from the High Priority contexts immediately too. - * - * - * - * @section sec_kbase_js_policy_operation_notes Notes - * - * - In this design, a separate 'init' is needed from dequeue/requeue, so that - * information can be retained between the dequeue/requeue calls. For example, - * the total time spent for a context/job could be logged between - * dequeue/requeuing, to implement Fair Sharing. In this case, 'init' just - * initializes that information to some known state. - * - * - * - */ - -/** - * @addtogroup base_api - * @{ - */ - -/** - * @addtogroup base_kbase_api - * @{ - */ - -/** - * @addtogroup kbase_js_policy Job Scheduler Policy APIs - * @{ - * - * Refer to @ref page_kbase_js_policy for an overview and detailed operation of - * the Job Scheduler Policy and its use from the Job Scheduler Core. 
- */ - -/** - * @brief Job Scheduler Policy structure - */ -union kbasep_js_policy; - -/** - * @brief Initialize the Job Scheduler Policy - */ -int kbasep_js_policy_init(struct kbase_device *kbdev); - -/** - * @brief Terminate the Job Scheduler Policy - */ -void kbasep_js_policy_term(union kbasep_js_policy *js_policy); - -/** - * @addtogroup kbase_js_policy_ctx Job Scheduler Policy, Context Management API - * @{ - * - * Refer to @ref page_kbase_js_policy for an overview and detailed operation of - * the Job Scheduler Policy and its use from the Job Scheduler Core. - */ - -/** - * @brief Job Scheduler Policy Ctx Info structure - * - * This structure is embedded in the struct kbase_context structure. It is used to: - * - track information needed for the policy to schedule the context (e.g. time - * used, OS priority etc.) - * - link together kbase_contexts into a queue, so that a struct kbase_context can be - * obtained as the container of the policy ctx info. This allows the API to - * return what "the next context" should be. - * - obtain other information already stored in the struct kbase_context for - * scheduling purposes (e.g process ID to get the priority of the originating - * process) - */ -union kbasep_js_policy_ctx_info; - -/** - * @brief Initialize a ctx for use with the Job Scheduler Policy - * - * This effectively initializes the union kbasep_js_policy_ctx_info structure within - * the struct kbase_context (itself located within the kctx->jctx.sched_info structure). - */ -int kbasep_js_policy_init_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); - -/** - * @brief Terminate resources associated with using a ctx in the Job Scheduler - * Policy. 
- */ -void kbasep_js_policy_term_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx); - -/** - * @brief Enqueue a context onto the Job Scheduler Policy Queue - * - * If the context enqueued has a priority higher than any in the Run Pool, then - * it is the Policy's responsibility to decide whether to schedule out a low - * priority context from the Run Pool to allow the high priority context to be - * scheduled in. - * - * If the context has the privileged flag set, it will always be kept at the - * head of the queue. - * - * The caller will be holding kbasep_js_kctx_info::ctx::jsctx_mutex. - * The caller will be holding kbasep_js_device_data::queue_mutex. - */ -void kbasep_js_policy_enqueue_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx); - -/** - * @brief Dequeue a context from the Head of the Job Scheduler Policy Queue - * - * The caller will be holding kbasep_js_device_data::queue_mutex. - * - * @return true if a context was available, and *kctx_ptr points to - * the kctx dequeued. - * @return false if no contexts were available. - */ -bool kbasep_js_policy_dequeue_head_ctx(union kbasep_js_policy *js_policy, struct kbase_context ** const kctx_ptr); - -/** - * @brief Evict a context from the Job Scheduler Policy Queue - * - * This is only called as part of destroying a kbase_context. - * - * There are many reasons why this might fail during the lifetime of a - * context. For example, the context is in the process of being scheduled. In - * that case a thread doing the scheduling might have a pointer to it, but the - * context is neither in the Policy Queue, nor is it in the Run - * Pool. Crucially, neither the Policy Queue, Run Pool, or the Context itself - * are locked. - * - * Hence to find out where in the system the context is, it is important to do - * more than just check the kbasep_js_kctx_info::ctx::is_scheduled member. - * - * The caller will be holding kbasep_js_device_data::queue_mutex. 
- * - * @return true if the context was evicted from the Policy Queue - * @return false if the context was not found in the Policy Queue - */ -bool kbasep_js_policy_try_evict_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx); - -/** - * @brief Call a function on all jobs belonging to a non-queued, non-running - * context, optionally detaching the jobs from the context as it goes. - * - * At the time of the call, the context is guarenteed to be not-currently - * scheduled on the Run Pool (is_scheduled == false), and not present in - * the Policy Queue. This is because one of the following functions was used - * recently on the context: - * - kbasep_js_policy_evict_ctx() - * - kbasep_js_policy_runpool_remove_ctx() - * - * In both cases, no subsequent call was made on the context to any of: - * - kbasep_js_policy_runpool_add_ctx() - * - kbasep_js_policy_enqueue_ctx() - * - * Due to the locks that might be held at the time of the call, the callback - * may need to defer work on a workqueue to complete its actions (e.g. when - * cancelling jobs) - * - * \a detach_jobs must only be set when cancelling jobs (which occurs as part - * of context destruction). - * - * The locking conditions on the caller are as follows: - * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex. - */ -void kbasep_js_policy_foreach_ctx_job(union kbasep_js_policy *js_policy, struct kbase_context *kctx, - kbasep_js_policy_ctx_job_cb callback, bool detach_jobs); - -/** - * @brief Add a context to the Job Scheduler Policy's Run Pool - * - * If the context enqueued has a priority higher than any in the Run Pool, then - * it is the Policy's responsibility to decide whether to schedule out low - * priority jobs that are currently running on the GPU. - * - * The number of contexts present in the Run Pool will never be more than the - * number of Address Spaces. 
- * - * The following guarentees are made about the state of the system when this - * is called: - * - kctx->as_nr member is valid - * - the context has its submit_allowed flag set - * - kbasep_js_device_data::runpool_irq::per_as_data[kctx->as_nr] is valid - * - The refcount of the context is guarenteed to be zero. - * - kbasep_js_kctx_info::ctx::is_scheduled will be true. - * - * The locking conditions on the caller are as follows: - * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex. - * - it will be holding kbasep_js_device_data::runpool_mutex. - * - it will be holding kbasep_js_device_data::runpool_irq::lock (a spinlock) - * - * Due to a spinlock being held, this function must not call any APIs that sleep. - */ -void kbasep_js_policy_runpool_add_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx); - -/** - * @brief Remove a context from the Job Scheduler Policy's Run Pool - * - * The kctx->as_nr member is valid and the context has its submit_allowed flag - * set when this is called. The state of - * kbasep_js_device_data::runpool_irq::per_as_data[kctx->as_nr] is also - * valid. The refcount of the context is guarenteed to be zero. - * - * The locking conditions on the caller are as follows: - * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex. - * - it will be holding kbasep_js_device_data::runpool_mutex. - * - it will be holding kbasep_js_device_data::runpool_irq::lock (a spinlock) - * - * Due to a spinlock being held, this function must not call any APIs that sleep. - */ -void kbasep_js_policy_runpool_remove_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx); - -/** - * @brief Indicate whether a context should be removed from the Run Pool - * (should be scheduled out). - * - * The kbasep_js_device_data::runpool_irq::lock will be held by the caller. - * - * @note This API is called from IRQ context. 
- */ -bool kbasep_js_policy_should_remove_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx); - -/** - * @brief Synchronize with any timers acting upon the runpool - * - * The policy should check whether any timers it owns should be running. If - * they should not, the policy must cancel such timers and ensure they are not - * re-run by the time this function finishes. - * - * In particular, the timers must not be running when there are no more contexts - * on the runpool, because the GPU could be powered off soon after this call. - * - * The locking conditions on the caller are as follows: - * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex. - * - it will be holding kbasep_js_device_data::runpool_mutex. - */ -void kbasep_js_policy_runpool_timers_sync(union kbasep_js_policy *js_policy); - - -/** - * @brief Indicate whether a new context has an higher priority than the current context. - * - * - * The caller has the following conditions on locking: - * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held for \a new_ctx - * - * This function must not sleep, because an IRQ spinlock might be held whilst - * this is called. - * - * @note There is nothing to stop the priority of \a current_ctx changing - * during or immediately after this function is called (because its jsctx_mutex - * cannot be held). Therefore, this function should only be seen as a heuristic - * guide as to whether \a new_ctx is higher priority than \a current_ctx - */ -bool kbasep_js_policy_ctx_has_priority(union kbasep_js_policy *js_policy, struct kbase_context *current_ctx, struct kbase_context *new_ctx); - - /** @} *//* end group kbase_js_policy_ctx */ - -/** - * @addtogroup kbase_js_policy_job Job Scheduler Policy, Job Chain Management API - * @{ - * - * Refer to @ref page_kbase_js_policy for an overview and detailed operation of - * the Job Scheduler Policy and its use from the Job Scheduler Core. 
- */ - -/** - * @brief Job Scheduler Policy Job Info structure - * - * This structure is embedded in the struct kbase_jd_atom structure. It is used to: - * - track information needed for the policy to schedule the job (e.g. time - * used, etc.) - * - link together jobs into a queue/buffer, so that a struct kbase_jd_atom can be - * obtained as the container of the policy job info. This allows the API to - * return what "the next job" should be. - */ -union kbasep_js_policy_job_info; - -/** - * @brief Initialize a job for use with the Job Scheduler Policy - * - * This function initializes the union kbasep_js_policy_job_info structure within the - * kbase_jd_atom. It will only initialize/allocate resources that are specific - * to the job. - * - * That is, this function makes \b no attempt to: - * - initialize any context/policy-wide information - * - enqueue the job on the policy. - * - * At some later point, the following functions must be called on the job, in this order: - * - kbasep_js_policy_register_job() to register the job and initialize policy/context wide data. - * - kbasep_js_policy_enqueue_job() to enqueue the job - * - * A job must only ever be initialized on the Policy once, and must be - * terminated on the Policy before the job is freed. - * - * The caller will not be holding any locks, and so this function will not - * modify any information in \a kctx or \a js_policy. - * - * @return 0 if initialization was correct. - */ -int kbasep_js_policy_init_job(const union kbasep_js_policy *js_policy, const struct kbase_context *kctx, struct kbase_jd_atom *katom); - -/** - * @brief Register context/policy-wide information for a job on the Job Scheduler Policy. - * - * Registers the job with the policy. This is used to track the job before it - * has been enqueued/requeued by kbasep_js_policy_enqueue_job(). 
Specifically, - * it is used to update information under a lock that could not be updated at - * kbasep_js_policy_init_job() time (such as context/policy-wide data). - * - * @note This function will not fail, and hence does not allocate any - * resources. Any failures that could occur on registration will be caught - * during kbasep_js_policy_init_job() instead. - * - * A job must only ever be registerd on the Policy once, and must be - * deregistered on the Policy on completion (whether or not that completion was - * success/failure). - * - * The caller has the following conditions on locking: - * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held. - */ -void kbasep_js_policy_register_job(union kbasep_js_policy *js_policy, struct kbase_context *kctx, struct kbase_jd_atom *katom); - -/** - * @brief De-register context/policy-wide information for a on the Job Scheduler Policy. - * - * This must be used before terminating the resources associated with using a - * job in the Job Scheduler Policy. This function does not itself terminate any - * resources, at most it just updates information in the policy and context. - * - * The caller has the following conditions on locking: - * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held. - */ -void kbasep_js_policy_deregister_job(union kbasep_js_policy *js_policy, struct kbase_context *kctx, struct kbase_jd_atom *katom); - -/** - * @brief Dequeue a Job for a job slot from the Job Scheduler Policy Run Pool - * - * The job returned by the policy will match at least one of the bits in the - * job slot's core requirements (but it may match more than one, or all @ref - * base_jd_core_req bits supported by the job slot). - * - * In addition, the requirements of the job returned will be a subset of those - * requested - the job returned will not have requirements that \a job_slot_idx - * cannot satisfy. - * - * The caller will submit the job to the GPU as soon as the GPU's NEXT register - * for the corresponding slot is empty. 
Of course, the GPU will then only run - * this new job when the currently executing job (in the jobslot's HEAD - * register) has completed. - * - * @return true if a job was available, and *kctx_ptr points to - * the kctx dequeued. - * @return false if no jobs were available among all ctxs in the Run Pool. - * - * @note base_jd_core_req is currently a u8 - beware of type conversion. - * - * The caller has the following conditions on locking: - * - kbasep_js_device_data::runpool_lock::irq will be held. - * - kbasep_js_device_data::runpool_mutex will be held. - * - kbasep_js_kctx_info::ctx::jsctx_mutex. will be held - */ -bool kbasep_js_policy_dequeue_job(struct kbase_device *kbdev, int job_slot_idx, struct kbase_jd_atom ** const katom_ptr); - -/** - * @brief Requeue a Job back into the the Job Scheduler Policy Run Pool - * - * This will be used to enqueue a job after its creation and also to requeue - * a job into the Run Pool that was previously dequeued (running). It notifies - * the policy that the job should be run again at some point later. - * - * The caller has the following conditions on locking: - * - kbasep_js_device_data::runpool_irq::lock (a spinlock) will be held. - * - kbasep_js_device_data::runpool_mutex will be held. - * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held. - */ -void kbasep_js_policy_enqueue_job(union kbasep_js_policy *js_policy, struct kbase_jd_atom *katom); - -/** - * @brief Log the result of a job: the time spent on a job/context, and whether - * the job failed or not. - * - * Since a struct kbase_jd_atom contains a pointer to the struct kbase_context owning it, - * then this can also be used to log time on either/both the job and the - * containing context. 
- * - * The completion state of the job can be found by examining \a katom->event.event_code - * - * If the Job failed and the policy is implementing fair-sharing, then the - * policy must penalize the failing job/context: - * - At the very least, it should penalize the time taken by the amount of - * time spent processing the IRQ in SW. This because a job in the NEXT slot - * waiting to run will be delayed until the failing job has had the IRQ - * cleared. - * - \b Optionally, the policy could apply other penalties. For example, based - * on a threshold of a number of failing jobs, after which a large penalty is - * applied. - * - * The kbasep_js_device_data::runpool_mutex will be held by the caller. - * - * @note This API is called from IRQ context. - * - * The caller has the following conditions on locking: - * - kbasep_js_device_data::runpool_irq::lock will be held. - * - * @param js_policy job scheduler policy - * @param katom job dispatch atom - * @param time_spent_us the time spent by the job, in microseconds (10^-6 seconds). - */ -void kbasep_js_policy_log_job_result(union kbasep_js_policy *js_policy, struct kbase_jd_atom *katom, u64 time_spent_us); - - /** @} *//* end group kbase_js_policy_job */ - - /** @} *//* end group kbase_js_policy */ - /** @} *//* end group base_kbase_api */ - /** @} *//* end group base_api */ - -#endif /* _KBASE_JS_POLICY_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_policy_cfs.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_policy_cfs.c deleted file mode 100755 index 692460710ce07..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_policy_cfs.c +++ /dev/null @@ -1,297 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Job Scheduler: Completely Fair Policy Implementation - */ - -#include -#include -#include -#include -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) -#include -#endif - -/** - * Define for when dumping is enabled. - * This should not be based on the instrumentation level as whether dumping is enabled for a particular level is down to the integrator. - * However this is being used for now as otherwise the cinstr headers would be needed. - */ -#define CINSTR_DUMPING_ENABLED (2 == MALI_INSTRUMENTATION_LEVEL) - -/* Fixed point constants used for runtime weight calculations */ -#define WEIGHT_FIXEDPOINT_SHIFT 10 -#define WEIGHT_TABLE_SIZE 40 -#define WEIGHT_0_NICE (WEIGHT_TABLE_SIZE/2) -#define WEIGHT_0_VAL (1 << WEIGHT_FIXEDPOINT_SHIFT) - -#define PROCESS_PRIORITY_MIN (-20) -#define PROCESS_PRIORITY_MAX (19) - -/* Defines for easy asserts 'is scheduled'/'is queued'/'is neither queued norscheduled' */ -#define KBASEP_JS_CHECKFLAG_QUEUED (1u << 0) /**< Check the queued state */ -#define KBASEP_JS_CHECKFLAG_SCHEDULED (1u << 1) /**< Check the scheduled state */ -#define KBASEP_JS_CHECKFLAG_IS_QUEUED (1u << 2) /**< Expect queued state to be set */ -#define KBASEP_JS_CHECKFLAG_IS_SCHEDULED (1u << 3) /**< Expect scheduled state to be set */ - -enum { - KBASEP_JS_CHECK_NOTQUEUED = KBASEP_JS_CHECKFLAG_QUEUED, - KBASEP_JS_CHECK_NOTSCHEDULED = KBASEP_JS_CHECKFLAG_SCHEDULED, - KBASEP_JS_CHECK_QUEUED = KBASEP_JS_CHECKFLAG_QUEUED | KBASEP_JS_CHECKFLAG_IS_QUEUED, - KBASEP_JS_CHECK_SCHEDULED = 
KBASEP_JS_CHECKFLAG_SCHEDULED | KBASEP_JS_CHECKFLAG_IS_SCHEDULED -}; - -typedef u32 kbasep_js_check; - -/* - * Private Functions - */ - -/* Table autogenerated using util built from: base/tools/gen_cfs_weight_of_prio/ */ - -/* weight = 1.25 */ -static const int weight_of_priority[] = { - /* -20 */ 11, 14, 18, 23, - /* -16 */ 29, 36, 45, 56, - /* -12 */ 70, 88, 110, 137, - /* -8 */ 171, 214, 268, 335, - /* -4 */ 419, 524, 655, 819, - /* 0 */ 1024, 1280, 1600, 2000, - /* 4 */ 2500, 3125, 3906, 4883, - /* 8 */ 6104, 7630, 9538, 11923, - /* 12 */ 14904, 18630, 23288, 29110, - /* 16 */ 36388, 45485, 56856, 71070 -}; - -/* - * Note: There is nothing to stop the priority of the ctx containing - * ctx_info changing during or immediately after this function is called - * (because its jsctx_mutex cannot be held during IRQ). Therefore, this - * function should only be seen as a heuristic guide as to the priority weight - * of the context. - */ -static u64 priority_weight(struct kbasep_js_policy_cfs_ctx *ctx_info, u64 time_us) -{ - u64 time_delta_us; - int priority; - - priority = ctx_info->process_priority; - - /* Adjust runtime_us using priority weight if required */ - if (priority != 0 && time_us != 0) { - int clamped_priority; - - /* Clamp values to min..max weights */ - if (priority > PROCESS_PRIORITY_MAX) - clamped_priority = PROCESS_PRIORITY_MAX; - else if (priority < PROCESS_PRIORITY_MIN) - clamped_priority = PROCESS_PRIORITY_MIN; - else - clamped_priority = priority; - - /* Fixed point multiplication */ - time_delta_us = (time_us * weight_of_priority[WEIGHT_0_NICE + clamped_priority]); - /* Remove fraction */ - time_delta_us = time_delta_us >> WEIGHT_FIXEDPOINT_SHIFT; - /* Make sure the time always increases */ - if (0 == time_delta_us) - time_delta_us++; - } else { - time_delta_us = time_us; - } - - return time_delta_us; -} - -#if KBASE_TRACE_ENABLE -static int kbasep_js_policy_trace_get_refcnt_nolock(struct kbase_device *kbdev, struct kbase_context *kctx) -{ - 
struct kbasep_js_device_data *js_devdata; - int as_nr; - int refcnt = 0; - - js_devdata = &kbdev->js_data; - - as_nr = kctx->as_nr; - if (as_nr != KBASEP_AS_NR_INVALID) { - struct kbasep_js_per_as_data *js_per_as_data; - - js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr]; - - refcnt = js_per_as_data->as_busy_refcount; - } - - return refcnt; -} - -static inline int kbasep_js_policy_trace_get_refcnt(struct kbase_device *kbdev, struct kbase_context *kctx) -{ - unsigned long flags; - struct kbasep_js_device_data *js_devdata; - int refcnt = 0; - - js_devdata = &kbdev->js_data; - - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - refcnt = kbasep_js_policy_trace_get_refcnt_nolock(kbdev, kctx); - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - return refcnt; -} -#else /* KBASE_TRACE_ENABLE */ -static inline int kbasep_js_policy_trace_get_refcnt(struct kbase_device *kbdev, struct kbase_context *kctx) -{ - CSTD_UNUSED(kbdev); - CSTD_UNUSED(kctx); - return 0; -} -#endif /* KBASE_TRACE_ENABLE */ - - -/* - * Non-private functions - */ - -int kbasep_js_policy_init(struct kbase_device *kbdev) -{ - struct kbasep_js_device_data *js_devdata; - struct kbasep_js_policy_cfs *policy_info; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - js_devdata = &kbdev->js_data; - policy_info = &js_devdata->policy.cfs; - - atomic64_set(&policy_info->least_runtime_us, KBASEP_JS_RUNTIME_EMPTY); - atomic64_set(&policy_info->rt_least_runtime_us, KBASEP_JS_RUNTIME_EMPTY); - - policy_info->head_runtime_us = 0; - - return 0; -} - -void kbasep_js_policy_term(union kbasep_js_policy *js_policy) -{ - CSTD_UNUSED(js_policy); -} - -int kbasep_js_policy_init_ctx(struct kbase_device *kbdev, struct kbase_context *kctx) -{ - struct kbasep_js_device_data *js_devdata; - struct kbasep_js_policy_cfs_ctx *ctx_info; - struct kbasep_js_policy_cfs *policy_info; - int policy; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - - js_devdata = &kbdev->js_data; - 
policy_info = &kbdev->js_data.policy.cfs; - ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs; - - KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_INIT_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx)); - - policy = current->policy; - if (policy == SCHED_FIFO || policy == SCHED_RR) { - ctx_info->process_rt_policy = true; - ctx_info->process_priority = (((MAX_RT_PRIO - 1) - current->rt_priority) / 5) - 20; - } else { - ctx_info->process_rt_policy = false; - ctx_info->process_priority = (current->static_prio - MAX_RT_PRIO) - 20; - } - - /* Initial runtime (relative to least-run context runtime) - * - * This uses the Policy Queue's most up-to-date head_runtime_us by using the - * queue mutex to issue memory barriers - also ensure future updates to - * head_runtime_us occur strictly after this context is initialized */ - mutex_lock(&js_devdata->queue_mutex); - - /* No need to hold the the runpool_irq.lock here, because we're initializing - * the value, and the context is definitely not being updated in the - * runpool at this point. The queue_mutex ensures the memory barrier. 
*/ - ctx_info->runtime_us = policy_info->head_runtime_us + priority_weight(ctx_info, (u64) js_devdata->cfs_ctx_runtime_init_slices * (u64) (js_devdata->ctx_timeslice_ns / 1000u)); - - mutex_unlock(&js_devdata->queue_mutex); - - return 0; -} - -void kbasep_js_policy_term_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx) -{ - struct kbasep_js_policy_cfs_ctx *ctx_info; - struct kbasep_js_policy_cfs *policy_info; - struct kbase_device *kbdev; - - KBASE_DEBUG_ASSERT(js_policy != NULL); - KBASE_DEBUG_ASSERT(kctx != NULL); - - policy_info = &js_policy->cfs; - ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs; - - kbdev = container_of(js_policy, struct kbase_device, js_data.policy); - KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_TERM_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx)); - - /* No work to do */ -} - -/* - * Job Chain Management - */ - -void kbasep_js_policy_log_job_result(union kbasep_js_policy *js_policy, struct kbase_jd_atom *katom, u64 time_spent_us) -{ - struct kbasep_js_policy_cfs_ctx *ctx_info; - struct kbase_context *parent_ctx; - - KBASE_DEBUG_ASSERT(js_policy != NULL); - KBASE_DEBUG_ASSERT(katom != NULL); - CSTD_UNUSED(js_policy); - - parent_ctx = katom->kctx; - KBASE_DEBUG_ASSERT(parent_ctx != NULL); - - ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs; - - ctx_info->runtime_us += priority_weight(ctx_info, time_spent_us); - - katom->time_spent_us += time_spent_us; -} - -bool kbasep_js_policy_ctx_has_priority(union kbasep_js_policy *js_policy, struct kbase_context *current_ctx, struct kbase_context *new_ctx) -{ - struct kbasep_js_policy_cfs_ctx *current_ctx_info; - struct kbasep_js_policy_cfs_ctx *new_ctx_info; - - KBASE_DEBUG_ASSERT(current_ctx != NULL); - KBASE_DEBUG_ASSERT(new_ctx != NULL); - CSTD_UNUSED(js_policy); - - current_ctx_info = ¤t_ctx->jctx.sched_info.runpool.policy_ctx.cfs; - new_ctx_info = &new_ctx->jctx.sched_info.runpool.policy_ctx.cfs; - - if (!current_ctx_info->process_rt_policy 
&& new_ctx_info->process_rt_policy) - return true; - - if (current_ctx_info->process_rt_policy == - new_ctx_info->process_rt_policy) - return true; - - return false; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_policy_cfs.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_policy_cfs.h deleted file mode 100755 index b457d8215abe0..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_js_policy_cfs.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/* - * Completely Fair Job Scheduler Policy structure definitions - */ - -#ifndef _KBASE_JS_POLICY_CFS_H_ -#define _KBASE_JS_POLICY_CFS_H_ - -#define KBASEP_JS_RUNTIME_EMPTY ((u64)-1) - -/** - * struct kbasep_js_policy_cfs - Data for the CFS policy - * @head_runtime_us: Number of microseconds the least-run context has been - * running for. The kbasep_js_device_data.queue_mutex must - * be held whilst updating this. - * Reads are possible without this mutex, but an older value - * might be read if no memory barries are issued beforehand. - * @least_runtime_us: Number of microseconds the least-run context in the - * context queue has been running for. - * -1 if context queue is empty. - * @rt_least_runtime_us: Number of microseconds the least-run context in the - * realtime (priority) context queue has been running for. 
- * -1 if realtime context queue is empty - */ -struct kbasep_js_policy_cfs { - u64 head_runtime_us; - atomic64_t least_runtime_us; - atomic64_t rt_least_runtime_us; -}; - -/** - * struct kbasep_js_policy_cfs_ctx - a single linked list of all contexts - * @runtime_us: microseconds this context has been running for - * @process_rt_policy: set if calling process policy scheme is a realtime - * scheduler and will use the priority queue. Non-mutable - * after ctx init - * @process_priority: calling process NICE priority, in the range -20..19 - * - * &kbasep_js_device_data.runpool_irq.lock must be held when updating - * @runtime_us. Initializing will occur on context init and context enqueue - * (which can only occur in one thread at a time), but multi-thread access only - * occurs while the context is in the runpool. - * - * Reads are possible without the spinlock, but an older value might be read if - * no memory barries are issued beforehand. - */ -struct kbasep_js_policy_cfs_ctx { - u64 runtime_us; - bool process_rt_policy; - int process_priority; -}; - -/** - * struct kbasep_js_policy_cfs_job - per job information for CFS - * @ticks: number of ticks that this job has been executing for - * - * &kbasep_js_device_data.runpool_irq.lock must be held when accessing @ticks. - */ -struct kbasep_js_policy_cfs_job { - u32 ticks; -}; - -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_linux.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_linux.h deleted file mode 100755 index 6d1e61fd41e03..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_linux.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_kbase_linux.h - * Base kernel APIs, Linux implementation. - */ - -#ifndef _KBASE_LINUX_H_ -#define _KBASE_LINUX_H_ - -/* All things that are needed for the Linux port. */ -#include -#include -#include -#include -#include - -#if (defined(MALI_KERNEL_TEST_API) && (1 == MALI_KERNEL_TEST_API)) - #define KBASE_EXPORT_TEST_API(func) EXPORT_SYMBOL(func) -#else - #define KBASE_EXPORT_TEST_API(func) -#endif - -#define KBASE_EXPORT_SYMBOL(func) EXPORT_SYMBOL(func) - -#endif /* _KBASE_LINUX_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem.c deleted file mode 100755 index 2909f20c08b24..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem.c +++ /dev/null @@ -1,1376 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/** - * @file mali_kbase_mem.c - * Base kernel memory APIs - */ -#ifdef CONFIG_DMA_SHARED_BUFFER -#include -#endif /* CONFIG_DMA_SHARED_BUFFER */ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#if defined(CONFIG_MALI_MIPE_ENABLED) -#include -#endif - -/** - * @brief Check the zone compatibility of two regions. - */ -static int kbase_region_tracker_match_zone(struct kbase_va_region *reg1, - struct kbase_va_region *reg2) -{ - return ((reg1->flags & KBASE_REG_ZONE_MASK) == - (reg2->flags & KBASE_REG_ZONE_MASK)); -} - -KBASE_EXPORT_TEST_API(kbase_region_tracker_match_zone); - -/* This function inserts a region into the tree. */ -static void kbase_region_tracker_insert(struct kbase_context *kctx, struct kbase_va_region *new_reg) -{ - u64 start_pfn = new_reg->start_pfn; - struct rb_node **link = &(kctx->reg_rbtree.rb_node); - struct rb_node *parent = NULL; - - /* Find the right place in the tree using tree search */ - while (*link) { - struct kbase_va_region *old_reg; - - parent = *link; - old_reg = rb_entry(parent, struct kbase_va_region, rblink); - - /* RBTree requires no duplicate entries. */ - KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn); - - if (old_reg->start_pfn > start_pfn) - link = &(*link)->rb_left; - else - link = &(*link)->rb_right; - } - - /* Put the new node there, and rebalance tree */ - rb_link_node(&(new_reg->rblink), parent, link); - rb_insert_color(&(new_reg->rblink), &(kctx->reg_rbtree)); -} - -/* Find allocated region enclosing free range. 
*/ -static struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range_free( - struct kbase_context *kctx, u64 start_pfn, size_t nr_pages) -{ - struct rb_node *rbnode; - struct kbase_va_region *reg; - - u64 end_pfn = start_pfn + nr_pages; - - rbnode = kctx->reg_rbtree.rb_node; - while (rbnode) { - u64 tmp_start_pfn, tmp_end_pfn; - - reg = rb_entry(rbnode, struct kbase_va_region, rblink); - tmp_start_pfn = reg->start_pfn; - tmp_end_pfn = reg->start_pfn + reg->nr_pages; - - /* If start is lower than this, go left. */ - if (start_pfn < tmp_start_pfn) - rbnode = rbnode->rb_left; - /* If end is higher than this, then go right. */ - else if (end_pfn > tmp_end_pfn) - rbnode = rbnode->rb_right; - else /* Enclosing */ - return reg; - } - - return NULL; -} - -/* Find region enclosing given address. */ -struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr) -{ - struct rb_node *rbnode; - struct kbase_va_region *reg; - u64 gpu_pfn = gpu_addr >> PAGE_SHIFT; - - KBASE_DEBUG_ASSERT(NULL != kctx); - - lockdep_assert_held(&kctx->reg_lock); - - rbnode = kctx->reg_rbtree.rb_node; - while (rbnode) { - u64 tmp_start_pfn, tmp_end_pfn; - - reg = rb_entry(rbnode, struct kbase_va_region, rblink); - tmp_start_pfn = reg->start_pfn; - tmp_end_pfn = reg->start_pfn + reg->nr_pages; - - /* If start is lower than this, go left. */ - if (gpu_pfn < tmp_start_pfn) - rbnode = rbnode->rb_left; - /* If end is higher than this, then go right. 
*/ - else if (gpu_pfn >= tmp_end_pfn) - rbnode = rbnode->rb_right; - else /* Enclosing */ - return reg; - } - - return NULL; -} - -KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_enclosing_address); - -/* Find region with given base address */ -struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr) -{ - u64 gpu_pfn = gpu_addr >> PAGE_SHIFT; - struct rb_node *rbnode; - struct kbase_va_region *reg; - - KBASE_DEBUG_ASSERT(NULL != kctx); - - lockdep_assert_held(&kctx->reg_lock); - - rbnode = kctx->reg_rbtree.rb_node; - while (rbnode) { - reg = rb_entry(rbnode, struct kbase_va_region, rblink); - if (reg->start_pfn > gpu_pfn) - rbnode = rbnode->rb_left; - else if (reg->start_pfn < gpu_pfn) - rbnode = rbnode->rb_right; - else - return reg; - - } - - return NULL; -} - -KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_base_address); - -/* Find region meeting given requirements */ -static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(struct kbase_context *kctx, struct kbase_va_region *reg_reqs, size_t nr_pages, size_t align) -{ - struct rb_node *rbnode; - struct kbase_va_region *reg; - - /* Note that this search is a linear search, as we do not have a target - address in mind, so does not benefit from the rbtree search */ - rbnode = rb_first(&(kctx->reg_rbtree)); - while (rbnode) { - reg = rb_entry(rbnode, struct kbase_va_region, rblink); - if ((reg->nr_pages >= nr_pages) && - (reg->flags & KBASE_REG_FREE) && - kbase_region_tracker_match_zone(reg, reg_reqs)) { - /* Check alignment */ - u64 start_pfn = (reg->start_pfn + align - 1) & ~(align - 1); - - if ((start_pfn >= reg->start_pfn) && - (start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) && - ((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages - 1))) - return reg; - } - rbnode = rb_next(rbnode); - } - - return NULL; -} - -/** - * @brief Remove a region object from the global list. 
- * - * The region reg is removed, possibly by merging with other free and - * compatible adjacent regions. It must be called with the context - * region lock held. The associated memory is not released (see - * kbase_free_alloced_region). Internal use only. - */ -static int kbase_remove_va_region(struct kbase_context *kctx, struct kbase_va_region *reg) -{ - struct rb_node *rbprev; - struct kbase_va_region *prev = NULL; - struct rb_node *rbnext; - struct kbase_va_region *next = NULL; - - int merged_front = 0; - int merged_back = 0; - int err = 0; - - /* Try to merge with the previous block first */ - rbprev = rb_prev(&(reg->rblink)); - if (rbprev) { - prev = rb_entry(rbprev, struct kbase_va_region, rblink); - if ((prev->flags & KBASE_REG_FREE) && kbase_region_tracker_match_zone(prev, reg)) { - /* We're compatible with the previous VMA, merge with it */ - prev->nr_pages += reg->nr_pages; - rb_erase(&(reg->rblink), &kctx->reg_rbtree); - reg = prev; - merged_front = 1; - } - } - - /* Try to merge with the next block second */ - /* Note we do the lookup here as the tree may have been rebalanced. */ - rbnext = rb_next(&(reg->rblink)); - if (rbnext) { - /* We're compatible with the next VMA, merge with it */ - next = rb_entry(rbnext, struct kbase_va_region, rblink); - if ((next->flags & KBASE_REG_FREE) && kbase_region_tracker_match_zone(next, reg)) { - next->start_pfn = reg->start_pfn; - next->nr_pages += reg->nr_pages; - rb_erase(&(reg->rblink), &kctx->reg_rbtree); - merged_back = 1; - if (merged_front) { - /* We already merged with prev, free it */ - kbase_free_alloced_region(reg); - } - } - } - - /* If we failed to merge then we need to add a new block */ - if (!(merged_front || merged_back)) { - /* - * We didn't merge anything. Add a new free - * placeholder and remove the original one. 
- */ - struct kbase_va_region *free_reg; - - free_reg = kbase_alloc_free_region(kctx, reg->start_pfn, reg->nr_pages, reg->flags & KBASE_REG_ZONE_MASK); - if (!free_reg) { - err = -ENOMEM; - goto out; - } - - rb_replace_node(&(reg->rblink), &(free_reg->rblink), &(kctx->reg_rbtree)); - } - - out: - return err; -} - -KBASE_EXPORT_TEST_API(kbase_remove_va_region); - -/** - * @brief Insert a VA region to the list, replacing the current at_reg. - */ -static int kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages) -{ - int err = 0; - - /* Must be a free region */ - KBASE_DEBUG_ASSERT((at_reg->flags & KBASE_REG_FREE) != 0); - /* start_pfn should be contained within at_reg */ - KBASE_DEBUG_ASSERT((start_pfn >= at_reg->start_pfn) && (start_pfn < at_reg->start_pfn + at_reg->nr_pages)); - /* at least nr_pages from start_pfn should be contained within at_reg */ - KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= at_reg->start_pfn + at_reg->nr_pages); - - new_reg->start_pfn = start_pfn; - new_reg->nr_pages = nr_pages; - - /* Regions are a whole use, so swap and delete old one. */ - if (at_reg->start_pfn == start_pfn && at_reg->nr_pages == nr_pages) { - rb_replace_node(&(at_reg->rblink), &(new_reg->rblink), &(kctx->reg_rbtree)); - kbase_free_alloced_region(at_reg); - } - /* New region replaces the start of the old one, so insert before. */ - else if (at_reg->start_pfn == start_pfn) { - at_reg->start_pfn += nr_pages; - KBASE_DEBUG_ASSERT(at_reg->nr_pages >= nr_pages); - at_reg->nr_pages -= nr_pages; - - kbase_region_tracker_insert(kctx, new_reg); - } - /* New region replaces the end of the old one, so insert after. 
*/ - else if ((at_reg->start_pfn + at_reg->nr_pages) == (start_pfn + nr_pages)) { - at_reg->nr_pages -= nr_pages; - - kbase_region_tracker_insert(kctx, new_reg); - } - /* New region splits the old one, so insert and create new */ - else { - struct kbase_va_region *new_front_reg; - - new_front_reg = kbase_alloc_free_region(kctx, - at_reg->start_pfn, - start_pfn - at_reg->start_pfn, - at_reg->flags & KBASE_REG_ZONE_MASK); - - if (new_front_reg) { - at_reg->nr_pages -= nr_pages + new_front_reg->nr_pages; - at_reg->start_pfn = start_pfn + nr_pages; - - kbase_region_tracker_insert(kctx, new_front_reg); - kbase_region_tracker_insert(kctx, new_reg); - } else { - err = -ENOMEM; - } - } - - return err; -} - -/** - * @brief Add a VA region to the list. - */ -int kbase_add_va_region(struct kbase_context *kctx, - struct kbase_va_region *reg, u64 addr, - size_t nr_pages, size_t align) -{ - struct kbase_va_region *tmp; - u64 gpu_pfn = addr >> PAGE_SHIFT; - int err = 0; - - KBASE_DEBUG_ASSERT(NULL != kctx); - KBASE_DEBUG_ASSERT(NULL != reg); - - lockdep_assert_held(&kctx->reg_lock); - - if (!align) - align = 1; - - /* must be a power of 2 */ - KBASE_DEBUG_ASSERT((align & (align - 1)) == 0); - KBASE_DEBUG_ASSERT(nr_pages > 0); - - /* Path 1: Map a specific address. Find the enclosing region, which *must* be free. 
*/ - if (gpu_pfn) { - struct device *dev = kctx->kbdev->dev; - - KBASE_DEBUG_ASSERT(!(gpu_pfn & (align - 1))); - - tmp = kbase_region_tracker_find_region_enclosing_range_free(kctx, gpu_pfn, nr_pages); - if (!tmp) { - dev_warn(dev, "Enclosing region not found: 0x%08llx gpu_pfn, %zu nr_pages", gpu_pfn, nr_pages); - err = -ENOMEM; - goto exit; - } - - if ((!kbase_region_tracker_match_zone(tmp, reg)) || - (!(tmp->flags & KBASE_REG_FREE))) { - dev_warn(dev, "Zone mismatch: %lu != %lu", tmp->flags & KBASE_REG_ZONE_MASK, reg->flags & KBASE_REG_ZONE_MASK); - dev_warn(dev, "!(tmp->flags & KBASE_REG_FREE): tmp->start_pfn=0x%llx tmp->flags=0x%lx tmp->nr_pages=0x%zx gpu_pfn=0x%llx nr_pages=0x%zx\n", tmp->start_pfn, tmp->flags, tmp->nr_pages, gpu_pfn, nr_pages); - dev_warn(dev, "in function %s (%p, %p, 0x%llx, 0x%zx, 0x%zx)\n", __func__, kctx, reg, addr, nr_pages, align); - err = -ENOMEM; - goto exit; - } - - err = kbase_insert_va_region_nolock(kctx, reg, tmp, gpu_pfn, nr_pages); - if (err) { - dev_warn(dev, "Failed to insert va region"); - err = -ENOMEM; - goto exit; - } - - goto exit; - } - - /* Path 2: Map any free address which meets the requirements. */ - { - u64 start_pfn; - - tmp = kbase_region_tracker_find_region_meeting_reqs(kctx, reg, nr_pages, align); - if (!tmp) { - err = -ENOMEM; - goto exit; - } - start_pfn = (tmp->start_pfn + align - 1) & ~(align - 1); - err = kbase_insert_va_region_nolock(kctx, reg, tmp, start_pfn, nr_pages); - } - - exit: - return err; -} - -KBASE_EXPORT_TEST_API(kbase_add_va_region); - -/** - * @brief Initialize the internal region tracker data structure. 
- */ -static void kbase_region_tracker_ds_init(struct kbase_context *kctx, struct kbase_va_region *same_va_reg, struct kbase_va_region *exec_reg, struct kbase_va_region *custom_va_reg) -{ - kctx->reg_rbtree = RB_ROOT; - kbase_region_tracker_insert(kctx, same_va_reg); - - /* exec and custom_va_reg doesn't always exist */ - if (exec_reg && custom_va_reg) { - kbase_region_tracker_insert(kctx, exec_reg); - kbase_region_tracker_insert(kctx, custom_va_reg); - } -} - -void kbase_region_tracker_term(struct kbase_context *kctx) -{ - struct rb_node *rbnode; - struct kbase_va_region *reg; - - do { - rbnode = rb_first(&(kctx->reg_rbtree)); - if (rbnode) { - rb_erase(rbnode, &(kctx->reg_rbtree)); - reg = rb_entry(rbnode, struct kbase_va_region, rblink); - kbase_free_alloced_region(reg); - } - } while (rbnode); -} - -/** - * Initialize the region tracker data structure. - */ -int kbase_region_tracker_init(struct kbase_context *kctx) -{ - struct kbase_va_region *same_va_reg; - struct kbase_va_region *exec_reg = NULL; - struct kbase_va_region *custom_va_reg = NULL; - size_t same_va_bits = sizeof(void *) * BITS_PER_BYTE; - u64 custom_va_size = KBASE_REG_ZONE_CUSTOM_VA_SIZE; - u64 gpu_va_limit = (1ULL << kctx->kbdev->gpu_props.mmu.va_bits) >> PAGE_SHIFT; - -#if defined(CONFIG_ARM64) - same_va_bits = VA_BITS; -#elif defined(CONFIG_X86_64) - same_va_bits = 47; -#elif defined(CONFIG_64BIT) -#error Unsupported 64-bit architecture -#endif - -#ifdef CONFIG_64BIT - if (kctx->is_compat) - same_va_bits = 32; - else if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) - same_va_bits = 33; -#endif - - if (kctx->kbdev->gpu_props.mmu.va_bits < same_va_bits) - return -EINVAL; - - /* all have SAME_VA */ - same_va_reg = kbase_alloc_free_region(kctx, 1, - (1ULL << (same_va_bits - PAGE_SHIFT)) - 1, - KBASE_REG_ZONE_SAME_VA); - - if (!same_va_reg) - return -ENOMEM; - -#ifdef CONFIG_64BIT - /* only 32-bit clients have the other two zones */ - if (kctx->is_compat) { -#endif - if 
(gpu_va_limit <= KBASE_REG_ZONE_CUSTOM_VA_BASE) { - kbase_free_alloced_region(same_va_reg); - return -EINVAL; - } - /* If the current size of TMEM is out of range of the - * virtual address space addressable by the MMU then - * we should shrink it to fit - */ - if ((KBASE_REG_ZONE_CUSTOM_VA_BASE + KBASE_REG_ZONE_CUSTOM_VA_SIZE) >= gpu_va_limit) - custom_va_size = gpu_va_limit - KBASE_REG_ZONE_CUSTOM_VA_BASE; - - exec_reg = kbase_alloc_free_region(kctx, - KBASE_REG_ZONE_EXEC_BASE, - KBASE_REG_ZONE_EXEC_SIZE, - KBASE_REG_ZONE_EXEC); - - if (!exec_reg) { - kbase_free_alloced_region(same_va_reg); - return -ENOMEM; - } - - custom_va_reg = kbase_alloc_free_region(kctx, - KBASE_REG_ZONE_CUSTOM_VA_BASE, - custom_va_size, KBASE_REG_ZONE_CUSTOM_VA); - - if (!custom_va_reg) { - kbase_free_alloced_region(same_va_reg); - kbase_free_alloced_region(exec_reg); - return -ENOMEM; - } -#ifdef CONFIG_64BIT - } -#endif - - kbase_region_tracker_ds_init(kctx, same_va_reg, exec_reg, custom_va_reg); - - return 0; -} - -int kbase_mem_init(struct kbase_device *kbdev) -{ - struct kbasep_mem_device *memdev; - - KBASE_DEBUG_ASSERT(kbdev); - - memdev = &kbdev->memdev; - kbdev->mem_pool_max_size_default = KBASE_MEM_POOL_MAX_SIZE_KCTX; - - /* Initialize memory usage */ - atomic_set(&memdev->used_pages, 0); - - return kbase_mem_pool_init(&kbdev->mem_pool, - KBASE_MEM_POOL_MAX_SIZE_KBDEV, kbdev, NULL); -} - -void kbase_mem_halt(struct kbase_device *kbdev) -{ - CSTD_UNUSED(kbdev); -} - -void kbase_mem_term(struct kbase_device *kbdev) -{ - struct kbasep_mem_device *memdev; - int pages; - - KBASE_DEBUG_ASSERT(kbdev); - - memdev = &kbdev->memdev; - - pages = atomic_read(&memdev->used_pages); - if (pages != 0) - dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages); - - kbase_mem_pool_term(&kbdev->mem_pool); -} - -KBASE_EXPORT_TEST_API(kbase_mem_term); - - - - -/** - * @brief Allocate a free region object. 
- * - * The allocated object is not part of any list yet, and is flagged as - * KBASE_REG_FREE. No mapping is allocated yet. - * - * zone is KBASE_REG_ZONE_CUSTOM_VA, KBASE_REG_ZONE_SAME_VA, or KBASE_REG_ZONE_EXEC - * - */ -struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone) -{ - struct kbase_va_region *new_reg; - - KBASE_DEBUG_ASSERT(kctx != NULL); - - /* zone argument should only contain zone related region flags */ - KBASE_DEBUG_ASSERT((zone & ~KBASE_REG_ZONE_MASK) == 0); - KBASE_DEBUG_ASSERT(nr_pages > 0); - /* 64-bit address range is the max */ - KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= (U64_MAX / PAGE_SIZE)); - - new_reg = kzalloc(sizeof(*new_reg), GFP_KERNEL); - - if (!new_reg) - return NULL; - - new_reg->cpu_alloc = NULL; /* no alloc bound yet */ - new_reg->gpu_alloc = NULL; /* no alloc bound yet */ - new_reg->kctx = kctx; - new_reg->flags = zone | KBASE_REG_FREE; - - new_reg->flags |= KBASE_REG_GROWABLE; - - new_reg->start_pfn = start_pfn; - new_reg->nr_pages = nr_pages; - - return new_reg; -} - -KBASE_EXPORT_TEST_API(kbase_alloc_free_region); - -/** - * @brief Free a region object. - * - * The described region must be freed of any mapping. - * - * If the region is not flagged as KBASE_REG_FREE, the region's - * alloc object will be released. - * It is a bug if no alloc object exists for non-free regions. 
- * - */ -void kbase_free_alloced_region(struct kbase_va_region *reg) -{ - KBASE_DEBUG_ASSERT(NULL != reg); - if (!(reg->flags & KBASE_REG_FREE)) { - kbase_mem_phy_alloc_put(reg->cpu_alloc); - kbase_mem_phy_alloc_put(reg->gpu_alloc); - /* To detect use-after-free in debug builds */ - KBASE_DEBUG_CODE(reg->flags |= KBASE_REG_FREE); - } - kfree(reg); -} - -KBASE_EXPORT_TEST_API(kbase_free_alloced_region); - -void kbase_mmu_update(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(NULL != kctx); - lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock); - /* ASSERT that the context has a valid as_nr, which is only the case - * when it's scheduled in. - * - * as_nr won't change because the caller has the runpool_irq lock */ - KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); - lockdep_assert_held(&kctx->kbdev->as[kctx->as_nr].transaction_mutex); - - kctx->kbdev->mmu_mode->update(kctx); -} - -KBASE_EXPORT_TEST_API(kbase_mmu_update); - -void kbase_mmu_disable(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(NULL != kctx); - /* ASSERT that the context has a valid as_nr, which is only the case - * when it's scheduled in. 
- * - * as_nr won't change because the caller has the runpool_irq lock */ - KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); - - kctx->kbdev->mmu_mode->disable_as(kctx->kbdev, kctx->as_nr); -} - -KBASE_EXPORT_TEST_API(kbase_mmu_disable); - -void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr) -{ - kbdev->mmu_mode->disable_as(kbdev, as_nr); -} - -int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align) -{ - int err; - size_t i = 0; - unsigned long attr; - unsigned long mask = ~KBASE_REG_MEMATTR_MASK; - - if ((kctx->kbdev->system_coherency == COHERENCY_ACE) && - (reg->flags & KBASE_REG_SHARE_BOTH)) - attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_OUTER_WA); - else - attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_WRITE_ALLOC); - - KBASE_DEBUG_ASSERT(NULL != kctx); - KBASE_DEBUG_ASSERT(NULL != reg); - - err = kbase_add_va_region(kctx, reg, addr, nr_pages, align); - if (err) - return err; - - if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) { - u64 stride; - struct kbase_mem_phy_alloc *alloc; - - alloc = reg->gpu_alloc; - stride = alloc->imported.alias.stride; - KBASE_DEBUG_ASSERT(alloc->imported.alias.aliased); - for (i = 0; i < alloc->imported.alias.nents; i++) { - if (alloc->imported.alias.aliased[i].alloc) { - err = kbase_mmu_insert_pages(kctx, - reg->start_pfn + (i * stride), - alloc->imported.alias.aliased[i].alloc->pages + alloc->imported.alias.aliased[i].offset, - alloc->imported.alias.aliased[i].length, - reg->flags); - if (err) - goto bad_insert; - - kbase_mem_phy_alloc_gpu_mapped(alloc->imported.alias.aliased[i].alloc); - } else { - err = kbase_mmu_insert_single_page(kctx, - reg->start_pfn + i * stride, - page_to_phys(kctx->aliasing_sink_page), - alloc->imported.alias.aliased[i].length, - (reg->flags & mask) | attr); - - if (err) - goto bad_insert; - } - } - } else { - err = kbase_mmu_insert_pages(kctx, reg->start_pfn, - kbase_get_gpu_phy_pages(reg), - 
kbase_reg_current_backed_size(reg), - reg->flags); - if (err) - goto bad_insert; - kbase_mem_phy_alloc_gpu_mapped(reg->gpu_alloc); - } - - return err; - -bad_insert: - if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) { - u64 stride; - - stride = reg->gpu_alloc->imported.alias.stride; - KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased); - while (i--) - if (reg->gpu_alloc->imported.alias.aliased[i].alloc) { - kbase_mmu_teardown_pages(kctx, reg->start_pfn + (i * stride), reg->gpu_alloc->imported.alias.aliased[i].length); - kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc); - } - } - - kbase_remove_va_region(kctx, reg); - - return err; -} - -KBASE_EXPORT_TEST_API(kbase_gpu_mmap); - -int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg) -{ - int err; - - if (reg->start_pfn == 0) - return 0; - - if (reg->gpu_alloc && reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) { - size_t i; - - err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, reg->nr_pages); - KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased); - for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++) - if (reg->gpu_alloc->imported.alias.aliased[i].alloc) - kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc); - } else { - err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, kbase_reg_current_backed_size(reg)); - kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc); - } - - if (err) - return err; - - err = kbase_remove_va_region(kctx, reg); - return err; -} - -static struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping_of_region(const struct kbase_va_region *reg, unsigned long uaddr, size_t size) -{ - struct kbase_cpu_mapping *map; - struct list_head *pos; - - KBASE_DEBUG_ASSERT(NULL != reg); - KBASE_DEBUG_ASSERT(reg->cpu_alloc); - - if ((uintptr_t) uaddr + size < (uintptr_t) uaddr) /* overflow check */ - return NULL; - - list_for_each(pos, ®->cpu_alloc->mappings) { - map = list_entry(pos, struct 
kbase_cpu_mapping, mappings_list); - if (map->vm_start <= uaddr && map->vm_end >= uaddr + size) - return map; - } - - return NULL; -} - -KBASE_EXPORT_TEST_API(kbasep_find_enclosing_cpu_mapping_of_region); - -int kbasep_find_enclosing_cpu_mapping_offset( - struct kbase_context *kctx, u64 gpu_addr, - unsigned long uaddr, size_t size, u64 * offset) -{ - struct kbase_cpu_mapping *map = NULL; - const struct kbase_va_region *reg; - int err = -EINVAL; - - KBASE_DEBUG_ASSERT(kctx != NULL); - - kbase_gpu_vm_lock(kctx); - - reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr); - if (reg && !(reg->flags & KBASE_REG_FREE)) { - map = kbasep_find_enclosing_cpu_mapping_of_region(reg, uaddr, - size); - if (map) { - *offset = (uaddr - PTR_TO_U64(map->vm_start)) + - (map->page_off << PAGE_SHIFT); - err = 0; - } - } - - kbase_gpu_vm_unlock(kctx); - - return err; -} - -KBASE_EXPORT_TEST_API(kbasep_find_enclosing_cpu_mapping_offset); - -void kbase_sync_single(struct kbase_context *kctx, - phys_addr_t cpu_pa, phys_addr_t gpu_pa, - off_t offset, size_t size, enum kbase_sync_type sync_fn) -{ - struct page *cpu_page; - - cpu_page = pfn_to_page(PFN_DOWN(cpu_pa)); - - if (likely(cpu_pa == gpu_pa)) { - dma_addr_t dma_addr; - - BUG_ON(!cpu_page); - BUG_ON(offset + size > PAGE_SIZE); - - dma_addr = kbase_dma_addr(cpu_page) + offset; - if (sync_fn == KBASE_SYNC_TO_CPU) - dma_sync_single_for_cpu(kctx->kbdev->dev, dma_addr, - size, DMA_BIDIRECTIONAL); - else if (sync_fn == KBASE_SYNC_TO_DEVICE) - dma_sync_single_for_device(kctx->kbdev->dev, dma_addr, - size, DMA_BIDIRECTIONAL); - } else { - void *src = NULL; - void *dst = NULL; - struct page *gpu_page; - - if (WARN(!gpu_pa, "No GPU PA found for infinite cache op")) - return; - - gpu_page = pfn_to_page(PFN_DOWN(gpu_pa)); - - if (sync_fn == KBASE_SYNC_TO_DEVICE) { - src = ((unsigned char *)kmap(cpu_page)) + offset; - dst = ((unsigned char *)kmap(gpu_page)) + offset; - } else if (sync_fn == KBASE_SYNC_TO_CPU) { - 
dma_sync_single_for_cpu(kctx->kbdev->dev, - kbase_dma_addr(gpu_page) + offset, - size, DMA_BIDIRECTIONAL); - src = ((unsigned char *)kmap(gpu_page)) + offset; - dst = ((unsigned char *)kmap(cpu_page)) + offset; - } - memcpy(dst, src, size); - kunmap(gpu_page); - kunmap(cpu_page); - if (sync_fn == KBASE_SYNC_TO_DEVICE) - dma_sync_single_for_device(kctx->kbdev->dev, - kbase_dma_addr(gpu_page) + offset, - size, DMA_BIDIRECTIONAL); - } -} - -static int kbase_do_syncset(struct kbase_context *kctx, - struct base_syncset *set, enum kbase_sync_type sync_fn) -{ - int err = 0; - struct basep_syncset *sset = &set->basep_sset; - struct kbase_va_region *reg; - struct kbase_cpu_mapping *map; - unsigned long start; - size_t size; - phys_addr_t *cpu_pa; - phys_addr_t *gpu_pa; - u64 page_off, page_count; - u64 i; - off_t offset; - - kbase_os_mem_map_lock(kctx); - kbase_gpu_vm_lock(kctx); - - /* find the region where the virtual address is contained */ - reg = kbase_region_tracker_find_region_enclosing_address(kctx, - sset->mem_handle); - if (!reg) { - dev_warn(kctx->kbdev->dev, "Can't find region at VA 0x%016llX", - sset->mem_handle); - err = -EINVAL; - goto out_unlock; - } - - if (!(reg->flags & KBASE_REG_CPU_CACHED)) - goto out_unlock; - - start = (uintptr_t)sset->user_addr; - size = (size_t)sset->size; - - map = kbasep_find_enclosing_cpu_mapping_of_region(reg, start, size); - if (!map) { - dev_warn(kctx->kbdev->dev, "Can't find CPU mapping 0x%016lX for VA 0x%016llX", - start, sset->mem_handle); - err = -EINVAL; - goto out_unlock; - } - - offset = start & (PAGE_SIZE - 1); - page_off = map->page_off + ((start - map->vm_start) >> PAGE_SHIFT); - page_count = (size + offset + (PAGE_SIZE - 1)) >> PAGE_SHIFT; - cpu_pa = kbase_get_cpu_phy_pages(reg); - gpu_pa = kbase_get_gpu_phy_pages(reg); - - /* Sync first page */ - if (cpu_pa[page_off]) { - size_t sz = MIN(((size_t) PAGE_SIZE - offset), size); - - kbase_sync_single(kctx, cpu_pa[page_off], gpu_pa[page_off], - offset, sz, sync_fn); - } 
- - /* Sync middle pages (if any) */ - for (i = 1; page_count > 2 && i < page_count - 1; i++) { - /* we grow upwards, so bail on first non-present page */ - if (!cpu_pa[page_off + i]) - break; - - kbase_sync_single(kctx, cpu_pa[page_off + i], - gpu_pa[page_off + i], 0, PAGE_SIZE, sync_fn); - } - - /* Sync last page (if any) */ - if (page_count > 1 && cpu_pa[page_off + page_count - 1]) { - size_t sz = ((start + size - 1) & ~PAGE_MASK) + 1; - - kbase_sync_single(kctx, cpu_pa[page_off + page_count - 1], - gpu_pa[page_off + page_count - 1], 0, sz, - sync_fn); - } - -out_unlock: - kbase_gpu_vm_unlock(kctx); - kbase_os_mem_map_unlock(kctx); - return err; -} - -int kbase_sync_now(struct kbase_context *kctx, struct base_syncset *syncset) -{ - int err = -EINVAL; - struct basep_syncset *sset; - - KBASE_DEBUG_ASSERT(NULL != kctx); - KBASE_DEBUG_ASSERT(NULL != syncset); - - sset = &syncset->basep_sset; - - switch (sset->type) { - case BASE_SYNCSET_OP_MSYNC: - err = kbase_do_syncset(kctx, syncset, KBASE_SYNC_TO_DEVICE); - break; - - case BASE_SYNCSET_OP_CSYNC: - err = kbase_do_syncset(kctx, syncset, KBASE_SYNC_TO_CPU); - break; - - default: - dev_warn(kctx->kbdev->dev, "Unknown msync op %d\n", sset->type); - break; - } - - return err; -} - -KBASE_EXPORT_TEST_API(kbase_sync_now); - -/* vm lock must be held */ -int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg) -{ - int err; - - KBASE_DEBUG_ASSERT(NULL != kctx); - KBASE_DEBUG_ASSERT(NULL != reg); - lockdep_assert_held(&kctx->reg_lock); - err = kbase_gpu_munmap(kctx, reg); - if (err) { - dev_warn(reg->kctx->kbdev->dev, "Could not unmap from the GPU...\n"); - goto out; - } -#ifndef CONFIG_MALI_NO_MALI - if (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_6367)) { - /* Wait for GPU to flush write buffer before freeing physical pages */ - kbase_wait_write_flush(kctx); - } -#endif - /* This will also free the physical pages */ - kbase_free_alloced_region(reg); - - out: - return err; -} - 
-KBASE_EXPORT_TEST_API(kbase_mem_free_region); - -/** - * @brief Free the region from the GPU and unregister it. - * - * This function implements the free operation on a memory segment. - * It will loudly fail if called with outstanding mappings. - */ -int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr) -{ - int err = 0; - struct kbase_va_region *reg; - - KBASE_DEBUG_ASSERT(kctx != NULL); - - if (0 == gpu_addr) { - dev_warn(kctx->kbdev->dev, "gpu_addr 0 is reserved for the ringbuffer and it's an error to try to free it using kbase_mem_free\n"); - return -EINVAL; - } - kbase_gpu_vm_lock(kctx); - - if (gpu_addr >= BASE_MEM_COOKIE_BASE && - gpu_addr < BASE_MEM_FIRST_FREE_ADDRESS) { - int cookie = PFN_DOWN(gpu_addr - BASE_MEM_COOKIE_BASE); - - reg = kctx->pending_regions[cookie]; - if (!reg) { - err = -EINVAL; - goto out_unlock; - } - - /* ask to unlink the cookie as we'll free it */ - - kctx->pending_regions[cookie] = NULL; - kctx->cookies |= (1UL << cookie); - - kbase_free_alloced_region(reg); - } else { - /* A real GPU va */ - - /* Validate the region */ - reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr); - if (!reg || (reg->flags & KBASE_REG_FREE)) { - dev_warn(kctx->kbdev->dev, "kbase_mem_free called with nonexistent gpu_addr 0x%llX", - gpu_addr); - err = -EINVAL; - goto out_unlock; - } - - if ((reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_SAME_VA) { - /* SAME_VA must be freed through munmap */ - dev_warn(kctx->kbdev->dev, "%s called on SAME_VA memory 0x%llX", __func__, - gpu_addr); - err = -EINVAL; - goto out_unlock; - } - - err = kbase_mem_free_region(kctx, reg); - } - - out_unlock: - kbase_gpu_vm_unlock(kctx); - return err; -} - -KBASE_EXPORT_TEST_API(kbase_mem_free); - -void kbase_update_region_flags(struct kbase_context *kctx, - struct kbase_va_region *reg, unsigned long flags) -{ - KBASE_DEBUG_ASSERT(NULL != reg); - KBASE_DEBUG_ASSERT((flags & ~((1ul << BASE_MEM_FLAGS_NR_BITS) - 1)) == 0); - - reg->flags |= 
kbase_cache_enabled(flags, reg->nr_pages); - /* all memory is now growable */ - reg->flags |= KBASE_REG_GROWABLE; - - if (flags & BASE_MEM_GROW_ON_GPF) - reg->flags |= KBASE_REG_PF_GROW; - - if (flags & BASE_MEM_PROT_CPU_WR) - reg->flags |= KBASE_REG_CPU_WR; - - if (flags & BASE_MEM_PROT_CPU_RD) - reg->flags |= KBASE_REG_CPU_RD; - - if (flags & BASE_MEM_PROT_GPU_WR) - reg->flags |= KBASE_REG_GPU_WR; - - if (flags & BASE_MEM_PROT_GPU_RD) - reg->flags |= KBASE_REG_GPU_RD; - - if (0 == (flags & BASE_MEM_PROT_GPU_EX)) - reg->flags |= KBASE_REG_GPU_NX; - - if (flags & BASE_MEM_COHERENT_SYSTEM || - flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) - reg->flags |= KBASE_REG_SHARE_BOTH; - else if (flags & BASE_MEM_COHERENT_LOCAL) - reg->flags |= KBASE_REG_SHARE_IN; - - /* Set up default MEMATTR usage */ - if (kctx->kbdev->system_coherency == COHERENCY_ACE && - (reg->flags & KBASE_REG_SHARE_BOTH)) { - reg->flags |= - KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT_ACE); - } else { - reg->flags |= - KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT); - } -} -KBASE_EXPORT_TEST_API(kbase_update_region_flags); - -int kbase_alloc_phy_pages_helper( - struct kbase_mem_phy_alloc *alloc, - size_t nr_pages_requested) -{ - KBASE_DEBUG_ASSERT(alloc); - KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE); - KBASE_DEBUG_ASSERT(alloc->imported.kctx); - - if (nr_pages_requested == 0) - goto done; /*nothing to do*/ - - kbase_atomic_add_pages(nr_pages_requested, &alloc->imported.kctx->used_pages); - kbase_atomic_add_pages(nr_pages_requested, &alloc->imported.kctx->kbdev->memdev.used_pages); - - /* Increase mm counters before we allocate pages so that this - * allocation is visible to the OOM killer */ - kbase_process_page_usage_inc(alloc->imported.kctx, nr_pages_requested); - - if (kbase_mem_pool_alloc_pages(&alloc->imported.kctx->mem_pool, - nr_pages_requested, alloc->pages + alloc->nents) != 0) - goto no_alloc; - -#if defined(CONFIG_MALI_MIPE_ENABLED) - 
kbase_tlstream_aux_pagesalloc((s64)nr_pages_requested); -#endif - - alloc->nents += nr_pages_requested; -done: - return 0; - -no_alloc: - kbase_process_page_usage_dec(alloc->imported.kctx, nr_pages_requested); - kbase_atomic_sub_pages(nr_pages_requested, &alloc->imported.kctx->used_pages); - kbase_atomic_sub_pages(nr_pages_requested, &alloc->imported.kctx->kbdev->memdev.used_pages); - - return -ENOMEM; -} - -int kbase_free_phy_pages_helper( - struct kbase_mem_phy_alloc *alloc, - size_t nr_pages_to_free) -{ - bool syncback; - phys_addr_t *start_free; - - KBASE_DEBUG_ASSERT(alloc); - KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE); - KBASE_DEBUG_ASSERT(alloc->imported.kctx); - KBASE_DEBUG_ASSERT(alloc->nents >= nr_pages_to_free); - - /* early out if nothing to do */ - if (0 == nr_pages_to_free) - return 0; - - start_free = alloc->pages + alloc->nents - nr_pages_to_free; - - syncback = alloc->properties & KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED; - - kbase_mem_pool_free_pages(&alloc->imported.kctx->mem_pool, - nr_pages_to_free, - start_free, - syncback); - - alloc->nents -= nr_pages_to_free; - kbase_process_page_usage_dec(alloc->imported.kctx, nr_pages_to_free); - kbase_atomic_sub_pages(nr_pages_to_free, &alloc->imported.kctx->used_pages); - kbase_atomic_sub_pages(nr_pages_to_free, &alloc->imported.kctx->kbdev->memdev.used_pages); - -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_aux_pagesalloc(-(s64)nr_pages_to_free); -#endif - - return 0; -} - -void kbase_mem_kref_free(struct kref *kref) -{ - struct kbase_mem_phy_alloc *alloc; - - alloc = container_of(kref, struct kbase_mem_phy_alloc, kref); - - switch (alloc->type) { - case KBASE_MEM_TYPE_NATIVE: { - KBASE_DEBUG_ASSERT(alloc->imported.kctx); - kbase_free_phy_pages_helper(alloc, alloc->nents); - break; - } - case KBASE_MEM_TYPE_ALIAS: { - /* just call put on the underlying phy allocs */ - size_t i; - struct kbase_aliased *aliased; - - aliased = alloc->imported.alias.aliased; - if (aliased) { - for (i = 
0; i < alloc->imported.alias.nents; i++) - if (aliased[i].alloc) - kbase_mem_phy_alloc_put(aliased[i].alloc); - vfree(aliased); - } - break; - } - case KBASE_MEM_TYPE_RAW: - /* raw pages, external cleanup */ - break; - #ifdef CONFIG_UMP - case KBASE_MEM_TYPE_IMPORTED_UMP: - ump_dd_release(alloc->imported.ump_handle); - break; -#endif -#ifdef CONFIG_DMA_SHARED_BUFFER - case KBASE_MEM_TYPE_IMPORTED_UMM: - dma_buf_detach(alloc->imported.umm.dma_buf, - alloc->imported.umm.dma_attachment); - dma_buf_put(alloc->imported.umm.dma_buf); - break; -#endif - case KBASE_MEM_TYPE_TB:{ - void *tb; - - tb = alloc->imported.kctx->jctx.tb; - kbase_device_trace_buffer_uninstall(alloc->imported.kctx); - vfree(tb); - break; - } - default: - WARN(1, "Unexecpted free of type %d\n", alloc->type); - break; - } - - /* Free based on allocation type */ - if (alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE) - vfree(alloc); - else - kfree(alloc); -} - -KBASE_EXPORT_TEST_API(kbase_mem_kref_free); - -int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size) -{ - KBASE_DEBUG_ASSERT(NULL != reg); - KBASE_DEBUG_ASSERT(vsize > 0); - - /* validate user provided arguments */ - if (size > vsize || vsize > reg->nr_pages) - goto out_term; - - /* Prevent vsize*sizeof from wrapping around. - * For instance, if vsize is 2**29+1, we'll allocate 1 byte and the alloc won't fail. - */ - if ((size_t) vsize > ((size_t) -1 / sizeof(*reg->cpu_alloc->pages))) - goto out_term; - - KBASE_DEBUG_ASSERT(0 != vsize); - - if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, size) != 0) - goto out_term; - - if (reg->cpu_alloc != reg->gpu_alloc) { - if (kbase_alloc_phy_pages_helper(reg->gpu_alloc, size) != 0) - goto out_rollback; - } - - return 0; - -out_rollback: - kbase_free_phy_pages_helper(reg->cpu_alloc, size); -out_term: - return -1; -} - -KBASE_EXPORT_TEST_API(kbase_alloc_phy_pages); - -bool kbase_check_alloc_flags(unsigned long flags) -{ - /* Only known input flags should be set. 
*/ - if (flags & ~BASE_MEM_FLAGS_INPUT_MASK) - return false; - - /* At least one flag should be set */ - if (flags == 0) - return false; - - /* Either the GPU or CPU must be reading from the allocated memory */ - if ((flags & (BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD)) == 0) - return false; - - /* Either the GPU or CPU must be writing to the allocated memory */ - if ((flags & (BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR)) == 0) - return false; - - /* GPU cannot be writing to GPU executable memory and cannot grow the memory on page fault. */ - if ((flags & BASE_MEM_PROT_GPU_EX) && (flags & (BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF))) - return false; - - /* GPU should have at least read or write access otherwise there is no - reason for allocating. */ - if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0) - return false; - - return true; -} - -bool kbase_check_import_flags(unsigned long flags) -{ - /* Only known input flags should be set. */ - if (flags & ~BASE_MEM_FLAGS_INPUT_MASK) - return false; - - /* At least one flag should be set */ - if (flags == 0) - return false; - - /* Imported memory cannot be GPU executable */ - if (flags & BASE_MEM_PROT_GPU_EX) - return false; - - /* Imported memory cannot grow on page fault */ - if (flags & BASE_MEM_GROW_ON_GPF) - return false; - - /* GPU should have at least read or write access otherwise there is no - reason for importing. 
*/ - if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0) - return false; - - /* Secure memory cannot be read by the CPU */ - if ((flags & BASE_MEM_SECURE) && (flags & BASE_MEM_PROT_CPU_RD)) - return false; - - return true; -} - -/** - * @brief Acquire the per-context region list lock - */ -void kbase_gpu_vm_lock(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(kctx != NULL); - mutex_lock(&kctx->reg_lock); -} - -KBASE_EXPORT_TEST_API(kbase_gpu_vm_lock); - -/** - * @brief Release the per-context region list lock - */ -void kbase_gpu_vm_unlock(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(kctx != NULL); - mutex_unlock(&kctx->reg_lock); -} - -KBASE_EXPORT_TEST_API(kbase_gpu_vm_unlock); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem.h deleted file mode 100755 index 1839cced237ed..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem.h +++ /dev/null @@ -1,833 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/** - * @file mali_kbase_mem.h - * Base kernel memory APIs - */ - -#ifndef _KBASE_MEM_H_ -#define _KBASE_MEM_H_ - -#ifndef _KBASE_H_ -#error "Don't include this file directly, use mali_kbase.h instead" -#endif - -#include - -#ifdef CONFIG_UMP -#include -#endif /* CONFIG_UMP */ -#include "mali_base_kernel.h" -#include -#include "mali_kbase_pm.h" -#include "mali_kbase_defs.h" -#if defined(CONFIG_MALI_GATOR_SUPPORT) -#include "mali_kbase_gator.h" -#endif - -/* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink tmem by 4 pages at a time */ -#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2) /* round to 4 pages */ - -/* Part of the workaround for PRLAM-9630 requires us to grow/shrink memory by 8 pages. -The MMU reads in 8 page table entries from memory at a time, if we have more than one page fault within the same 8 pages and -page tables are updated accordingly, the MMU does not re-read the page table entries from memory for the subsequent page table -updates and generates duplicate page faults as the page table information used by the MMU is not valid. 
*/ -#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3) /* round to 8 pages */ - -#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2 (0) /* round to 1 page */ - -/* This must always be a power of 2 */ -#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2) -#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316) -#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630) -/** - * A CPU mapping - */ -struct kbase_cpu_mapping { - struct list_head mappings_list; - struct kbase_mem_phy_alloc *alloc; - struct kbase_context *kctx; - struct kbase_va_region *region; - pgoff_t page_off; - int count; - unsigned long vm_start; - unsigned long vm_end; -}; - -enum kbase_memory_type { - KBASE_MEM_TYPE_NATIVE, - KBASE_MEM_TYPE_IMPORTED_UMP, - KBASE_MEM_TYPE_IMPORTED_UMM, - KBASE_MEM_TYPE_ALIAS, - KBASE_MEM_TYPE_TB, - KBASE_MEM_TYPE_RAW -}; - -/* internal structure, mirroring base_mem_aliasing_info, - * but with alloc instead of a gpu va (handle) */ -struct kbase_aliased { - struct kbase_mem_phy_alloc *alloc; /* NULL for special, non-NULL for native */ - u64 offset; /* in pages */ - u64 length; /* in pages */ -}; - -/** - * @brief Physical pages tracking object properties - */ -#define KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED (1ul << 0) -#define KBASE_MEM_PHY_ALLOC_LARGE (1ul << 1) - -/* physical pages tracking object. - * Set up to track N pages. - * N not stored here, the creator holds that info. - * This object only tracks how many elements are actually valid (present). - * Changing of nents or *pages should only happen if the kbase_mem_phy_alloc is not - * shared with another region or client. CPU mappings are OK to exist when changing, as - * long as the tracked mappings objects are updated as part of the change. 
- */ -struct kbase_mem_phy_alloc { - struct kref kref; /* number of users of this alloc */ - atomic_t gpu_mappings; - size_t nents; /* 0..N */ - phys_addr_t *pages; /* N elements, only 0..nents are valid */ - - /* kbase_cpu_mappings */ - struct list_head mappings; - - /* type of buffer */ - enum kbase_memory_type type; - - unsigned long properties; - - /* member in union valid based on @a type */ - union { -#ifdef CONFIG_UMP - ump_dd_handle ump_handle; -#endif /* CONFIG_UMP */ -#if defined(CONFIG_DMA_SHARED_BUFFER) - struct { - struct dma_buf *dma_buf; - struct dma_buf_attachment *dma_attachment; - unsigned int current_mapping_usage_count; - struct sg_table *sgt; - } umm; -#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */ - struct { - u64 stride; - size_t nents; - struct kbase_aliased *aliased; - } alias; - /* Used by type = (KBASE_MEM_TYPE_NATIVE, KBASE_MEM_TYPE_TB) */ - struct kbase_context *kctx; - } imported; -}; - -static inline void kbase_mem_phy_alloc_gpu_mapped(struct kbase_mem_phy_alloc *alloc) -{ - KBASE_DEBUG_ASSERT(alloc); - /* we only track mappings of NATIVE buffers */ - if (alloc->type == KBASE_MEM_TYPE_NATIVE) - atomic_inc(&alloc->gpu_mappings); -} - -static inline void kbase_mem_phy_alloc_gpu_unmapped(struct kbase_mem_phy_alloc *alloc) -{ - KBASE_DEBUG_ASSERT(alloc); - /* we only track mappings of NATIVE buffers */ - if (alloc->type == KBASE_MEM_TYPE_NATIVE) - if (0 > atomic_dec_return(&alloc->gpu_mappings)) { - pr_err("Mismatched %s:\n", __func__); - dump_stack(); - } -} - -void kbase_mem_kref_free(struct kref *kref); - -int kbase_mem_init(struct kbase_device *kbdev); -void kbase_mem_halt(struct kbase_device *kbdev); -void kbase_mem_term(struct kbase_device *kbdev); - -static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_get(struct kbase_mem_phy_alloc *alloc) -{ - kref_get(&alloc->kref); - return alloc; -} - -static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_put(struct kbase_mem_phy_alloc *alloc) -{ - kref_put(&alloc->kref, 
kbase_mem_kref_free); - return NULL; -} - -/** - * A GPU memory region, and attributes for CPU mappings. - */ -struct kbase_va_region { - struct rb_node rblink; - struct list_head link; - - struct kbase_context *kctx; /* Backlink to base context */ - - u64 start_pfn; /* The PFN in GPU space */ - size_t nr_pages; - -/* Free region */ -#define KBASE_REG_FREE (1ul << 0) -/* CPU write access */ -#define KBASE_REG_CPU_WR (1ul << 1) -/* GPU write access */ -#define KBASE_REG_GPU_WR (1ul << 2) -/* No eXecute flag */ -#define KBASE_REG_GPU_NX (1ul << 3) -/* Is CPU cached? */ -#define KBASE_REG_CPU_CACHED (1ul << 4) -/* Is GPU cached? */ -#define KBASE_REG_GPU_CACHED (1ul << 5) - -#define KBASE_REG_GROWABLE (1ul << 6) -/* Can grow on pf? */ -#define KBASE_REG_PF_GROW (1ul << 7) - -/* VA managed by us */ -#define KBASE_REG_CUSTOM_VA (1ul << 8) - -/* inner shareable coherency */ -#define KBASE_REG_SHARE_IN (1ul << 9) -/* inner & outer shareable coherency */ -#define KBASE_REG_SHARE_BOTH (1ul << 10) - -/* Space for 4 different zones */ -#define KBASE_REG_ZONE_MASK (3ul << 11) -#define KBASE_REG_ZONE(x) (((x) & 3) << 11) - -/* GPU read access */ -#define KBASE_REG_GPU_RD (1ul<<13) -/* CPU read access */ -#define KBASE_REG_CPU_RD (1ul<<14) - -/* Aligned for GPU EX in SAME_VA */ -#define KBASE_REG_ALIGNED (1ul<<15) - -/* Index of chosen MEMATTR for this region (0..7) */ -#define KBASE_REG_MEMATTR_MASK (7ul << 16) -#define KBASE_REG_MEMATTR_INDEX(x) (((x) & 7) << 16) -#define KBASE_REG_MEMATTR_VALUE(x) (((x) & KBASE_REG_MEMATTR_MASK) >> 16) - -#define KBASE_REG_SECURE (1ul << 19) - -#define KBASE_REG_ZONE_SAME_VA KBASE_REG_ZONE(0) - -/* only used with 32-bit clients */ -/* - * On a 32bit platform, custom VA should be wired from (4GB + shader region) - * to the VA limit of the GPU. Unfortunately, the Linux mmap() interface - * limits us to 2^32 pages (2^44 bytes, see mmap64 man page for reference). 
- * So we put the default limit to the maximum possible on Linux and shrink - * it down, if required by the GPU, during initialization. - */ - -/* - * Dedicated 16MB region for shader code: - * VA range 0x101000000-0x102000000 - */ -#define KBASE_REG_ZONE_EXEC KBASE_REG_ZONE(1) -#define KBASE_REG_ZONE_EXEC_BASE (0x101000000ULL >> PAGE_SHIFT) -#define KBASE_REG_ZONE_EXEC_SIZE ((16ULL * 1024 * 1024) >> PAGE_SHIFT) - -#define KBASE_REG_ZONE_CUSTOM_VA KBASE_REG_ZONE(2) -#define KBASE_REG_ZONE_CUSTOM_VA_BASE (KBASE_REG_ZONE_EXEC_BASE + KBASE_REG_ZONE_EXEC_SIZE) /* Starting after KBASE_REG_ZONE_EXEC */ -#define KBASE_REG_ZONE_CUSTOM_VA_SIZE (((1ULL << 44) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE) -/* end 32-bit clients only */ - - unsigned long flags; - - size_t extent; /* nr of pages alloc'd on PF */ - - struct kbase_mem_phy_alloc *cpu_alloc; /* the one alloc object we mmap to the CPU when mapping this region */ - struct kbase_mem_phy_alloc *gpu_alloc; /* the one alloc object we mmap to the GPU when mapping this region */ - - /* non-NULL if this memory object is a kds_resource */ - struct kds_resource *kds_res; - -}; - -/* Common functions */ -static inline phys_addr_t *kbase_get_cpu_phy_pages(struct kbase_va_region *reg) -{ - KBASE_DEBUG_ASSERT(reg); - KBASE_DEBUG_ASSERT(reg->cpu_alloc); - KBASE_DEBUG_ASSERT(reg->gpu_alloc); - KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents); - - return reg->cpu_alloc->pages; -} - -static inline phys_addr_t *kbase_get_gpu_phy_pages(struct kbase_va_region *reg) -{ - KBASE_DEBUG_ASSERT(reg); - KBASE_DEBUG_ASSERT(reg->cpu_alloc); - KBASE_DEBUG_ASSERT(reg->gpu_alloc); - KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents); - - return reg->gpu_alloc->pages; -} - -static inline size_t kbase_reg_current_backed_size(struct kbase_va_region *reg) -{ - KBASE_DEBUG_ASSERT(reg); - /* if no alloc object the backed size naturally is 0 */ - if (!reg->cpu_alloc) - return 0; - - KBASE_DEBUG_ASSERT(reg->cpu_alloc); - 
KBASE_DEBUG_ASSERT(reg->gpu_alloc); - KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents); - - return reg->cpu_alloc->nents; -} - -#define KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD ((size_t)(4*1024)) /* size above which vmalloc is used over kmalloc */ - -static inline struct kbase_mem_phy_alloc *kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type) -{ - struct kbase_mem_phy_alloc *alloc; - const size_t alloc_size = - sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages; - - /* Prevent nr_pages*sizeof + sizeof(*alloc) from wrapping around. */ - if (nr_pages > ((((size_t) -1) - sizeof(*alloc)) - / sizeof(*alloc->pages))) - return ERR_PTR(-ENOMEM); - - /* Allocate based on the size to reduce internal fragmentation of vmem */ - if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD) - alloc = vzalloc(alloc_size); - else - alloc = kzalloc(alloc_size, GFP_KERNEL); - - if (!alloc) - return ERR_PTR(-ENOMEM); - - /* Store allocation method */ - if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD) - alloc->properties |= KBASE_MEM_PHY_ALLOC_LARGE; - - kref_init(&alloc->kref); - atomic_set(&alloc->gpu_mappings, 0); - alloc->nents = 0; - alloc->pages = (void *)(alloc + 1); - INIT_LIST_HEAD(&alloc->mappings); - alloc->type = type; - - return alloc; -} - -static inline int kbase_reg_prepare_native(struct kbase_va_region *reg, - struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(reg); - KBASE_DEBUG_ASSERT(!reg->cpu_alloc); - KBASE_DEBUG_ASSERT(!reg->gpu_alloc); - KBASE_DEBUG_ASSERT(reg->flags & KBASE_REG_FREE); - - reg->cpu_alloc = kbase_alloc_create(reg->nr_pages, - KBASE_MEM_TYPE_NATIVE); - if (IS_ERR(reg->cpu_alloc)) - return PTR_ERR(reg->cpu_alloc); - else if (!reg->cpu_alloc) - return -ENOMEM; - reg->cpu_alloc->imported.kctx = kctx; - if (kctx->infinite_cache_active && (reg->flags & KBASE_REG_CPU_CACHED)) { - reg->gpu_alloc = kbase_alloc_create(reg->nr_pages, - KBASE_MEM_TYPE_NATIVE); - reg->gpu_alloc->imported.kctx = kctx; - } else { - reg->gpu_alloc = 
kbase_mem_phy_alloc_get(reg->cpu_alloc); - } - - reg->flags &= ~KBASE_REG_FREE; - return 0; -} - -static inline int kbase_atomic_add_pages(int num_pages, atomic_t *used_pages) -{ - int new_val = atomic_add_return(num_pages, used_pages); -#if defined(CONFIG_MALI_GATOR_SUPPORT) - kbase_trace_mali_total_alloc_pages_change((long long int)new_val); -#endif - return new_val; -} - -static inline int kbase_atomic_sub_pages(int num_pages, atomic_t *used_pages) -{ - int new_val = atomic_sub_return(num_pages, used_pages); -#if defined(CONFIG_MALI_GATOR_SUPPORT) - kbase_trace_mali_total_alloc_pages_change((long long int)new_val); -#endif - return new_val; -} - -/* - * Max size for kbdev memory pool (in pages) - */ -#define KBASE_MEM_POOL_MAX_SIZE_KBDEV (SZ_64M >> PAGE_SHIFT) - -/* - * Max size for kctx memory pool (in pages) - */ -#define KBASE_MEM_POOL_MAX_SIZE_KCTX (SZ_64M >> PAGE_SHIFT) - -/** - * kbase_mem_pool_init - Create a memory pool for a kbase device - * @pool: Memory pool to initialize - * @max_size: Maximum number of free pages the pool can hold - * @kbdev: Kbase device where memory is used - * @next_pool: Pointer to the next pool or NULL. - * - * Allocations from @pool are in whole pages. Each @pool has a free list where - * pages can be quickly allocated from. The free list is initially empty and - * filled whenever pages are freed back to the pool. The number of free pages - * in the pool will in general not exceed @max_size, but the pool may in - * certain corner cases grow above @max_size. - * - * If @next_pool is not NULL, we will allocate from @next_pool before going to - * the kernel allocator. Similarily pages can spill over to @next_pool when - * @pool is full. Pages are zeroed before they spill over to another pool, to - * prevent leaking information between applications. - * - * A shrinker is registered so that Linux mm can reclaim pages from the pool as - * needed. 
- * - * Return: 0 on success, negative -errno on error - */ -int kbase_mem_pool_init(struct kbase_mem_pool *pool, - size_t max_size, - struct kbase_device *kbdev, - struct kbase_mem_pool *next_pool); - -/** - * kbase_mem_pool_term - Destroy a memory pool - * @pool: Memory pool to destroy - * - * Pages in the pool will spill over to @next_pool (if available) or freed to - * the kernel. - */ -void kbase_mem_pool_term(struct kbase_mem_pool *pool); - -/** - * kbase_mem_pool_alloc - Allocate a page from memory pool - * @pool: Memory pool to allocate from - * - * Allocations from the pool are made as follows: - * 1. If there are free pages in the pool, allocate a page from @pool. - * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page - * from @next_pool. - * 3. Finally, allocate a page from the kernel. - * - * Return: Pointer to allocated page, or NULL if allocation failed. - */ -struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool); - -/** - * kbase_mem_pool_free - Free a page to memory pool - * @pool: Memory pool where page should be freed - * @page: Page to free to the pool - * @dirty: Whether some of the page may be dirty in the cache. - * - * Pages are freed to the pool as follows: - * 1. If @pool is not full, add @page to @pool. - * 2. Otherwise, if @next_pool is not NULL and not full, add @page to - * @next_pool. - * 3. Finally, free @page to the kernel. - */ -void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *page, - bool dirty); - -/** - * kbase_mem_pool_alloc_pages - Allocate pages from memory pool - * @pool: Memory pool to allocate from - * @nr_pages: Number of pages to allocate - * @pages: Pointer to array where the physical address of the allocated - * pages will be stored. - * - * Like kbase_mem_pool_alloc() but optimized for allocating many pages. 
- * - * Return: 0 on success, negative -errno on error - */ -int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages, - phys_addr_t *pages); - -/** - * kbase_mem_pool_free_pages - Free pages to memory pool - * @pool: Memory pool where pages should be freed - * @nr_pages: Number of pages to free - * @pages: Pointer to array holding the physical addresses of the pages to - * free. - * @dirty: Whether any pages may be dirty in the cache. - * - * Like kbase_mem_pool_free() but optimized for freeing many pages. - */ -void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, - phys_addr_t *pages, bool dirty); - -/** - * kbase_mem_pool_size - Get number of free pages in memory pool - * @pool: Memory pool to inspect - * - * Note: the size of the pool may in certain corner cases exceed @max_size! - * - * Return: Number of free pages in the pool - */ -static inline size_t kbase_mem_pool_size(struct kbase_mem_pool *pool) -{ - return ACCESS_ONCE(pool->cur_size); -} - -/** - * kbase_mem_pool_max_size - Get maximum number of free pages in memory pool - * @pool: Memory pool to inspect - * - * Return: Maximum number of free pages in the pool - */ -static inline size_t kbase_mem_pool_max_size(struct kbase_mem_pool *pool) -{ - return pool->max_size; -} - - -/** - * kbase_mem_pool_set_max_size - Set maximum number of free pages in memory pool - * @pool: Memory pool to inspect - * @max_size: Maximum number of free pages the pool can hold - * - * If @max_size is reduced, the pool will be shrunk to adhere to the new limit. - * For details see kbase_mem_pool_shrink(). - */ -void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size); - -/** - * kbase_mem_pool_trim - Grow or shrink the pool to a new size - * @pool: Memory pool to trim - * @new_size: New number of pages in the pool - * - * If @new_size > @cur_size, fill the pool with new pages from the kernel, but - * not above @max_size. 
- * If @new_size < @cur_size, shrink the pool by freeing pages to the kernel. - * - * Return: The new size of the pool - */ -size_t kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size); - - -int kbase_region_tracker_init(struct kbase_context *kctx); -void kbase_region_tracker_term(struct kbase_context *kctx); - -struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr); - -/** - * @brief Check that a pointer is actually a valid region. - * - * Must be called with context lock held. - */ -struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr); - -struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone); -void kbase_free_alloced_region(struct kbase_va_region *reg); -int kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align); - -bool kbase_check_alloc_flags(unsigned long flags); -bool kbase_check_import_flags(unsigned long flags); -void kbase_update_region_flags(struct kbase_context *kctx, - struct kbase_va_region *reg, unsigned long flags); - -void kbase_gpu_vm_lock(struct kbase_context *kctx); -void kbase_gpu_vm_unlock(struct kbase_context *kctx); - -int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size); - -int kbase_mmu_init(struct kbase_context *kctx); -void kbase_mmu_term(struct kbase_context *kctx); - -phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx); -void kbase_mmu_free_pgd(struct kbase_context *kctx); -int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn, - phys_addr_t *phys, size_t nr, - unsigned long flags); -int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn, - phys_addr_t phys, size_t nr, - unsigned long flags); - -int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr); -int kbase_mmu_update_pages(struct 
kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags); - -/** - * @brief Register region and map it on the GPU. - * - * Call kbase_add_va_region() and map the region on the GPU. - */ -int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align); - -/** - * @brief Remove the region from the GPU and unregister it. - * - * Must be called with context lock held. - */ -int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg); - -/** - * The caller has the following locking conditions: - * - It must hold kbase_as::transaction_mutex on kctx's address space - * - It must hold the kbasep_js_device_data::runpool_irq::lock - */ -void kbase_mmu_update(struct kbase_context *kctx); - -/** - * The caller has the following locking conditions: - * - It must hold kbase_as::transaction_mutex on kctx's address space - * - It must hold the kbasep_js_device_data::runpool_irq::lock - */ -void kbase_mmu_disable(struct kbase_context *kctx); - -/** - * kbase_mmu_disable_as() - set the MMU in unmapped mode for an address space. - * - * @kbdev: Kbase device - * @as_nr: Number of the address space for which the MMU - * should be set in unmapped mode. - * - * The caller must hold kbdev->as[as_nr].transaction_mutex. - */ -void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr); - -void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat); - -/** Dump the MMU tables to a buffer - * - * This function allocates a buffer (of @c nr_pages pages) to hold a dump of the MMU tables and fills it. If the - * buffer is too small then the return value will be NULL. - * - * The GPU vm lock must be held when calling this function. - * - * The buffer returned should be freed with @ref vfree when it is no longer required. - * - * @param[in] kctx The kbase context to dump - * @param[in] nr_pages The number of pages to allocate for the buffer. 
- * - * @return The address of the buffer containing the MMU dump or NULL on error (including if the @c nr_pages is too - * small) - */ -void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages); - -int kbase_sync_now(struct kbase_context *kctx, struct base_syncset *syncset); -void kbase_sync_single(struct kbase_context *kctx, phys_addr_t cpu_pa, - phys_addr_t gpu_pa, off_t offset, size_t size, - enum kbase_sync_type sync_fn); -void kbase_pre_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr); -void kbase_post_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr); - -/* OS specific functions */ -int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr); -int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg); -void kbase_os_mem_map_lock(struct kbase_context *kctx); -void kbase_os_mem_map_unlock(struct kbase_context *kctx); - -/** - * @brief Update the memory allocation counters for the current process - * - * OS specific call to updates the current memory allocation counters for the current process with - * the supplied delta. - * - * @param[in] kctx The kbase context - * @param[in] pages The desired delta to apply to the memory usage counters. - */ - -void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages); - -/** - * @brief Add to the memory allocation counters for the current process - * - * OS specific call to add to the current memory allocation counters for the current process by - * the supplied amount. - * - * @param[in] kctx The kernel base context used for the allocation. - * @param[in] pages The desired delta to apply to the memory usage counters. 
- */ - -static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages) -{ - kbasep_os_process_page_usage_update(kctx, pages); -} - -/** - * @brief Subtract from the memory allocation counters for the current process - * - * OS specific call to subtract from the current memory allocation counters for the current process by - * the supplied amount. - * - * @param[in] kctx The kernel base context used for the allocation. - * @param[in] pages The desired delta to apply to the memory usage counters. - */ - -static inline void kbase_process_page_usage_dec(struct kbase_context *kctx, int pages) -{ - kbasep_os_process_page_usage_update(kctx, 0 - pages); -} - -/** - * @brief Find the offset of the CPU mapping of a memory allocation containing - * a given address range - * - * Searches for a CPU mapping of any part of the region starting at @p gpu_addr - * that fully encloses the CPU virtual address range specified by @p uaddr and - * @p size. Returns a failure indication if only part of the address range lies - * within a CPU mapping, or the address range lies within a CPU mapping of a - * different region. - * - * @param[in,out] kctx The kernel base context used for the allocation. - * @param[in] gpu_addr GPU address of the start of the allocated region - * within which to search. - * @param[in] uaddr Start of the CPU virtual address range. - * @param[in] size Size of the CPU virtual address range (in bytes). - * @param[out] offset The offset from the start of the allocation to the - * specified CPU virtual address. - * - * @return 0 if offset was obtained successfully. Error code - * otherwise. 
- */ -int kbasep_find_enclosing_cpu_mapping_offset(struct kbase_context *kctx, - u64 gpu_addr, - unsigned long uaddr, - size_t size, - u64 *offset); - -enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer); -void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom); -void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom); - -/** -* @brief Allocates physical pages. -* -* Allocates \a nr_pages_requested and updates the alloc object. -* -* @param[in] alloc allocation object to add pages to -* @param[in] nr_pages_requested number of physical pages to allocate -* -* @return 0 if all pages have been successfully allocated. Error code otherwise -*/ -int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_requested); - -/** -* @brief Free physical pages. -* -* Frees \a nr_pages and updates the alloc object. -* -* @param[in] alloc allocation object to free pages from -* @param[in] nr_pages_to_free number of physical pages to free -*/ -int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_to_free); - -static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr) -{ - SetPagePrivate(p); - if (sizeof(dma_addr_t) > sizeof(p->private)) { - /* on 32-bit ARM with LPAE dma_addr_t becomes larger, but the - * private filed stays the same. 
So we have to be clever and - * use the fact that we only store DMA addresses of whole pages, - * so the low bits should be zero */ - KBASE_DEBUG_ASSERT(!(dma_addr & (PAGE_SIZE - 1))); - set_page_private(p, dma_addr >> PAGE_SHIFT); - } else { - set_page_private(p, dma_addr); - } -} - -static inline dma_addr_t kbase_dma_addr(struct page *p) -{ - if (sizeof(dma_addr_t) > sizeof(p->private)) - return ((dma_addr_t)page_private(p)) << PAGE_SHIFT; - - return (dma_addr_t)page_private(p); -} - -static inline void kbase_clear_dma_addr(struct page *p) -{ - ClearPagePrivate(p); -} - -/** -* @brief Process a bus or page fault. -* -* This function will process a fault on a specific address space -* -* @param[in] kbdev The @ref kbase_device the fault happened on -* @param[in] kctx The @ref kbase_context for the faulting address space if -* one was found. -* @param[in] as The address space that has the fault -*/ -void kbase_mmu_interrupt_process(struct kbase_device *kbdev, - struct kbase_context *kctx, struct kbase_as *as); - -/** - * @brief Process a page fault. - * - * @param[in] data work_struct passed by queue_work() - */ -void page_fault_worker(struct work_struct *data); - -/** - * @brief Process a bus fault. - * - * @param[in] data work_struct passed by queue_work() - */ -void bus_fault_worker(struct work_struct *data); - -/** - * @brief Flush MMU workqueues. - * - * This function will cause any outstanding page or bus faults to be processed. - * It should be called prior to powering off the GPU. 
- * - * @param[in] kbdev Device pointer - */ -void kbase_flush_mmu_wqs(struct kbase_device *kbdev); - -/** - * kbase_sync_single_for_device - update physical memory and give GPU ownership - * @kbdev: Device pointer - * @handle: DMA address of region - * @size: Size of region to sync - * @dir: DMA data direction - */ - -void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir); - -/** - * kbase_sync_single_for_cpu - update physical memory and give CPU ownership - * @kbdev: Device pointer - * @handle: DMA address of region - * @size: Size of region to sync - * @dir: DMA data direction - */ - -void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir); - -#endif /* _KBASE_MEM_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c deleted file mode 100755 index 3e4481a77e151..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c +++ /dev/null @@ -1,2003 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_kbase_mem_linux.c - * Base kernel memory APIs, Linux implementation. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) - #include -#endif -#ifdef CONFIG_DMA_SHARED_BUFFER -#include -#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */ - -#include -#include -#include -#include - -static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma); -static const struct vm_operations_struct kbase_vm_ops; - -struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent, u64 *flags, u64 *gpu_va, u16 *va_alignment) -{ - int zone; - int gpu_pc_bits; - int cpu_va_bits; - struct kbase_va_region *reg; - struct device *dev; - - KBASE_DEBUG_ASSERT(kctx); - KBASE_DEBUG_ASSERT(flags); - KBASE_DEBUG_ASSERT(gpu_va); - KBASE_DEBUG_ASSERT(va_alignment); - - dev = kctx->kbdev->dev; - *va_alignment = 0; /* no alignment by default */ - *gpu_va = 0; /* return 0 on failure */ - - gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size; - cpu_va_bits = BITS_PER_LONG; - - if (0 == va_pages) { - dev_warn(dev, "kbase_mem_alloc called with 0 va_pages!"); - goto bad_size; - } - - if (va_pages > (U64_MAX / PAGE_SIZE)) - /* 64-bit address range is the max */ - goto bad_size; - -#if defined(CONFIG_64BIT) - if (kctx->is_compat) - cpu_va_bits = 32; - else - /* force SAME_VA if a 64-bit client */ - *flags |= BASE_MEM_SAME_VA; -#endif - - if (!kbase_check_alloc_flags(*flags)) { - dev_warn(dev, - "kbase_mem_alloc called with bad flags (%llx)", - (unsigned long long)*flags); - goto bad_flags; - } - - if ((*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0 && - kctx->kbdev->system_coherency != COHERENCY_ACE) { - dev_warn(dev, "kbase_mem_alloc call required coherent mem when unavailable"); - goto bad_flags; - } - if ((*flags & BASE_MEM_COHERENT_SYSTEM) != 0 && - kctx->kbdev->system_coherency != COHERENCY_ACE) { - /* Remove COHERENT_SYSTEM flag if coherent mem is unavailable */ - *flags &= 
~BASE_MEM_COHERENT_SYSTEM; - } - - /* Limit GPU executable allocs to GPU PC size */ - if ((*flags & BASE_MEM_PROT_GPU_EX) && - (va_pages > (1ULL << gpu_pc_bits >> PAGE_SHIFT))) - goto bad_ex_size; - - /* find out which VA zone to use */ - if (*flags & BASE_MEM_SAME_VA) - zone = KBASE_REG_ZONE_SAME_VA; - else if (*flags & BASE_MEM_PROT_GPU_EX) - zone = KBASE_REG_ZONE_EXEC; - else - zone = KBASE_REG_ZONE_CUSTOM_VA; - - reg = kbase_alloc_free_region(kctx, 0, va_pages, zone); - if (!reg) { - dev_err(dev, "Failed to allocate free region"); - goto no_region; - } - - kbase_update_region_flags(kctx, reg, *flags); - - if (kbase_reg_prepare_native(reg, kctx) != 0) { - dev_err(dev, "Failed to prepare region"); - goto prepare_failed; - } - - if (*flags & BASE_MEM_GROW_ON_GPF) - reg->extent = extent; - else - reg->extent = 0; - - if (kbase_alloc_phy_pages(reg, va_pages, commit_pages) != 0) { - dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)", - (unsigned long long)commit_pages, - (unsigned long long)va_pages); - goto no_mem; - } - - kbase_gpu_vm_lock(kctx); - - /* mmap needed to setup VA? 
*/ - if (*flags & BASE_MEM_SAME_VA) { - /* Bind to a cookie */ - if (!kctx->cookies) { - dev_err(dev, "No cookies available for allocation!"); - goto no_cookie; - } - /* return a cookie */ - *gpu_va = __ffs(kctx->cookies); - kctx->cookies &= ~(1UL << *gpu_va); - BUG_ON(kctx->pending_regions[*gpu_va]); - kctx->pending_regions[*gpu_va] = reg; - - /* relocate to correct base */ - *gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE); - *gpu_va <<= PAGE_SHIFT; - - /* See if we must align memory due to GPU PC bits vs CPU VA */ - if ((*flags & BASE_MEM_PROT_GPU_EX) && - (cpu_va_bits > gpu_pc_bits)) { - *va_alignment = gpu_pc_bits; - reg->flags |= KBASE_REG_ALIGNED; - } - } else /* we control the VA */ { - if (kbase_gpu_mmap(kctx, reg, 0, va_pages, 1) != 0) { - dev_warn(dev, "Failed to map memory on GPU"); - goto no_mmap; - } - /* return real GPU VA */ - *gpu_va = reg->start_pfn << PAGE_SHIFT; - } - - kbase_gpu_vm_unlock(kctx); - return reg; - -no_mmap: -no_cookie: - kbase_gpu_vm_unlock(kctx); -no_mem: - kbase_mem_phy_alloc_put(reg->cpu_alloc); - kbase_mem_phy_alloc_put(reg->gpu_alloc); -prepare_failed: - kfree(reg); -no_region: -bad_ex_size: -bad_flags: -bad_size: - return NULL; -} -KBASE_EXPORT_TEST_API(kbase_mem_alloc); - -int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 * const out) -{ - struct kbase_va_region *reg; - int ret = -EINVAL; - - KBASE_DEBUG_ASSERT(kctx); - KBASE_DEBUG_ASSERT(out); - - kbase_gpu_vm_lock(kctx); - - /* Validate the region */ - reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr); - if (!reg || (reg->flags & KBASE_REG_FREE)) - goto out_unlock; - - switch (query) { - case KBASE_MEM_QUERY_COMMIT_SIZE: - if (reg->cpu_alloc->type != KBASE_MEM_TYPE_ALIAS) { - *out = kbase_reg_current_backed_size(reg); - } else { - size_t i; - struct kbase_aliased *aliased; - *out = 0; - aliased = reg->cpu_alloc->imported.alias.aliased; - for (i = 0; i < reg->cpu_alloc->imported.alias.nents; i++) - *out += aliased[i].length; - } - 
break; - case KBASE_MEM_QUERY_VA_SIZE: - *out = reg->nr_pages; - break; - case KBASE_MEM_QUERY_FLAGS: - { - *out = 0; - if (KBASE_REG_CPU_WR & reg->flags) - *out |= BASE_MEM_PROT_CPU_WR; - if (KBASE_REG_CPU_RD & reg->flags) - *out |= BASE_MEM_PROT_CPU_RD; - if (KBASE_REG_CPU_CACHED & reg->flags) - *out |= BASE_MEM_CACHED_CPU; - if (KBASE_REG_GPU_WR & reg->flags) - *out |= BASE_MEM_PROT_GPU_WR; - if (KBASE_REG_GPU_RD & reg->flags) - *out |= BASE_MEM_PROT_GPU_RD; - if (!(KBASE_REG_GPU_NX & reg->flags)) - *out |= BASE_MEM_PROT_GPU_EX; - if (KBASE_REG_SHARE_BOTH & reg->flags) - *out |= BASE_MEM_COHERENT_SYSTEM; - if (KBASE_REG_SHARE_IN & reg->flags) - *out |= BASE_MEM_COHERENT_LOCAL; - break; - } - default: - *out = 0; - goto out_unlock; - } - - ret = 0; - -out_unlock: - kbase_gpu_vm_unlock(kctx); - return ret; -} - -int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask) -{ - struct kbase_va_region *reg; - int ret = -EINVAL; - unsigned int real_flags = 0; - unsigned int prev_flags = 0; - - KBASE_DEBUG_ASSERT(kctx); - - if (!gpu_addr) - return -EINVAL; - - /* nuke other bits */ - flags &= mask; - - /* check for only supported flags */ - if (flags & ~(BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL)) - goto out; - - /* mask covers bits we don't support? 
*/ - if (mask & ~(BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL)) - goto out; - - /* convert flags */ - if (BASE_MEM_COHERENT_SYSTEM & flags) - real_flags |= KBASE_REG_SHARE_BOTH; - else if (BASE_MEM_COHERENT_LOCAL & flags) - real_flags |= KBASE_REG_SHARE_IN; - - /* now we can lock down the context, and find the region */ - kbase_gpu_vm_lock(kctx); - - /* Validate the region */ - reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr); - if (!reg || (reg->flags & KBASE_REG_FREE)) - goto out_unlock; - - /* limit to imported memory */ - if ((reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMP) && - (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM)) - goto out_unlock; - - /* no change? */ - if (real_flags == (reg->flags & (KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH))) { - ret = 0; - goto out_unlock; - } - - /* save for roll back */ - prev_flags = reg->flags; - reg->flags &= ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH); - reg->flags |= real_flags; - - /* Currently supporting only imported memory */ - switch (reg->gpu_alloc->type) { -#ifdef CONFIG_UMP - case KBASE_MEM_TYPE_IMPORTED_UMP: - ret = kbase_mmu_update_pages(kctx, reg->start_pfn, kbase_get_cpu_phy_pages(reg), reg->gpu_alloc->nents, reg->flags); - break; -#endif -#ifdef CONFIG_DMA_SHARED_BUFFER - case KBASE_MEM_TYPE_IMPORTED_UMM: - /* Future use will use the new flags, existing mapping will NOT be updated - * as memory should not be in use by the GPU when updating the flags. - */ - ret = 0; - WARN_ON(reg->gpu_alloc->imported.umm.current_mapping_usage_count); - break; -#endif - default: - break; - } - - /* roll back on error, i.e. 
not UMP */ - if (ret) - reg->flags = prev_flags; - -out_unlock: - kbase_gpu_vm_unlock(kctx); -out: - return ret; -} - -#define KBASE_MEM_IMPORT_HAVE_PAGES (1UL << BASE_MEM_FLAGS_NR_BITS) - -#ifdef CONFIG_UMP -static struct kbase_va_region *kbase_mem_from_ump(struct kbase_context *kctx, ump_secure_id id, u64 *va_pages, u64 *flags) -{ - struct kbase_va_region *reg; - ump_dd_handle umph; - u64 block_count; - const ump_dd_physical_block_64 *block_array; - u64 i, j; - int page = 0; - ump_alloc_flags ump_flags; - ump_alloc_flags cpu_flags; - ump_alloc_flags gpu_flags; - - KBASE_DEBUG_ASSERT(kctx); - KBASE_DEBUG_ASSERT(va_pages); - KBASE_DEBUG_ASSERT(flags); - - if (*flags & BASE_MEM_SECURE) - goto bad_flags; - - umph = ump_dd_from_secure_id(id); - if (UMP_DD_INVALID_MEMORY_HANDLE == umph) - goto bad_id; - - ump_flags = ump_dd_allocation_flags_get(umph); - cpu_flags = (ump_flags >> UMP_DEVICE_CPU_SHIFT) & UMP_DEVICE_MASK; - gpu_flags = (ump_flags >> DEFAULT_UMP_GPU_DEVICE_SHIFT) & - UMP_DEVICE_MASK; - - *va_pages = ump_dd_size_get_64(umph); - *va_pages >>= PAGE_SHIFT; - - if (!*va_pages) - goto bad_size; - - if (*va_pages > (U64_MAX / PAGE_SIZE)) - /* 64-bit address range is the max */ - goto bad_size; - - if (*flags & BASE_MEM_SAME_VA) - reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA); - else - reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA); - - if (!reg) - goto no_region; - - /* we've got pages to map now, and support SAME_VA */ - *flags |= KBASE_MEM_IMPORT_HAVE_PAGES; - - reg->gpu_alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMP); - if (IS_ERR_OR_NULL(reg->gpu_alloc)) - goto no_alloc_obj; - - reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc); - - reg->gpu_alloc->imported.ump_handle = umph; - - reg->flags &= ~KBASE_REG_FREE; - reg->flags |= KBASE_REG_GPU_NX; /* UMP is always No eXecute */ - reg->flags &= ~KBASE_REG_GROWABLE; /* UMP cannot be grown */ - - /* Override import flags based on UMP 
flags */ - *flags &= ~(BASE_MEM_CACHED_CPU); - *flags &= ~(BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR); - *flags &= ~(BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR); - - if ((cpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) == - (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) { - reg->flags |= KBASE_REG_CPU_CACHED; - *flags |= BASE_MEM_CACHED_CPU; - } - - if (cpu_flags & UMP_PROT_CPU_WR) { - reg->flags |= KBASE_REG_CPU_WR; - *flags |= BASE_MEM_PROT_CPU_WR; - } - - if (cpu_flags & UMP_PROT_CPU_RD) { - reg->flags |= KBASE_REG_CPU_RD; - *flags |= BASE_MEM_PROT_CPU_RD; - } - - if ((gpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) == - (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) - reg->flags |= KBASE_REG_GPU_CACHED; - - if (gpu_flags & UMP_PROT_DEVICE_WR) { - reg->flags |= KBASE_REG_GPU_WR; - *flags |= BASE_MEM_PROT_GPU_WR; - } - - if (gpu_flags & UMP_PROT_DEVICE_RD) { - reg->flags |= KBASE_REG_GPU_RD; - *flags |= BASE_MEM_PROT_GPU_RD; - } - - /* ump phys block query */ - ump_dd_phys_blocks_get_64(umph, &block_count, &block_array); - - for (i = 0; i < block_count; i++) { - for (j = 0; j < (block_array[i].size >> PAGE_SHIFT); j++) { - reg->gpu_alloc->pages[page] = block_array[i].addr + (j << PAGE_SHIFT); - page++; - } - } - reg->gpu_alloc->nents = *va_pages; - reg->extent = 0; - - return reg; - -no_alloc_obj: - kfree(reg); -no_region: -bad_size: - ump_dd_release(umph); -bad_id: -bad_flags: - return NULL; -} -#endif /* CONFIG_UMP */ - -#ifdef CONFIG_DMA_SHARED_BUFFER -static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx, int fd, u64 *va_pages, u64 *flags) -{ - struct kbase_va_region *reg; - struct dma_buf *dma_buf; - struct dma_buf_attachment *dma_attachment; - - dma_buf = dma_buf_get(fd); - if (IS_ERR_OR_NULL(dma_buf)) - goto no_buf; - - dma_attachment = dma_buf_attach(dma_buf, kctx->kbdev->dev); - if (!dma_attachment) - goto no_attachment; - - *va_pages = PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT; - if (!*va_pages) - goto bad_size; - - if 
(*va_pages > (U64_MAX / PAGE_SIZE)) - /* 64-bit address range is the max */ - goto bad_size; - - /* ignore SAME_VA */ - *flags &= ~BASE_MEM_SAME_VA; - -#ifdef CONFIG_64BIT - if (!kctx->is_compat) { - /* 64-bit tasks must MMAP anyway, but not expose this address to clients */ - *flags |= BASE_MEM_NEED_MMAP; - reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA); - } else { -#else - if (1) { -#endif - reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA); - } - - if (!reg) - goto no_region; - - reg->gpu_alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMM); - if (IS_ERR_OR_NULL(reg->gpu_alloc)) - goto no_alloc_obj; - - reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc); - - /* No pages to map yet */ - reg->gpu_alloc->nents = 0; - - reg->flags &= ~KBASE_REG_FREE; - reg->flags |= KBASE_REG_GPU_NX; /* UMM is always No eXecute */ - reg->flags &= ~KBASE_REG_GROWABLE; /* UMM cannot be grown */ - reg->flags |= KBASE_REG_GPU_CACHED; - - if (*flags & BASE_MEM_PROT_CPU_WR) - reg->flags |= KBASE_REG_CPU_WR; - - if (*flags & BASE_MEM_PROT_CPU_RD) - reg->flags |= KBASE_REG_CPU_RD; - - if (*flags & BASE_MEM_PROT_GPU_WR) - reg->flags |= KBASE_REG_GPU_WR; - - if (*flags & BASE_MEM_PROT_GPU_RD) - reg->flags |= KBASE_REG_GPU_RD; - - if (*flags & BASE_MEM_SECURE) - reg->flags |= KBASE_REG_SECURE; - - /* no read or write permission given on import, only on run do we give the right permissions */ - - reg->gpu_alloc->type = BASE_MEM_IMPORT_TYPE_UMM; - reg->gpu_alloc->imported.umm.sgt = NULL; - reg->gpu_alloc->imported.umm.dma_buf = dma_buf; - reg->gpu_alloc->imported.umm.dma_attachment = dma_attachment; - reg->gpu_alloc->imported.umm.current_mapping_usage_count = 0; - reg->extent = 0; - - return reg; - -no_alloc_obj: - kfree(reg); -no_region: -bad_size: - dma_buf_detach(dma_buf, dma_attachment); -no_attachment: - dma_buf_put(dma_buf); -no_buf: - return NULL; -} -#endif /* CONFIG_DMA_SHARED_BUFFER */ - -u64 
kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, - u64 nents, struct base_mem_aliasing_info *ai, - u64 *num_pages) -{ - struct kbase_va_region *reg; - u64 gpu_va; - size_t i; - bool coherent; - - KBASE_DEBUG_ASSERT(kctx); - KBASE_DEBUG_ASSERT(flags); - KBASE_DEBUG_ASSERT(ai); - KBASE_DEBUG_ASSERT(num_pages); - - /* mask to only allowed flags */ - *flags &= (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR | - BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL | - BASE_MEM_COHERENT_SYSTEM_REQUIRED); - - if (!(*flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR))) { - dev_warn(kctx->kbdev->dev, - "kbase_mem_alias called with bad flags (%llx)", - (unsigned long long)*flags); - goto bad_flags; - } - coherent = (*flags & BASE_MEM_COHERENT_SYSTEM) != 0 || - (*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0; - - if (!stride) - goto bad_stride; - - if (!nents) - goto bad_nents; - - if ((nents * stride) > (U64_MAX / PAGE_SIZE)) - /* 64-bit address range is the max */ - goto bad_size; - - /* calculate the number of pages this alias will cover */ - *num_pages = nents * stride; - -#ifdef CONFIG_64BIT - if (!kctx->is_compat) { - /* 64-bit tasks must MMAP anyway, but not expose this address to - * clients */ - *flags |= BASE_MEM_NEED_MMAP; - reg = kbase_alloc_free_region(kctx, 0, *num_pages, - KBASE_REG_ZONE_SAME_VA); - } else { -#else - if (1) { -#endif - reg = kbase_alloc_free_region(kctx, 0, *num_pages, - KBASE_REG_ZONE_CUSTOM_VA); - } - - if (!reg) - goto no_reg; - - /* zero-sized page array, as we don't need one/can support one */ - reg->gpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_ALIAS); - if (IS_ERR_OR_NULL(reg->gpu_alloc)) - goto no_alloc_obj; - - reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc); - - kbase_update_region_flags(kctx, reg, *flags); - - reg->gpu_alloc->imported.alias.nents = nents; - reg->gpu_alloc->imported.alias.stride = stride; - reg->gpu_alloc->imported.alias.aliased = vzalloc(sizeof(*reg->gpu_alloc->imported.alias.aliased) * 
nents); - if (!reg->gpu_alloc->imported.alias.aliased) - goto no_aliased_array; - - kbase_gpu_vm_lock(kctx); - - /* validate and add src handles */ - for (i = 0; i < nents; i++) { - if (ai[i].handle < BASE_MEM_FIRST_FREE_ADDRESS) { - if (ai[i].handle != BASE_MEM_WRITE_ALLOC_PAGES_HANDLE) - goto bad_handle; /* unsupported magic handle */ - if (!ai[i].length) - goto bad_handle; /* must be > 0 */ - if (ai[i].length > stride) - goto bad_handle; /* can't be larger than the - stride */ - reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length; - } else { - struct kbase_va_region *aliasing_reg; - struct kbase_mem_phy_alloc *alloc; - - aliasing_reg = kbase_region_tracker_find_region_base_address(kctx, (ai[i].handle >> PAGE_SHIFT) << PAGE_SHIFT); - - /* validate found region */ - if (!aliasing_reg) - goto bad_handle; /* Not found */ - if (aliasing_reg->flags & KBASE_REG_FREE) - goto bad_handle; /* Free region */ - if (!aliasing_reg->gpu_alloc) - goto bad_handle; /* No alloc */ - if (aliasing_reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE) - goto bad_handle; /* Not a native alloc */ - if (coherent != ((aliasing_reg->flags & KBASE_REG_SHARE_BOTH) != 0)) - goto bad_handle; - /* Non-coherent memory cannot alias - coherent memory, and vice versa.*/ - - /* check size against stride */ - if (!ai[i].length) - goto bad_handle; /* must be > 0 */ - if (ai[i].length > stride) - goto bad_handle; /* can't be larger than the - stride */ - - alloc = aliasing_reg->gpu_alloc; - - /* check against the alloc's size */ - if (ai[i].offset > alloc->nents) - goto bad_handle; /* beyond end */ - if (ai[i].offset + ai[i].length > alloc->nents) - goto bad_handle; /* beyond end */ - - reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc); - reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length; - reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset; - } - } - -#ifdef CONFIG_64BIT - if (!kctx->is_compat) { - /* Bind to a cookie */ - if 
(!kctx->cookies) { - dev_err(kctx->kbdev->dev, "No cookies available for allocation!"); - goto no_cookie; - } - /* return a cookie */ - gpu_va = __ffs(kctx->cookies); - kctx->cookies &= ~(1UL << gpu_va); - BUG_ON(kctx->pending_regions[gpu_va]); - kctx->pending_regions[gpu_va] = reg; - - /* relocate to correct base */ - gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE); - gpu_va <<= PAGE_SHIFT; - } else /* we control the VA */ { -#else - if (1) { -#endif - if (kbase_gpu_mmap(kctx, reg, 0, *num_pages, 1) != 0) { - dev_warn(kctx->kbdev->dev, "Failed to map memory on GPU"); - goto no_mmap; - } - /* return real GPU VA */ - gpu_va = reg->start_pfn << PAGE_SHIFT; - } - - reg->flags &= ~KBASE_REG_FREE; - reg->flags &= ~KBASE_REG_GROWABLE; - - kbase_gpu_vm_unlock(kctx); - - return gpu_va; - -#ifdef CONFIG_64BIT -no_cookie: -#endif -no_mmap: -bad_handle: - kbase_gpu_vm_unlock(kctx); -no_aliased_array: - kbase_mem_phy_alloc_put(reg->cpu_alloc); - kbase_mem_phy_alloc_put(reg->gpu_alloc); -no_alloc_obj: - kfree(reg); -no_reg: -bad_size: -bad_nents: -bad_stride: -bad_flags: - return 0; -} - -int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type, int handle, u64 *gpu_va, u64 *va_pages, u64 *flags) -{ - struct kbase_va_region *reg; - - KBASE_DEBUG_ASSERT(kctx); - KBASE_DEBUG_ASSERT(gpu_va); - KBASE_DEBUG_ASSERT(va_pages); - KBASE_DEBUG_ASSERT(flags); - -#ifdef CONFIG_64BIT - if (!kctx->is_compat) - *flags |= BASE_MEM_SAME_VA; -#endif - - if (!kbase_check_import_flags(*flags)) { - dev_warn(kctx->kbdev->dev, - "kbase_mem_import called with bad flags (%llx)", - (unsigned long long)*flags); - goto bad_flags; - } - - switch (type) { -#ifdef CONFIG_UMP - case BASE_MEM_IMPORT_TYPE_UMP: - reg = kbase_mem_from_ump(kctx, (ump_secure_id)handle, va_pages, flags); - break; -#endif /* CONFIG_UMP */ -#ifdef CONFIG_DMA_SHARED_BUFFER - case BASE_MEM_IMPORT_TYPE_UMM: - reg = kbase_mem_from_umm(kctx, handle, va_pages, flags); - break; -#endif /* CONFIG_DMA_SHARED_BUFFER */ - 
default: - reg = NULL; - break; - } - - if (!reg) - goto no_reg; - - kbase_gpu_vm_lock(kctx); - - /* mmap needed to setup VA? */ - if (*flags & (BASE_MEM_SAME_VA | BASE_MEM_NEED_MMAP)) { - /* Bind to a cookie */ - if (!kctx->cookies) - goto no_cookie; - /* return a cookie */ - *gpu_va = __ffs(kctx->cookies); - kctx->cookies &= ~(1UL << *gpu_va); - BUG_ON(kctx->pending_regions[*gpu_va]); - kctx->pending_regions[*gpu_va] = reg; - - /* relocate to correct base */ - *gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE); - *gpu_va <<= PAGE_SHIFT; - - } else if (*flags & KBASE_MEM_IMPORT_HAVE_PAGES) { - /* we control the VA, mmap now to the GPU */ - if (kbase_gpu_mmap(kctx, reg, 0, *va_pages, 1) != 0) - goto no_gpu_va; - /* return real GPU VA */ - *gpu_va = reg->start_pfn << PAGE_SHIFT; - } else { - /* we control the VA, but nothing to mmap yet */ - if (kbase_add_va_region(kctx, reg, 0, *va_pages, 1) != 0) - goto no_gpu_va; - /* return real GPU VA */ - *gpu_va = reg->start_pfn << PAGE_SHIFT; - } - - /* clear out private flags */ - *flags &= ((1UL << BASE_MEM_FLAGS_NR_BITS) - 1); - - kbase_gpu_vm_unlock(kctx); - - return 0; - -no_gpu_va: -no_cookie: - kbase_gpu_vm_unlock(kctx); - kbase_mem_phy_alloc_put(reg->cpu_alloc); - kbase_mem_phy_alloc_put(reg->gpu_alloc); - kfree(reg); -no_reg: -bad_flags: - *gpu_va = 0; - *va_pages = 0; - *flags = 0; - return -ENOMEM; -} - - -static int zap_range_nolock(struct mm_struct *mm, - const struct vm_operations_struct *vm_ops, - unsigned long start, unsigned long end) -{ - struct vm_area_struct *vma; - int err = -EINVAL; /* in case end < start */ - - while (start < end) { - unsigned long local_end; - - vma = find_vma_intersection(mm, start, end); - if (!vma) - break; - - /* is it ours? 
*/ - if (vma->vm_ops != vm_ops) - goto try_next; - - local_end = vma->vm_end; - - if (end < local_end) - local_end = end; - - err = zap_vma_ptes(vma, start, local_end - start); - if (unlikely(err)) - break; - -try_next: - /* go to next vma, if any */ - start = vma->vm_end; - } - - return err; -} - -int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages, enum base_backing_threshold_status *failure_reason) -{ - u64 old_pages; - u64 delta; - int res = -EINVAL; - struct kbase_va_region *reg; - phys_addr_t *phy_pages; - - KBASE_DEBUG_ASSERT(kctx); - KBASE_DEBUG_ASSERT(failure_reason); - KBASE_DEBUG_ASSERT(gpu_addr != 0); - - down_read(¤t->mm->mmap_sem); - kbase_gpu_vm_lock(kctx); - - /* Validate the region */ - reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr); - if (!reg || (reg->flags & KBASE_REG_FREE)) { - *failure_reason = BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS; - goto out_unlock; - } - - KBASE_DEBUG_ASSERT(reg->cpu_alloc); - KBASE_DEBUG_ASSERT(reg->gpu_alloc); - - if (reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE) { - *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE; - goto out_unlock; - } - - if (0 == (reg->flags & KBASE_REG_GROWABLE)) { - *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE; - goto out_unlock; - } - - if (new_pages > reg->nr_pages) { - /* Would overflow the VA region */ - *failure_reason = BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS; - goto out_unlock; - } - - /* can't be mapped more than once on the GPU */ - if (atomic_read(®->gpu_alloc->gpu_mappings) > 1) { - *failure_reason = BASE_BACKING_THRESHOLD_ERROR_NOT_GROWABLE; - goto out_unlock; - } - - if (new_pages == reg->gpu_alloc->nents) { - /* no change */ - res = 0; - goto out_unlock; - } - - phy_pages = kbase_get_gpu_phy_pages(reg); - old_pages = kbase_reg_current_backed_size(reg); - - if (new_pages > old_pages) { - /* growing */ - int err; - - delta = new_pages - old_pages; - /* Allocate some more pages */ - if 
(kbase_alloc_phy_pages_helper(reg->cpu_alloc, delta) != 0) { - *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM; - goto out_unlock; - } - if (reg->cpu_alloc != reg->gpu_alloc) { - if (kbase_alloc_phy_pages_helper( - reg->gpu_alloc, delta) != 0) { - *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM; - kbase_free_phy_pages_helper(reg->cpu_alloc, - delta); - goto out_unlock; - } - } - err = kbase_mmu_insert_pages(kctx, reg->start_pfn + old_pages, - phy_pages + old_pages, delta, reg->flags); - if (err) { - kbase_free_phy_pages_helper(reg->cpu_alloc, delta); - if (reg->cpu_alloc != reg->gpu_alloc) - kbase_free_phy_pages_helper(reg->gpu_alloc, - delta); - *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM; - goto out_unlock; - } - } else { - /* shrinking */ - struct kbase_cpu_mapping *mapping; - int err; - - /* first, unmap from any mappings affected */ - list_for_each_entry(mapping, ®->cpu_alloc->mappings, mappings_list) { - unsigned long mapping_size = (mapping->vm_end - mapping->vm_start) >> PAGE_SHIFT; - - /* is this mapping affected ?*/ - if ((mapping->page_off + mapping_size) > new_pages) { - unsigned long first_bad = 0; - int zap_res; - - if (new_pages > mapping->page_off) - first_bad = new_pages - mapping->page_off; - - zap_res = zap_range_nolock(current->mm, - &kbase_vm_ops, - mapping->vm_start + - (first_bad << PAGE_SHIFT), - mapping->vm_end); - WARN(zap_res, - "Failed to zap VA range (0x%lx - 0x%lx);\n", - mapping->vm_start + - (first_bad << PAGE_SHIFT), - mapping->vm_end - ); - } - } - - /* Free some pages */ - delta = old_pages - new_pages; - err = kbase_mmu_teardown_pages(kctx, reg->start_pfn + new_pages, - delta); - if (err) { - *failure_reason = BASE_BACKING_THRESHOLD_ERROR_OOM; - goto out_unlock; - } -#ifndef CONFIG_MALI_NO_MALI - if (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_6367)) { - /* Wait for GPU to flush write buffer before freeing physical pages */ - kbase_wait_write_flush(kctx); - } -#endif - kbase_free_phy_pages_helper(reg->cpu_alloc, 
delta); - if (reg->cpu_alloc != reg->gpu_alloc) - kbase_free_phy_pages_helper(reg->gpu_alloc, delta); - } - - res = 0; - -out_unlock: - kbase_gpu_vm_unlock(kctx); - up_read(¤t->mm->mmap_sem); - - return res; -} - -static void kbase_cpu_vm_open(struct vm_area_struct *vma) -{ - struct kbase_cpu_mapping *map = vma->vm_private_data; - - KBASE_DEBUG_ASSERT(map); - KBASE_DEBUG_ASSERT(map->count > 0); - /* non-atomic as we're under Linux' mm lock */ - map->count++; -} - -static void kbase_cpu_vm_close(struct vm_area_struct *vma) -{ - struct kbase_cpu_mapping *map = vma->vm_private_data; - - KBASE_DEBUG_ASSERT(map); - KBASE_DEBUG_ASSERT(map->count > 0); - - /* non-atomic as we're under Linux' mm lock */ - if (--map->count) - return; - - KBASE_DEBUG_ASSERT(map->kctx); - KBASE_DEBUG_ASSERT(map->alloc); - - kbase_gpu_vm_lock(map->kctx); - - if (map->region) { - KBASE_DEBUG_ASSERT((map->region->flags & KBASE_REG_ZONE_MASK) == - KBASE_REG_ZONE_SAME_VA); - /* Avoid freeing memory on the process death which results in - * GPU Page Fault. 
Memory will be freed in kbase_destroy_context - */ - if (!(current->flags & PF_EXITING)) - kbase_mem_free_region(map->kctx, map->region); - } - - list_del(&map->mappings_list); - - kbase_gpu_vm_unlock(map->kctx); - - kbase_mem_phy_alloc_put(map->alloc); - kfree(map); -} - -KBASE_EXPORT_TEST_API(kbase_cpu_vm_close); - - -static int kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -{ - struct kbase_cpu_mapping *map = vma->vm_private_data; - pgoff_t rel_pgoff; - size_t i; - - KBASE_DEBUG_ASSERT(map); - KBASE_DEBUG_ASSERT(map->count > 0); - KBASE_DEBUG_ASSERT(map->kctx); - KBASE_DEBUG_ASSERT(map->alloc); - - /* we don't use vmf->pgoff as it's affected by our mmap with - * offset being a GPU VA or a cookie */ - rel_pgoff = ((unsigned long)vmf->virtual_address - map->vm_start) - >> PAGE_SHIFT; - - kbase_gpu_vm_lock(map->kctx); - if (map->page_off + rel_pgoff >= map->alloc->nents) - goto locked_bad_fault; - - /* insert all valid pages from the fault location */ - for (i = rel_pgoff; - i < MIN((vma->vm_end - vma->vm_start) >> PAGE_SHIFT, - map->alloc->nents - map->page_off); i++) { - int ret = vm_insert_pfn(vma, map->vm_start + (i << PAGE_SHIFT), - PFN_DOWN(map->alloc->pages[map->page_off + i])); - if (ret < 0 && ret != -EBUSY) - goto locked_bad_fault; - } - - kbase_gpu_vm_unlock(map->kctx); - /* we resolved it, nothing for VM to do */ - return VM_FAULT_NOPAGE; - -locked_bad_fault: - kbase_gpu_vm_unlock(map->kctx); - return VM_FAULT_SIGBUS; -} - -static const struct vm_operations_struct kbase_vm_ops = { - .open = kbase_cpu_vm_open, - .close = kbase_cpu_vm_close, - .fault = kbase_cpu_vm_fault -}; - -static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, unsigned long aligned_offset, int free_on_close) -{ - struct kbase_cpu_mapping *map; - u64 start_off = vma->vm_pgoff - reg->start_pfn; - phys_addr_t *page_array; - int err = 0; - int i; - - map = kzalloc(sizeof(*map), GFP_KERNEL); - - if (!map) { - 
WARN_ON(1); - err = -ENOMEM; - goto out; - } - - /* - * VM_DONTCOPY - don't make this mapping available in fork'ed processes - * VM_DONTEXPAND - disable mremap on this region - * VM_IO - disables paging - * VM_DONTDUMP - Don't include in core dumps (3.7 only) - * VM_MIXEDMAP - Support mixing struct page*s and raw pfns. - * This is needed to support using the dedicated and - * the OS based memory backends together. - */ - /* - * This will need updating to propagate coherency flags - * See MIDBASE-1057 - */ - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) - vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO; -#else - vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO; -#endif - vma->vm_ops = &kbase_vm_ops; - vma->vm_private_data = map; - - page_array = kbase_get_cpu_phy_pages(reg); - - if (!(reg->flags & KBASE_REG_CPU_CACHED) && - (reg->flags & (KBASE_REG_CPU_WR|KBASE_REG_CPU_RD))) { - /* We can't map vmalloc'd memory uncached. - * Other memory will have been returned from - * kbase_mem_pool which would be - * suitable for mapping uncached. - */ - BUG_ON(kaddr); - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); - } - - if (!kaddr) { - unsigned long addr = vma->vm_start + aligned_offset; - - vma->vm_flags |= VM_PFNMAP; - for (i = 0; i < nr_pages; i++) { - unsigned long pfn = PFN_DOWN(page_array[i + start_off]); - - err = vm_insert_pfn(vma, addr, pfn); - if (WARN_ON(err)) - break; - - addr += PAGE_SIZE; - } - } else { - WARN_ON(aligned_offset); - /* MIXEDMAP so we can vfree the kaddr early and not track it after map time */ - vma->vm_flags |= VM_MIXEDMAP; - /* vmalloc remaping is easy... */ - err = remap_vmalloc_range(vma, kaddr, 0); - WARN_ON(err); - } - - if (err) { - kfree(map); - goto out; - } - - map->page_off = start_off; - map->region = free_on_close ? 
reg : NULL; - map->kctx = reg->kctx; - map->vm_start = vma->vm_start + aligned_offset; - if (aligned_offset) { - KBASE_DEBUG_ASSERT(!start_off); - map->vm_end = map->vm_start + (reg->nr_pages << PAGE_SHIFT); - } else { - map->vm_end = vma->vm_end; - } - map->alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc); - map->count = 1; /* start with one ref */ - - if (reg->flags & KBASE_REG_CPU_CACHED) - map->alloc->properties |= KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED; - - list_add(&map->mappings_list, &map->alloc->mappings); - - out: - return err; -} - -static int kbase_trace_buffer_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kaddr) -{ - struct kbase_va_region *new_reg; - u32 nr_pages; - size_t size; - int err = 0; - u32 *tb; - int owns_tb = 1; - - dev_dbg(kctx->kbdev->dev, "in %s\n", __func__); - size = (vma->vm_end - vma->vm_start); - nr_pages = size >> PAGE_SHIFT; - - if (!kctx->jctx.tb) { - KBASE_DEBUG_ASSERT(0 != size); - tb = vmalloc_user(size); - - if (NULL == tb) { - err = -ENOMEM; - goto out; - } - - kbase_device_trace_buffer_install(kctx, tb, size); - } else { - err = -EINVAL; - goto out; - } - - *kaddr = kctx->jctx.tb; - - new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA); - if (!new_reg) { - err = -ENOMEM; - WARN_ON(1); - goto out_no_region; - } - - new_reg->cpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_TB); - if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) { - err = -ENOMEM; - new_reg->cpu_alloc = NULL; - WARN_ON(1); - goto out_no_alloc; - } - - new_reg->gpu_alloc = kbase_mem_phy_alloc_get(new_reg->cpu_alloc); - - new_reg->cpu_alloc->imported.kctx = kctx; - new_reg->flags &= ~KBASE_REG_FREE; - new_reg->flags |= KBASE_REG_CPU_CACHED; - - /* alloc now owns the tb */ - owns_tb = 0; - - if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1) != 0) { - err = -ENOMEM; - WARN_ON(1); - goto out_no_va_region; - } - - *reg = new_reg; - - /* map read only, noexec */ - 
vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC); - /* the rest of the flags is added by the cpu_mmap handler */ - - dev_dbg(kctx->kbdev->dev, "%s done\n", __func__); - return 0; - -out_no_va_region: -out_no_alloc: - kbase_free_alloced_region(new_reg); -out_no_region: - if (owns_tb) { - kbase_device_trace_buffer_uninstall(kctx); - vfree(tb); - } -out: - return err; -} - -static int kbase_mmu_dump_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kmap_addr) -{ - struct kbase_va_region *new_reg; - void *kaddr; - u32 nr_pages; - size_t size; - int err = 0; - - dev_dbg(kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n"); - size = (vma->vm_end - vma->vm_start); - nr_pages = size >> PAGE_SHIFT; - - kaddr = kbase_mmu_dump(kctx, nr_pages); - - if (!kaddr) { - err = -ENOMEM; - goto out; - } - - new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA); - if (!new_reg) { - err = -ENOMEM; - WARN_ON(1); - goto out; - } - - new_reg->cpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_RAW); - if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) { - err = -ENOMEM; - new_reg->cpu_alloc = NULL; - WARN_ON(1); - goto out_no_alloc; - } - - new_reg->gpu_alloc = kbase_mem_phy_alloc_get(new_reg->cpu_alloc); - - new_reg->flags &= ~KBASE_REG_FREE; - new_reg->flags |= KBASE_REG_CPU_CACHED; - if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1) != 0) { - err = -ENOMEM; - WARN_ON(1); - goto out_va_region; - } - - *kmap_addr = kaddr; - *reg = new_reg; - - dev_dbg(kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n"); - return 0; - -out_no_alloc: -out_va_region: - kbase_free_alloced_region(new_reg); -out: - return err; -} - - -void kbase_os_mem_map_lock(struct kbase_context *kctx) -{ - struct mm_struct *mm = current->mm; - (void)kctx; - down_read(&mm->mmap_sem); -} - -void kbase_os_mem_map_unlock(struct kbase_context *kctx) -{ - struct mm_struct *mm = current->mm; - (void)kctx; - up_read(&mm->mmap_sem); -} - -#if 
defined(CONFIG_DMA_SHARED_BUFFER) && defined(CONFIG_MALI_TRACE_TIMELINE) -/* This section is required only for instrumentation. */ - -static void kbase_dma_buf_vm_open(struct vm_area_struct *vma) -{ - struct kbase_cpu_mapping *map = vma->vm_private_data; - - KBASE_DEBUG_ASSERT(map); - KBASE_DEBUG_ASSERT(map->count > 0); - /* Non-atomic as we're under Linux's mm lock. */ - map->count++; -} - -static void kbase_dma_buf_vm_close(struct vm_area_struct *vma) -{ - struct kbase_cpu_mapping *map = vma->vm_private_data; - - KBASE_DEBUG_ASSERT(map); - KBASE_DEBUG_ASSERT(map->count > 0); - - /* Non-atomic as we're under Linux's mm lock. */ - if (--map->count) - return; - - KBASE_DEBUG_ASSERT(map->kctx); - - kbase_gpu_vm_lock(map->kctx); - list_del(&map->mappings_list); - kbase_gpu_vm_unlock(map->kctx); - kfree(map); -} - -static const struct vm_operations_struct kbase_dma_mmap_ops = { - .open = kbase_dma_buf_vm_open, - .close = kbase_dma_buf_vm_close, -}; -#endif /* CONFIG_DMA_SHARED_BUFFER && CONFIG_MALI_TRACE_TIMELINE */ - -int kbase_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct kbase_context *kctx = file->private_data; - struct kbase_va_region *reg; - void *kaddr = NULL; - size_t nr_pages; - int err = 0; - int free_on_close = 0; - struct device *dev = kctx->kbdev->dev; - size_t aligned_offset = 0; - - dev_dbg(dev, "kbase_mmap\n"); - nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; - - /* strip away corresponding VM_MAY% flags to the VM_% flags requested */ - vma->vm_flags &= ~((vma->vm_flags & (VM_READ | VM_WRITE)) << 4); - - if (0 == nr_pages) { - err = -EINVAL; - goto out; - } - - if (!(vma->vm_flags & VM_SHARED)) { - err = -EINVAL; - goto out; - } - - kbase_gpu_vm_lock(kctx); - - if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MAP_TRACKING_HANDLE)) { - /* The non-mapped tracking helper page */ - err = kbase_tracking_page_setup(kctx, vma); - goto out_unlock; - } - - /* if not the MTP, verify that the MTP has been mapped */ - rcu_read_lock(); - /* catches 
both when the special page isn't present or - * when we've forked */ - if (rcu_dereference(kctx->process_mm) != current->mm) { - err = -EINVAL; - rcu_read_unlock(); - goto out_unlock; - } - rcu_read_unlock(); - - switch (vma->vm_pgoff) { - case PFN_DOWN(BASE_MEM_INVALID_HANDLE): - case PFN_DOWN(BASE_MEM_WRITE_ALLOC_PAGES_HANDLE): - /* Illegal handle for direct map */ - err = -EINVAL; - goto out_unlock; - case PFN_DOWN(BASE_MEM_TRACE_BUFFER_HANDLE): - err = kbase_trace_buffer_mmap(kctx, vma, ®, &kaddr); - if (0 != err) - goto out_unlock; - dev_dbg(dev, "kbase_trace_buffer_mmap ok\n"); - /* free the region on munmap */ - free_on_close = 1; - goto map; - case PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE): - /* MMU dump */ - err = kbase_mmu_dump_mmap(kctx, vma, ®, &kaddr); - if (0 != err) - goto out_unlock; - /* free the region on munmap */ - free_on_close = 1; - goto map; - case PFN_DOWN(BASE_MEM_COOKIE_BASE) ... - PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) - 1: { - /* SAME_VA stuff, fetch the right region */ - int gpu_pc_bits; - int cookie = vma->vm_pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE); - - gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size; - reg = kctx->pending_regions[cookie]; - if (!reg) { - err = -ENOMEM; - goto out_unlock; - } - - if (reg->flags & KBASE_REG_ALIGNED) { - /* nr_pages must be able to hold alignment pages - * plus actual pages */ - unsigned long align = 1ULL << gpu_pc_bits; - unsigned long extra_pages = 3 * PFN_DOWN(align); - unsigned long aligned_addr; - unsigned long aligned_addr_end; - unsigned long nr_bytes = reg->nr_pages << PAGE_SHIFT; - - if (kctx->api_version < KBASE_API_VERSION(8, 5)) - /* Maintain compatibility with old userspace */ - extra_pages = PFN_DOWN(align); - - if (nr_pages != reg->nr_pages + extra_pages) { - /* incorrect mmap size */ - /* leave the cookie for a potential - * later mapping, or to be reclaimed - * later when the context is freed */ - err = -ENOMEM; - goto out_unlock; - } - - aligned_addr = 
ALIGN(vma->vm_start, align); - aligned_addr_end = aligned_addr + nr_bytes; - - if (kctx->api_version >= KBASE_API_VERSION(8, 5)) { - if ((aligned_addr_end & BASE_MEM_MASK_4GB) == 0) { - /* Can't end at 4GB boundary */ - aligned_addr += 2 * align; - } else if ((aligned_addr & BASE_MEM_MASK_4GB) == 0) { - /* Can't start at 4GB boundary */ - aligned_addr += align; - } - } - - aligned_offset = aligned_addr - vma->vm_start; - } else if (reg->nr_pages != nr_pages) { - /* incorrect mmap size */ - /* leave the cookie for a potential later - * mapping, or to be reclaimed later when the - * context is freed */ - err = -ENOMEM; - goto out_unlock; - } - - if ((vma->vm_flags & VM_READ && - !(reg->flags & KBASE_REG_CPU_RD)) || - (vma->vm_flags & VM_WRITE && - !(reg->flags & KBASE_REG_CPU_WR))) { - /* VM flags inconsistent with region flags */ - err = -EPERM; - dev_err(dev, "%s:%d inconsistent VM flags\n", - __FILE__, __LINE__); - goto out_unlock; - } - - /* adjust down nr_pages to what we have physically */ - nr_pages = kbase_reg_current_backed_size(reg); - - if (kbase_gpu_mmap(kctx, reg, - vma->vm_start + aligned_offset, - reg->nr_pages, 1) != 0) { - dev_err(dev, "%s:%d\n", __FILE__, __LINE__); - /* Unable to map in GPU space. */ - WARN_ON(1); - err = -ENOMEM; - goto out_unlock; - } - - /* no need for the cookie anymore */ - kctx->pending_regions[cookie] = NULL; - kctx->cookies |= (1UL << cookie); - - /* - * Overwrite the offset with the - * region start_pfn, so we effectively - * map from offset 0 in the region. - */ - vma->vm_pgoff = reg->start_pfn; - - /* free the region on munmap */ - free_on_close = 1; - goto map; - } - default: { - reg = kbase_region_tracker_find_region_enclosing_address(kctx, (u64)vma->vm_pgoff << PAGE_SHIFT); - - if (reg && !(reg->flags & KBASE_REG_FREE)) { - /* will this mapping overflow the size of the region? 
*/ - if (nr_pages > (reg->nr_pages - (vma->vm_pgoff - reg->start_pfn))) - goto overflow; - - if ((vma->vm_flags & VM_READ && - !(reg->flags & KBASE_REG_CPU_RD)) || - (vma->vm_flags & VM_WRITE && - !(reg->flags & KBASE_REG_CPU_WR))) { - /* VM flags inconsistent with region flags */ - err = -EPERM; - dev_err(dev, "%s:%d inconsistent VM flags\n", - __FILE__, __LINE__); - goto out_unlock; - } - -#ifdef CONFIG_DMA_SHARED_BUFFER - if (reg->cpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) - goto dma_map; -#endif /* CONFIG_DMA_SHARED_BUFFER */ - - /* limit what we map to the amount currently backed */ - if (reg->cpu_alloc->nents < (vma->vm_pgoff - reg->start_pfn + nr_pages)) { - if ((vma->vm_pgoff - reg->start_pfn) >= reg->cpu_alloc->nents) - nr_pages = 0; - else - nr_pages = reg->cpu_alloc->nents - (vma->vm_pgoff - reg->start_pfn); - } - - goto map; - } - -overflow: - err = -ENOMEM; - goto out_unlock; - } /* default */ - } /* switch */ -map: - err = kbase_cpu_mmap(reg, vma, kaddr, nr_pages, aligned_offset, free_on_close); - - if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE)) { - /* MMU dump - userspace should now have a reference on - * the pages, so we can now free the kernel mapping */ - vfree(kaddr); - } - goto out_unlock; - -#ifdef CONFIG_DMA_SHARED_BUFFER -dma_map: - err = dma_buf_mmap(reg->cpu_alloc->imported.umm.dma_buf, vma, vma->vm_pgoff - reg->start_pfn); -#if defined(CONFIG_MALI_TRACE_TIMELINE) - /* This section is required only for instrumentation. */ - /* Add created mapping to imported region mapping list. - * It is important to make it visible to dumping infrastructure. - * Add mapping only if vm_ops structure is not used by memory owner. 
*/ - WARN_ON(vma->vm_ops); - WARN_ON(vma->vm_private_data); - if (!err && !vma->vm_ops && !vma->vm_private_data) { - struct kbase_cpu_mapping *map = kzalloc( - sizeof(*map), - GFP_KERNEL); - - if (map) { - map->kctx = reg->kctx; - map->region = NULL; - map->page_off = vma->vm_pgoff; - map->vm_start = vma->vm_start; - map->vm_end = vma->vm_end; - map->count = 1; /* start with one ref */ - - vma->vm_ops = &kbase_dma_mmap_ops; - vma->vm_private_data = map; - - list_add( - &map->mappings_list, - ®->cpu_alloc->mappings); - } - } -#endif /* CONFIG_MALI_TRACE_TIMELINE */ -#endif /* CONFIG_DMA_SHARED_BUFFER */ -out_unlock: - kbase_gpu_vm_unlock(kctx); -out: - if (err) - dev_err(dev, "mmap failed %d\n", err); - - return err; -} - -KBASE_EXPORT_TEST_API(kbase_mmap); - -void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size, - struct kbase_vmap_struct *map) -{ - struct kbase_va_region *reg; - unsigned long page_index; - unsigned int offset = gpu_addr & ~PAGE_MASK; - size_t page_count = PFN_UP(offset + size); - phys_addr_t *page_array; - struct page **pages; - void *cpu_addr = NULL; - pgprot_t prot; - size_t i; - bool sync_needed; - - if (!size || !map) - return NULL; - - /* check if page_count calculation will wrap */ - if (size > ((size_t)-1 / PAGE_SIZE)) - return NULL; - - kbase_gpu_vm_lock(kctx); - - reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr); - if (!reg || (reg->flags & KBASE_REG_FREE)) - goto out_unlock; - - page_index = (gpu_addr >> PAGE_SHIFT) - reg->start_pfn; - - /* check if page_index + page_count will wrap */ - if (-1UL - page_count < page_index) - goto out_unlock; - - if (page_index + page_count > kbase_reg_current_backed_size(reg)) - goto out_unlock; - - page_array = kbase_get_cpu_phy_pages(reg); - if (!page_array) - goto out_unlock; - - pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); - if (!pages) - goto out_unlock; - - for (i = 0; i < page_count; i++) - pages[i] = 
pfn_to_page(PFN_DOWN(page_array[page_index + i])); - - prot = PAGE_KERNEL; - if (!(reg->flags & KBASE_REG_CPU_CACHED)) { - /* Map uncached */ - prot = pgprot_writecombine(prot); - } - - cpu_addr = vmap(pages, page_count, VM_MAP, prot); - - kfree(pages); - - if (!cpu_addr) - goto out_unlock; - - map->gpu_addr = gpu_addr; - map->cpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc); - map->cpu_pages = &kbase_get_cpu_phy_pages(reg)[page_index]; - map->gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc); - map->gpu_pages = &kbase_get_gpu_phy_pages(reg)[page_index]; - map->addr = (void *)((uintptr_t)cpu_addr + offset); - map->size = size; - map->is_cached = (reg->flags & KBASE_REG_CPU_CACHED) != 0; - sync_needed = map->is_cached; - - if (sync_needed) { - /* Sync first page */ - size_t sz = MIN(((size_t) PAGE_SIZE - offset), size); - phys_addr_t cpu_pa = map->cpu_pages[0]; - phys_addr_t gpu_pa = map->gpu_pages[0]; - - kbase_sync_single(kctx, cpu_pa, gpu_pa, offset, sz, - KBASE_SYNC_TO_CPU); - - /* Sync middle pages (if any) */ - for (i = 1; page_count > 2 && i < page_count - 1; i++) { - cpu_pa = map->cpu_pages[i]; - gpu_pa = map->gpu_pages[i]; - kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, PAGE_SIZE, - KBASE_SYNC_TO_CPU); - } - - /* Sync last page (if any) */ - if (page_count > 1) { - cpu_pa = map->cpu_pages[page_count - 1]; - gpu_pa = map->gpu_pages[page_count - 1]; - sz = ((offset + size - 1) & ~PAGE_MASK) + 1; - kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, sz, - KBASE_SYNC_TO_CPU); - } - } - kbase_gpu_vm_unlock(kctx); - - return map->addr; - -out_unlock: - kbase_gpu_vm_unlock(kctx); - return NULL; -} -KBASE_EXPORT_TEST_API(kbase_vmap); - -void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map) -{ - void *addr = (void *)((uintptr_t)map->addr & PAGE_MASK); - bool sync_needed = map->is_cached; - vunmap(addr); - if (sync_needed) { - off_t offset = (uintptr_t)map->addr & ~PAGE_MASK; - size_t size = map->size; - size_t page_count = PFN_UP(offset + size); - 
size_t i; - - /* Sync first page */ - size_t sz = MIN(((size_t) PAGE_SIZE - offset), size); - phys_addr_t cpu_pa = map->cpu_pages[0]; - phys_addr_t gpu_pa = map->gpu_pages[0]; - - kbase_sync_single(kctx, cpu_pa, gpu_pa, offset, sz, - KBASE_SYNC_TO_DEVICE); - - /* Sync middle pages (if any) */ - for (i = 1; page_count > 2 && i < page_count - 1; i++) { - cpu_pa = map->cpu_pages[i]; - gpu_pa = map->gpu_pages[i]; - kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, PAGE_SIZE, - KBASE_SYNC_TO_DEVICE); - } - - /* Sync last page (if any) */ - if (page_count > 1) { - cpu_pa = map->cpu_pages[page_count - 1]; - gpu_pa = map->gpu_pages[page_count - 1]; - sz = ((offset + size - 1) & ~PAGE_MASK) + 1; - kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, sz, - KBASE_SYNC_TO_DEVICE); - } - } - map->gpu_addr = 0; - map->cpu_alloc = kbase_mem_phy_alloc_put(map->cpu_alloc); - map->gpu_alloc = kbase_mem_phy_alloc_put(map->gpu_alloc); - map->cpu_pages = NULL; - map->gpu_pages = NULL; - map->addr = NULL; - map->size = 0; - map->is_cached = false; -} -KBASE_EXPORT_TEST_API(kbase_vunmap); - -void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages) -{ - struct mm_struct *mm; - - rcu_read_lock(); - mm = rcu_dereference(kctx->process_mm); - if (mm) { - atomic_add(pages, &kctx->nonmapped_pages); -#ifdef SPLIT_RSS_COUNTING - add_mm_counter(mm, MM_FILEPAGES, pages); -#else - spin_lock(&mm->page_table_lock); - add_mm_counter(mm, MM_FILEPAGES, pages); - spin_unlock(&mm->page_table_lock); -#endif - } - rcu_read_unlock(); -} - -static void kbasep_os_process_page_usage_drain(struct kbase_context *kctx) -{ - int pages; - struct mm_struct *mm; - - spin_lock(&kctx->mm_update_lock); - mm = rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock)); - if (!mm) { - spin_unlock(&kctx->mm_update_lock); - return; - } - - rcu_assign_pointer(kctx->process_mm, NULL); - spin_unlock(&kctx->mm_update_lock); - synchronize_rcu(); - - pages = atomic_xchg(&kctx->nonmapped_pages, 0); 
-#ifdef SPLIT_RSS_COUNTING - add_mm_counter(mm, MM_FILEPAGES, -pages); -#else - spin_lock(&mm->page_table_lock); - add_mm_counter(mm, MM_FILEPAGES, -pages); - spin_unlock(&mm->page_table_lock); -#endif -} - -static void kbase_special_vm_close(struct vm_area_struct *vma) -{ - struct kbase_context *kctx; - - kctx = vma->vm_private_data; - kbasep_os_process_page_usage_drain(kctx); -} - -static const struct vm_operations_struct kbase_vm_special_ops = { - .close = kbase_special_vm_close, -}; - -static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma) -{ - /* check that this is the only tracking page */ - spin_lock(&kctx->mm_update_lock); - if (rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock))) { - spin_unlock(&kctx->mm_update_lock); - return -EFAULT; - } - - rcu_assign_pointer(kctx->process_mm, current->mm); - - spin_unlock(&kctx->mm_update_lock); - - /* no real access */ - vma->vm_flags &= ~(VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) - vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO; -#else - vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO; -#endif - vma->vm_ops = &kbase_vm_special_ops; - vma->vm_private_data = kctx; - - return 0; -} -void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_mapping *handle) -{ - int i; - int res; - void *va; - dma_addr_t dma_pa; - struct kbase_va_region *reg; - phys_addr_t *page_array; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) - DEFINE_DMA_ATTRS(attrs); -#endif - - u32 pages = ((size - 1) >> PAGE_SHIFT) + 1; - u32 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR | - BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR; - - KBASE_DEBUG_ASSERT(kctx != NULL); - KBASE_DEBUG_ASSERT(0 != size); - KBASE_DEBUG_ASSERT(0 != pages); - - if (size == 0) - goto err; - - /* All the alloc calls return zeroed memory */ -#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - va = dma_alloc_attrs(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL, &attrs); -#else - va = dma_alloc_writecombine(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL); -#endif - if (!va) - goto err; - - /* Store the state so we can free it later. */ - handle->cpu_va = va; - handle->dma_pa = dma_pa; - handle->size = size; - - - reg = kbase_alloc_free_region(kctx, 0, pages, KBASE_REG_ZONE_SAME_VA); - if (!reg) - goto no_reg; - - reg->flags &= ~KBASE_REG_FREE; - kbase_update_region_flags(kctx, reg, flags); - - reg->cpu_alloc = kbase_alloc_create(pages, KBASE_MEM_TYPE_RAW); - if (IS_ERR_OR_NULL(reg->cpu_alloc)) - goto no_alloc; - - reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc); - - page_array = kbase_get_cpu_phy_pages(reg); - - for (i = 0; i < pages; i++) - page_array[i] = dma_pa + (i << PAGE_SHIFT); - - reg->cpu_alloc->nents = pages; - - kbase_gpu_vm_lock(kctx); - res = kbase_gpu_mmap(kctx, reg, (uintptr_t) va, pages, 1); - kbase_gpu_vm_unlock(kctx); - if (res) - goto no_mmap; - - return va; - -no_mmap: - kbase_mem_phy_alloc_put(reg->cpu_alloc); - kbase_mem_phy_alloc_put(reg->gpu_alloc); -no_alloc: - kfree(reg); -no_reg: -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) - dma_free_attrs(kctx->kbdev->dev, size, va, dma_pa, &attrs); -#else - dma_free_writecombine(kctx->kbdev->dev, size, va, dma_pa); -#endif -err: - return NULL; -} -KBASE_EXPORT_SYMBOL(kbase_va_alloc); - -void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *handle) -{ - struct kbase_va_region *reg; - int err; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) - DEFINE_DMA_ATTRS(attrs); -#endif - - KBASE_DEBUG_ASSERT(kctx != NULL); - KBASE_DEBUG_ASSERT(handle->cpu_va != NULL); - - kbase_gpu_vm_lock(kctx); - reg = kbase_region_tracker_find_region_base_address(kctx, (uintptr_t)handle->cpu_va); - KBASE_DEBUG_ASSERT(reg); - err = kbase_gpu_munmap(kctx, reg); - 
kbase_gpu_vm_unlock(kctx); - KBASE_DEBUG_ASSERT(!err); - - kbase_mem_phy_alloc_put(reg->cpu_alloc); - kbase_mem_phy_alloc_put(reg->gpu_alloc); - kfree(reg); - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - dma_free_attrs(kctx->kbdev->dev, handle->size, - handle->cpu_va, handle->dma_pa, &attrs); -#else - dma_free_writecombine(kctx->kbdev->dev, handle->size, - handle->cpu_va, handle->dma_pa); -#endif -} -KBASE_EXPORT_SYMBOL(kbase_va_free); - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h deleted file mode 100755 index 1d854152704b7..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010, 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_kbase_mem_linux.h - * Base kernel memory APIs, Linux implementation. 
- */ - -#ifndef _KBASE_MEM_LINUX_H_ -#define _KBASE_MEM_LINUX_H_ - -/** A HWC dump mapping */ -struct kbase_hwc_dma_mapping { - void *cpu_va; - dma_addr_t dma_pa; - size_t size; -}; - -struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent, u64 *flags, u64 *gpu_va, u16 *va_alignment); -int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 *const pages); -int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type, int handle, u64 *gpu_va, u64 *va_pages, u64 *flags); -u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, u64 nents, struct base_mem_aliasing_info *ai, u64 *num_pages); -int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask); -int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages, enum base_backing_threshold_status *failure_reason); -int kbase_mmap(struct file *file, struct vm_area_struct *vma); - -struct kbase_vmap_struct { - u64 gpu_addr; - struct kbase_mem_phy_alloc *cpu_alloc; - struct kbase_mem_phy_alloc *gpu_alloc; - phys_addr_t *cpu_pages; - phys_addr_t *gpu_pages; - void *addr; - size_t size; - bool is_cached; -}; -void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size, - struct kbase_vmap_struct *map); -void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map); - -/** @brief Allocate memory from kernel space and map it onto the GPU - * - * @param kctx The context used for the allocation/mapping - * @param size The size of the allocation in bytes - * @param handle An opaque structure used to contain the state needed to free the memory - * @return the VA for kernel space and GPU MMU - */ -void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_mapping *handle); - -/** @brief Free/unmap memory allocated by kbase_va_alloc - * - * @param kctx The context used for the allocation/mapping - * @param 
handle An opaque structure returned by the kbase_va_alloc function. - */ -void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *handle); - -#endif /* _KBASE_MEM_LINUX_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h deleted file mode 100755 index 441180b64d561..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#ifndef _KBASE_MEM_LOWLEVEL_H -#define _KBASE_MEM_LOWLEVEL_H - -#ifndef _KBASE_H_ -#error "Don't include this file directly, use mali_kbase.h instead" -#endif - -#include - -/** - * @brief Flags for kbase_phy_allocator_pages_alloc - */ -#define KBASE_PHY_PAGES_FLAG_DEFAULT (0) /** Default allocation flag */ -#define KBASE_PHY_PAGES_FLAG_CLEAR (1 << 0) /** Clear the pages after allocation */ -#define KBASE_PHY_PAGES_FLAG_POISON (1 << 1) /** Fill the memory with a poison value */ - -#define KBASE_PHY_PAGES_SUPPORTED_FLAGS (KBASE_PHY_PAGES_FLAG_DEFAULT|KBASE_PHY_PAGES_FLAG_CLEAR|KBASE_PHY_PAGES_FLAG_POISON) - -#define KBASE_PHY_PAGES_POISON_VALUE 0xFD /** Value to fill the memory with when KBASE_PHY_PAGES_FLAG_POISON is set */ - -enum kbase_sync_type { - KBASE_SYNC_TO_CPU, - KBASE_SYNC_TO_DEVICE -}; - -#endif /* _KBASE_LOWLEVEL_H */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c deleted file mode 100755 index a049205d0e90d..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c +++ /dev/null @@ -1,574 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#include -#include -#include -#include -#include -#include -#include -#include - -/* Backwards compatibility with kernels using the old carveout allocator */ -int __init kbase_carveout_mem_reserve(phys_addr_t size) -{ - return 0; -} - -#define pool_dbg(pool, format, ...) 
\ - dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, \ - (pool->next_pool) ? "kctx" : "kbdev", \ - kbase_mem_pool_size(pool), \ - kbase_mem_pool_max_size(pool), \ - ##__VA_ARGS__) - -static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool) -{ - spin_lock(&pool->pool_lock); -} - -static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool) -{ - spin_unlock(&pool->pool_lock); -} - -static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool) -{ - ssize_t max_size = kbase_mem_pool_max_size(pool); - ssize_t cur_size = kbase_mem_pool_size(pool); - - return max(max_size - cur_size, (ssize_t)0); -} - -static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool) -{ - return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool); -} - -static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool) -{ - return kbase_mem_pool_size(pool) == 0; -} - -static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool, - struct page *p) -{ - lockdep_assert_held(&pool->pool_lock); - - list_add(&p->lru, &pool->page_list); - pool->cur_size++; - - pool_dbg(pool, "added page\n"); -} - -static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p) -{ - kbase_mem_pool_lock(pool); - kbase_mem_pool_add_locked(pool, p); - kbase_mem_pool_unlock(pool); -} - -static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool, - struct list_head *page_list, size_t nr_pages) -{ - lockdep_assert_held(&pool->pool_lock); - - list_splice(page_list, &pool->page_list); - pool->cur_size += nr_pages; - - pool_dbg(pool, "added %zu pages\n", nr_pages); -} - -static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool, - struct list_head *page_list, size_t nr_pages) -{ - kbase_mem_pool_lock(pool); - kbase_mem_pool_add_list_locked(pool, page_list, nr_pages); - kbase_mem_pool_unlock(pool); -} - -static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool) -{ - struct page *p; - - lockdep_assert_held(&pool->pool_lock); - - if 
(kbase_mem_pool_is_empty(pool)) - return NULL; - - p = list_first_entry(&pool->page_list, struct page, lru); - list_del_init(&p->lru); - pool->cur_size--; - - pool_dbg(pool, "removed page\n"); - - return p; -} - -static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool) -{ - struct page *p; - - kbase_mem_pool_lock(pool); - p = kbase_mem_pool_remove_locked(pool); - kbase_mem_pool_unlock(pool); - - return p; -} - -static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool, - struct page *p) -{ - struct device *dev = pool->kbdev->dev; - - dma_sync_single_for_device(dev, kbase_dma_addr(p), - PAGE_SIZE, DMA_BIDIRECTIONAL); -} - -static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool, - struct page *p) -{ - clear_highpage(p); - kbase_mem_pool_sync_page(pool, p); -} - -static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool, - struct page *p) -{ - /* Zero page before spilling */ - kbase_mem_pool_zero_page(next_pool, p); - - kbase_mem_pool_add(next_pool, p); -} - -static struct page *kbase_mem_pool_alloc_page(struct kbase_mem_pool *pool) -{ - struct page *p; - gfp_t gfp; - struct device *dev = pool->kbdev->dev; - dma_addr_t dma_addr; - -#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \ - LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) - /* DMA cache sync fails for HIGHMEM before 3.5 on ARM */ - gfp = GFP_USER | __GFP_ZERO; -#else - gfp = GFP_HIGHUSER | __GFP_ZERO; -#endif - - if (current->flags & PF_KTHREAD) { - /* Don't trigger OOM killer from kernel threads, e.g. 
when - * growing memory on GPU page fault */ - gfp |= __GFP_NORETRY; - } - - p = alloc_page(gfp); - if (!p) - return NULL; - - dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, dma_addr)) { - __free_page(p); - return NULL; - } - - WARN_ON(dma_addr != page_to_phys(p)); - - kbase_set_dma_addr(p, dma_addr); - - pool_dbg(pool, "alloced page from kernel\n"); - - return p; -} - -static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool, - struct page *p) -{ - struct device *dev = pool->kbdev->dev; - dma_addr_t dma_addr = kbase_dma_addr(p); - - dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - kbase_clear_dma_addr(p); - __free_page(p); - - pool_dbg(pool, "freed page to kernel\n"); -} - -static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool, - size_t nr_to_shrink) -{ - struct page *p; - size_t i; - - lockdep_assert_held(&pool->pool_lock); - - for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) { - p = kbase_mem_pool_remove_locked(pool); - kbase_mem_pool_free_page(pool, p); - } - - return i; -} - -static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool, - size_t nr_to_shrink) -{ - size_t nr_freed; - - kbase_mem_pool_lock(pool); - nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink); - kbase_mem_pool_unlock(pool); - - return nr_freed; -} - -static size_t kbase_mem_pool_grow(struct kbase_mem_pool *pool, - size_t nr_to_grow) -{ - struct page *p; - size_t i; - - for (i = 0; i < nr_to_grow && !kbase_mem_pool_is_full(pool); i++) { - p = kbase_mem_pool_alloc_page(pool); - kbase_mem_pool_add(pool, p); - } - - return i; -} - -size_t kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size) -{ - size_t cur_size; - - cur_size = kbase_mem_pool_size(pool); - - if (new_size < cur_size) - kbase_mem_pool_shrink(pool, cur_size - new_size); - else if (new_size > cur_size) - kbase_mem_pool_grow(pool, new_size - cur_size); - - cur_size = kbase_mem_pool_size(pool); - - 
return cur_size; -} - -void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size) -{ - size_t cur_size; - size_t nr_to_shrink; - - kbase_mem_pool_lock(pool); - - pool->max_size = max_size; - - cur_size = kbase_mem_pool_size(pool); - if (max_size < cur_size) { - nr_to_shrink = cur_size - max_size; - kbase_mem_pool_shrink_locked(pool, nr_to_shrink); - } - - kbase_mem_pool_unlock(pool); -} - - -static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s, - struct shrink_control *sc) -{ - struct kbase_mem_pool *pool; - - pool = container_of(s, struct kbase_mem_pool, reclaim); - pool_dbg(pool, "reclaim count: %zu\n", kbase_mem_pool_size(pool)); - return kbase_mem_pool_size(pool); -} - -static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s, - struct shrink_control *sc) -{ - struct kbase_mem_pool *pool; - unsigned long freed; - - pool = container_of(s, struct kbase_mem_pool, reclaim); - - pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan); - - freed = kbase_mem_pool_shrink(pool, sc->nr_to_scan); - - pool_dbg(pool, "reclaim freed %ld pages\n", freed); - - return freed; -} - -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) -static int kbase_mem_pool_reclaim_shrink(struct shrinker *s, - struct shrink_control *sc) -{ - if (sc->nr_to_scan == 0) - return kbase_mem_pool_reclaim_count_objects(s, sc); - - return kbase_mem_pool_reclaim_scan_objects(s, sc); -} -#endif - -int kbase_mem_pool_init(struct kbase_mem_pool *pool, - size_t max_size, - struct kbase_device *kbdev, - struct kbase_mem_pool *next_pool) -{ - pool->cur_size = 0; - pool->max_size = max_size; - pool->kbdev = kbdev; - pool->next_pool = next_pool; - - spin_lock_init(&pool->pool_lock); - INIT_LIST_HEAD(&pool->page_list); - - /* Register shrinker */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) - pool->reclaim.shrink = kbase_mem_pool_reclaim_shrink; -#else - pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects; - 
pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects; -#endif - pool->reclaim.seeks = DEFAULT_SEEKS; - /* Kernel versions prior to 3.1 : - * struct shrinker does not define batch */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) - pool->reclaim.batch = 0; -#endif - register_shrinker(&pool->reclaim); - - pool_dbg(pool, "initialized\n"); - - return 0; -} - -void kbase_mem_pool_term(struct kbase_mem_pool *pool) -{ - struct kbase_mem_pool *next_pool = pool->next_pool; - struct page *p; - size_t nr_to_spill = 0; - LIST_HEAD(spill_list); - int i; - - pool_dbg(pool, "terminate()\n"); - - unregister_shrinker(&pool->reclaim); - - kbase_mem_pool_lock(pool); - pool->max_size = 0; - - if (next_pool && !kbase_mem_pool_is_full(next_pool)) { - /* Spill to next pool (may overspill) */ - nr_to_spill = kbase_mem_pool_capacity(next_pool); - nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill); - - /* Zero pages first without holding the next_pool lock */ - for (i = 0; i < nr_to_spill; i++) { - p = kbase_mem_pool_remove_locked(pool); - kbase_mem_pool_zero_page(pool, p); - list_add(&p->lru, &spill_list); - } - } - - while (!kbase_mem_pool_is_empty(pool)) { - /* Free remaining pages to kernel */ - p = kbase_mem_pool_remove_locked(pool); - kbase_mem_pool_free_page(pool, p); - } - - kbase_mem_pool_unlock(pool); - - if (next_pool && nr_to_spill) { - /* Add new page list to next_pool */ - kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill); - - pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill); - } - - pool_dbg(pool, "terminated\n"); -} - -struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool) -{ - struct page *p; - - pool_dbg(pool, "alloc()\n"); - - p = kbase_mem_pool_remove(pool); - - if (!p && pool->next_pool) { - /* Allocate via next pool */ - return kbase_mem_pool_alloc(pool->next_pool); - } - - if (!p) { - /* Get page from kernel */ - p = kbase_mem_pool_alloc_page(pool); - } - - return p; -} - -void kbase_mem_pool_free(struct 
kbase_mem_pool *pool, struct page *p, - bool dirty) -{ - struct kbase_mem_pool *next_pool = pool->next_pool; - - pool_dbg(pool, "free()\n"); - - if (!kbase_mem_pool_is_full(pool)) { - /* Add to our own pool */ - if (dirty) - kbase_mem_pool_sync_page(pool, p); - - kbase_mem_pool_add(pool, p); - } else if (next_pool && !kbase_mem_pool_is_full(next_pool)) { - /* Spill to next pool */ - kbase_mem_pool_spill(next_pool, p); - } else { - /* Free page */ - kbase_mem_pool_free_page(pool, p); - } -} - -int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages, - phys_addr_t *pages) -{ - struct page *p; - size_t nr_from_pool; - size_t i; - int err = -ENOMEM; - - pool_dbg(pool, "alloc_pages(%zu):\n", nr_pages); - - /* Get pages from this pool */ - kbase_mem_pool_lock(pool); - nr_from_pool = min(nr_pages, kbase_mem_pool_size(pool)); - for (i = 0; i < nr_from_pool; i++) { - p = kbase_mem_pool_remove_locked(pool); - pages[i] = page_to_phys(p); - } - kbase_mem_pool_unlock(pool); - - if (i != nr_pages && pool->next_pool) { - /* Allocate via next pool */ - err = kbase_mem_pool_alloc_pages(pool->next_pool, - nr_pages - i, pages + i); - - if (err) - goto err_rollback; - - i += nr_pages - i; - } - - /* Get any remaining pages from kernel */ - for (; i < nr_pages; i++) { - p = kbase_mem_pool_alloc_page(pool); - if (!p) - goto err_rollback; - pages[i] = page_to_phys(p); - } - - pool_dbg(pool, "alloc_pages(%zu) done\n", nr_pages); - - return 0; - -err_rollback: - kbase_mem_pool_free_pages(pool, i, pages, false); - return err; -} - -static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool, - size_t nr_pages, phys_addr_t *pages, bool zero, bool sync) -{ - struct page *p; - size_t nr_to_pool = 0; - LIST_HEAD(new_page_list); - size_t i; - - if (!nr_pages) - return; - - pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n", - nr_pages, zero, sync); - - /* Zero/sync pages first without holding the pool lock */ - for (i = 0; i < nr_pages; i++) { - if 
(unlikely(!pages[i])) - continue; - - p = phys_to_page(pages[i]); - - if (zero) - kbase_mem_pool_zero_page(pool, p); - else if (sync) - kbase_mem_pool_sync_page(pool, p); - - list_add(&p->lru, &new_page_list); - nr_to_pool++; - pages[i] = 0; - } - - /* Add new page list to pool */ - kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool); - - pool_dbg(pool, "add_array(%zu) added %zu pages\n", - nr_pages, nr_to_pool); -} - -void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, - phys_addr_t *pages, bool dirty) -{ - struct kbase_mem_pool *next_pool = pool->next_pool; - struct page *p; - size_t nr_to_pool; - LIST_HEAD(to_pool_list); - size_t i = 0; - - pool_dbg(pool, "free_pages(%zu):\n", nr_pages); - - /* Add to this pool */ - nr_to_pool = kbase_mem_pool_capacity(pool); - nr_to_pool = min(nr_pages, nr_to_pool); - - kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty); - - i += nr_to_pool; - - if (i != nr_pages && next_pool) { - /* Spill to next pool (may overspill) */ - nr_to_pool = kbase_mem_pool_capacity(next_pool); - nr_to_pool = min(nr_pages - i, nr_to_pool); - - kbase_mem_pool_add_array(next_pool, nr_to_pool, pages + i, - true, dirty); - i += nr_to_pool; - } - - /* Free any remaining pages to kernel */ - for (; i < nr_pages; i++) { - if (unlikely(!pages[i])) - continue; - - p = phys_to_page(pages[i]); - kbase_mem_pool_free_page(pool, p); - pages[i] = 0; - } - - pool_dbg(pool, "free_pages(%zu) done\n", nr_pages); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.c deleted file mode 100755 index 493665b7e607a..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#include -#include - -#include - -#ifdef CONFIG_DEBUG_FS - -static int kbase_mem_pool_debugfs_size_get(void *data, u64 *val) -{ - struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; - - *val = kbase_mem_pool_size(pool); - - return 0; -} - -static int kbase_mem_pool_debugfs_size_set(void *data, u64 val) -{ - struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; - - kbase_mem_pool_trim(pool, val); - - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(kbase_mem_pool_debugfs_size_fops, - kbase_mem_pool_debugfs_size_get, - kbase_mem_pool_debugfs_size_set, - "%llu\n"); - -static int kbase_mem_pool_debugfs_max_size_get(void *data, u64 *val) -{ - struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; - - *val = kbase_mem_pool_max_size(pool); - - return 0; -} - -static int kbase_mem_pool_debugfs_max_size_set(void *data, u64 val) -{ - struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; - - kbase_mem_pool_set_max_size(pool, val); - - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(kbase_mem_pool_debugfs_max_size_fops, - kbase_mem_pool_debugfs_max_size_get, - kbase_mem_pool_debugfs_max_size_set, - "%llu\n"); - -void kbase_mem_pool_debugfs_add(struct dentry *parent, - struct kbase_mem_pool *pool) -{ - debugfs_create_file("mem_pool_size", S_IRUGO | S_IWUSR, parent, - pool, &kbase_mem_pool_debugfs_size_fops); - - debugfs_create_file("mem_pool_max_size", S_IRUGO | S_IWUSR, parent, - pool, &kbase_mem_pool_debugfs_max_size_fops); -} - -#endif /* CONFIG_DEBUG_FS */ diff --git 
a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.h deleted file mode 100755 index 458f3f09e6973..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifndef _KBASE_MEM_POOL_DEBUGFS_H -#define _KBASE_MEM_POOL_DEBUGFS_H - -#include - -/** - * kbase_mem_pool_debugfs_add - add debugfs knobs for @pool - * @parent: Parent debugfs dentry - * @pool: Memory pool to control - * - * Adds two debugfs files under @parent: - * - mem_pool_size: get/set the current size of @pool - * - mem_pool_max_size: get/set the max size of @pool - */ -void kbase_mem_pool_debugfs_add(struct dentry *parent, - struct kbase_mem_pool *pool); - -#endif /*_KBASE_MEM_POOL_DEBUGFS_H*/ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c deleted file mode 100755 index bf60c1920294a..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#include - -#ifdef CONFIG_DEBUG_FS - -/* mam_profile file name max length 22 based on format _\0 */ -#define KBASEP_DEBUGFS_FNAME_SIZE_MAX (10+1+10+1) - -void kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data, - size_t size) -{ - spin_lock(&kctx->mem_profile_lock); - kfree(kctx->mem_profile_data); - kctx->mem_profile_data = data; - kctx->mem_profile_size = size; - spin_unlock(&kctx->mem_profile_lock); -} - -/** Show callback for the @c mem_profile debugfs file. - * - * This function is called to get the contents of the @c mem_profile debugfs - * file. This is a report of current memory usage and distribution in userspace. 
- * - * @param sfile The debugfs entry - * @param data Data associated with the entry - * - * @return 0 if successfully prints data in debugfs entry file - * -1 if it encountered an error - */ -static int kbasep_mem_profile_seq_show(struct seq_file *sfile, void *data) -{ - struct kbase_context *kctx = sfile->private; - - KBASE_DEBUG_ASSERT(kctx != NULL); - - spin_lock(&kctx->mem_profile_lock); - seq_write(sfile, kctx->mem_profile_data, kctx->mem_profile_size); - seq_putc(sfile, '\n'); - spin_unlock(&kctx->mem_profile_lock); - - return 0; -} - -/* - * File operations related to debugfs entry for mem_profile - */ -static int kbasep_mem_profile_debugfs_open(struct inode *in, struct file *file) -{ - return single_open(file, kbasep_mem_profile_seq_show, in->i_private); -} - -static const struct file_operations kbasep_mem_profile_debugfs_fops = { - .open = kbasep_mem_profile_debugfs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -void kbasep_mem_profile_debugfs_add(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(kctx != NULL); - - spin_lock_init(&kctx->mem_profile_lock); - - debugfs_create_file("mem_profile", S_IRUGO, kctx->kctx_dentry, kctx, - &kbasep_mem_profile_debugfs_fops); -} - -void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(kctx != NULL); - - spin_lock(&kctx->mem_profile_lock); - kfree(kctx->mem_profile_data); - kctx->mem_profile_data = NULL; - spin_unlock(&kctx->mem_profile_lock); -} - -#else /* CONFIG_DEBUG_FS */ - -/** - * @brief Stub function for when debugfs is disabled - */ -void kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data, - size_t size) -{ - kfree(data); -} -#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h deleted file mode 100755 index 205bd378c8eca..0000000000000 --- 
a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_kbase_mem_profile_debugfs.h - * Header file for mem profiles entries in debugfs - * - */ - -#ifndef _KBASE_MEM_PROFILE_DEBUGFS_H -#define _KBASE_MEM_PROFILE_DEBUGFS_H - -#include -#include -#include - -/** - * @brief Add new entry to Mali memory profile debugfs - */ -void kbasep_mem_profile_debugfs_add(struct kbase_context *kctx); - -/** - * @brief Remove entry from Mali memory profile debugfs - */ -void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx); - -/** - * @brief Insert data to debugfs file, so it can be read by userspce - * - * Function takes ownership of @c data and frees it later when new data - * are inserted. 
- * - * @param kctx Context to which file data should be inserted - * @param data NULL-terminated string to be inserted to mem_profile file, - without trailing new line character - * @param size @c buf length - */ -void kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data, - size_t size); - -#endif /*_KBASE_MEM_PROFILE_DEBUGFS_H*/ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h deleted file mode 100755 index 82f0702974c20..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/** - * @file mali_kbase_mem_profile_debugfs_buf_size.h - * Header file for the size of the buffer to accumulate the histogram report text in - */ - -#ifndef _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_ -#define _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_ - -/** - * The size of the buffer to accumulate the histogram report text in - * @see @ref CCTXP_HIST_BUF_SIZE_MAX_LENGTH_REPORT - */ -#define KBASE_MEM_PROFILE_MAX_BUF_SIZE ((size_t) (64 + ((80 + (56 * 64)) * 15) + 56)) - -#endif /*_KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_*/ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mmu.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mmu.c deleted file mode 100755 index c061f2a988d27..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mmu.c +++ /dev/null @@ -1,1698 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_kbase_mmu.c - * Base kernel MMU management. - */ - -/* #define DEBUG 1 */ -#include -#include -#include -#include -#if defined(CONFIG_MALI_GATOR_SUPPORT) -#include -#endif -#if defined(CONFIG_MALI_MIPE_ENABLED) -#include -#endif -#include - -#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a) - -#include -#include -#include -#include - -#define KBASE_MMU_PAGE_ENTRIES 512 - -/** - * kbase_mmu_sync_pgd - sync page directory to memory - * @dev: Device pointer. - * @handle: Address of DMA region. - * @size: Size of the region to sync. 
- * - * This should be called after each page directory update. - */ - -static void kbase_mmu_sync_pgd(struct device *dev, - dma_addr_t handle, size_t size) -{ - - dma_sync_single_for_device(dev, handle, size, DMA_TO_DEVICE); -} - -/* - * Definitions: - * - PGD: Page Directory. - * - PTE: Page Table Entry. A 64bit value pointing to the next - * level of translation - * - ATE: Address Transation Entry. A 64bit value pointing to - * a 4kB physical page. - */ - -static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx, - struct kbase_as *as, const char *reason_str); - - -static size_t make_multiple(size_t minimum, size_t multiple) -{ - size_t remainder = minimum % multiple; - - if (remainder == 0) - return minimum; - - return minimum + multiple - remainder; -} - -void page_fault_worker(struct work_struct *data) -{ - u64 fault_pfn; - u32 fault_status; - size_t new_pages; - size_t fault_rel_pfn; - struct kbase_as *faulting_as; - int as_no; - struct kbase_context *kctx; - struct kbase_device *kbdev; - struct kbase_va_region *region; - int err; - bool grown = false; - - faulting_as = container_of(data, struct kbase_as, work_pagefault); - fault_pfn = faulting_as->fault_addr >> PAGE_SHIFT; - as_no = faulting_as->number; - - kbdev = container_of(faulting_as, struct kbase_device, as[as_no]); - - /* Grab the context that was already refcounted in kbase_mmu_interrupt(). 
- * Therefore, it cannot be scheduled out of this AS until we explicitly release it - */ - kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no); - if (WARN_ON(!kctx)) { - atomic_dec(&kbdev->faults_pending); - return; - } - - KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev); - - fault_status = faulting_as->fault_status; - switch (fault_status & AS_FAULTSTATUS_EXCEPTION_CODE_MASK) { - - case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT: - /* need to check against the region to handle this one */ - break; - - case AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT: - kbase_mmu_report_fault_and_kill(kctx, faulting_as, - "Permission failure"); - goto fault_done; - - case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT: - kbase_mmu_report_fault_and_kill(kctx, faulting_as, - "Tranlation table bus fault"); - goto fault_done; - - case AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG: - /* nothing to do, but we don't expect this fault currently */ - dev_warn(kbdev->dev, "Access flag unexpectedly set"); - goto fault_done; - - - default: - kbase_mmu_report_fault_and_kill(kctx, faulting_as, - "Unknown fault code"); - goto fault_done; - } - - /* so we have a translation fault, let's see if it is for growable - * memory */ - kbase_gpu_vm_lock(kctx); - - region = kbase_region_tracker_find_region_enclosing_address(kctx, - faulting_as->fault_addr); - if (!region || region->flags & KBASE_REG_FREE) { - kbase_gpu_vm_unlock(kctx); - kbase_mmu_report_fault_and_kill(kctx, faulting_as, - "Memory is not mapped on the GPU"); - goto fault_done; - } - - if ((region->flags & GROWABLE_FLAGS_REQUIRED) - != GROWABLE_FLAGS_REQUIRED) { - kbase_gpu_vm_unlock(kctx); - kbase_mmu_report_fault_and_kill(kctx, faulting_as, - "Memory is not growable"); - goto fault_done; - } - - /* find the size we need to grow it by */ - /* we know the result fit in a size_t due to kbase_region_tracker_find_region_enclosing_address - * validating the fault_adress to be within a size_t from the start_pfn */ - fault_rel_pfn = 
fault_pfn - region->start_pfn; - - if (fault_rel_pfn < kbase_reg_current_backed_size(region)) { - dev_dbg(kbdev->dev, "Page fault @ 0x%llx in allocated region 0x%llx-0x%llx of growable TMEM: Ignoring", - faulting_as->fault_addr, region->start_pfn, - region->start_pfn + - kbase_reg_current_backed_size(region)); - - kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx, - KBASE_MMU_FAULT_TYPE_PAGE); - /* [1] in case another page fault occurred while we were - * handling the (duplicate) page fault we need to ensure we - * don't loose the other page fault as result of us clearing - * the MMU IRQ. Therefore, after we clear the MMU IRQ we send - * an UNLOCK command that will retry any stalled memory - * transaction (which should cause the other page fault to be - * raised again). - */ - kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0, - AS_COMMAND_UNLOCK, 1); - kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx, - KBASE_MMU_FAULT_TYPE_PAGE); - kbase_gpu_vm_unlock(kctx); - - goto fault_done; - } - - new_pages = make_multiple(fault_rel_pfn - - kbase_reg_current_backed_size(region) + 1, - region->extent); - - /* cap to max vsize */ - if (new_pages + kbase_reg_current_backed_size(region) > - region->nr_pages) - new_pages = region->nr_pages - - kbase_reg_current_backed_size(region); - - if (0 == new_pages) { - /* Duplicate of a fault we've already handled, nothing to do */ - kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx, - KBASE_MMU_FAULT_TYPE_PAGE); - /* See comment [1] about UNLOCK usage */ - kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0, - AS_COMMAND_UNLOCK, 1); - kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx, - KBASE_MMU_FAULT_TYPE_PAGE); - kbase_gpu_vm_unlock(kctx); - goto fault_done; - } - - if (kbase_alloc_phy_pages_helper(region->gpu_alloc, new_pages) == 0) { - if (region->gpu_alloc != region->cpu_alloc) { - if (kbase_alloc_phy_pages_helper( - region->cpu_alloc, new_pages) == 0) { - grown = true; - } else { - 
kbase_free_phy_pages_helper(region->gpu_alloc, - new_pages); - } - } else { - grown = true; - } - } - - - if (grown) { - u32 op; - - /* alloc success */ - KBASE_DEBUG_ASSERT(kbase_reg_current_backed_size(region) <= region->nr_pages); - - /* AS transaction begin */ - mutex_lock(&faulting_as->transaction_mutex); - - /* set up the new pages */ - err = kbase_mmu_insert_pages(kctx, region->start_pfn + kbase_reg_current_backed_size(region) - new_pages, &kbase_get_gpu_phy_pages(region)[kbase_reg_current_backed_size(region) - new_pages], new_pages, region->flags); - if (err) { - /* failed to insert pages, handle as a normal PF */ - mutex_unlock(&faulting_as->transaction_mutex); - kbase_free_phy_pages_helper(region->gpu_alloc, new_pages); - if (region->gpu_alloc != region->cpu_alloc) - kbase_free_phy_pages_helper(region->cpu_alloc, - new_pages); - kbase_gpu_vm_unlock(kctx); - /* The locked VA region will be unlocked and the cache invalidated in here */ - kbase_mmu_report_fault_and_kill(kctx, faulting_as, - "Page table update failure"); - goto fault_done; - } -#if defined(CONFIG_MALI_GATOR_SUPPORT) - kbase_trace_mali_page_fault_insert_pages(as_no, new_pages); -#endif -#if defined(CONFIG_MALI_MIPE_ENABLED) - kbase_tlstream_aux_pagefault( - as_no, - atomic_read(&kctx->used_pages)); -#endif - - /* flush L2 and unlock the VA (resumes the MMU) */ - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367)) - op = AS_COMMAND_FLUSH; - else - op = AS_COMMAND_FLUSH_PT; - - /* clear MMU interrupt - this needs to be done after updating - * the page tables but before issuing a FLUSH command. The - * FLUSH cmd has a side effect that it restarts stalled memory - * transactions in other address spaces which may cause - * another fault to occur. If we didn't clear the interrupt at - * this stage a new IRQ might not be raised when the GPU finds - * a MMU IRQ is already pending. 
- */ - kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx, - KBASE_MMU_FAULT_TYPE_PAGE); - - kbase_mmu_hw_do_operation(kbdev, faulting_as, kctx, - faulting_as->fault_addr >> PAGE_SHIFT, - new_pages, - op, 1); - - mutex_unlock(&faulting_as->transaction_mutex); - /* AS transaction end */ - - /* reenable this in the mask */ - kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx, - KBASE_MMU_FAULT_TYPE_PAGE); - kbase_gpu_vm_unlock(kctx); - } else { - /* failed to extend, handle as a normal PF */ - kbase_gpu_vm_unlock(kctx); - kbase_mmu_report_fault_and_kill(kctx, faulting_as, - "Page allocation failure"); - } - -fault_done: - /* - * By this point, the fault was handled in some way, - * so release the ctx refcount - */ - kbasep_js_runpool_release_ctx(kbdev, kctx); - - atomic_dec(&kbdev->faults_pending); -} - -phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx) -{ - u64 *page; - int i; - struct page *p; - - KBASE_DEBUG_ASSERT(NULL != kctx); - kbase_atomic_add_pages(1, &kctx->used_pages); - kbase_atomic_add_pages(1, &kctx->kbdev->memdev.used_pages); - - p = kbase_mem_pool_alloc(&kctx->mem_pool); - if (!p) - goto sub_pages; - - page = kmap(p); - if (NULL == page) - goto alloc_free; - - kbase_process_page_usage_inc(kctx, 1); - - for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) - kctx->kbdev->mmu_mode->entry_invalidate(&page[i]); - - kbase_mmu_sync_pgd(kctx->kbdev->dev, kbase_dma_addr(p), PAGE_SIZE); - - kunmap(p); - return page_to_phys(p); - -alloc_free: - kbase_mem_pool_free(&kctx->mem_pool, p, false); -sub_pages: - kbase_atomic_sub_pages(1, &kctx->used_pages); - kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages); - - return 0; -} - -KBASE_EXPORT_TEST_API(kbase_mmu_alloc_pgd); - -/* Given PGD PFN for level N, return PGD PFN for level N+1 */ -static phys_addr_t mmu_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level) -{ - u64 *page; - phys_addr_t target_pgd; - struct page *p; - - KBASE_DEBUG_ASSERT(pgd); - KBASE_DEBUG_ASSERT(NULL != kctx); 
- - lockdep_assert_held(&kctx->reg_lock); - - /* - * Architecture spec defines level-0 as being the top-most. - * This is a bit unfortunate here, but we keep the same convention. - */ - vpfn >>= (3 - level) * 9; - vpfn &= 0x1FF; - - p = pfn_to_page(PFN_DOWN(pgd)); - page = kmap(p); - if (NULL == page) { - dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kmap failure\n"); - return 0; - } - - target_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]); - - if (!target_pgd) { - target_pgd = kbase_mmu_alloc_pgd(kctx); - if (!target_pgd) { - dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kbase_mmu_alloc_pgd failure\n"); - kunmap(p); - return 0; - } - - kctx->kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd); - - kbase_mmu_sync_pgd(kctx->kbdev->dev, - kbase_dma_addr(p), PAGE_SIZE); - /* Rely on the caller to update the address space flags. */ - } - - kunmap(p); - return target_pgd; -} - -static phys_addr_t mmu_get_bottom_pgd(struct kbase_context *kctx, u64 vpfn) -{ - phys_addr_t pgd; - int l; - - pgd = kctx->pgd; - - for (l = MIDGARD_MMU_TOPLEVEL; l < 3; l++) { - pgd = mmu_get_next_pgd(kctx, pgd, vpfn, l); - /* Handle failure condition */ - if (!pgd) { - dev_warn(kctx->kbdev->dev, "mmu_get_bottom_pgd: mmu_get_next_pgd failure\n"); - return 0; - } - } - - return pgd; -} - -static phys_addr_t mmu_insert_pages_recover_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level) -{ - u64 *page; - phys_addr_t target_pgd; - - KBASE_DEBUG_ASSERT(pgd); - KBASE_DEBUG_ASSERT(NULL != kctx); - - lockdep_assert_held(&kctx->reg_lock); - - /* - * Architecture spec defines level-0 as being the top-most. - * This is a bit unfortunate here, but we keep the same convention. 
- */ - vpfn >>= (3 - level) * 9; - vpfn &= 0x1FF; - - page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd))); - /* kmap_atomic should NEVER fail */ - KBASE_DEBUG_ASSERT(NULL != page); - - target_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]); - /* As we are recovering from what has already been set up, we should have a target_pgd */ - KBASE_DEBUG_ASSERT(0 != target_pgd); - kunmap_atomic(page); - return target_pgd; -} - -static phys_addr_t mmu_insert_pages_recover_get_bottom_pgd(struct kbase_context *kctx, u64 vpfn) -{ - phys_addr_t pgd; - int l; - - pgd = kctx->pgd; - - for (l = MIDGARD_MMU_TOPLEVEL; l < 3; l++) { - pgd = mmu_insert_pages_recover_get_next_pgd(kctx, pgd, vpfn, l); - /* Should never fail */ - KBASE_DEBUG_ASSERT(0 != pgd); - } - - return pgd; -} - -static void mmu_insert_pages_failure_recovery(struct kbase_context *kctx, u64 vpfn, - size_t nr) -{ - phys_addr_t pgd; - u64 *pgd_page; - struct kbase_mmu_mode const *mmu_mode; - - KBASE_DEBUG_ASSERT(NULL != kctx); - KBASE_DEBUG_ASSERT(0 != vpfn); - /* 64-bit address range is the max */ - KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE)); - - lockdep_assert_held(&kctx->reg_lock); - - mmu_mode = kctx->kbdev->mmu_mode; - - while (nr) { - unsigned int i; - unsigned int index = vpfn & 0x1FF; - unsigned int count = KBASE_MMU_PAGE_ENTRIES - index; - struct page *p; - - if (count > nr) - count = nr; - - pgd = mmu_insert_pages_recover_get_bottom_pgd(kctx, vpfn); - KBASE_DEBUG_ASSERT(0 != pgd); - - p = pfn_to_page(PFN_DOWN(pgd)); - - pgd_page = kmap_atomic(p); - KBASE_DEBUG_ASSERT(NULL != pgd_page); - - /* Invalidate the entries we added */ - for (i = 0; i < count; i++) - mmu_mode->entry_invalidate(&pgd_page[index + i]); - - vpfn += count; - nr -= count; - - kbase_mmu_sync_pgd(kctx->kbdev->dev, - kbase_dma_addr(p), - PAGE_SIZE); - - kunmap_atomic(pgd_page); - } -} - -/* - * Map the single page 'phys' 'nr' of times, starting at GPU PFN 'vpfn' - */ -int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 
vpfn, - phys_addr_t phys, size_t nr, - unsigned long flags) -{ - phys_addr_t pgd; - u64 *pgd_page; - /* In case the insert_single_page only partially completes we need to be - * able to recover */ - bool recover_required = false; - u64 recover_vpfn = vpfn; - size_t recover_count = 0; - - KBASE_DEBUG_ASSERT(NULL != kctx); - KBASE_DEBUG_ASSERT(0 != vpfn); - /* 64-bit address range is the max */ - KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE)); - - lockdep_assert_held(&kctx->reg_lock); - - while (nr) { - unsigned int i; - unsigned int index = vpfn & 0x1FF; - unsigned int count = KBASE_MMU_PAGE_ENTRIES - index; - struct page *p; - - if (count > nr) - count = nr; - - /* - * Repeatedly calling mmu_get_bottom_pte() is clearly - * suboptimal. We don't have to re-parse the whole tree - * each time (just cache the l0-l2 sequence). - * On the other hand, it's only a gain when we map more than - * 256 pages at once (on average). Do we really care? - */ - pgd = mmu_get_bottom_pgd(kctx, vpfn); - if (!pgd) { - dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n"); - if (recover_required) { - /* Invalidate the pages we have partially - * completed */ - mmu_insert_pages_failure_recovery(kctx, - recover_vpfn, - recover_count); - } - return -EINVAL; - } - - p = pfn_to_page(PFN_DOWN(pgd)); - pgd_page = kmap(p); - if (!pgd_page) { - dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: kmap failure\n"); - if (recover_required) { - /* Invalidate the pages we have partially - * completed */ - mmu_insert_pages_failure_recovery(kctx, - recover_vpfn, - recover_count); - } - return -ENOMEM; - } - - for (i = 0; i < count; i++) { - unsigned int ofs = index + i; - - KBASE_DEBUG_ASSERT(0 == (pgd_page[ofs] & 1UL)); - kctx->kbdev->mmu_mode->entry_set_ate(&pgd_page[ofs], - phys, flags); - } - - vpfn += count; - nr -= count; - - kbase_mmu_sync_pgd(kctx->kbdev->dev, - kbase_dma_addr(p) + - (index * sizeof(u64)), - count * sizeof(u64)); - - kunmap(p); - /* We have started 
modifying the page table. - * If further pages need inserting and fail we need to undo what - * has already taken place */ - recover_required = true; - recover_count += count; - } - return 0; -} - -/* - * Map 'nr' pages pointed to by 'phys' at GPU PFN 'vpfn' - */ -int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn, - phys_addr_t *phys, size_t nr, - unsigned long flags) -{ - phys_addr_t pgd; - u64 *pgd_page; - /* In case the insert_pages only partially completes we need to be able - * to recover */ - bool recover_required = false; - u64 recover_vpfn = vpfn; - size_t recover_count = 0; - - KBASE_DEBUG_ASSERT(NULL != kctx); - KBASE_DEBUG_ASSERT(0 != vpfn); - /* 64-bit address range is the max */ - KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE)); - - lockdep_assert_held(&kctx->reg_lock); - - while (nr) { - unsigned int i; - unsigned int index = vpfn & 0x1FF; - unsigned int count = KBASE_MMU_PAGE_ENTRIES - index; - struct page *p; - - if (count > nr) - count = nr; - - /* - * Repeatedly calling mmu_get_bottom_pte() is clearly - * suboptimal. We don't have to re-parse the whole tree - * each time (just cache the l0-l2 sequence). - * On the other hand, it's only a gain when we map more than - * 256 pages at once (on average). Do we really care? 
- */ - pgd = mmu_get_bottom_pgd(kctx, vpfn); - if (!pgd) { - dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n"); - if (recover_required) { - /* Invalidate the pages we have partially - * completed */ - mmu_insert_pages_failure_recovery(kctx, - recover_vpfn, - recover_count); - } - return -EINVAL; - } - - p = pfn_to_page(PFN_DOWN(pgd)); - pgd_page = kmap(p); - if (!pgd_page) { - dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: kmap failure\n"); - if (recover_required) { - /* Invalidate the pages we have partially - * completed */ - mmu_insert_pages_failure_recovery(kctx, - recover_vpfn, - recover_count); - } - return -ENOMEM; - } - - for (i = 0; i < count; i++) { - unsigned int ofs = index + i; - - KBASE_DEBUG_ASSERT(0 == (pgd_page[ofs] & 1UL)); - kctx->kbdev->mmu_mode->entry_set_ate(&pgd_page[ofs], - phys[i], flags); - } - - phys += count; - vpfn += count; - nr -= count; - - kbase_mmu_sync_pgd(kctx->kbdev->dev, - kbase_dma_addr(p) + - (index * sizeof(u64)), - count * sizeof(u64)); - - kunmap(p); - /* We have started modifying the page table. If further pages - * need inserting and fail we need to undo what has already - * taken place */ - recover_required = true; - recover_count += count; - } - return 0; -} - -KBASE_EXPORT_TEST_API(kbase_mmu_insert_pages); - -/** - * This function is responsible for validating the MMU PTs - * triggering reguired flushes. - * - * * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is - * currently scheduled into the runpool, and so potentially uses a lot of locks. - * These locks must be taken in the correct order with respect to others - * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more - * information. 
- */ -static void kbase_mmu_flush(struct kbase_context *kctx, u64 vpfn, size_t nr) -{ - struct kbase_device *kbdev; - bool ctx_is_in_runpool; - - KBASE_DEBUG_ASSERT(NULL != kctx); - - kbdev = kctx->kbdev; - - /* We must flush if we're currently running jobs. At the very least, we need to retain the - * context to ensure it doesn't schedule out whilst we're trying to flush it */ - ctx_is_in_runpool = kbasep_js_runpool_retain_ctx(kbdev, kctx); - - if (ctx_is_in_runpool) { - KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); - - /* Second level check is to try to only do this when jobs are running. The refcount is - * a heuristic for this. */ - if (kbdev->js_data.runpool_irq.per_as_data[kctx->as_nr].as_busy_refcount >= 2) { - if (!kbase_pm_context_active_handle_suspend(kbdev, - KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) { - int ret; - u32 op; - - /* AS transaction begin */ - mutex_lock(&kbdev->as[ - kctx->as_nr].transaction_mutex); - - if (kbase_hw_has_issue(kbdev, - BASE_HW_ISSUE_6367)) - op = AS_COMMAND_FLUSH; - else - op = AS_COMMAND_FLUSH_MEM; - - ret = kbase_mmu_hw_do_operation(kbdev, - &kbdev->as[kctx->as_nr], - kctx, vpfn, nr, - op, 0); -#if KBASE_GPU_RESET_EN - if (ret) { - /* Flush failed to complete, assume the - * GPU has hung and perform a reset to - * recover */ - dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issueing GPU soft-reset to recover\n"); - if (kbase_prepare_to_reset_gpu(kbdev)) - kbase_reset_gpu(kbdev); - } -#endif /* KBASE_GPU_RESET_EN */ - - mutex_unlock(&kbdev->as[ - kctx->as_nr].transaction_mutex); - /* AS transaction end */ - - kbase_pm_context_idle(kbdev); - } - } - kbasep_js_runpool_release_ctx(kbdev, kctx); - } -} - -/* - * We actually only discard the ATE, and not the page table - * pages. There is a potential DoS here, as we'll leak memory by - * having PTEs that are potentially unused. Will require physical - * page accounting, so MMU pages are part of the process allocation. 
- * - * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is - * currently scheduled into the runpool, and so potentially uses a lot of locks. - * These locks must be taken in the correct order with respect to others - * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more - * information. - */ -int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr) -{ - phys_addr_t pgd; - u64 *pgd_page; - struct kbase_device *kbdev; - size_t requested_nr = nr; - struct kbase_mmu_mode const *mmu_mode; - - KBASE_DEBUG_ASSERT(NULL != kctx); - beenthere(kctx, "kctx %p vpfn %lx nr %zd", (void *)kctx, (unsigned long)vpfn, nr); - - lockdep_assert_held(&kctx->reg_lock); - - if (0 == nr) { - /* early out if nothing to do */ - return 0; - } - - kbdev = kctx->kbdev; - mmu_mode = kbdev->mmu_mode; - - while (nr) { - unsigned int i; - unsigned int index = vpfn & 0x1FF; - unsigned int count = KBASE_MMU_PAGE_ENTRIES - index; - struct page *p; - - if (count > nr) - count = nr; - - pgd = mmu_get_bottom_pgd(kctx, vpfn); - if (!pgd) { - dev_warn(kbdev->dev, "kbase_mmu_teardown_pages: mmu_get_bottom_pgd failure\n"); - return -EINVAL; - } - - p = pfn_to_page(PFN_DOWN(pgd)); - pgd_page = kmap(p); - if (!pgd_page) { - dev_warn(kbdev->dev, "kbase_mmu_teardown_pages: kmap failure\n"); - return -ENOMEM; - } - - for (i = 0; i < count; i++) - mmu_mode->entry_invalidate(&pgd_page[index + i]); - - vpfn += count; - nr -= count; - - kbase_mmu_sync_pgd(kctx->kbdev->dev, - kbase_dma_addr(p) + - (index * sizeof(u64)), - count * sizeof(u64)); - - kunmap(p); - } - - kbase_mmu_flush(kctx, vpfn, requested_nr); - return 0; -} - -KBASE_EXPORT_TEST_API(kbase_mmu_teardown_pages); - -/** - * Update the entries for specified number of pages pointed to by 'phys' at GPU PFN 'vpfn'. 
- * This call is being triggered as a response to the changes of the mem attributes - * - * @pre : The caller is responsible for validating the memory attributes - * - * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is - * currently scheduled into the runpool, and so potentially uses a lot of locks. - * These locks must be taken in the correct order with respect to others - * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more - * information. - */ -int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags) -{ - phys_addr_t pgd; - u64 *pgd_page; - size_t requested_nr = nr; - struct kbase_mmu_mode const *mmu_mode; - - KBASE_DEBUG_ASSERT(NULL != kctx); - KBASE_DEBUG_ASSERT(0 != vpfn); - KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE)); - - lockdep_assert_held(&kctx->reg_lock); - - mmu_mode = kctx->kbdev->mmu_mode; - - dev_warn(kctx->kbdev->dev, "kbase_mmu_update_pages(): updating page share flags on GPU PFN 0x%llx from phys %p, %zu pages", - vpfn, phys, nr); - - while (nr) { - unsigned int i; - unsigned int index = vpfn & 0x1FF; - size_t count = KBASE_MMU_PAGE_ENTRIES - index; - struct page *p; - - if (count > nr) - count = nr; - - pgd = mmu_get_bottom_pgd(kctx, vpfn); - if (!pgd) { - dev_warn(kctx->kbdev->dev, "mmu_get_bottom_pgd failure\n"); - return -EINVAL; - } - - p = pfn_to_page(PFN_DOWN(pgd)); - pgd_page = kmap(p); - if (!pgd_page) { - dev_warn(kctx->kbdev->dev, "kmap failure\n"); - return -ENOMEM; - } - - for (i = 0; i < count; i++) - mmu_mode->entry_set_ate(&pgd_page[index + i], phys[i], - flags); - - phys += count; - vpfn += count; - nr -= count; - - kbase_mmu_sync_pgd(kctx->kbdev->dev, - kbase_dma_addr(p) + - (index * sizeof(u64)), - count * sizeof(u64)); - - kunmap(pfn_to_page(PFN_DOWN(pgd))); - } - - kbase_mmu_flush(kctx, vpfn, requested_nr); - - return 0; -} - -/* This is a debug feature only */ -static void mmu_check_unused(struct kbase_context 
*kctx, phys_addr_t pgd) -{ - u64 *page; - int i; - - page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd))); - /* kmap_atomic should NEVER fail. */ - KBASE_DEBUG_ASSERT(NULL != page); - - for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) { - if (kctx->kbdev->mmu_mode->ate_is_valid(page[i])) - beenthere(kctx, "live pte %016lx", (unsigned long)page[i]); - } - kunmap_atomic(page); -} - -static void mmu_teardown_level(struct kbase_context *kctx, phys_addr_t pgd, int level, int zap, u64 *pgd_page_buffer) -{ - phys_addr_t target_pgd; - u64 *pgd_page; - int i; - struct kbase_mmu_mode const *mmu_mode; - - KBASE_DEBUG_ASSERT(NULL != kctx); - lockdep_assert_held(&kctx->reg_lock); - - pgd_page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd))); - /* kmap_atomic should NEVER fail. */ - KBASE_DEBUG_ASSERT(NULL != pgd_page); - /* Copy the page to our preallocated buffer so that we can minimize kmap_atomic usage */ - memcpy(pgd_page_buffer, pgd_page, PAGE_SIZE); - kunmap_atomic(pgd_page); - pgd_page = pgd_page_buffer; - - mmu_mode = kctx->kbdev->mmu_mode; - - for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) { - target_pgd = mmu_mode->pte_to_phy_addr(pgd_page[i]); - - if (target_pgd) { - if (level < 2) { - mmu_teardown_level(kctx, target_pgd, level + 1, zap, pgd_page_buffer + (PAGE_SIZE / sizeof(u64))); - } else { - /* - * So target_pte is a level-3 page. - * As a leaf, it is safe to free it. - * Unless we have live pages attached to it! 
- */ - mmu_check_unused(kctx, target_pgd); - } - - beenthere(kctx, "pte %lx level %d", (unsigned long)target_pgd, level + 1); - if (zap) { - struct page *p = phys_to_page(target_pgd); - - kbase_mem_pool_free(&kctx->mem_pool, p, true); - kbase_process_page_usage_dec(kctx, 1); - kbase_atomic_sub_pages(1, &kctx->used_pages); - kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages); - } - } - } -} - -int kbase_mmu_init(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(NULL != kctx); - KBASE_DEBUG_ASSERT(NULL == kctx->mmu_teardown_pages); - - /* Preallocate MMU depth of four pages for mmu_teardown_level to use */ - kctx->mmu_teardown_pages = kmalloc(PAGE_SIZE * 4, GFP_KERNEL); - - if (NULL == kctx->mmu_teardown_pages) - return -ENOMEM; - - return 0; -} - -void kbase_mmu_term(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(NULL != kctx); - KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages); - - kfree(kctx->mmu_teardown_pages); - kctx->mmu_teardown_pages = NULL; -} - -void kbase_mmu_free_pgd(struct kbase_context *kctx) -{ - KBASE_DEBUG_ASSERT(NULL != kctx); - KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages); - - lockdep_assert_held(&kctx->reg_lock); - - mmu_teardown_level(kctx, kctx->pgd, MIDGARD_MMU_TOPLEVEL, 1, kctx->mmu_teardown_pages); - - beenthere(kctx, "pgd %lx", (unsigned long)kctx->pgd); - kbase_mem_pool_free(&kctx->mem_pool, phys_to_page(kctx->pgd), true); - kbase_process_page_usage_dec(kctx, 1); - kbase_atomic_sub_pages(1, &kctx->used_pages); - kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages); -} - -KBASE_EXPORT_TEST_API(kbase_mmu_free_pgd); - -static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd, int level, char ** const buffer, size_t *size_left) -{ - phys_addr_t target_pgd; - u64 *pgd_page; - int i; - size_t size = KBASE_MMU_PAGE_ENTRIES * sizeof(u64) + sizeof(u64); - size_t dump_size; - struct kbase_mmu_mode const *mmu_mode; - - KBASE_DEBUG_ASSERT(NULL != kctx); - lockdep_assert_held(&kctx->reg_lock); - - 
mmu_mode = kctx->kbdev->mmu_mode; - - pgd_page = kmap(pfn_to_page(PFN_DOWN(pgd))); - if (!pgd_page) { - dev_warn(kctx->kbdev->dev, "kbasep_mmu_dump_level: kmap failure\n"); - return 0; - } - - if (*size_left >= size) { - /* A modified physical address that contains the page table level */ - u64 m_pgd = pgd | level; - - /* Put the modified physical address in the output buffer */ - memcpy(*buffer, &m_pgd, sizeof(m_pgd)); - *buffer += sizeof(m_pgd); - - /* Followed by the page table itself */ - memcpy(*buffer, pgd_page, sizeof(u64) * KBASE_MMU_PAGE_ENTRIES); - *buffer += sizeof(u64) * KBASE_MMU_PAGE_ENTRIES; - - *size_left -= size; - } - - for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) { - if (mmu_mode->pte_is_valid(pgd_page[i])) { - target_pgd = mmu_mode->pte_to_phy_addr(pgd_page[i]); - - dump_size = kbasep_mmu_dump_level(kctx, target_pgd, level + 1, buffer, size_left); - if (!dump_size) { - kunmap(pfn_to_page(PFN_DOWN(pgd))); - return 0; - } - size += dump_size; - } - } - - kunmap(pfn_to_page(PFN_DOWN(pgd))); - - return size; -} - -void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages) -{ - void *kaddr; - size_t size_left; - - KBASE_DEBUG_ASSERT(kctx); - - lockdep_assert_held(&kctx->reg_lock); - - if (0 == nr_pages) { - /* can't dump in a 0 sized buffer, early out */ - return NULL; - } - - size_left = nr_pages * PAGE_SIZE; - - KBASE_DEBUG_ASSERT(0 != size_left); - kaddr = vmalloc_user(size_left); - - if (kaddr) { - u64 end_marker = 0xFFULL; - char *buffer; - char *mmu_dump_buffer; - u64 config[3]; - size_t size; - - buffer = (char *)kaddr; - mmu_dump_buffer = buffer; - - if (kctx->api_version >= KBASE_API_VERSION(8, 4)) { - struct kbase_mmu_setup as_setup; - - kctx->kbdev->mmu_mode->get_as_setup(kctx, &as_setup); - config[0] = as_setup.transtab; - config[1] = as_setup.memattr; - config[2] = 0; - memcpy(buffer, &config, sizeof(config)); - mmu_dump_buffer += sizeof(config); - size_left -= sizeof(config); - } - - - - size = kbasep_mmu_dump_level(kctx, - 
kctx->pgd, - MIDGARD_MMU_TOPLEVEL, - &mmu_dump_buffer, - &size_left); - - if (!size) { - vfree(kaddr); - return NULL; - } - - /* Add on the size for the end marker */ - size += sizeof(u64); - /* Add on the size for the config */ - if (kctx->api_version >= KBASE_API_VERSION(8, 4)) - size += sizeof(config); - - - if (size > nr_pages * PAGE_SIZE || size_left < sizeof(u64)) { - /* The buffer isn't big enough - free the memory and return failure */ - vfree(kaddr); - return NULL; - } - - /* Add the end marker */ - memcpy(mmu_dump_buffer, &end_marker, sizeof(u64)); - } - - return kaddr; -} -KBASE_EXPORT_TEST_API(kbase_mmu_dump); - -void bus_fault_worker(struct work_struct *data) -{ - struct kbase_as *faulting_as; - int as_no; - struct kbase_context *kctx; - struct kbase_device *kbdev; -#if KBASE_GPU_RESET_EN - bool reset_status = false; -#endif /* KBASE_GPU_RESET_EN */ - - faulting_as = container_of(data, struct kbase_as, work_busfault); - - as_no = faulting_as->number; - - kbdev = container_of(faulting_as, struct kbase_device, as[as_no]); - - /* Grab the context that was already refcounted in kbase_mmu_interrupt(). - * Therefore, it cannot be scheduled out of this AS until we explicitly release it - */ - kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no); - if (WARN_ON(!kctx)) { - atomic_dec(&kbdev->faults_pending); - return; - } - -#if KBASE_GPU_RESET_EN - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) { - /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode. - * We start the reset before switching to UNMAPPED to ensure that unrelated jobs - * are evicted from the GPU before the switch. - */ - dev_err(kbdev->dev, "GPU bus error occurred. 
For this GPU version we now soft-reset as part of bus error recovery\n"); - reset_status = kbase_prepare_to_reset_gpu(kbdev); - } -#endif /* KBASE_GPU_RESET_EN */ - /* NOTE: If GPU already powered off for suspend, we don't need to switch to unmapped */ - if (!kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) { - - /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */ - /* AS transaction begin */ - mutex_lock(&kbdev->as[as_no].transaction_mutex); - - /* Set the MMU into unmapped mode */ - kbase_mmu_disable_as(kbdev, as_no); - - mutex_unlock(&kbdev->as[as_no].transaction_mutex); - /* AS transaction end */ - - kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx, - KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED); - kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx, - KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED); - - kbase_pm_context_idle(kbdev); - } - -#if KBASE_GPU_RESET_EN - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status) - kbase_reset_gpu(kbdev); -#endif /* KBASE_GPU_RESET_EN */ - - kbasep_js_runpool_release_ctx(kbdev, kctx); - - atomic_dec(&kbdev->faults_pending); -} - -const char *kbase_exception_name(struct kbase_device *kbdev, u32 exception_code) -{ - const char *e; - - switch (exception_code) { - /* Non-Fault Status code */ - case 0x00: - e = "NOT_STARTED/IDLE/OK"; - break; - case 0x01: - e = "DONE"; - break; - case 0x02: - e = "INTERRUPTED"; - break; - case 0x03: - e = "STOPPED"; - break; - case 0x04: - e = "TERMINATED"; - break; - case 0x08: - e = "ACTIVE"; - break; - /* Job exceptions */ - case 0x40: - e = "JOB_CONFIG_FAULT"; - break; - case 0x41: - e = "JOB_POWER_FAULT"; - break; - case 0x42: - e = "JOB_READ_FAULT"; - break; - case 0x43: - e = "JOB_WRITE_FAULT"; - break; - case 0x44: - e = "JOB_AFFINITY_FAULT"; - break; - case 0x48: - e = "JOB_BUS_FAULT"; - break; - case 0x50: - e = "INSTR_INVALID_PC"; - break; - case 0x51: - e = "INSTR_INVALID_ENC"; - break; - case 0x52: - e = 
"INSTR_TYPE_MISMATCH"; - break; - case 0x53: - e = "INSTR_OPERAND_FAULT"; - break; - case 0x54: - e = "INSTR_TLS_FAULT"; - break; - case 0x55: - e = "INSTR_BARRIER_FAULT"; - break; - case 0x56: - e = "INSTR_ALIGN_FAULT"; - break; - case 0x58: - e = "DATA_INVALID_FAULT"; - break; - case 0x59: - e = "TILE_RANGE_FAULT"; - break; - case 0x5A: - e = "ADDR_RANGE_FAULT"; - break; - case 0x60: - e = "OUT_OF_MEMORY"; - break; - /* GPU exceptions */ - case 0x80: - e = "DELAYED_BUS_FAULT"; - break; - case 0x88: - e = "SHAREABILITY_FAULT"; - break; - /* MMU exceptions */ - case 0xC0: - case 0xC1: - case 0xC2: - case 0xC3: - case 0xC4: - case 0xC5: - case 0xC6: - case 0xC7: - e = "TRANSLATION_FAULT"; - break; - case 0xC8: - e = "PERMISSION_FAULT"; - break; - case 0xD0: - case 0xD1: - case 0xD2: - case 0xD3: - case 0xD4: - case 0xD5: - case 0xD6: - case 0xD7: - e = "TRANSTAB_BUS_FAULT"; - break; - case 0xD8: - e = "ACCESS_FLAG"; - break; - break; - default: - e = "UNKNOWN"; - break; - }; - - return e; -} - -static const char *access_type_name(struct kbase_device *kbdev, - u32 fault_status) -{ - switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) { - return "UNKNOWN"; - case AS_FAULTSTATUS_ACCESS_TYPE_READ: - return "READ"; - case AS_FAULTSTATUS_ACCESS_TYPE_WRITE: - return "WRITE"; - case AS_FAULTSTATUS_ACCESS_TYPE_EX: - return "EXECUTE"; - default: - WARN_ON(1); - return NULL; - } -} - -/** - * The caller must ensure it's retained the ctx to prevent it from being scheduled out whilst it's being worked on. 
- */ -static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx, - struct kbase_as *as, const char *reason_str) -{ - unsigned long flags; - int exception_type; - int access_type; - int source_id; - int as_no; - struct kbase_device *kbdev; - struct kbasep_js_device_data *js_devdata; - -#if KBASE_GPU_RESET_EN - bool reset_status = false; -#endif - - as_no = as->number; - kbdev = kctx->kbdev; - js_devdata = &kbdev->js_data; - - /* ASSERT that the context won't leave the runpool */ - KBASE_DEBUG_ASSERT(kbasep_js_debug_check_ctx_refcount(kbdev, kctx) > 0); - - /* decode the fault status */ - exception_type = as->fault_status & 0xFF; - access_type = (as->fault_status >> 8) & 0x3; - source_id = (as->fault_status >> 16); - - /* terminal fault, print info about the fault */ - dev_err(kbdev->dev, - "Unhandled Page fault in AS%d at VA 0x%016llX\n" - "Reason: %s\n" - "raw fault status 0x%X\n" - "decoded fault status: %s\n" - "exception type 0x%X: %s\n" - "access type 0x%X: %s\n" - "source id 0x%X\n" - "pid: %d\n", - as_no, as->fault_addr, - reason_str, - as->fault_status, - (as->fault_status & (1 << 10) ? 
"DECODER FAULT" : "SLAVE FAULT"), - exception_type, kbase_exception_name(kbdev, exception_type), - access_type, access_type_name(kbdev, as->fault_status), - source_id, - kctx->pid); - - /* hardware counters dump fault handling */ - if ((kbdev->hwcnt.kctx) && (kbdev->hwcnt.kctx->as_nr == as_no) && - (kbdev->hwcnt.backend.state == - KBASE_INSTR_STATE_DUMPING)) { - unsigned int num_core_groups = kbdev->gpu_props.num_core_groups; - - if ((as->fault_addr >= kbdev->hwcnt.addr) && - (as->fault_addr < (kbdev->hwcnt.addr + - (num_core_groups * 2048)))) - kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT; - } - - /* Stop the kctx from submitting more jobs and cause it to be scheduled - * out/rescheduled - this will occur on releasing the context's refcount */ - spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags); - kbasep_js_clear_submit_allowed(js_devdata, kctx); - spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags); - - /* Kill any running jobs from the context. Submit is disallowed, so no more jobs from this - * context can appear in the job slots from this point on */ - kbase_backend_jm_kill_jobs_from_kctx(kctx); - /* AS transaction begin */ - mutex_lock(&as->transaction_mutex); -#if KBASE_GPU_RESET_EN - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) { - /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode. - * We start the reset before switching to UNMAPPED to ensure that unrelated jobs - * are evicted from the GPU before the switch. - */ - dev_err(kbdev->dev, "Unhandled page fault. 
For this GPU version we now soft-reset the GPU as part of page fault recovery."); - reset_status = kbase_prepare_to_reset_gpu(kbdev); - } -#endif /* KBASE_GPU_RESET_EN */ - /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */ - kbase_mmu_disable_as(kbdev, as_no); - - mutex_unlock(&as->transaction_mutex); - /* AS transaction end */ - /* Clear down the fault */ - kbase_mmu_hw_clear_fault(kbdev, as, kctx, - KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED); - kbase_mmu_hw_enable_fault(kbdev, as, kctx, - KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED); - -#if KBASE_GPU_RESET_EN - if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status) - kbase_reset_gpu(kbdev); -#endif /* KBASE_GPU_RESET_EN */ -} - -void kbasep_as_do_poke(struct work_struct *work) -{ - struct kbase_as *as; - struct kbase_device *kbdev; - struct kbase_context *kctx; - unsigned long flags; - - KBASE_DEBUG_ASSERT(work); - as = container_of(work, struct kbase_as, poke_work); - kbdev = container_of(as, struct kbase_device, as[as->number]); - KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT); - - /* GPU power will already be active by virtue of the caller holding a JS - * reference on the address space, and will not release it until this worker - * has finished */ - - /* Further to the comment above, we know that while this function is running - * the AS will not be released as before the atom is released this workqueue - * is flushed (in kbase_as_poking_timer_release_atom) - */ - kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as->number); - - /* AS transaction begin */ - mutex_lock(&as->transaction_mutex); - /* Force a uTLB invalidate */ - kbase_mmu_hw_do_operation(kbdev, as, kctx, 0, 0, - AS_COMMAND_UNLOCK, 0); - mutex_unlock(&as->transaction_mutex); - /* AS transaction end */ - - spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags); - if (as->poke_refcount && - !(as->poke_state & KBASE_AS_POKE_STATE_KILLING_POKE)) { - /* Only queue up the timer if we need it, 
and we're not trying to kill it */ - hrtimer_start(&as->poke_timer, HR_TIMER_DELAY_MSEC(5), HRTIMER_MODE_REL); - } - spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags); -} - -enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer) -{ - struct kbase_as *as; - int queue_work_ret; - - KBASE_DEBUG_ASSERT(NULL != timer); - as = container_of(timer, struct kbase_as, poke_timer); - KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT); - - queue_work_ret = queue_work(as->poke_wq, &as->poke_work); - KBASE_DEBUG_ASSERT(queue_work_ret); - return HRTIMER_NORESTART; -} - -/** - * Retain the poking timer on an atom's context (if the atom hasn't already - * done so), and start the timer (if it's not already started). - * - * This must only be called on a context that's scheduled in, and an atom - * that's running on the GPU. - * - * The caller must hold kbasep_js_device_data::runpool_irq::lock - * - * This can be called safely from atomic context - */ -void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom) -{ - struct kbase_as *as; - - KBASE_DEBUG_ASSERT(kbdev); - KBASE_DEBUG_ASSERT(kctx); - KBASE_DEBUG_ASSERT(katom); - KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - if (katom->poking) - return; - - katom->poking = 1; - - /* It's safe to work on the as/as_nr without an explicit reference, - * because the caller holds the runpool_irq lock, and the atom itself - * was also running and had already taken a reference */ - as = &kbdev->as[kctx->as_nr]; - - if (++(as->poke_refcount) == 1) { - /* First refcount for poke needed: check if not already in flight */ - if (!as->poke_state) { - /* need to start poking */ - as->poke_state |= KBASE_AS_POKE_STATE_IN_FLIGHT; - queue_work(as->poke_wq, &as->poke_work); - } - } -} - -/** - * If an atom holds a poking timer, release it and wait for it to finish - * - * 
This must only be called on a context that's scheduled in, and an atom - * that still has a JS reference on the context - * - * This must \b not be called from atomic context, since it can sleep. - */ -void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom) -{ - struct kbase_as *as; - unsigned long flags; - - KBASE_DEBUG_ASSERT(kbdev); - KBASE_DEBUG_ASSERT(kctx); - KBASE_DEBUG_ASSERT(katom); - KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); - - if (!katom->poking) - return; - - as = &kbdev->as[kctx->as_nr]; - - spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags); - KBASE_DEBUG_ASSERT(as->poke_refcount > 0); - KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT); - - if (--(as->poke_refcount) == 0) { - as->poke_state |= KBASE_AS_POKE_STATE_KILLING_POKE; - spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags); - - hrtimer_cancel(&as->poke_timer); - flush_workqueue(as->poke_wq); - - spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags); - - /* Re-check whether it's still needed */ - if (as->poke_refcount) { - int queue_work_ret; - /* Poking still needed: - * - Another retain will not be starting the timer or queueing work, - * because it's still marked as in-flight - * - The hrtimer has finished, and has not started a new timer or - * queued work because it's been marked as killing - * - * So whatever happens now, just queue the work again */ - as->poke_state &= ~((kbase_as_poke_state)KBASE_AS_POKE_STATE_KILLING_POKE); - queue_work_ret = queue_work(as->poke_wq, &as->poke_work); - KBASE_DEBUG_ASSERT(queue_work_ret); - } else { - /* It isn't - so mark it as not in flight, and not killing */ - as->poke_state = 0u; - - /* The poke associated with the atom has now finished. 
If this is - * also the last atom on the context, then we can guarentee no more - * pokes (and thus no more poking register accesses) will occur on - * the context until new atoms are run */ - } - } - spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags); - - katom->poking = 0; -} - -void kbase_mmu_interrupt_process(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_as *as) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - if (!kctx) { - dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Suprious IRQ or SW Design Error?\n", - kbase_as_has_bus_fault(as) ? "Bus error" : "Page fault", - as->number, as->fault_addr); - - /* Since no ctx was found, the MMU must be disabled. */ - WARN_ON(as->current_setup.transtab); - - if (kbase_as_has_bus_fault(as)) { - kbase_mmu_hw_clear_fault(kbdev, as, kctx, - KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED); - kbase_mmu_hw_enable_fault(kbdev, as, kctx, - KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED); - } else if (kbase_as_has_page_fault(as)) { - kbase_mmu_hw_clear_fault(kbdev, as, kctx, - KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED); - kbase_mmu_hw_enable_fault(kbdev, as, kctx, - KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED); - } - -#if KBASE_GPU_RESET_EN - if (kbase_as_has_bus_fault(as) && - kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) { - bool reset_status; - /* - * Reset the GPU, like in bus_fault_worker, in case an - * earlier error hasn't been properly cleared by this - * point. - */ - dev_err(kbdev->dev, "GPU bus error occurred. 
For this GPU version we now soft-reset as part of bus error recovery\n"); - reset_status = kbase_prepare_to_reset_gpu_locked(kbdev); - if (reset_status) - kbase_reset_gpu_locked(kbdev); - } -#endif /* KBASE_GPU_RESET_EN */ - - return; - } - - if (kbase_as_has_bus_fault(as)) { - /* - * hw counters dumping in progress, signal the - * other thread that it failed - */ - if ((kbdev->hwcnt.kctx == kctx) && - (kbdev->hwcnt.backend.state == - KBASE_INSTR_STATE_DUMPING)) - kbdev->hwcnt.backend.state = - KBASE_INSTR_STATE_FAULT; - - /* - * Stop the kctx from submitting more jobs and cause it - * to be scheduled out/rescheduled when all references - * to it are released - */ - kbasep_js_clear_submit_allowed(js_devdata, kctx); - - dev_warn(kbdev->dev, "Bus error in AS%d at 0x%016llx\n", - as->number, as->fault_addr); - - /* - * We need to switch to UNMAPPED mode - but we do this in a - * worker so that we can sleep - */ - KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_busfault)); - WARN_ON(work_pending(&as->work_busfault)); - queue_work(as->pf_wq, &as->work_busfault); - atomic_inc(&kbdev->faults_pending); - } else { - KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_pagefault)); - WARN_ON(work_pending(&as->work_pagefault)); - queue_work(as->pf_wq, &as->work_pagefault); - atomic_inc(&kbdev->faults_pending); - } -} - -void kbase_flush_mmu_wqs(struct kbase_device *kbdev) -{ - int i; - - for (i = 0; i < kbdev->nr_hw_address_spaces; i++) { - struct kbase_as *as = &kbdev->as[i]; - - flush_workqueue(as->pf_wq); - } -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h deleted file mode 100755 index 986e959e9a0c7..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * @file - * Interface file for accessing MMU hardware functionality - */ - -/** - * @page mali_kbase_mmu_hw_page MMU hardware interface - * - * @section mali_kbase_mmu_hw_intro_sec Introduction - * This module provides an abstraction for accessing the functionality provided - * by the midgard MMU and thus allows all MMU HW access to be contained within - * one common place and allows for different backends (implementations) to - * be provided. - */ - -#ifndef _MALI_KBASE_MMU_HW_H_ -#define _MALI_KBASE_MMU_HW_H_ - -/* Forward declarations */ -struct kbase_device; -struct kbase_as; -struct kbase_context; - -/** - * @addtogroup base_kbase_api - * @{ - */ - -/** - * @addtogroup mali_kbase_mmu_hw MMU access APIs - * @{ - */ - -/** @brief MMU fault type descriptor. - */ -enum kbase_mmu_fault_type { - KBASE_MMU_FAULT_TYPE_UNKNOWN = 0, - KBASE_MMU_FAULT_TYPE_PAGE, - KBASE_MMU_FAULT_TYPE_BUS, - KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED, - KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED -}; - -/** @brief Configure an address space for use. - * - * Configure the MMU using the address space details setup in the - * @ref kbase_context structure. - * - * @param[in] kbdev kbase device to configure. - * @param[in] as address space to configure. - * @param[in] kctx kbase context to configure. - */ -void kbase_mmu_hw_configure(struct kbase_device *kbdev, - struct kbase_as *as, struct kbase_context *kctx); - -/** @brief Issue an operation to the MMU. 
- * - * Issue an operation (MMU invalidate, MMU flush, etc) on the address space that - * is associated with the provided @ref kbase_context over the specified range - * - * @param[in] kbdev kbase device to issue the MMU operation on. - * @param[in] as address space to issue the MMU operation on. - * @param[in] kctx kbase context to issue the MMU operation on. - * @param[in] vpfn MMU Virtual Page Frame Number to start the - * operation on. - * @param[in] nr Number of pages to work on. - * @param[in] type Operation type (written to ASn_COMMAND). - * @param[in] handling_irq Is this operation being called during the handling - * of an interrupt? - * - * @return Zero if the operation was successful, non-zero otherwise. - */ -int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as, - struct kbase_context *kctx, u64 vpfn, u32 nr, u32 type, - unsigned int handling_irq); - -/** @brief Clear a fault that has been previously reported by the MMU. - * - * Clear a bus error or page fault that has been reported by the MMU. - * - * @param[in] kbdev kbase device to clear the fault from. - * @param[in] as address space to clear the fault from. - * @param[in] kctx kbase context to clear the fault from or NULL. - * @param[in] type The type of fault that needs to be cleared. - */ -void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as, - struct kbase_context *kctx, enum kbase_mmu_fault_type type); - -/** @brief Enable fault that has been previously reported by the MMU. - * - * After a page fault or bus error has been reported by the MMU these - * will be disabled. After these are handled this function needs to be - * called to enable the page fault or bus error fault again. - * - * @param[in] kbdev kbase device to again enable the fault from. - * @param[in] as address space to again enable the fault from. - * @param[in] kctx kbase context to again enable the fault from. - * @param[in] type The type of fault that needs to be enabled again. 
- */ -void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as, - struct kbase_context *kctx, enum kbase_mmu_fault_type type); - -/** @} *//* end group mali_kbase_mmu_hw */ -/** @} *//* end group base_kbase_api */ - -#endif /* _MALI_KBASE_MMU_HW_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mmu_mode.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mmu_mode.h deleted file mode 100755 index 2449c60a92fd3..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mmu_mode.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#ifndef _MALI_KBASE_MMU_MODE_ -#define _MALI_KBASE_MMU_MODE_ - -#include - -/* Forward declarations */ -struct kbase_context; -struct kbase_device; -struct kbase_as; -struct kbase_mmu_setup; - -struct kbase_mmu_mode { - void (*update)(struct kbase_context *kctx); - void (*get_as_setup)(struct kbase_context *kctx, - struct kbase_mmu_setup * const setup); - void (*disable_as)(struct kbase_device *kbdev, int as_nr); - phys_addr_t (*pte_to_phy_addr)(u64 entry); - int (*ate_is_valid)(u64 ate); - int (*pte_is_valid)(u64 pte); - void (*entry_set_ate)(u64 *entry, phys_addr_t phy, unsigned long flags); - void (*entry_set_pte)(u64 *entry, phys_addr_t phy); - void (*entry_invalidate)(u64 *entry); -}; - -struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void); -struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void); - -#endif /* _MALI_KBASE_MMU_MODE_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c deleted file mode 100755 index 079ef81d06d13..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c +++ /dev/null @@ -1,195 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#include "mali_kbase_mmu_mode.h" - -#include "mali_kbase.h" -#include "mali_midg_regmap.h" - -#define ENTRY_TYPE_MASK 3ULL -#define ENTRY_IS_ATE 1ULL -#define ENTRY_IS_INVAL 2ULL -#define ENTRY_IS_PTE 3ULL - -#define ENTRY_ATTR_BITS (7ULL << 2) /* bits 4:2 */ -#define ENTRY_RD_BIT (1ULL << 6) -#define ENTRY_WR_BIT (1ULL << 7) -#define ENTRY_SHARE_BITS (3ULL << 8) /* bits 9:8 */ -#define ENTRY_ACCESS_BIT (1ULL << 10) -#define ENTRY_NX_BIT (1ULL << 54) - -#define ENTRY_FLAGS_MASK (ENTRY_ATTR_BITS | ENTRY_RD_BIT | ENTRY_WR_BIT | \ - ENTRY_SHARE_BITS | ENTRY_ACCESS_BIT | ENTRY_NX_BIT) - -/* Helper Function to perform assignment of page table entries, to - * ensure the use of strd, which is required on LPAE systems. - */ -static inline void page_table_entry_set(u64 *pte, u64 phy) -{ -#ifdef CONFIG_64BIT - *pte = phy; -#elif defined(CONFIG_ARM) - /* - * In order to prevent the compiler keeping cached copies of - * memory, we have to explicitly say that we have updated - * memory. - * - * Note: We could manually move the data ourselves into R0 and - * R1 by specifying register variables that are explicitly - * given registers assignments, the down side of this is that - * we have to assume cpu endianness. To avoid this we can use - * the ldrd to read the data from memory into R0 and R1 which - * will respect the cpu endianness, we then use strd to make - * the 64 bit assignment to the page table entry. - */ - asm volatile("ldrd r0, r1, [%[ptemp]]\n\t" - "strd r0, r1, [%[pte]]\n\t" - : "=m" (*pte) - : [ptemp] "r" (&phy), [pte] "r" (pte), "m" (phy) - : "r0", "r1"); -#else -#error "64-bit atomic write must be implemented for your architecture" -#endif -} - -static void mmu_get_as_setup(struct kbase_context *kctx, - struct kbase_mmu_setup * const setup) -{ - /* Set up the required caching policies at the correct indices - * in the memattr register. 
*/ - setup->memattr = - (AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY << - (AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) | - (AS_MEMATTR_LPAE_FORCE_TO_CACHE_ALL << - (AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) | - (AS_MEMATTR_LPAE_WRITE_ALLOC << - (AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) | - 0; /* The other indices are unused for now */ - - setup->transtab = (u64)kctx->pgd & - ((0xFFFFFFFFULL << 32) | AS_TRANSTAB_LPAE_ADDR_SPACE_MASK); - - setup->transtab |= AS_TRANSTAB_LPAE_ADRMODE_TABLE; - setup->transtab |= AS_TRANSTAB_LPAE_READ_INNER; - -} - -static void mmu_update(struct kbase_context *kctx) -{ - struct kbase_device * const kbdev = kctx->kbdev; - struct kbase_as * const as = &kbdev->as[kctx->as_nr]; - struct kbase_mmu_setup * const current_setup = &as->current_setup; - - mmu_get_as_setup(kctx, current_setup); - - /* Apply the address space setting */ - kbase_mmu_hw_configure(kbdev, as, kctx); -} - -static void mmu_disable_as(struct kbase_device *kbdev, int as_nr) -{ - struct kbase_as * const as = &kbdev->as[as_nr]; - struct kbase_mmu_setup * const current_setup = &as->current_setup; - - current_setup->transtab = AS_TRANSTAB_LPAE_ADRMODE_UNMAPPED; - - - /* Apply the address space setting */ - kbase_mmu_hw_configure(kbdev, as, NULL); -} - -static phys_addr_t pte_to_phy_addr(u64 entry) -{ - if (!(entry & 1)) - return 0; - - return entry & ~0xFFF; -} - -static int ate_is_valid(u64 ate) -{ - return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE); -} - -static int pte_is_valid(u64 pte) -{ - return ((pte & ENTRY_TYPE_MASK) == ENTRY_IS_PTE); -} - -/* - * Map KBASE_REG flags to MMU flags - */ -static u64 get_mmu_flags(unsigned long flags) -{ - u64 mmu_flags; - - /* store mem_attr index as 4:2 (macro called ensures 3 bits already) */ - mmu_flags = KBASE_REG_MEMATTR_VALUE(flags) << 2; - - /* write perm if requested */ - mmu_flags |= (flags & KBASE_REG_GPU_WR) ? ENTRY_WR_BIT : 0; - /* read perm if requested */ - mmu_flags |= (flags & KBASE_REG_GPU_RD) ? 
ENTRY_RD_BIT : 0; - /* nx if requested */ - mmu_flags |= (flags & KBASE_REG_GPU_NX) ? ENTRY_NX_BIT : 0; - - if (flags & KBASE_REG_SHARE_BOTH) { - /* inner and outer shareable */ - mmu_flags |= SHARE_BOTH_BITS; - } else if (flags & KBASE_REG_SHARE_IN) { - /* inner shareable coherency */ - mmu_flags |= SHARE_INNER_BITS; - } - - return mmu_flags; -} - -static void entry_set_ate(u64 *entry, phys_addr_t phy, unsigned long flags) -{ - page_table_entry_set(entry, (phy & ~0xFFF) | - get_mmu_flags(flags) | - ENTRY_IS_ATE); -} - -static void entry_set_pte(u64 *entry, phys_addr_t phy) -{ - page_table_entry_set(entry, (phy & ~0xFFF) | ENTRY_IS_PTE); -} - -static void entry_invalidate(u64 *entry) -{ - page_table_entry_set(entry, ENTRY_IS_INVAL); -} - -static struct kbase_mmu_mode const lpae_mode = { - .update = mmu_update, - .get_as_setup = mmu_get_as_setup, - .disable_as = mmu_disable_as, - .pte_to_phy_addr = pte_to_phy_addr, - .ate_is_valid = ate_is_valid, - .pte_is_valid = pte_is_valid, - .entry_set_ate = entry_set_ate, - .entry_set_pte = entry_set_pte, - .entry_invalidate = entry_invalidate -}; - -struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void) -{ - return &lpae_mode; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_platform_fake.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_platform_fake.c deleted file mode 100755 index 5bbd6d4856382..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_platform_fake.c +++ /dev/null @@ -1,126 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifdef CONFIG_MALI_PLATFORM_FAKE - -#include -#include -#include -#include -#include - -#ifdef CONFIG_MACH_MANTA -#include -#endif - -/* - * This file is included only for type definitions and functions belonging to - * specific platform folders. Do not add dependencies with symbols that are - * defined somewhere else. - */ -#include - -#define PLATFORM_CONFIG_RESOURCE_COUNT 4 -#define PLATFORM_CONFIG_IRQ_RES_COUNT 3 - -static struct platform_device *mali_device; - -#ifndef CONFIG_OF -/** - * @brief Convert data in struct kbase_io_resources struct to Linux-specific resources - * - * Function converts data in struct kbase_io_resources struct to an array of Linux resource structures. Note that function - * assumes that size of linux_resource array is at least PLATFORM_CONFIG_RESOURCE_COUNT. - * Resources are put in fixed order: I/O memory region, job IRQ, MMU IRQ, GPU IRQ. 
- * - * @param[in] io_resource Input IO resource data - * @param[out] linux_resources Pointer to output array of Linux resource structures - */ -static void kbasep_config_parse_io_resources(const struct kbase_io_resources *io_resources, struct resource *const linux_resources) -{ - if (!io_resources || !linux_resources) { - pr_err("%s: couldn't find proper resources\n", __func__); - return; - } - - memset(linux_resources, 0, PLATFORM_CONFIG_RESOURCE_COUNT * sizeof(struct resource)); - - linux_resources[0].start = io_resources->io_memory_region.start; - linux_resources[0].end = io_resources->io_memory_region.end; - linux_resources[0].flags = IORESOURCE_MEM; - linux_resources[1].start = io_resources->job_irq_number; - linux_resources[1].end = io_resources->job_irq_number; - linux_resources[1].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL; - - linux_resources[2].start = io_resources->mmu_irq_number; - linux_resources[2].end = io_resources->mmu_irq_number; - linux_resources[2].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL; - - linux_resources[3].start = io_resources->gpu_irq_number; - linux_resources[3].end = io_resources->gpu_irq_number; - linux_resources[3].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL; -} -#endif /* CONFIG_OF */ - -int kbase_platform_fake_register(void) -{ - struct kbase_platform_config *config; -#ifndef CONFIG_OF - struct resource resources[PLATFORM_CONFIG_RESOURCE_COUNT]; -#endif - int err; - - config = kbase_get_platform_config(); /* declared in midgard/mali_kbase_config.h but defined in platform folder */ - if (config == NULL) { - pr_err("%s: couldn't get platform config\n", __func__); - return -ENODEV; - } - - mali_device = platform_device_alloc("mali", 0); - if (mali_device == NULL) - return -ENOMEM; - -#ifndef CONFIG_OF - kbasep_config_parse_io_resources(config->io_resources, resources); - err = platform_device_add_resources(mali_device, resources, PLATFORM_CONFIG_RESOURCE_COUNT); - if (err) { - platform_device_put(mali_device); - 
mali_device = NULL; - return err; - } -#endif /* CONFIG_OF */ - - err = platform_device_add(mali_device); - if (err) { - platform_device_unregister(mali_device); - mali_device = NULL; - return err; - } - - return 0; -} -EXPORT_SYMBOL(kbase_platform_fake_register); - -void kbase_platform_fake_unregister(void) -{ - if (mali_device) - platform_device_unregister(mali_device); -} -EXPORT_SYMBOL(kbase_platform_fake_unregister); - -#endif /* CONFIG_MALI_PLATFORM_FAKE */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_pm.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_pm.c deleted file mode 100755 index 261441fa145b2..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_pm.c +++ /dev/null @@ -1,204 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/** - * @file mali_kbase_pm.c - * Base kernel power management APIs - */ -#include -#include -#include -#include - -#include - -int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags) -{ - return kbase_hwaccess_pm_powerup(kbdev, flags); -} - -void kbase_pm_halt(struct kbase_device *kbdev) -{ - kbase_hwaccess_pm_halt(kbdev); -} - -void kbase_pm_context_active(struct kbase_device *kbdev) -{ - (void)kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE); -} - -int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - int c; - int old_count; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - /* Trace timeline information about how long it took to handle the decision - * to powerup. Sometimes the event might be missed due to reading the count - * outside of mutex, but this is necessary to get the trace timing - * correct. 
*/ - old_count = kbdev->pm.active_count; - if (old_count == 0) - kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE); - - mutex_lock(&js_devdata->runpool_mutex); - mutex_lock(&kbdev->pm.lock); - if (kbase_pm_is_suspending(kbdev)) { - switch (suspend_handler) { - case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE: - if (kbdev->pm.active_count != 0) - break; - /* FALLTHROUGH */ - case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE: - mutex_unlock(&kbdev->pm.lock); - mutex_unlock(&js_devdata->runpool_mutex); - if (old_count == 0) - kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE); - return 1; - - case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE: - /* FALLTHROUGH */ - default: - KBASE_DEBUG_ASSERT_MSG(false, "unreachable"); - break; - } - } - c = ++kbdev->pm.active_count; - KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c); - KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c); - - /* Trace the event being handled */ - if (old_count == 0) - kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE); - - if (c == 1) - /* First context active: Power on the GPU and any cores requested by - * the policy */ - kbase_hwaccess_pm_gpu_active(kbdev); - - mutex_unlock(&kbdev->pm.lock); - mutex_unlock(&js_devdata->runpool_mutex); - - return 0; -} - -KBASE_EXPORT_TEST_API(kbase_pm_context_active); - -void kbase_pm_context_idle(struct kbase_device *kbdev) -{ - struct kbasep_js_device_data *js_devdata = &kbdev->js_data; - int c; - int old_count; - - KBASE_DEBUG_ASSERT(kbdev != NULL); - - /* Trace timeline information about how long it took to handle the decision - * to powerdown. Sometimes the event might be missed due to reading the - * count outside of mutex, but this is necessary to get the trace timing - * correct. 
*/ - old_count = kbdev->pm.active_count; - if (old_count == 0) - kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE); - - mutex_lock(&js_devdata->runpool_mutex); - mutex_lock(&kbdev->pm.lock); - - c = --kbdev->pm.active_count; - KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c); - KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c); - - KBASE_DEBUG_ASSERT(c >= 0); - - /* Trace the event being handled */ - if (old_count == 0) - kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE); - - if (c == 0) { - /* Last context has gone idle */ - kbase_hwaccess_pm_gpu_idle(kbdev); - - /* Wake up anyone waiting for this to become 0 (e.g. suspend). The - * waiters must synchronize with us by locking the pm.lock after - * waiting */ - wake_up(&kbdev->pm.zero_active_count_wait); - } - - mutex_unlock(&kbdev->pm.lock); - mutex_unlock(&js_devdata->runpool_mutex); -} - -KBASE_EXPORT_TEST_API(kbase_pm_context_idle); - -void kbase_pm_suspend(struct kbase_device *kbdev) -{ - KBASE_DEBUG_ASSERT(kbdev); - - mutex_lock(&kbdev->pm.lock); - KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev)); - kbdev->pm.suspending = true; - mutex_unlock(&kbdev->pm.lock); - - /* From now on, the active count will drop towards zero. Sometimes, it'll - * go up briefly before going down again. However, once it reaches zero it - * will stay there - guaranteeing that we've idled all pm references */ - - /* Suspend job scheduler and associated components, so that it releases all - * the PM active count references */ - kbasep_js_suspend(kbdev); - - /* Suspend any counter collection that might be happening */ - kbase_instr_hwcnt_suspend(kbdev); - - /* Wait for the active count to reach zero. This is not the same as - * waiting for a power down, since not all policies power down when this - * reaches zero. 
*/ - wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0); - - /* NOTE: We synchronize with anything that was just finishing a - * kbase_pm_context_idle() call by locking the pm.lock below */ - - kbase_hwaccess_pm_suspend(kbdev); -} - -void kbase_pm_resume(struct kbase_device *kbdev) -{ - /* MUST happen before any pm_context_active calls occur */ - kbase_hwaccess_pm_resume(kbdev); - - /* Initial active call, to power on the GPU/cores if needed */ - kbase_pm_context_active(kbdev); - - /* Re-enable instrumentation, if it was previously disabled */ - kbase_instr_hwcnt_resume(kbdev); - - /* Resume any blocked atoms (which may cause contexts to be scheduled in - * and dependent atoms to run) */ - kbase_resume_suspended_soft_jobs(kbdev); - - /* Resume the Job Scheduler and associated components, and start running - * atoms */ - kbasep_js_resume(kbdev); - - /* Matching idle call, to power off the GPU/cores if we didn't actually - * need it and the policy doesn't want it on */ - kbase_pm_context_idle(kbdev); -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_pm.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_pm.h deleted file mode 100755 index 37fa2479df74b..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_pm.h +++ /dev/null @@ -1,171 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/** - * @file mali_kbase_pm.h - * Power management API definitions - */ - -#ifndef _KBASE_PM_H_ -#define _KBASE_PM_H_ - -#include "mali_kbase_hwaccess_pm.h" - -#define PM_ENABLE_IRQS 0x01 -#define PM_HW_ISSUES_DETECT 0x02 - - -/** Initialize the power management framework. - * - * Must be called before any other power management function - * - * @param kbdev The kbase device structure for the device (must be a valid pointer) - * - * @return 0 if the power management framework was successfully initialized. - */ -int kbase_pm_init(struct kbase_device *kbdev); - -/** Power up GPU after all modules have been initialized and interrupt handlers installed. - * - * @param kbdev The kbase device structure for the device (must be a valid pointer) - * - * @param flags Flags to pass on to kbase_pm_init_hw - * - * @return 0 if powerup was successful. - */ -int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags); - -/** - * Halt the power management framework. - * Should ensure that no new interrupts are generated, - * but allow any currently running interrupt handlers to complete successfully. - * The GPU is forced off by the time this function returns, regardless of - * whether or not the active power policy asks for the GPU to be powered off. - * - * @param kbdev The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_halt(struct kbase_device *kbdev); - -/** Terminate the power management framework. - * - * No power management functions may be called after this - * (except @ref kbase_pm_init) - * - * @param kbdev The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_term(struct kbase_device *kbdev); - -/** Increment the count of active contexts. - * - * This function should be called when a context is about to submit a job. It informs the active power policy that the - * GPU is going to be in use shortly and the policy is expected to start turning on the GPU. 
- * - * This function will block until the GPU is available. - * - * This function ASSERTS if a suspend is occuring/has occurred whilst this is - * in use. Use kbase_pm_contect_active_unless_suspending() instead. - * - * @note a Suspend is only visible to Kernel threads; user-space threads in a - * syscall cannot witness a suspend, because they are frozen before the suspend - * begins. - * - * @param kbdev The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_context_active(struct kbase_device *kbdev); - - -/** Handler codes for doing kbase_pm_context_active_handle_suspend() */ -enum kbase_pm_suspend_handler { - /** A suspend is not expected/not possible - this is the same as - * kbase_pm_context_active() */ - KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE, - /** If we're suspending, fail and don't increase the active count */ - KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE, - /** If we're suspending, succeed and allow the active count to increase iff - * it didn't go from 0->1 (i.e., we didn't re-activate the GPU). - * - * This should only be used when there is a bounded time on the activation - * (e.g. guarantee it's going to be idled very soon after) */ - KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE -}; - -/** Suspend 'safe' variant of kbase_pm_context_active() - * - * If a suspend is in progress, this allows for various different ways of - * handling the suspend. Refer to @ref enum kbase_pm_suspend_handler for details. - * - * We returns a status code indicating whether we're allowed to keep the GPU - * active during the suspend, depending on the handler code. If the status code - * indicates a failure, the caller must abort whatever operation it was - * attempting, and potentially queue it up for after the OS has resumed. 
- * - * @param kbdev The kbase device structure for the device (must be a valid pointer) - * @param suspend_handler The handler code for how to handle a suspend that might occur - * @return zero Indicates success - * @return non-zero Indicates failure due to the system being suspending/suspended. - */ -int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler); - -/** Decrement the reference count of active contexts. - * - * This function should be called when a context becomes idle. After this call the GPU may be turned off by the power - * policy so the calling code should ensure that it does not access the GPU's registers. - * - * @param kbdev The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_context_idle(struct kbase_device *kbdev); - -/** - * Suspend the GPU and prevent any further register accesses to it from Kernel - * threads. - * - * This is called in response to an OS suspend event, and calls into the various - * kbase components to complete the suspend. - * - * @note the mechanisms used here rely on all user-space threads being frozen - * by the OS before we suspend. Otherwise, an IOCTL could occur that powers up - * the GPU e.g. via atom submission. - * - * @param kbdev The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_suspend(struct kbase_device *kbdev); - -/** - * Resume the GPU, allow register accesses to it, and resume running atoms on - * the GPU. - * - * This is called in response to an OS resume event, and calls into the various - * kbase components to complete the resume. 
- * - * @param kbdev The kbase device structure for the device (must be a valid pointer) - */ -void kbase_pm_resume(struct kbase_device *kbdev); - -/** - * kbase_pm_vsync_callback - vsync callback - * - * @buffer_updated: 1 if a new frame was displayed, 0 otherwise - * @data: Pointer to the kbase device as returned by kbase_find_device() - * - * Callback function used to notify the power management code that a vsync has - * occurred on the display. - */ -void kbase_pm_vsync_callback(int buffer_updated, void *data); - -#endif /* _KBASE_PM_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_profiling_gator_api.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_profiling_gator_api.h deleted file mode 100755 index 7fb674eded373..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_profiling_gator_api.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010, 2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * @file mali_kbase_profiling_gator_api.h - * Model interface - */ - -#ifndef _KBASE_PROFILING_GATOR_API_H_ -#define _KBASE_PROFILING_GATOR_API_H_ - -/* - * List of possible actions to be controlled by Streamline. - * The following numbers are used by gator to control - * the frame buffer dumping and s/w counter reporting. 
- */ -#define FBDUMP_CONTROL_ENABLE (1) -#define FBDUMP_CONTROL_RATE (2) -#define SW_COUNTER_ENABLE (3) -#define FBDUMP_CONTROL_RESIZE_FACTOR (4) -#define FBDUMP_CONTROL_MAX (5) -#define FBDUMP_CONTROL_MIN FBDUMP_CONTROL_ENABLE - -void _mali_profiling_control(u32 action, u32 value); - -#endif /* _KBASE_PROFILING_GATOR_API */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_replay.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_replay.c deleted file mode 100755 index 71f005e325215..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_replay.c +++ /dev/null @@ -1,1147 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/** - * @file mali_kbase_replay.c - * Replay soft job handlers - */ - -#include -#include -#include -#include -#include - -#define JOB_NOT_STARTED 0 -#define JOB_TYPE_MASK 0xfe -#define JOB_TYPE_NULL (1 << 1) -#define JOB_TYPE_VERTEX (5 << 1) -#define JOB_TYPE_TILER (7 << 1) -#define JOB_TYPE_FUSED (8 << 1) -#define JOB_TYPE_FRAGMENT (9 << 1) - -#define JOB_FLAG_DESC_SIZE (1 << 0) -#define JOB_FLAG_PERFORM_JOB_BARRIER (1 << 8) - -#define JOB_HEADER_32_FBD_OFFSET (31*4) -#define JOB_HEADER_64_FBD_OFFSET (44*4) - -#define FBD_POINTER_MASK (~0x3f) - -#define SFBD_TILER_OFFSET (48*4) - -#define MFBD_TILER_OFFSET (14*4) - -#define FBD_HIERARCHY_WEIGHTS 8 -#define FBD_HIERARCHY_MASK_MASK 0x1fff - -#define FBD_TYPE 1 - -#define HIERARCHY_WEIGHTS 13 - -#define JOB_HEADER_ID_MAX 0xffff - -#define JOB_SOURCE_ID(status) (((status) >> 16) & 0xFFFF) -#define JOB_POLYGON_LIST (0x03) - -struct job_head { - u32 status; - u32 not_complete_index; - u64 fault_addr; - u16 flags; - u16 index; - u16 dependencies[2]; - union { - u64 _64; - u32 _32; - } next; - u32 x[2]; - union { - u64 _64; - u32 _32; - } fragment_fbd; -}; - -static void dump_job_head(struct kbase_context *kctx, char *head_str, - struct job_head *job) -{ -#ifdef CONFIG_MALI_DEBUG - dev_dbg(kctx->kbdev->dev, "%s\n", head_str); - dev_dbg(kctx->kbdev->dev, "addr = %p\n" - "status = %x\n" - "not_complete_index = %x\n" - "fault_addr = %llx\n" - "flags = %x\n" - "index = %x\n" - "dependencies = %x,%x\n", - job, job->status, job->not_complete_index, - job->fault_addr, job->flags, job->index, - job->dependencies[0], - job->dependencies[1]); - - if (job->flags & JOB_FLAG_DESC_SIZE) - dev_dbg(kctx->kbdev->dev, "next = %llx\n", - job->next._64); - else - dev_dbg(kctx->kbdev->dev, "next = %x\n", - job->next._32); -#endif -} - -static int kbasep_replay_reset_sfbd(struct kbase_context *kctx, - u64 fbd_address, u64 tiler_heap_free, - u16 hierarchy_mask, u32 default_weight) -{ - struct { - u32 padding_1[1]; - u32 flags; 
- u64 padding_2[2]; - u64 heap_free_address; - u32 padding[8]; - u32 weights[FBD_HIERARCHY_WEIGHTS]; - } *fbd_tiler; - struct kbase_vmap_struct map; - - dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address); - - fbd_tiler = kbase_vmap(kctx, fbd_address + SFBD_TILER_OFFSET, - sizeof(*fbd_tiler), &map); - if (!fbd_tiler) { - dev_err(kctx->kbdev->dev, "kbasep_replay_reset_fbd: failed to map fbd\n"); - return -EINVAL; - } - -#ifdef CONFIG_MALI_DEBUG - dev_dbg(kctx->kbdev->dev, - "FBD tiler:\n" - "flags = %x\n" - "heap_free_address = %llx\n", - fbd_tiler->flags, fbd_tiler->heap_free_address); -#endif - if (hierarchy_mask) { - u32 weights[HIERARCHY_WEIGHTS]; - u16 old_hierarchy_mask = fbd_tiler->flags & - FBD_HIERARCHY_MASK_MASK; - int i, j = 0; - - for (i = 0; i < HIERARCHY_WEIGHTS; i++) { - if (old_hierarchy_mask & (1 << i)) { - KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS); - weights[i] = fbd_tiler->weights[j++]; - } else { - weights[i] = default_weight; - } - } - - - dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x New hierarchy mask=%x\n", - old_hierarchy_mask, hierarchy_mask); - - for (i = 0; i < HIERARCHY_WEIGHTS; i++) - dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n", - i, weights[i]); - - j = 0; - - for (i = 0; i < HIERARCHY_WEIGHTS; i++) { - if (hierarchy_mask & (1 << i)) { - KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS); - - dev_dbg(kctx->kbdev->dev, " Writing hierarchy level %02d (%08x) to %d\n", - i, weights[i], j); - - fbd_tiler->weights[j++] = weights[i]; - } - } - - for (; j < FBD_HIERARCHY_WEIGHTS; j++) - fbd_tiler->weights[j] = 0; - - fbd_tiler->flags = hierarchy_mask | (1 << 16); - } - - fbd_tiler->heap_free_address = tiler_heap_free; - - dev_dbg(kctx->kbdev->dev, "heap_free_address=%llx flags=%x\n", - fbd_tiler->heap_free_address, fbd_tiler->flags); - - kbase_vunmap(kctx, &map); - - return 0; -} - -static int kbasep_replay_reset_mfbd(struct kbase_context *kctx, - u64 fbd_address, u64 tiler_heap_free, - u16 hierarchy_mask, u32 
default_weight) -{ - struct kbase_vmap_struct map; - struct { - u32 padding_0; - u32 flags; - u64 padding_1[2]; - u64 heap_free_address; - u64 padding_2; - u32 weights[FBD_HIERARCHY_WEIGHTS]; - } *fbd_tiler; - - dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address); - - fbd_tiler = kbase_vmap(kctx, fbd_address + MFBD_TILER_OFFSET, - sizeof(*fbd_tiler), &map); - if (!fbd_tiler) { - dev_err(kctx->kbdev->dev, - "kbasep_replay_reset_fbd: failed to map fbd\n"); - return -EINVAL; - } - -#ifdef CONFIG_MALI_DEBUG - dev_dbg(kctx->kbdev->dev, "FBD tiler:\n" - "flags = %x\n" - "heap_free_address = %llx\n", - fbd_tiler->flags, - fbd_tiler->heap_free_address); -#endif - if (hierarchy_mask) { - u32 weights[HIERARCHY_WEIGHTS]; - u16 old_hierarchy_mask = (fbd_tiler->flags) & - FBD_HIERARCHY_MASK_MASK; - int i, j = 0; - - for (i = 0; i < HIERARCHY_WEIGHTS; i++) { - if (old_hierarchy_mask & (1 << i)) { - KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS); - weights[i] = fbd_tiler->weights[j++]; - } else { - weights[i] = default_weight; - } - } - - - dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x New hierarchy mask=%x\n", - old_hierarchy_mask, hierarchy_mask); - - for (i = 0; i < HIERARCHY_WEIGHTS; i++) - dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n", - i, weights[i]); - - j = 0; - - for (i = 0; i < HIERARCHY_WEIGHTS; i++) { - if (hierarchy_mask & (1 << i)) { - KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS); - - dev_dbg(kctx->kbdev->dev, - " Writing hierarchy level %02d (%08x) to %d\n", - i, weights[i], j); - - fbd_tiler->weights[j++] = weights[i]; - } - } - - for (; j < FBD_HIERARCHY_WEIGHTS; j++) - fbd_tiler->weights[j] = 0; - - fbd_tiler->flags = hierarchy_mask | (1 << 16); - } - - fbd_tiler->heap_free_address = tiler_heap_free; - - kbase_vunmap(kctx, &map); - - return 0; -} - -/** - * @brief Reset the status of an FBD pointed to by a tiler job - * - * This performs two functions : - * - Set the hierarchy mask - * - Reset the tiler free heap address - * - * 
@param[in] kctx Context pointer - * @param[in] job_header Address of job header to reset. - * @param[in] tiler_heap_free The value to reset Tiler Heap Free to - * @param[in] hierarchy_mask The hierarchy mask to use - * @param[in] default_weight Default hierarchy weight to write when no other - * weight is given in the FBD - * @param[in] job_64 true if this job is using 64-bit - * descriptors - * - * @return 0 on success, error code on failure - */ -static int kbasep_replay_reset_tiler_job(struct kbase_context *kctx, - u64 job_header, u64 tiler_heap_free, - u16 hierarchy_mask, u32 default_weight, bool job_64) -{ - struct kbase_vmap_struct map; - u64 fbd_address; - - if (job_64) { - u64 *job_ext; - - job_ext = kbase_vmap(kctx, - job_header + JOB_HEADER_64_FBD_OFFSET, - sizeof(*job_ext), &map); - - if (!job_ext) { - dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n"); - return -EINVAL; - } - - fbd_address = *job_ext; - - kbase_vunmap(kctx, &map); - } else { - u32 *job_ext; - - job_ext = kbase_vmap(kctx, - job_header + JOB_HEADER_32_FBD_OFFSET, - sizeof(*job_ext), &map); - - if (!job_ext) { - dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n"); - return -EINVAL; - } - - fbd_address = *job_ext; - - kbase_vunmap(kctx, &map); - } - - if (fbd_address & FBD_TYPE) { - return kbasep_replay_reset_mfbd(kctx, - fbd_address & FBD_POINTER_MASK, - tiler_heap_free, - hierarchy_mask, - default_weight); - } else { - return kbasep_replay_reset_sfbd(kctx, - fbd_address & FBD_POINTER_MASK, - tiler_heap_free, - hierarchy_mask, - default_weight); - } -} - -/** - * @brief Reset the status of a job - * - * This performs the following functions : - * - * - Reset the Job Status field of each job to NOT_STARTED. - * - Set the Job Type field of any Vertex Jobs to Null Job. 
- * - For any jobs using an FBD, set the Tiler Heap Free field to the value of - * the tiler_heap_free parameter, and set the hierarchy level mask to the - * hier_mask parameter. - * - Offset HW dependencies by the hw_job_id_offset parameter - * - Set the Perform Job Barrier flag if this job is the first in the chain - * - Read the address of the next job header - * - * @param[in] kctx Context pointer - * @param[in,out] job_header Address of job header to reset. Set to address - * of next job header on exit. - * @param[in] prev_jc Previous job chain to link to, if this job is - * the last in the chain. - * @param[in] hw_job_id_offset Offset for HW job IDs - * @param[in] tiler_heap_free The value to reset Tiler Heap Free to - * @param[in] hierarchy_mask The hierarchy mask to use - * @param[in] default_weight Default hierarchy weight to write when no other - * weight is given in the FBD - * @param[in] first_in_chain true if this job is the first in the chain - * @param[in] fragment_chain true if this job is in the fragment chain - * - * @return 0 on success, error code on failure - */ -static int kbasep_replay_reset_job(struct kbase_context *kctx, - u64 *job_header, u64 prev_jc, - u64 tiler_heap_free, u16 hierarchy_mask, - u32 default_weight, u16 hw_job_id_offset, - bool first_in_chain, bool fragment_chain) -{ - struct job_head *job; - u64 new_job_header; - struct kbase_vmap_struct map; - - job = kbase_vmap(kctx, *job_header, sizeof(*job), &map); - if (!job) { - dev_err(kctx->kbdev->dev, - "kbasep_replay_parse_jc: failed to map jc\n"); - return -EINVAL; - } - - dump_job_head(kctx, "Job header:", job); - - if (job->status == JOB_NOT_STARTED && !fragment_chain) { - dev_err(kctx->kbdev->dev, "Job already not started\n"); - goto out_unmap; - } - job->status = JOB_NOT_STARTED; - - if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_VERTEX) - job->flags = (job->flags & ~JOB_TYPE_MASK) | JOB_TYPE_NULL; - - if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_FUSED) { - 
dev_err(kctx->kbdev->dev, "Fused jobs can not be replayed\n"); - goto out_unmap; - } - - if (first_in_chain) - job->flags |= JOB_FLAG_PERFORM_JOB_BARRIER; - - if ((job->dependencies[0] + hw_job_id_offset) > JOB_HEADER_ID_MAX || - (job->dependencies[1] + hw_job_id_offset) > JOB_HEADER_ID_MAX || - (job->index + hw_job_id_offset) > JOB_HEADER_ID_MAX) { - dev_err(kctx->kbdev->dev, - "Job indicies/dependencies out of valid range\n"); - goto out_unmap; - } - - if (job->dependencies[0]) - job->dependencies[0] += hw_job_id_offset; - if (job->dependencies[1]) - job->dependencies[1] += hw_job_id_offset; - - job->index += hw_job_id_offset; - - if (job->flags & JOB_FLAG_DESC_SIZE) { - new_job_header = job->next._64; - if (!job->next._64) - job->next._64 = prev_jc; - } else { - new_job_header = job->next._32; - if (!job->next._32) - job->next._32 = prev_jc; - } - dump_job_head(kctx, "Updated to:", job); - - if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_TILER) { - bool job_64 = (job->flags & JOB_FLAG_DESC_SIZE) != 0; - - if (kbasep_replay_reset_tiler_job(kctx, *job_header, - tiler_heap_free, hierarchy_mask, - default_weight, job_64) != 0) - goto out_unmap; - - } else if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_FRAGMENT) { - u64 fbd_address; - - if (job->flags & JOB_FLAG_DESC_SIZE) - fbd_address = job->fragment_fbd._64; - else - fbd_address = (u64)job->fragment_fbd._32; - - if (fbd_address & FBD_TYPE) { - if (kbasep_replay_reset_mfbd(kctx, - fbd_address & FBD_POINTER_MASK, - tiler_heap_free, - hierarchy_mask, - default_weight) != 0) - goto out_unmap; - } else { - if (kbasep_replay_reset_sfbd(kctx, - fbd_address & FBD_POINTER_MASK, - tiler_heap_free, - hierarchy_mask, - default_weight) != 0) - goto out_unmap; - } - } - - kbase_vunmap(kctx, &map); - - *job_header = new_job_header; - - return 0; - -out_unmap: - kbase_vunmap(kctx, &map); - return -EINVAL; -} - -/** - * @brief Find the highest job ID in a job chain - * - * @param[in] kctx Context pointer - * @param[in] jc Job chain 
start address - * @param[out] hw_job_id Highest job ID in chain - * - * @return 0 on success, error code on failure - */ -static int kbasep_replay_find_hw_job_id(struct kbase_context *kctx, - u64 jc, u16 *hw_job_id) -{ - while (jc) { - struct job_head *job; - struct kbase_vmap_struct map; - - dev_dbg(kctx->kbdev->dev, - "kbasep_replay_find_hw_job_id: parsing jc=%llx\n", jc); - - job = kbase_vmap(kctx, jc, sizeof(*job), &map); - if (!job) { - dev_err(kctx->kbdev->dev, "failed to map jc\n"); - - return -EINVAL; - } - - if (job->index > *hw_job_id) - *hw_job_id = job->index; - - if (job->flags & JOB_FLAG_DESC_SIZE) - jc = job->next._64; - else - jc = job->next._32; - - kbase_vunmap(kctx, &map); - } - - return 0; -} - -/** - * @brief Reset the status of a number of jobs - * - * This function walks the provided job chain, and calls - * kbasep_replay_reset_job for each job. It also links the job chain to the - * provided previous job chain. - * - * The function will fail if any of the jobs passed already have status of - * NOT_STARTED. - * - * @param[in] kctx Context pointer - * @param[in] jc Job chain to be processed - * @param[in] prev_jc Job chain to be added to. 
May be NULL - * @param[in] tiler_heap_free The value to reset Tiler Heap Free to - * @param[in] hierarchy_mask The hierarchy mask to use - * @param[in] default_weight Default hierarchy weight to write when no other - * weight is given in the FBD - * @param[in] hw_job_id_offset Offset for HW job IDs - * @param[in] fragment_chain true if this chain is the fragment chain - * - * @return 0 on success, error code otherwise - */ -static int kbasep_replay_parse_jc(struct kbase_context *kctx, - u64 jc, u64 prev_jc, - u64 tiler_heap_free, u16 hierarchy_mask, - u32 default_weight, u16 hw_job_id_offset, - bool fragment_chain) -{ - bool first_in_chain = true; - int nr_jobs = 0; - - dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: jc=%llx hw_job_id=%x\n", - jc, hw_job_id_offset); - - while (jc) { - dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: parsing jc=%llx\n", jc); - - if (kbasep_replay_reset_job(kctx, &jc, prev_jc, - tiler_heap_free, hierarchy_mask, - default_weight, hw_job_id_offset, - first_in_chain, fragment_chain) != 0) - return -EINVAL; - - first_in_chain = false; - - nr_jobs++; - if (fragment_chain && - nr_jobs >= BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT) { - dev_err(kctx->kbdev->dev, - "Exceeded maximum number of jobs in fragment chain\n"); - return -EINVAL; - } - } - - return 0; -} - -/** - * @brief Reset the status of a replay job, and set up dependencies - * - * This performs the actions to allow the replay job to be re-run following - * completion of the passed dependency. 
- * - * @param[in] katom The atom to be reset - * @param[in] dep_atom The dependency to be attached to the atom - */ -static void kbasep_replay_reset_softjob(struct kbase_jd_atom *katom, - struct kbase_jd_atom *dep_atom) -{ - katom->status = KBASE_JD_ATOM_STATE_QUEUED; - kbase_jd_katom_dep_set(&katom->dep[0], dep_atom, BASE_JD_DEP_TYPE_DATA); - list_add_tail(&katom->dep_item[0], &dep_atom->dep_head[0]); -} - -/** - * @brief Allocate an unused katom - * - * This will search the provided context for an unused katom, and will mark it - * as KBASE_JD_ATOM_STATE_QUEUED. - * - * If no atoms are available then the function will fail. - * - * @param[in] kctx Context pointer - * @return An atom ID, or -1 on failure - */ -static int kbasep_allocate_katom(struct kbase_context *kctx) -{ - struct kbase_jd_context *jctx = &kctx->jctx; - int i; - - for (i = BASE_JD_ATOM_COUNT-1; i > 0; i--) { - if (jctx->atoms[i].status == KBASE_JD_ATOM_STATE_UNUSED) { - jctx->atoms[i].status = KBASE_JD_ATOM_STATE_QUEUED; - dev_dbg(kctx->kbdev->dev, - "kbasep_allocate_katom: Allocated atom %d\n", - i); - return i; - } - } - - return -1; -} - -/** - * @brief Release a katom - * - * This will mark the provided atom as available, and remove any dependencies. - * - * For use on error path. 
- * - * @param[in] kctx Context pointer - * @param[in] atom_id ID of atom to release - */ -static void kbasep_release_katom(struct kbase_context *kctx, int atom_id) -{ - struct kbase_jd_context *jctx = &kctx->jctx; - - dev_dbg(kctx->kbdev->dev, "kbasep_release_katom: Released atom %d\n", - atom_id); - - while (!list_empty(&jctx->atoms[atom_id].dep_head[0])) - list_del(jctx->atoms[atom_id].dep_head[0].next); - - while (!list_empty(&jctx->atoms[atom_id].dep_head[1])) - list_del(jctx->atoms[atom_id].dep_head[1].next); - - jctx->atoms[atom_id].status = KBASE_JD_ATOM_STATE_UNUSED; -} - -static void kbasep_replay_create_atom(struct kbase_context *kctx, - struct base_jd_atom_v2 *atom, - int atom_nr, - base_jd_prio prio) -{ - atom->nr_extres = 0; - atom->extres_list.value = NULL; - atom->device_nr = 0; - atom->prio = prio; - atom->atom_number = atom_nr; - - base_jd_atom_dep_set(&atom->pre_dep[0], 0 , BASE_JD_DEP_TYPE_INVALID); - base_jd_atom_dep_set(&atom->pre_dep[1], 0 , BASE_JD_DEP_TYPE_INVALID); - - atom->udata.blob[0] = 0; - atom->udata.blob[1] = 0; -} - -/** - * @brief Create two atoms for the purpose of replaying jobs - * - * Two atoms are allocated and created. The jc pointer is not set at this - * stage. The second atom has a dependency on the first. The remaining fields - * are set up as follows : - * - * - No external resources. Any required external resources will be held by the - * replay atom. - * - device_nr is set to 0. This is not relevant as - * BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set. - * - Priority is inherited from the replay job. 
- * - * @param[out] t_atom Atom to use for tiler jobs - * @param[out] f_atom Atom to use for fragment jobs - * @param[in] prio Priority of new atom (inherited from replay soft - * job) - * @return 0 on success, error code on failure - */ -static int kbasep_replay_create_atoms(struct kbase_context *kctx, - struct base_jd_atom_v2 *t_atom, - struct base_jd_atom_v2 *f_atom, - base_jd_prio prio) -{ - int t_atom_nr, f_atom_nr; - - t_atom_nr = kbasep_allocate_katom(kctx); - if (t_atom_nr < 0) { - dev_err(kctx->kbdev->dev, "Failed to allocate katom\n"); - return -EINVAL; - } - - f_atom_nr = kbasep_allocate_katom(kctx); - if (f_atom_nr < 0) { - dev_err(kctx->kbdev->dev, "Failed to allocate katom\n"); - kbasep_release_katom(kctx, t_atom_nr); - return -EINVAL; - } - - kbasep_replay_create_atom(kctx, t_atom, t_atom_nr, prio); - kbasep_replay_create_atom(kctx, f_atom, f_atom_nr, prio); - - base_jd_atom_dep_set(&f_atom->pre_dep[0], t_atom_nr , BASE_JD_DEP_TYPE_DATA); - - return 0; -} - -#ifdef CONFIG_MALI_DEBUG -static void payload_dump(struct kbase_context *kctx, base_jd_replay_payload *payload) -{ - u64 next; - - dev_dbg(kctx->kbdev->dev, "Tiler jc list :\n"); - next = payload->tiler_jc_list; - - while (next) { - struct kbase_vmap_struct map; - base_jd_replay_jc *jc_struct; - - jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &map); - - if (!jc_struct) - return; - - dev_dbg(kctx->kbdev->dev, "* jc_struct=%p jc=%llx next=%llx\n", - jc_struct, jc_struct->jc, jc_struct->next); - - next = jc_struct->next; - - kbase_vunmap(kctx, &map); - } -} -#endif - -/** - * @brief Parse a base_jd_replay_payload provided by userspace - * - * This will read the payload from userspace, and parse the job chains. 
- * - * @param[in] kctx Context pointer - * @param[in] replay_atom Replay soft job atom - * @param[in] t_atom Atom to use for tiler jobs - * @param[in] f_atom Atom to use for fragment jobs - * @return 0 on success, error code on failure - */ -static int kbasep_replay_parse_payload(struct kbase_context *kctx, - struct kbase_jd_atom *replay_atom, - struct base_jd_atom_v2 *t_atom, - struct base_jd_atom_v2 *f_atom) -{ - base_jd_replay_payload *payload; - u64 next; - u64 prev_jc = 0; - u16 hw_job_id_offset = 0; - int ret = -EINVAL; - struct kbase_vmap_struct map; - - dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: replay_atom->jc = %llx sizeof(payload) = %zu\n", - replay_atom->jc, sizeof(payload)); - - payload = kbase_vmap(kctx, replay_atom->jc, sizeof(*payload), &map); - - if (!payload) { - dev_err(kctx->kbdev->dev, "kbasep_replay_parse_payload: failed to map payload into kernel space\n"); - return -EINVAL; - } - -#ifdef CONFIG_MALI_DEBUG - dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: payload=%p\n", payload); - dev_dbg(kctx->kbdev->dev, "Payload structure:\n" - "tiler_jc_list = %llx\n" - "fragment_jc = %llx\n" - "tiler_heap_free = %llx\n" - "fragment_hierarchy_mask = %x\n" - "tiler_hierarchy_mask = %x\n" - "hierarchy_default_weight = %x\n" - "tiler_core_req = %x\n" - "fragment_core_req = %x\n", - payload->tiler_jc_list, - payload->fragment_jc, - payload->tiler_heap_free, - payload->fragment_hierarchy_mask, - payload->tiler_hierarchy_mask, - payload->hierarchy_default_weight, - payload->tiler_core_req, - payload->fragment_core_req); - payload_dump(kctx, payload); -#endif - - t_atom->core_req = payload->tiler_core_req | BASEP_JD_REQ_EVENT_NEVER; - f_atom->core_req = payload->fragment_core_req | BASEP_JD_REQ_EVENT_NEVER; - - /* Sanity check core requirements*/ - if ((t_atom->core_req & BASEP_JD_REQ_ATOM_TYPE & - ~BASE_JD_REQ_COHERENT_GROUP) != BASE_JD_REQ_T || - (f_atom->core_req & BASEP_JD_REQ_ATOM_TYPE & - ~BASE_JD_REQ_COHERENT_GROUP) != 
BASE_JD_REQ_FS || - t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES || - f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) { - dev_err(kctx->kbdev->dev, "Invalid core requirements\n"); - goto out; - } - - /* Process tiler job chains */ - next = payload->tiler_jc_list; - if (!next) { - dev_err(kctx->kbdev->dev, "Invalid tiler JC list\n"); - goto out; - } - - while (next) { - base_jd_replay_jc *jc_struct; - struct kbase_vmap_struct jc_map; - u64 jc; - - jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &jc_map); - - if (!jc_struct) { - dev_err(kctx->kbdev->dev, "Failed to map jc struct\n"); - goto out; - } - - jc = jc_struct->jc; - next = jc_struct->next; - if (next) - jc_struct->jc = 0; - - kbase_vunmap(kctx, &jc_map); - - if (jc) { - u16 max_hw_job_id = 0; - - if (kbasep_replay_find_hw_job_id(kctx, jc, - &max_hw_job_id) != 0) - goto out; - - if (kbasep_replay_parse_jc(kctx, jc, prev_jc, - payload->tiler_heap_free, - payload->tiler_hierarchy_mask, - payload->hierarchy_default_weight, - hw_job_id_offset, false) != 0) { - goto out; - } - - hw_job_id_offset += max_hw_job_id; - - prev_jc = jc; - } - } - t_atom->jc = prev_jc; - - /* Process fragment job chain */ - f_atom->jc = payload->fragment_jc; - if (kbasep_replay_parse_jc(kctx, payload->fragment_jc, 0, - payload->tiler_heap_free, - payload->fragment_hierarchy_mask, - payload->hierarchy_default_weight, 0, - true) != 0) { - goto out; - } - - if (!t_atom->jc || !f_atom->jc) { - dev_err(kctx->kbdev->dev, "Invalid payload\n"); - goto out; - } - - dev_dbg(kctx->kbdev->dev, "t_atom->jc=%llx f_atom->jc=%llx\n", - t_atom->jc, f_atom->jc); - ret = 0; - -out: - kbase_vunmap(kctx, &map); - - return ret; -} - -static void kbase_replay_process_worker(struct work_struct *data) -{ - struct kbase_jd_atom *katom; - struct kbase_context *kctx; - struct kbase_jd_context *jctx; - bool need_to_try_schedule_context = false; - - struct base_jd_atom_v2 t_atom, f_atom; - struct kbase_jd_atom *t_katom, *f_katom; - base_jd_prio 
atom_prio; - - katom = container_of(data, struct kbase_jd_atom, work); - kctx = katom->kctx; - jctx = &kctx->jctx; - - mutex_lock(&jctx->lock); - - atom_prio = kbasep_js_sched_prio_to_atom_prio(katom->sched_priority); - - if (kbasep_replay_create_atoms( - kctx, &t_atom, &f_atom, atom_prio) != 0) { - katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; - goto out; - } - - t_katom = &jctx->atoms[t_atom.atom_number]; - f_katom = &jctx->atoms[f_atom.atom_number]; - - if (kbasep_replay_parse_payload(kctx, katom, &t_atom, &f_atom) != 0) { - kbasep_release_katom(kctx, t_atom.atom_number); - kbasep_release_katom(kctx, f_atom.atom_number); - katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; - goto out; - } - - kbasep_replay_reset_softjob(katom, f_katom); - - need_to_try_schedule_context |= jd_submit_atom(kctx, &t_atom, t_katom); - if (t_katom->event_code == BASE_JD_EVENT_JOB_INVALID) { - dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n"); - kbasep_release_katom(kctx, f_atom.atom_number); - katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; - goto out; - } - need_to_try_schedule_context |= jd_submit_atom(kctx, &f_atom, f_katom); - if (f_katom->event_code == BASE_JD_EVENT_JOB_INVALID) { - dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n"); - katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; - goto out; - } - - katom->event_code = BASE_JD_EVENT_DONE; - -out: - if (katom->event_code != BASE_JD_EVENT_DONE) { - kbase_disjoint_state_down(kctx->kbdev); - - need_to_try_schedule_context |= jd_done_nolock(katom, NULL); - } - - if (need_to_try_schedule_context) - kbase_js_sched_all(kctx->kbdev); - - mutex_unlock(&jctx->lock); -} - -/** - * @brief Check job replay fault - * - * This will read the job payload, checks fault type and source, then decides - * whether replay is required. - * - * @param[in] katom The atom to be processed - * @return true (success) if replay required or false on failure. 
- */ -static bool kbase_replay_fault_check(struct kbase_jd_atom *katom) -{ - struct kbase_context *kctx = katom->kctx; - struct device *dev = kctx->kbdev->dev; - base_jd_replay_payload *payload; - u64 job_header; - u64 job_loop_detect; - struct job_head *job; - struct kbase_vmap_struct job_map; - struct kbase_vmap_struct map; - bool err = false; - - /* Replay job if fault is of type BASE_JD_EVENT_JOB_WRITE_FAULT or - * if force_replay is enabled. - */ - if (BASE_JD_EVENT_TERMINATED == katom->event_code) { - return false; - } else if (BASE_JD_EVENT_JOB_WRITE_FAULT == katom->event_code) { - return true; - } else if (BASE_JD_EVENT_FORCE_REPLAY == katom->event_code) { - katom->event_code = BASE_JD_EVENT_DATA_INVALID_FAULT; - return true; - } else if (BASE_JD_EVENT_DATA_INVALID_FAULT != katom->event_code) { - /* No replay for faults of type other than - * BASE_JD_EVENT_DATA_INVALID_FAULT. - */ - return false; - } - - /* Job fault is BASE_JD_EVENT_DATA_INVALID_FAULT, now scan fragment jc - * to find out whether the source of exception is POLYGON_LIST. Replay - * is required if the source of fault is POLYGON_LIST. 
- */ - payload = kbase_vmap(kctx, katom->jc, sizeof(*payload), &map); - if (!payload) { - dev_err(dev, "kbase_replay_fault_check: failed to map payload.\n"); - return false; - } - -#ifdef CONFIG_MALI_DEBUG - dev_dbg(dev, "kbase_replay_fault_check: payload=%p\n", payload); - dev_dbg(dev, "\nPayload structure:\n" - "fragment_jc = 0x%llx\n" - "fragment_hierarchy_mask = 0x%x\n" - "fragment_core_req = 0x%x\n", - payload->fragment_jc, - payload->fragment_hierarchy_mask, - payload->fragment_core_req); -#endif - /* Process fragment job chain */ - job_header = (u64) payload->fragment_jc; - job_loop_detect = job_header; - while (job_header) { - job = kbase_vmap(kctx, job_header, sizeof(*job), &job_map); - if (!job) { - dev_err(dev, "failed to map jc\n"); - /* unmap payload*/ - kbase_vunmap(kctx, &map); - return false; - } - - -#ifdef CONFIG_MALI_DEBUG - dev_dbg(dev, "\njob_head structure:\n" - "Source ID:0x%x Access:0x%x Exception:0x%x\n" - "at job addr = %p\n" - "not_complete_index = 0x%x\n" - "fault_addr = 0x%llx\n" - "flags = 0x%x\n" - "index = 0x%x\n" - "dependencies = 0x%x,0x%x\n", - JOB_SOURCE_ID(job->status), - ((job->status >> 8) & 0x3), - (job->status & 0xFF), - job, - job->not_complete_index, - job->fault_addr, - job->flags, - job->index, - job->dependencies[0], - job->dependencies[1]); -#endif - - /* Replay only when the polygon list reader caused the - * DATA_INVALID_FAULT */ - if ((BASE_JD_EVENT_DATA_INVALID_FAULT == katom->event_code) && - (JOB_POLYGON_LIST == JOB_SOURCE_ID(job->status))) { - err = true; - kbase_vunmap(kctx, &job_map); - break; - } - - /* Move on to next fragment job in the list */ - if (job->flags & JOB_FLAG_DESC_SIZE) - job_header = job->next._64; - else - job_header = job->next._32; - - kbase_vunmap(kctx, &job_map); - - /* Job chain loop detected */ - if (job_header == job_loop_detect) - break; - } - - /* unmap payload*/ - kbase_vunmap(kctx, &map); - - return err; -} - - -/** - * @brief Process a replay job - * - * Called from 
kbase_process_soft_job. - * - * On exit, if the job has completed, katom->event_code will have been updated. - * If the job has not completed, and is replaying jobs, then the atom status - * will have been reset to KBASE_JD_ATOM_STATE_QUEUED. - * - * @param[in] katom The atom to be processed - * @return false if the atom has completed - * true if the atom is replaying jobs - */ -bool kbase_replay_process(struct kbase_jd_atom *katom) -{ - struct kbase_context *kctx = katom->kctx; - struct kbase_jd_context *jctx = &kctx->jctx; - struct kbase_device *kbdev = kctx->kbdev; - - /* Don't replay this atom if these issues are not present in the - * hardware */ - if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11020) && - !kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11024)) { - dev_dbg(kbdev->dev, "Hardware does not need replay workaround"); - - /* Signal failure to userspace */ - katom->event_code = BASE_JD_EVENT_JOB_INVALID; - - return false; - } - - if (katom->event_code == BASE_JD_EVENT_DONE) { - dev_dbg(kbdev->dev, "Previous job succeeded - not replaying\n"); - - if (katom->retry_count) - kbase_disjoint_state_down(kbdev); - - return false; - } - - if (jctx->sched_info.ctx.is_dying) { - dev_dbg(kbdev->dev, "Not replaying; context is dying\n"); - - if (katom->retry_count) - kbase_disjoint_state_down(kbdev); - - return false; - } - - /* Check job exception type and source before replaying. */ - if (!kbase_replay_fault_check(katom)) { - dev_dbg(kbdev->dev, - "Replay cancelled on event %x\n", katom->event_code); - /* katom->event_code is already set to the failure code of the - * previous job. 
- */ - return false; - } - - dev_warn(kbdev->dev, "Replaying jobs retry=%d\n", - katom->retry_count); - - katom->retry_count++; - - if (katom->retry_count > BASEP_JD_REPLAY_LIMIT) { - dev_err(kbdev->dev, "Replay exceeded limit - failing jobs\n"); - - kbase_disjoint_state_down(kbdev); - - /* katom->event_code is already set to the failure code of the - previous job */ - return false; - } - - /* only enter the disjoint state once for the whole time while the replay is ongoing */ - if (katom->retry_count == 1) - kbase_disjoint_state_up(kbdev); - - INIT_WORK(&katom->work, kbase_replay_process_worker); - queue_work(kctx->event_workq, &katom->work); - - return true; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_security.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_security.c deleted file mode 100755 index a0bb3529baf59..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_security.c +++ /dev/null @@ -1,76 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/** - * @file mali_kbase_security.c - * Base kernel security capability API - */ - -#include - -static inline bool kbasep_am_i_root(void) -{ -#if KBASE_HWCNT_DUMP_BYPASS_ROOT - return true; -#else - /* Check if root */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) - if (uid_eq(current_euid(), GLOBAL_ROOT_UID)) - return true; -#else - if (current_euid() == 0) - return true; -#endif /*LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)*/ - return false; -#endif /*KBASE_HWCNT_DUMP_BYPASS_ROOT*/ -} - -/** - * kbase_security_has_capability - see mali_kbase_caps.h for description. - */ - -bool kbase_security_has_capability(struct kbase_context *kctx, enum kbase_security_capability cap, u32 flags) -{ - /* Assume failure */ - bool access_allowed = false; - bool audit = KBASE_SEC_FLAG_AUDIT & flags; - - KBASE_DEBUG_ASSERT(NULL != kctx); - CSTD_UNUSED(kctx); - - /* Detect unsupported flags */ - KBASE_DEBUG_ASSERT(((~KBASE_SEC_FLAG_MASK) & flags) == 0); - - /* Determine if access is allowed for the given cap */ - switch (cap) { - case KBASE_SEC_MODIFY_PRIORITY: - case KBASE_SEC_INSTR_HW_COUNTERS_COLLECT: - /* Access is granted only if the caller is privileged */ - access_allowed = kbasep_am_i_root(); - break; - } - - /* Report problem if requested */ - if (!access_allowed && audit) - dev_warn(kctx->kbdev->dev, "Security capability failure: %d, %p", cap, (void *)kctx); - - return access_allowed; -} - -KBASE_EXPORT_TEST_API(kbase_security_has_capability); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_security.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_security.h deleted file mode 100755 index 024a7ee1aaba1..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_security.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_kbase_security.h - * Base kernel security capability APIs - */ - -#ifndef _KBASE_SECURITY_H_ -#define _KBASE_SECURITY_H_ - -/* Security flags */ -#define KBASE_SEC_FLAG_NOAUDIT (0u << 0) /* Silently handle privilege failure */ -#define KBASE_SEC_FLAG_AUDIT (1u << 0) /* Write audit message on privilege failure */ -#define KBASE_SEC_FLAG_MASK (KBASE_SEC_FLAG_AUDIT) /* Mask of all valid flag bits */ - -/* List of unique capabilities that have security access privileges */ -enum kbase_security_capability { - /* Instrumentation Counters access privilege */ - KBASE_SEC_INSTR_HW_COUNTERS_COLLECT = 1, - KBASE_SEC_MODIFY_PRIORITY - /* Add additional access privileges here */ -}; - -/** - * kbase_security_has_capability - determine whether a task has a particular effective capability - * @param[in] kctx The task context. - * @param[in] cap The capability to check for. - * @param[in] flags Additional configuration information - * Such as whether to write an audit message or not. - * @return true if success (capability is allowed), false otherwise. 
- */ - -bool kbase_security_has_capability(struct kbase_context *kctx, enum kbase_security_capability cap, u32 flags); - -#endif /* _KBASE_SECURITY_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_smc.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_smc.c deleted file mode 100755 index 17455fe00d79d..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_smc.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifdef CONFIG_ARM64 - -#include -#include - -#include - -static noinline u64 invoke_smc_fid(u64 function_id, - u64 arg0, u64 arg1, u64 arg2) -{ - asm volatile( - __asmeq("%0", "x0") - __asmeq("%1", "x1") - __asmeq("%2", "x2") - __asmeq("%3", "x3") - "smc #0\n" - : "+r" (function_id) - : "r" (arg0), "r" (arg1), "r" (arg2)); - - return function_id; -} - -u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2) -{ - /* Is fast call (bit 31 set) */ - KBASE_DEBUG_ASSERT(fid & ~SMC_FAST_CALL); - /* bits 16-23 must be zero for fast calls */ - KBASE_DEBUG_ASSERT((fid & (0xFF << 16)) == 0); - - return invoke_smc_fid(fid, arg0, arg1, arg2); -} - -u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64, - u64 arg0, u64 arg1, u64 arg2) -{ - u32 fid = 0; - - /* Only the six bits allowed should be used. 
*/ - KBASE_DEBUG_ASSERT((oen & ~SMC_OEN_MASK) == 0); - - fid |= SMC_FAST_CALL; /* Bit 31: Fast call */ - if (smc64) - fid |= SMC_64; /* Bit 30: 1=SMC64, 0=SMC32 */ - fid |= oen; /* Bit 29:24: OEN */ - /* Bit 23:16: Must be zero for fast calls */ - fid |= (function_number); /* Bit 15:0: function number */ - - return kbase_invoke_smc_fid(fid, arg0, arg1, arg2); -} - -#endif /* CONFIG_ARM64 */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_smc.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_smc.h deleted file mode 100755 index 9bff3d2e8b4d8..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_smc.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef _KBASE_SMC_H_ -#define _KBASE_SMC_H_ - -#ifdef CONFIG_ARM64 - -#include - -#define SMC_FAST_CALL (1 << 31) -#define SMC_64 (1 << 30) - -#define SMC_OEN_OFFSET 24 -#define SMC_OEN_MASK (0x3F << SMC_OEN_OFFSET) /* 6 bits */ -#define SMC_OEN_SIP (2 << SMC_OEN_OFFSET) -#define SMC_OEN_STD (4 << SMC_OEN_OFFSET) - - -/** - * kbase_invoke_smc_fid - Perform a secure monitor call - * @fid: The SMC function to call, see SMC Calling convention. - * @arg0: First argument to the SMC. - * @arg1: Second argument to the SMC. - * @arg2: Third argument to the SMC. - * - * See SMC Calling Convention for details. - * - * Return: the return value from the SMC. 
- */ -u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2); - -/** - * kbase_invoke_smc_fid - Perform a secure monitor call - * @oen: Owning Entity number (SIP, STD etc). - * @function_number: The function number within the OEN. - * @smc64: use SMC64 calling convention instead of SMC32. - * @arg0: First argument to the SMC. - * @arg1: Second argument to the SMC. - * @arg2: Third argument to the SMC. - * - * See SMC Calling Convention for details. - * - * Return: the return value from the SMC call. - */ -u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64, - u64 arg0, u64 arg1, u64 arg2); - -#endif /* CONFIG_ARM64 */ - -#endif /* _KBASE_SMC_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_softjobs.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_softjobs.c deleted file mode 100755 index 637eba893a2db..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_softjobs.c +++ /dev/null @@ -1,442 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#include - -#include -#ifdef CONFIG_SYNC -#include "sync.h" -#include -#include "mali_kbase_sync.h" -#endif -#include -#include - -/* Mask to check cache alignment of data structures */ -#define KBASE_CACHE_ALIGNMENT_MASK ((1<jc; - struct kbase_context *kctx = katom->kctx; - int pm_active_err; - - memset(&data, 0, sizeof(data)); - - /* Take the PM active reference as late as possible - otherwise, it could - * delay suspend until we process the atom (which may be at the end of a - * long chain of dependencies */ - pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE); - if (pm_active_err) { - struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data; - - /* We're suspended - queue this on the list of suspended jobs - * Use dep_item[1], because dep_item[0] is in use for 'waiting_soft_jobs' */ - mutex_lock(&js_devdata->runpool_mutex); - list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list); - mutex_unlock(&js_devdata->runpool_mutex); - - return pm_active_err; - } - - kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time, - &ts); - - kbase_pm_context_idle(kctx->kbdev); - - data.sec = ts.tv_sec; - data.usec = ts.tv_nsec / 1000; - data.system_time = system_time; - data.cycle_counter = cycle_counter; - - pfn = jc >> PAGE_SHIFT; - offset = jc & ~PAGE_MASK; - - /* Assume this atom will be cancelled until we know otherwise */ - katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; - if (offset > 0x1000 - sizeof(data)) { - /* Wouldn't fit in the page */ - return 0; - } - - kbase_gpu_vm_lock(kctx); - reg = kbase_region_tracker_find_region_enclosing_address(kctx, jc); - if (reg && - (reg->flags & KBASE_REG_GPU_WR) && - reg->cpu_alloc && reg->cpu_alloc->pages) - addr = reg->cpu_alloc->pages[pfn - reg->start_pfn]; - - kbase_gpu_vm_unlock(kctx); - if (!addr) - return 0; - - page = kmap(pfn_to_page(PFN_DOWN(addr))); - if (!page) - return 0; - - 
kbase_sync_single_for_cpu(katom->kctx->kbdev, - kbase_dma_addr(pfn_to_page(PFN_DOWN(addr))) + - offset, sizeof(data), - DMA_BIDIRECTIONAL); - - memcpy(page + offset, &data, sizeof(data)); - - kbase_sync_single_for_device(katom->kctx->kbdev, - kbase_dma_addr(pfn_to_page(PFN_DOWN(addr))) + - offset, sizeof(data), - DMA_BIDIRECTIONAL); - - kunmap(pfn_to_page(PFN_DOWN(addr))); - - /* Atom was fine - mark it as done */ - katom->event_code = BASE_JD_EVENT_DONE; - - return 0; -} - -#ifdef CONFIG_SYNC - -/* Complete an atom that has returned '1' from kbase_process_soft_job (i.e. has waited) - * - * @param katom The atom to complete - */ -static void complete_soft_job(struct kbase_jd_atom *katom) -{ - struct kbase_context *kctx = katom->kctx; - - mutex_lock(&kctx->jctx.lock); - list_del(&katom->dep_item[0]); - kbase_finish_soft_job(katom); - if (jd_done_nolock(katom, NULL)) - kbase_js_sched_all(kctx->kbdev); - mutex_unlock(&kctx->jctx.lock); -} - -static enum base_jd_event_code kbase_fence_trigger(struct kbase_jd_atom *katom, int result) -{ - struct sync_pt *pt; - struct sync_timeline *timeline; - -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) - if (!list_is_singular(&katom->fence->pt_list_head)) { -#else - if (katom->fence->num_fences != 1) { -#endif - /* Not exactly one item in the list - so it didn't (directly) come from us */ - return BASE_JD_EVENT_JOB_CANCELLED; - } - -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) - pt = list_first_entry(&katom->fence->pt_list_head, struct sync_pt, pt_list); -#else - pt = container_of(katom->fence->cbs[0].sync_pt, struct sync_pt, base); -#endif - timeline = sync_pt_parent(pt); - - if (!kbase_sync_timeline_is_ours(timeline)) { - /* Fence has a sync_pt which isn't ours! */ - return BASE_JD_EVENT_JOB_CANCELLED; - } - - kbase_sync_signal_pt(pt, result); - - sync_timeline_signal(timeline); - - return (result < 0) ? 
BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE; -} - -static void kbase_fence_wait_worker(struct work_struct *data) -{ - struct kbase_jd_atom *katom; - struct kbase_context *kctx; - - katom = container_of(data, struct kbase_jd_atom, work); - kctx = katom->kctx; - - complete_soft_job(katom); -} - -static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter) -{ - struct kbase_jd_atom *katom = container_of(waiter, struct kbase_jd_atom, sync_waiter); - struct kbase_context *kctx; - - KBASE_DEBUG_ASSERT(NULL != katom); - - kctx = katom->kctx; - - KBASE_DEBUG_ASSERT(NULL != kctx); - - /* Propagate the fence status to the atom. - * If negative then cancel this atom and its dependencies. - */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) - if (fence->status < 0) -#else - if (atomic_read(&fence->status) < 0) -#endif - katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; - - /* To prevent a potential deadlock we schedule the work onto the job_done_wq workqueue - * - * The issue is that we may signal the timeline while holding kctx->jctx.lock and - * the callbacks are run synchronously from sync_timeline_signal. So we simply defer the work. 
- */ - - KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work)); - INIT_WORK(&katom->work, kbase_fence_wait_worker); - queue_work(kctx->jctx.job_done_wq, &katom->work); -} - -static int kbase_fence_wait(struct kbase_jd_atom *katom) -{ - int ret; - - KBASE_DEBUG_ASSERT(NULL != katom); - KBASE_DEBUG_ASSERT(NULL != katom->kctx); - - sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback); - - ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter); - - if (ret == 1) { - /* Already signalled */ - return 0; - } else if (ret < 0) { - goto cancel_atom; - } - return 1; - - cancel_atom: - katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; - /* We should cause the dependant jobs in the bag to be failed, - * to do this we schedule the work queue to complete this job */ - KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work)); - INIT_WORK(&katom->work, kbase_fence_wait_worker); - queue_work(katom->kctx->jctx.job_done_wq, &katom->work); - return 1; -} - -static void kbase_fence_cancel_wait(struct kbase_jd_atom *katom) -{ - if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) { - /* The wait wasn't cancelled - leave the cleanup for kbase_fence_wait_callback */ - return; - } - - /* Wait was cancelled - zap the atoms */ - katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; - - kbase_finish_soft_job(katom); - - if (jd_done_nolock(katom, NULL)) - kbase_js_sched_all(katom->kctx->kbdev); -} -#endif /* CONFIG_SYNC */ - -int kbase_process_soft_job(struct kbase_jd_atom *katom) -{ - switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) { - case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME: - return kbase_dump_cpu_gpu_time(katom); -#ifdef CONFIG_SYNC - case BASE_JD_REQ_SOFT_FENCE_TRIGGER: - KBASE_DEBUG_ASSERT(katom->fence != NULL); - katom->event_code = kbase_fence_trigger(katom, katom->event_code == BASE_JD_EVENT_DONE ? 
0 : -EFAULT); - /* Release the reference as we don't need it any more */ - sync_fence_put(katom->fence); - katom->fence = NULL; - break; - case BASE_JD_REQ_SOFT_FENCE_WAIT: - return kbase_fence_wait(katom); -#endif /* CONFIG_SYNC */ - case BASE_JD_REQ_SOFT_REPLAY: - return kbase_replay_process(katom); - } - - /* Atom is complete */ - return 0; -} - -void kbase_cancel_soft_job(struct kbase_jd_atom *katom) -{ - switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) { -#ifdef CONFIG_SYNC - case BASE_JD_REQ_SOFT_FENCE_WAIT: - kbase_fence_cancel_wait(katom); - break; -#endif - default: - /* This soft-job doesn't support cancellation! */ - KBASE_DEBUG_ASSERT(0); - } -} - -int kbase_prepare_soft_job(struct kbase_jd_atom *katom) -{ - switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) { - case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME: - { - if (0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK)) - return -EINVAL; - } - break; -#ifdef CONFIG_SYNC - case BASE_JD_REQ_SOFT_FENCE_TRIGGER: - { - struct base_fence fence; - int fd; - - if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence))) - return -EINVAL; - - fd = kbase_stream_create_fence(fence.basep.stream_fd); - if (fd < 0) - return -EINVAL; - - katom->fence = sync_fence_fdget(fd); - - if (katom->fence == NULL) { - /* The only way the fence can be NULL is if userspace closed it for us. 
- * So we don't need to clear it up */ - return -EINVAL; - } - fence.basep.fd = fd; - if (0 != copy_to_user((__user void *)(uintptr_t) katom->jc, &fence, sizeof(fence))) { - katom->fence = NULL; - sys_close(fd); - return -EINVAL; - } - } - break; - case BASE_JD_REQ_SOFT_FENCE_WAIT: - { - struct base_fence fence; - - if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence))) - return -EINVAL; - - /* Get a reference to the fence object */ - katom->fence = sync_fence_fdget(fence.basep.fd); - if (katom->fence == NULL) - return -EINVAL; - } - break; -#endif /* CONFIG_SYNC */ - case BASE_JD_REQ_SOFT_REPLAY: - break; - default: - /* Unsupported soft-job */ - return -EINVAL; - } - return 0; -} - -void kbase_finish_soft_job(struct kbase_jd_atom *katom) -{ - switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) { - case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME: - /* Nothing to do */ - break; -#ifdef CONFIG_SYNC - case BASE_JD_REQ_SOFT_FENCE_TRIGGER: - /* If fence has not yet been signalled, do it now */ - if (katom->fence) { - kbase_fence_trigger(katom, katom->event_code == - BASE_JD_EVENT_DONE ? 
0 : -EFAULT); - sync_fence_put(katom->fence); - katom->fence = NULL; - } - break; - case BASE_JD_REQ_SOFT_FENCE_WAIT: - /* Release the reference to the fence object */ - sync_fence_put(katom->fence); - katom->fence = NULL; - break; -#endif /* CONFIG_SYNC */ - } -} - -void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev) -{ - LIST_HEAD(local_suspended_soft_jobs); - struct kbase_jd_atom *tmp_iter; - struct kbase_jd_atom *katom_iter; - struct kbasep_js_device_data *js_devdata; - bool resched = false; - - KBASE_DEBUG_ASSERT(kbdev); - - js_devdata = &kbdev->js_data; - - /* Move out the entire list */ - mutex_lock(&js_devdata->runpool_mutex); - list_splice_init(&js_devdata->suspended_soft_jobs_list, - &local_suspended_soft_jobs); - mutex_unlock(&js_devdata->runpool_mutex); - - /* - * Each atom must be detached from the list and ran separately - - * it could be re-added to the old list, but this is unlikely - */ - list_for_each_entry_safe(katom_iter, tmp_iter, - &local_suspended_soft_jobs, dep_item[1]) { - struct kbase_context *kctx = katom_iter->kctx; - - mutex_lock(&kctx->jctx.lock); - - /* Remove from the global list */ - list_del(&katom_iter->dep_item[1]); - /* Remove from the context's list of waiting soft jobs */ - list_del(&katom_iter->dep_item[0]); - - if (kbase_process_soft_job(katom_iter) == 0) { - kbase_finish_soft_job(katom_iter); - resched |= jd_done_nolock(katom_iter, NULL); - } else { - /* The job has not completed */ - KBASE_DEBUG_ASSERT((katom_iter->core_req & - BASEP_JD_REQ_ATOM_TYPE) - != BASE_JD_REQ_SOFT_REPLAY); - list_add_tail(&katom_iter->dep_item[0], - &kctx->waiting_soft_jobs); - } - - mutex_unlock(&kctx->jctx.lock); - } - - if (resched) - kbase_js_sched_all(kbdev); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_sync.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_sync.c deleted file mode 100755 index c5fb98152554e..0000000000000 --- 
a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_sync.c +++ /dev/null @@ -1,182 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifdef CONFIG_SYNC - -#include -#include "sync.h" -#include -#include - -struct mali_sync_timeline { - struct sync_timeline timeline; - atomic_t counter; - atomic_t signalled; -}; - -struct mali_sync_pt { - struct sync_pt pt; - int order; - int result; -}; - -static struct mali_sync_timeline *to_mali_sync_timeline(struct sync_timeline *timeline) -{ - return container_of(timeline, struct mali_sync_timeline, timeline); -} - -static struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt) -{ - return container_of(pt, struct mali_sync_pt, pt); -} - -static struct sync_pt *timeline_dup(struct sync_pt *pt) -{ - struct mali_sync_pt *mpt = to_mali_sync_pt(pt); - struct mali_sync_pt *new_mpt; - struct sync_pt *new_pt = sync_pt_create(sync_pt_parent(pt), sizeof(struct mali_sync_pt)); - - if (!new_pt) - return NULL; - - new_mpt = to_mali_sync_pt(new_pt); - new_mpt->order = mpt->order; - new_mpt->result = mpt->result; - - return new_pt; -} - -static int timeline_has_signaled(struct sync_pt *pt) -{ - struct mali_sync_pt *mpt = to_mali_sync_pt(pt); - struct mali_sync_timeline *mtl = to_mali_sync_timeline(sync_pt_parent(pt)); - int result = mpt->result; - - int diff = atomic_read(&mtl->signalled) - mpt->order; - - if (diff >= 0) - return (result < 0) ? 
result : 1; - - return 0; -} - -static int timeline_compare(struct sync_pt *a, struct sync_pt *b) -{ - struct mali_sync_pt *ma = container_of(a, struct mali_sync_pt, pt); - struct mali_sync_pt *mb = container_of(b, struct mali_sync_pt, pt); - - int diff = ma->order - mb->order; - - if (diff == 0) - return 0; - - return (diff < 0) ? -1 : 1; -} - -static void timeline_value_str(struct sync_timeline *timeline, char *str, - int size) -{ - struct mali_sync_timeline *mtl = to_mali_sync_timeline(timeline); - - snprintf(str, size, "%d", atomic_read(&mtl->signalled)); -} - -static void pt_value_str(struct sync_pt *pt, char *str, int size) -{ - struct mali_sync_pt *mpt = to_mali_sync_pt(pt); - - snprintf(str, size, "%d(%d)", mpt->order, mpt->result); -} - -static struct sync_timeline_ops mali_timeline_ops = { - .driver_name = "Mali", - .dup = timeline_dup, - .has_signaled = timeline_has_signaled, - .compare = timeline_compare, - .timeline_value_str = timeline_value_str, - .pt_value_str = pt_value_str, -}; - -int kbase_sync_timeline_is_ours(struct sync_timeline *timeline) -{ - return timeline->ops == &mali_timeline_ops; -} - -struct sync_timeline *kbase_sync_timeline_alloc(const char *name) -{ - struct sync_timeline *tl; - struct mali_sync_timeline *mtl; - - tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct mali_sync_timeline), name); - if (!tl) - return NULL; - - /* Set the counter in our private struct */ - mtl = to_mali_sync_timeline(tl); - atomic_set(&mtl->counter, 0); - atomic_set(&mtl->signalled, 0); - - return tl; -} - -struct sync_pt *kbase_sync_pt_alloc(struct sync_timeline *parent) -{ - struct sync_pt *pt = sync_pt_create(parent, sizeof(struct mali_sync_pt)); - struct mali_sync_timeline *mtl = to_mali_sync_timeline(parent); - struct mali_sync_pt *mpt; - - if (!pt) - return NULL; - - mpt = to_mali_sync_pt(pt); - mpt->order = atomic_inc_return(&mtl->counter); - mpt->result = 0; - - return pt; -} - -void kbase_sync_signal_pt(struct sync_pt *pt, int result) -{ 
- struct mali_sync_pt *mpt = to_mali_sync_pt(pt); - struct mali_sync_timeline *mtl = to_mali_sync_timeline(sync_pt_parent(pt)); - int signalled; - int diff; - - mpt->result = result; - - do { - signalled = atomic_read(&mtl->signalled); - - diff = signalled - mpt->order; - - if (diff > 0) { - /* The timeline is already at or ahead of this point. - * This should not happen unless userspace has been - * signalling fences out of order, so warn but don't - * violate the sync_pt API. - * The warning is only in debug builds to prevent - * a malicious user being able to spam dmesg. - */ -#ifdef CONFIG_MALI_DEBUG - pr_err("Fences were triggered in a different order to allocation!"); -#endif /* CONFIG_MALI_DEBUG */ - return; - } - } while (atomic_cmpxchg(&mtl->signalled, signalled, mpt->order) != signalled); -} - -#endif /* CONFIG_SYNC */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_sync.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_sync.h deleted file mode 100755 index 6d8e34d3c3ae7..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_sync.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_kbase_sync.h - * - */ - -#ifndef MALI_KBASE_SYNC_H -#define MALI_KBASE_SYNC_H - -#include "sync.h" - -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) -/* For backwards compatiblility with kernels before 3.17. After 3.17 - * sync_pt_parent is included in the kernel. 
*/ -static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt) -{ - return pt->parent; -} -#endif - -/* - * Create a stream object. - * Built on top of timeline object. - * Exposed as a file descriptor. - * Life-time controlled via the file descriptor: - * - dup to add a ref - * - close to remove a ref - */ -int kbase_stream_create(const char *name, int *const out_fd); - -/* - * Create a fence in a stream object - */ -int kbase_stream_create_fence(int tl_fd); - -/* - * Validate a fd to be a valid fence - * No reference is taken. - * - * This function is only usable to catch unintentional user errors early, - * it does not stop malicious code changing the fd after this function returns. - */ -int kbase_fence_validate(int fd); - -/* Returns true if the specified timeline is allocated by Mali */ -int kbase_sync_timeline_is_ours(struct sync_timeline *timeline); - -/* Allocates a timeline for Mali - * - * One timeline should be allocated per API context. - */ -struct sync_timeline *kbase_sync_timeline_alloc(const char *name); - -/* Allocates a sync point within the timeline. - * - * The timeline must be the one allocated by kbase_sync_timeline_alloc - * - * Sync points must be triggered in *exactly* the same order as they are allocated. - */ -struct sync_pt *kbase_sync_pt_alloc(struct sync_timeline *parent); - -/* Signals a particular sync point - * - * Sync points must be triggered in *exactly* the same order as they are allocated. - * - * If they are signalled in the wrong order then a message will be printed in debug - * builds and otherwise attempts to signal order sync_pts will be ignored. - * - * result can be negative to indicate error, any other value is interpreted as success. 
- */ -void kbase_sync_signal_pt(struct sync_pt *pt, int result); - -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_sync_user.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_sync_user.c deleted file mode 100755 index ddd0847a69c5d..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_sync_user.c +++ /dev/null @@ -1,157 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_kbase_sync_user.c - * - */ - -#ifdef CONFIG_SYNC - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static int kbase_stream_close(struct inode *inode, struct file *file) -{ - struct sync_timeline *tl; - - tl = (struct sync_timeline *)file->private_data; - BUG_ON(!tl); - sync_timeline_destroy(tl); - return 0; -} - -static const struct file_operations stream_fops = { - .owner = THIS_MODULE, - .release = kbase_stream_close, -}; - -int kbase_stream_create(const char *name, int *const out_fd) -{ - struct sync_timeline *tl; - - BUG_ON(!out_fd); - - tl = kbase_sync_timeline_alloc(name); - if (!tl) - return -EINVAL; - - *out_fd = anon_inode_getfd(name, &stream_fops, tl, O_RDONLY | O_CLOEXEC); - - if (*out_fd < 0) { - sync_timeline_destroy(tl); - return -EINVAL; - } - - return 0; -} - -int kbase_stream_create_fence(int tl_fd) -{ - struct sync_timeline *tl; - struct sync_pt *pt; - struct sync_fence *fence; -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0) - struct 
files_struct *files; - struct fdtable *fdt; -#endif - int fd; - struct file *tl_file; - - tl_file = fget(tl_fd); - if (tl_file == NULL) - return -EBADF; - - if (tl_file->f_op != &stream_fops) { - fd = -EBADF; - goto out; - } - - tl = tl_file->private_data; - - pt = kbase_sync_pt_alloc(tl); - if (!pt) { - fd = -EFAULT; - goto out; - } - - fence = sync_fence_create("mali_fence", pt); - if (!fence) { - sync_pt_free(pt); - fd = -EFAULT; - goto out; - } - - /* from here the fence owns the sync_pt */ - - /* create a fd representing the fence */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) - fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); - if (fd < 0) { - sync_fence_put(fence); - goto out; - } -#else - fd = get_unused_fd(); - if (fd < 0) { - sync_fence_put(fence); - goto out; - } - - files = current->files; - spin_lock(&files->file_lock); - fdt = files_fdtable(files); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) - __set_close_on_exec(fd, fdt); -#else - FD_SET(fd, fdt->close_on_exec); -#endif - spin_unlock(&files->file_lock); -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) */ - - /* bind fence to the new fd */ - sync_fence_install(fence, fd); - - out: - fput(tl_file); - - return fd; -} - -int kbase_fence_validate(int fd) -{ - struct sync_fence *fence; - - fence = sync_fence_fdget(fd); - if (!fence) - return -EINVAL; - - sync_fence_put(fence); - return 0; -} - -#endif /* CONFIG_SYNC */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_tlstream.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_tlstream.c deleted file mode 100755 index 99428d1e660eb..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_tlstream.c +++ /dev/null @@ -1,2004 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -/*****************************************************************************/ - -/* The version of timeline stream. */ -#define KBASEP_TLSTREAM_VERSION 1 - -/* The maximum expected length of string in tracepoint descriptor. */ -#define STRLEN_MAX 64 /* bytes */ - -/* The number of nanoseconds in a second. */ -#define NSECS_IN_SEC 1000000000ull /* ns */ - -/* The number of nanoseconds to wait before autoflushing the stream. */ -#define AUTOFLUSH_TIMEOUT (2ull * NSECS_IN_SEC) /* ns */ - -/* The period of autoflush checker execution in milliseconds. */ -#define AUTOFLUSH_INTERVAL 1000 /* ms */ - -/* The maximum size of a single packet used by timeline. */ -#define PACKET_SIZE 2048 /* bytes */ - -/* The number of packets used by one timeline stream. */ -#define PACKET_COUNT 16 - -/* The number of bytes reserved for packet header. - * These value must be defined according to MIPE documentation. */ -#define PACKET_HEADER_SIZE 8 /* bytes */ - -/* The number of bytes reserved for packet sequence number. - * These value must be defined according to MIPE documentation. */ -#define PACKET_NUMBER_SIZE 4 /* bytes */ - -/* Packet header - first word. - * These values must be defined according to MIPE documentation. 
*/ -#define PACKET_STREAMID_POS 0 -#define PACKET_STREAMID_LEN 8 -#define PACKET_RSVD1_POS (PACKET_STREAMID_POS + PACKET_STREAMID_LEN) -#define PACKET_RSVD1_LEN 8 -#define PACKET_TYPE_POS (PACKET_RSVD1_POS + PACKET_RSVD1_LEN) -#define PACKET_TYPE_LEN 3 -#define PACKET_CLASS_POS (PACKET_TYPE_POS + PACKET_TYPE_LEN) -#define PACKET_CLASS_LEN 7 -#define PACKET_FAMILY_POS (PACKET_CLASS_POS + PACKET_CLASS_LEN) -#define PACKET_FAMILY_LEN 6 - -/* Packet header - second word - * These values must be defined according to MIPE documentation. */ -#define PACKET_LENGTH_POS 0 -#define PACKET_LENGTH_LEN 24 -#define PACKET_SEQBIT_POS (PACKET_LENGTH_POS + PACKET_LENGTH_LEN) -#define PACKET_SEQBIT_LEN 1 -#define PACKET_RSVD2_POS (PACKET_SEQBIT_POS + PACKET_SEQBIT_LEN) -#define PACKET_RSVD2_LEN 7 - -/* Types of streams generated by timeline. - * Order is significant! Header streams must precede respective body streams. */ -enum tl_stream_type { - TL_STREAM_TYPE_OBJ_HEADER, - TL_STREAM_TYPE_OBJ_SUMMARY, - TL_STREAM_TYPE_OBJ, - TL_STREAM_TYPE_AUX_HEADER, - TL_STREAM_TYPE_AUX, - - TL_STREAM_TYPE_COUNT -}; - -/* Timeline packet family ids. - * Values are significant! Check MIPE documentation. */ -enum tl_packet_family { - TL_PACKET_FAMILY_CTRL = 0, /* control packets */ - TL_PACKET_FAMILY_TL = 1, /* timeline packets */ - - TL_PACKET_FAMILY_COUNT -}; - -/* Packet classes used in timeline streams. - * Values are significant! Check MIPE documentation. */ -enum tl_packet_class { - TL_PACKET_CLASS_OBJ = 0, /* timeline objects packet */ - TL_PACKET_CLASS_AUX = 1, /* auxiliary events packet */ -}; - -/* Packet types used in timeline streams. - * Values are significant! Check MIPE documentation. */ -enum tl_packet_type { - TL_PACKET_TYPE_HEADER = 0, /* stream's header/directory */ - TL_PACKET_TYPE_BODY = 1, /* stream's body */ - TL_PACKET_TYPE_SUMMARY = 2, /* stream's summary */ -}; - -/* Message ids of trace events that are recorded in the timeline stream. 
*/ -enum tl_msg_id { - /* Timeline object events. */ - KBASE_TL_NEW_CTX, - KBASE_TL_NEW_GPU, - KBASE_TL_NEW_LPU, - KBASE_TL_NEW_ATOM, - KBASE_TL_NEW_AS, - KBASE_TL_DEL_CTX, - KBASE_TL_DEL_ATOM, - KBASE_TL_LIFELINK_LPU_GPU, - KBASE_TL_LIFELINK_AS_GPU, - KBASE_TL_RET_GPU_CTX, - KBASE_TL_RET_ATOM_CTX, - KBASE_TL_RET_ATOM_LPU, - KBASE_TL_NRET_GPU_CTX, - KBASE_TL_NRET_ATOM_CTX, - KBASE_TL_NRET_ATOM_LPU, - KBASE_TL_RET_AS_CTX, - KBASE_TL_NRET_AS_CTX, - KBASE_TL_RET_ATOM_AS, - KBASE_TL_NRET_ATOM_AS, - KBASE_TL_ATTRIB_ATOM_CONFIG, - KBASE_TL_ATTRIB_AS_CONFIG, - - /* Job dump specific events (part of timeline stream). */ - KBASE_JD_GPU_SOFT_RESET, - - /* Timeline non-object events. */ - KBASE_AUX_PM_STATE, - KBASE_AUX_JOB_SOFTSTOP, - KBASE_AUX_PAGEFAULT, - KBASE_AUX_PAGESALLOC -}; - -/*****************************************************************************/ - -/** - * struct tl_stream - timeline stream structure - * @lock: message order lock - * @buffer: array of buffers - * @wbi: write buffer index - * @rbi: read buffer index - * @numbered: if non-zero stream's packets are sequentially numbered - * @last_write_time: timestamp indicating last write - * - * This structure holds information needed to construct proper packets in the - * timeline stream. Each message in sequence must bear timestamp that is greater - * to one in previous message in the same stream. For this reason lock is held - * throughout the process of message creation. Each stream contains set of - * buffers. Each buffer will hold one MIPE packet. In case there is no free - * space required to store incoming message the oldest buffer is discarded. - * Each packet in timeline body stream has sequence number embedded (this value - * must increment monotonically and is used by packets receiver to discover - * buffer overflows. 
- */ -struct tl_stream { - spinlock_t lock; - - struct { - atomic_t size; /* number of bytes in buffer */ - char data[PACKET_SIZE]; /* buffer's data */ - } buffer[PACKET_COUNT]; - - atomic_t wbi; - atomic_t rbi; - - int numbered; - u64 last_write_time; -}; - -/** - * struct tp_desc - tracepoint message descriptor structure - * @id: tracepoint ID identifying message in stream - * @id_str: human readable version of tracepoint ID - * @name: tracepoint description - * @arg_types: tracepoint's arguments types declaration - * @arg_names: comma separated list of tracepoint's arguments names - */ -struct tp_desc { - u32 id; - const char *id_str; - const char *name; - const char *arg_types; - const char *arg_names; -}; - -/*****************************************************************************/ - -/* Configuration of timeline streams generated by kernel. - * Kernel emit only streams containing either timeline object events or - * auxiliary events. All streams have stream id value of 1 (as opposed to user - * space streams that have value of 0). */ -static const struct { - enum tl_packet_family pkt_family; - enum tl_packet_class pkt_class; - enum tl_packet_type pkt_type; - unsigned int stream_id; -} tl_stream_cfg[TL_STREAM_TYPE_COUNT] = { - {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_HEADER, 1}, - {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_SUMMARY, 1}, - {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_BODY, 1}, - {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_HEADER, 1}, - {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_BODY, 1} -}; - -/* The timeline streams generated by kernel. */ -static struct tl_stream *tl_stream[TL_STREAM_TYPE_COUNT]; - -/* Autoflush timer. */ -static struct timer_list autoflush_timer; - -/* If non-zero autoflush timer is active. */ -static atomic_t autoflush_timer_active; - -/* Reader lock. Only one reader is allowed to have access to the timeline - * streams at any given time. 
*/ -static DEFINE_MUTEX(tl_reader_lock); - -/* Indicator of whether the timeline stream file descriptor is already used. */ -static atomic_t tlstream_busy = {0}; - -/* Timeline stream event queue. */ -static DECLARE_WAIT_QUEUE_HEAD(tl_event_queue); - -/* The timeline stream file operations functions. */ -static ssize_t kbasep_tlstream_read( - struct file *filp, - char __user *buffer, - size_t size, - loff_t *f_pos); -static unsigned int kbasep_tlstream_poll(struct file *filp, poll_table *wait); -static int kbasep_tlstream_release(struct inode *inode, struct file *filp); - -/* The timeline stream file operations structure. */ -static const struct file_operations kbasep_tlstream_fops = { - .release = kbasep_tlstream_release, - .read = kbasep_tlstream_read, - .poll = kbasep_tlstream_poll, -}; - -/* Descriptors of timeline messages transmitted in object events stream. */ -static const struct tp_desc tp_desc_obj[] = { - { - KBASE_TL_NEW_CTX, - __stringify(KBASE_TL_NEW_CTX), - "object ctx is created", - "@pI", - "ctx,ctx_nr" - }, - { - KBASE_TL_NEW_GPU, - __stringify(KBASE_TL_NEW_GPU), - "object gpu is created", - "@pII", - "gpu,gpu_id,core_count" - }, - { - KBASE_TL_NEW_LPU, - __stringify(KBASE_TL_NEW_LPU), - "object lpu is created", - "@pII", - "lpu,lpu_nr,lpu_fn" - }, - { - KBASE_TL_NEW_ATOM, - __stringify(KBASE_TL_NEW_ATOM), - "object atom is created", - "@pI", - "atom,atom_nr" - }, - { - KBASE_TL_NEW_AS, - __stringify(KBASE_TL_NEW_AS), - "address space object is created", - "@pI", - "address_space,as_nr" - }, - { - KBASE_TL_DEL_CTX, - __stringify(KBASE_TL_DEL_CTX), - "context is destroyed", - "@p", - "ctx" - }, - { - KBASE_TL_DEL_ATOM, - __stringify(KBASE_TL_DEL_ATOM), - "atom is destroyed", - "@p", - "atom" - }, - { - KBASE_TL_LIFELINK_LPU_GPU, - __stringify(KBASE_TL_LIFELINK_LPU_GPU), - "lpu is deleted with gpu", - "@pp", - "lpu,gpu" - }, - { - KBASE_TL_LIFELINK_AS_GPU, - __stringify(KBASE_TL_LIFELINK_AS_GPU), - "address space is deleted with gpu", - "@pp", - 
"address_space,gpu" - }, - { - KBASE_TL_RET_GPU_CTX, - __stringify(KBASE_TL_RET_GPU_CTX), - "gpu is retained by context", - "@pp", - "gpu,ctx" - }, - { - KBASE_TL_RET_ATOM_CTX, - __stringify(KBASE_TL_RET_ATOM_CTX), - "atom is retained by context", - "@pp", - "atom,ctx" - }, - { - KBASE_TL_RET_ATOM_LPU, - __stringify(KBASE_TL_RET_ATOM_LPU), - "atom is retained by lpu", - "@pp", - "atom,lpu" - }, - { - KBASE_TL_NRET_GPU_CTX, - __stringify(KBASE_TL_NRET_GPU_CTX), - "gpu is released by context", - "@pp", - "gpu,ctx" - }, - { - KBASE_TL_NRET_ATOM_CTX, - __stringify(KBASE_TL_NRET_ATOM_CTX), - "atom is released by context", - "@pp", - "atom,context" - }, - { - KBASE_TL_NRET_ATOM_LPU, - __stringify(KBASE_TL_NRET_ATOM_LPU), - "atom is released by lpu", - "@pp", - "atom,lpu" - }, - { - KBASE_TL_RET_AS_CTX, - __stringify(KBASE_TL_RET_AS_CTX), - "address space is retained by context", - "@pp", - "address_space,ctx" - }, - { - KBASE_TL_NRET_AS_CTX, - __stringify(KBASE_TL_NRET_AS_CTX), - "address space is released by context", - "@pp", - "address_space,ctx" - }, - { - KBASE_TL_RET_ATOM_AS, - __stringify(KBASE_TL_RET_ATOM_AS), - "atom is retained by address space", - "@pp", - "atom,address_space" - }, - { - KBASE_TL_NRET_ATOM_AS, - __stringify(KBASE_TL_NRET_ATOM_AS), - "atom is released by address space", - "@pp", - "atom,address_space" - }, - { - KBASE_TL_ATTRIB_ATOM_CONFIG, - __stringify(KBASE_TL_ATTRIB_ATOM_CONFIG), - "atom job slot attributes", - "@pLLI", - "atom,descriptor,affinity,config" - }, - { - KBASE_TL_ATTRIB_AS_CONFIG, - __stringify(KBASE_TL_ATTRIB_AS_CONFIG), - "address space attributes", - "@pLLL", - "address_space,transtab,memattr,transcfg" - }, - { - KBASE_JD_GPU_SOFT_RESET, - __stringify(KBASE_JD_GPU_SOFT_RESET), - "gpu soft reset", - "@p", - "gpu" - }, -}; - -/* Descriptors of timeline messages transmitted in auxiliary events stream. 
*/ -static const struct tp_desc tp_desc_aux[] = { - { - KBASE_AUX_PM_STATE, - __stringify(KBASE_AUX_PM_STATE), - "PM state", - "@IL", - "core_type,core_state_bitset" - }, - { - KBASE_AUX_JOB_SOFTSTOP, - __stringify(KBASE_AUX_JOB_SOFTSTOP), - "Job soft stop", - "@I", - "tag_id" - }, - { - KBASE_AUX_PAGEFAULT, - __stringify(KBASE_AUX_PAGEFAULT), - "Page fault", - "@II", - "as_id,page_cnt" - }, - { - KBASE_AUX_PAGESALLOC, - __stringify(KBASE_AUX_PAGESALLOC), - "Total alloc pages change", - "@l", - "page_cnt_change" - } -}; - -#if MALI_UNIT_TEST -/* Number of bytes read by user. */ -static atomic_t tlstream_bytes_collected = {0}; - -/* Number of bytes generated by tracepoint messages. */ -static atomic_t tlstream_bytes_generated = {0}; -#endif /* MALI_UNIT_TEST */ - -/*****************************************************************************/ - -/** - * kbasep_tlstream_get_timestamp - return timestamp - * - * Function returns timestamp value based on raw monotonic timer. Value will - * wrap around zero in case of overflow. 
- * Return: timestamp value - */ -static u64 kbasep_tlstream_get_timestamp(void) -{ - struct timespec ts; - u64 timestamp; - - getrawmonotonic(&ts); - timestamp = (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec; - return timestamp; -} - -/** - * kbasep_tlstream_write_bytes - write data to message buffer - * @buffer: buffer where data will be written - * @pos: position in the buffer where to place data - * @bytes: pointer to buffer holding data - * @len: length of data to be written - * - * Return: updated position in the buffer - */ -static size_t kbasep_tlstream_write_bytes( - char *buffer, - size_t pos, - const void *bytes, - size_t len) -{ - KBASE_DEBUG_ASSERT(buffer); - KBASE_DEBUG_ASSERT(bytes); - - memcpy(&buffer[pos], bytes, len); - - return pos + len; -} - -/** - * kbasep_tlstream_write_string - write string to message buffer - * @buffer: buffer where data will be written - * @pos: position in the buffer where to place data - * @string: pointer to buffer holding the source string - * @max_write_size: number of bytes that can be stored in buffer - * - * Return: updated position in the buffer - */ -static size_t kbasep_tlstream_write_string( - char *buffer, - size_t pos, - const char *string, - size_t max_write_size) -{ - u32 string_len; - - KBASE_DEBUG_ASSERT(buffer); - KBASE_DEBUG_ASSERT(string); - /* Timeline string consists of at least string length and nul - * terminator. */ - KBASE_DEBUG_ASSERT(max_write_size >= sizeof(string_len) + sizeof(char)); - max_write_size -= sizeof(string_len); - - string_len = strlcpy( - &buffer[pos + sizeof(string_len)], - string, - max_write_size); - string_len += sizeof(char); - - /* Make sure that the source string fit into the buffer. */ - KBASE_DEBUG_ASSERT(string_len <= max_write_size); - - /* Update string length. 
*/ - memcpy(&buffer[pos], &string_len, sizeof(string_len)); - - return pos + sizeof(string_len) + string_len; -} - -/** - * kbasep_tlstream_write_timestamp - write timestamp to message buffer - * @buffer: buffer where data will be written - * @pos: position in the buffer where to place data - * - * Return: updated position in the buffer - */ -static size_t kbasep_tlstream_write_timestamp(void *buffer, size_t pos) -{ - u64 timestamp = kbasep_tlstream_get_timestamp(); - - return kbasep_tlstream_write_bytes( - buffer, pos, - ×tamp, sizeof(timestamp)); -} - -/** - * kbasep_tlstream_put_bits - put bits in a word - * @word: pointer to the words being modified - * @value: value that shall be written to given position - * @bitpos: position where value shall be written (in bits) - * @bitlen: length of value (in bits) - */ -static void kbasep_tlstream_put_bits( - u32 *word, - u32 value, - unsigned int bitpos, - unsigned int bitlen) -{ - const u32 mask = ((1 << bitlen) - 1) << bitpos; - - KBASE_DEBUG_ASSERT(word); - KBASE_DEBUG_ASSERT((0 != bitlen) && (32 >= bitlen)); - KBASE_DEBUG_ASSERT((bitpos + bitlen) <= 32); - - *word &= ~mask; - *word |= ((value << bitpos) & mask); -} - -/** - * kbasep_tlstream_packet_header_setup - setup the packet header - * @buffer: pointer to the buffer - * @pkt_family: packet's family - * @pkt_type: packet's type - * @pkt_class: packet's class - * @stream_id: stream id - * @numbered: non-zero if this stream is numbered - * - * Function sets up immutable part of packet header in the given buffer. 
- */ -static void kbasep_tlstream_packet_header_setup( - char *buffer, - enum tl_packet_family pkt_family, - enum tl_packet_class pkt_class, - enum tl_packet_type pkt_type, - unsigned int stream_id, - int numbered) -{ - u32 word0 = 0; - u32 word1 = 0; - - KBASE_DEBUG_ASSERT(buffer); - KBASE_DEBUG_ASSERT(pkt_family == TL_PACKET_FAMILY_TL); - KBASE_DEBUG_ASSERT( - (pkt_type == TL_PACKET_TYPE_HEADER) || - (pkt_type == TL_PACKET_TYPE_SUMMARY) || - (pkt_type == TL_PACKET_TYPE_BODY)); - KBASE_DEBUG_ASSERT( - (pkt_class == TL_PACKET_CLASS_OBJ) || - (pkt_class == TL_PACKET_CLASS_AUX)); - - kbasep_tlstream_put_bits( - &word0, pkt_family, - PACKET_FAMILY_POS, PACKET_FAMILY_LEN); - kbasep_tlstream_put_bits( - &word0, pkt_class, - PACKET_CLASS_POS, PACKET_CLASS_LEN); - kbasep_tlstream_put_bits( - &word0, pkt_type, - PACKET_TYPE_POS, PACKET_TYPE_LEN); - kbasep_tlstream_put_bits( - &word0, stream_id, - PACKET_STREAMID_POS, PACKET_STREAMID_LEN); - - if (numbered) - kbasep_tlstream_put_bits( - &word1, 1, - PACKET_SEQBIT_POS, PACKET_SEQBIT_LEN); - - memcpy(&buffer[0], &word0, sizeof(word0)); - memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1)); -} - -/** - * kbasep_tlstream_packet_header_update - update the packet header - * @buffer: pointer to the buffer - * @data_size: amount of data carried in this packet - * - * Function updates mutable part of packet header in the given buffer. - * Note that value of data_size must not including size of the header. 
- */ -static void kbasep_tlstream_packet_header_update( - char *buffer, - size_t data_size) -{ - u32 word0; - u32 word1; - - KBASE_DEBUG_ASSERT(buffer); - CSTD_UNUSED(word0); - - memcpy(&word1, &buffer[sizeof(word0)], sizeof(word1)); - - kbasep_tlstream_put_bits( - &word1, data_size, - PACKET_LENGTH_POS, PACKET_LENGTH_LEN); - - memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1)); -} - -/** - * kbasep_tlstream_packet_number_update - update the packet number - * @buffer: pointer to the buffer - * @counter: value of packet counter for this packet's stream - * - * Function updates packet number embedded within the packet placed in the - * given buffer. - */ -static void kbasep_tlstream_packet_number_update(char *buffer, u32 counter) -{ - KBASE_DEBUG_ASSERT(buffer); - - memcpy(&buffer[PACKET_HEADER_SIZE], &counter, sizeof(counter)); -} - -/** - * kbasep_timeline_stream_reset - reset stream - * @stream: pointer to the stream structure - * - * Function discards all pending messages and resets packet counters. - */ -static void kbasep_timeline_stream_reset(struct tl_stream *stream) -{ - unsigned int i; - - for (i = 0; i < PACKET_COUNT; i++) { - if (stream->numbered) - atomic_set( - &stream->buffer[i].size, - PACKET_HEADER_SIZE + - PACKET_NUMBER_SIZE); - else - atomic_set(&stream->buffer[i].size, PACKET_HEADER_SIZE); - } - - atomic_set(&stream->wbi, 0); - atomic_set(&stream->rbi, 0); -} - -/** - * kbasep_timeline_stream_init - initialize timeline stream - * @stream: pointer to the stream structure - * @stream_type: stream type - */ -static void kbasep_timeline_stream_init( - struct tl_stream *stream, - enum tl_stream_type stream_type) -{ - unsigned int i; - - KBASE_DEBUG_ASSERT(stream); - KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type); - - spin_lock_init(&stream->lock); - - /* All packets carrying tracepoints shall be numbered. 
*/ - if (TL_PACKET_TYPE_BODY == tl_stream_cfg[stream_type].pkt_type) - stream->numbered = 1; - else - stream->numbered = 0; - - for (i = 0; i < PACKET_COUNT; i++) - kbasep_tlstream_packet_header_setup( - stream->buffer[i].data, - tl_stream_cfg[stream_type].pkt_family, - tl_stream_cfg[stream_type].pkt_class, - tl_stream_cfg[stream_type].pkt_type, - tl_stream_cfg[stream_type].stream_id, - stream->numbered); - - kbasep_timeline_stream_reset(tl_stream[stream_type]); -} - -/** - * kbasep_timeline_stream_term - terminate timeline stream - * @stream: pointer to the stream structure - */ -static void kbasep_timeline_stream_term(struct tl_stream *stream) -{ - KBASE_DEBUG_ASSERT(stream); -} - -/** - * kbasep_tlstream_msgbuf_submit - submit packet to the user space - * @stream: pointer to the stream structure - * @wb_idx_raw: write buffer index - * @wb_size: length of data stored in current buffer - * - * Function updates currently written buffer with packet header. Then write - * index is incremented and buffer is handled to user space. Parameters - * of new buffer are returned using provided arguments. - * - * Return: length of data in new buffer - * - * Warning: User must update the stream structure with returned value. - */ -static size_t kbasep_tlstream_msgbuf_submit( - struct tl_stream *stream, - unsigned int wb_idx_raw, - unsigned int wb_size) -{ - unsigned int rb_idx_raw = atomic_read(&stream->rbi); - unsigned int wb_idx = wb_idx_raw % PACKET_COUNT; - - kbasep_tlstream_packet_header_update( - stream->buffer[wb_idx].data, - wb_size - PACKET_HEADER_SIZE); - - if (stream->numbered) - kbasep_tlstream_packet_number_update( - stream->buffer[wb_idx].data, - wb_idx_raw); - - /* Increasing write buffer index will expose this packet to the reader. - * As stream->lock is not taken on reader side we must make sure memory - * is updated correctly before this will happen. 
*/ - smp_wmb(); - wb_idx_raw++; - atomic_set(&stream->wbi, wb_idx_raw); - - /* Inform user that packets are ready for reading. */ - wake_up_interruptible(&tl_event_queue); - - /* Detect and mark overflow in this stream. */ - if (PACKET_COUNT == wb_idx_raw - rb_idx_raw) { - /* Reader side depends on this increment to correctly handle - * overflows. The value shall be updated only if it was not - * modified by the reader. The data holding buffer will not be - * updated before stream->lock is released, however size of the - * buffer will. Make sure this increment is globally visible - * before information about selected write buffer size. */ - atomic_cmpxchg(&stream->rbi, rb_idx_raw, rb_idx_raw + 1); - } - - wb_size = PACKET_HEADER_SIZE; - if (stream->numbered) - wb_size += PACKET_NUMBER_SIZE; - - return wb_size; -} - -/** - * kbasep_tlstream_msgbuf_acquire - lock selected stream and reserves buffer - * @stream_type: type of the stream that shall be locked - * @msg_size: message size - * @flags: pointer to store flags passed back on stream release - * - * Function will lock the stream and reserve the number of bytes requested - * in msg_size for the user. - * - * Return: pointer to the buffer where message can be stored - * - * Warning: Stream must be relased with kbasep_tlstream_msgbuf_release(). - * Only atomic operations are allowed while stream is locked - * (i.e. do not use any operation that may sleep). 
- */ -static char *kbasep_tlstream_msgbuf_acquire( - enum tl_stream_type stream_type, - size_t msg_size, - unsigned long *flags) -{ - struct tl_stream *stream; - unsigned int wb_idx_raw; - unsigned int wb_idx; - size_t wb_size; - - KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type); - KBASE_DEBUG_ASSERT( - PACKET_SIZE - PACKET_HEADER_SIZE - PACKET_NUMBER_SIZE >= - msg_size); - - stream = tl_stream[stream_type]; - - spin_lock_irqsave(&stream->lock, *flags); - - wb_idx_raw = atomic_read(&stream->wbi); - wb_idx = wb_idx_raw % PACKET_COUNT; - wb_size = atomic_read(&stream->buffer[wb_idx].size); - - /* Select next buffer if data will not fit into current one. */ - if (PACKET_SIZE < wb_size + msg_size) { - wb_size = kbasep_tlstream_msgbuf_submit( - stream, wb_idx_raw, wb_size); - wb_idx = (wb_idx_raw + 1) % PACKET_COUNT; - } - - /* Reserve space in selected buffer. */ - atomic_set(&stream->buffer[wb_idx].size, wb_size + msg_size); - -#if MALI_UNIT_TEST - atomic_add(msg_size, &tlstream_bytes_generated); -#endif /* MALI_UNIT_TEST */ - - return &stream->buffer[wb_idx].data[wb_size]; -} - -/** - * kbasep_tlstream_msgbuf_release - unlock selected stream - * @stream_type: type of the stream that shall be locked - * @flags: value obtained during stream acquire - * - * Function releases stream that has been previously locked with a call to - * kbasep_tlstream_msgbuf_acquire(). - */ -static void kbasep_tlstream_msgbuf_release( - enum tl_stream_type stream_type, - unsigned long flags) -{ - struct tl_stream *stream; - - KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type); - - stream = tl_stream[stream_type]; - stream->last_write_time = kbasep_tlstream_get_timestamp(); - - spin_unlock_irqrestore(&stream->lock, flags); -} - -/*****************************************************************************/ - -/** - * kbasep_tlstream_flush_stream - flush stream - * @stype: type of stream to be flushed - * - * Flush pending data in timeline stream. 
- */ -static void kbasep_tlstream_flush_stream(enum tl_stream_type stype) -{ - struct tl_stream *stream = tl_stream[stype]; - unsigned long flags; - unsigned int wb_idx_raw; - unsigned int wb_idx; - size_t wb_size; - size_t min_size = PACKET_HEADER_SIZE; - - if (stream->numbered) - min_size += PACKET_NUMBER_SIZE; - - spin_lock_irqsave(&stream->lock, flags); - - wb_idx_raw = atomic_read(&stream->wbi); - wb_idx = wb_idx_raw % PACKET_COUNT; - wb_size = atomic_read(&stream->buffer[wb_idx].size); - - if (wb_size > min_size) { - wb_size = kbasep_tlstream_msgbuf_submit( - stream, wb_idx_raw, wb_size); - wb_idx = (wb_idx_raw + 1) % PACKET_COUNT; - atomic_set(&stream->buffer[wb_idx].size, wb_size); - } - spin_unlock_irqrestore(&stream->lock, flags); -} - -/** - * kbasep_tlstream_autoflush_timer_callback - autoflush timer callback - * @data: unused - * - * Timer is executed periodically to check if any of the stream contains - * buffer ready to be submitted to user space. - */ -static void kbasep_tlstream_autoflush_timer_callback(unsigned long data) -{ - u64 timestamp = kbasep_tlstream_get_timestamp(); - enum tl_stream_type stype; - int rcode; - - CSTD_UNUSED(data); - - for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++) { - struct tl_stream *stream = tl_stream[stype]; - unsigned long flags; - unsigned int wb_idx_raw; - unsigned int wb_idx; - size_t wb_size; - size_t min_size = PACKET_HEADER_SIZE; - - if (stream->numbered) - min_size += PACKET_NUMBER_SIZE; - - spin_lock_irqsave(&stream->lock, flags); - - wb_idx_raw = atomic_read(&stream->wbi); - wb_idx = wb_idx_raw % PACKET_COUNT; - wb_size = atomic_read(&stream->buffer[wb_idx].size); - - if ( - (wb_size > min_size) && - ( - timestamp - stream->last_write_time > - AUTOFLUSH_TIMEOUT)) { - - wb_size = kbasep_tlstream_msgbuf_submit( - stream, wb_idx_raw, wb_size); - wb_idx = (wb_idx_raw + 1) % PACKET_COUNT; - atomic_set(&stream->buffer[wb_idx].size, wb_size); - } - spin_unlock_irqrestore(&stream->lock, flags); - } - - if 
(atomic_read(&autoflush_timer_active)) - rcode = mod_timer( - &autoflush_timer, - jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL)); - CSTD_UNUSED(rcode); -} - -/** - * kbasep_tlstream_packet_pending - check timeline streams for pending packets - * @stype: pointer to variable where stream type will be placed - * @rb_idx_raw: pointer to variable where read buffer index will be placed - * - * Function checks all streams for pending packets. It will stop as soon as - * packet ready to be submitted to user space is detected. Variables under - * pointers, passed as the parameters to this function will be updated with - * values pointing to right stream and buffer. - * - * Return: non-zero if any of timeline streams has at last one packet ready - */ -static int kbasep_tlstream_packet_pending( - enum tl_stream_type *stype, - unsigned int *rb_idx_raw) -{ - int pending = 0; - - KBASE_DEBUG_ASSERT(stype); - KBASE_DEBUG_ASSERT(rb_idx_raw); - - for ( - *stype = 0; - (*stype < TL_STREAM_TYPE_COUNT) && !pending; - (*stype)++) { - if (NULL != tl_stream[*stype]) { - *rb_idx_raw = atomic_read(&tl_stream[*stype]->rbi); - /* Read buffer index may be updated by writer in case of - * overflow. Read and write buffer indexes must be - * loaded in correct order. 
*/ - smp_rmb(); - if (atomic_read(&tl_stream[*stype]->wbi) != *rb_idx_raw) - pending = 1; - } - } - (*stype)--; - - return pending; -} - -/** - * kbasep_tlstream_read - copy data from streams to buffer provided by user - * @filp: pointer to file structure (unused) - * @buffer: pointer to the buffer provided by user - * @size: maximum amount of data that can be stored in the buffer - * @f_pos: pointer to file offset (unused) - * - * Return: number of bytes stored in the buffer - */ -static ssize_t kbasep_tlstream_read( - struct file *filp, - char __user *buffer, - size_t size, - loff_t *f_pos) -{ - ssize_t copy_len = 0; - - KBASE_DEBUG_ASSERT(filp); - KBASE_DEBUG_ASSERT(buffer); - KBASE_DEBUG_ASSERT(f_pos); - CSTD_UNUSED(filp); - - if ((0 > *f_pos) || (PACKET_SIZE > size)) - return -EINVAL; - - mutex_lock(&tl_reader_lock); - - while (copy_len < size) { - enum tl_stream_type stype; - unsigned int rb_idx_raw; - unsigned int rb_idx; - size_t rb_size; - - /* If we don't have any data yet, wait for packet to be - * submitted. If we already read some packets and there is no - * packet pending return back to user. */ - if (0 < copy_len) { - if (!kbasep_tlstream_packet_pending( - &stype, - &rb_idx_raw)) - break; - } else { - if (wait_event_interruptible( - tl_event_queue, - kbasep_tlstream_packet_pending( - &stype, - &rb_idx_raw))) { - copy_len = -ERESTARTSYS; - break; - } - } - - /* Check if this packet fits into the user buffer. - * If so copy its content. */ - rb_idx = rb_idx_raw % PACKET_COUNT; - rb_size = atomic_read(&tl_stream[stype]->buffer[rb_idx].size); - if (rb_size > size - copy_len) - break; - if (copy_to_user( - &buffer[copy_len], - tl_stream[stype]->buffer[rb_idx].data, - rb_size)) { - copy_len = -EFAULT; - break; - } - - /* Verify if there was no overflow in selected stream. Make sure - * that if incorrect size was used we will know about it. 
*/ - smp_rmb(); - if (atomic_read(&tl_stream[stype]->rbi) == rb_idx_raw) { - copy_len += rb_size; - atomic_inc(&tl_stream[stype]->rbi); - -#if MALI_UNIT_TEST - atomic_add(rb_size, &tlstream_bytes_collected); -#endif /* MALI_UNIT_TEST */ - } - } - - mutex_unlock(&tl_reader_lock); - - return copy_len; -} - -/** - * kbasep_tlstream_poll - poll timeline stream for packets - * @filp: pointer to file structure - * @wait: pointer to poll table - * Return: POLLIN if data can be read without blocking, otherwise zero - */ -static unsigned int kbasep_tlstream_poll(struct file *filp, poll_table *wait) -{ - enum tl_stream_type stream_type; - unsigned int rb_idx; - - KBASE_DEBUG_ASSERT(filp); - KBASE_DEBUG_ASSERT(wait); - - poll_wait(filp, &tl_event_queue, wait); - if (kbasep_tlstream_packet_pending(&stream_type, &rb_idx)) - return POLLIN; - return 0; -} - -/** - * kbasep_tlstream_release - release timeline stream descriptor - * @inode: pointer to inode structure - * @filp: pointer to file structure - * - * Return always return zero - */ -static int kbasep_tlstream_release(struct inode *inode, struct file *filp) -{ - KBASE_DEBUG_ASSERT(inode); - KBASE_DEBUG_ASSERT(filp); - CSTD_UNUSED(inode); - CSTD_UNUSED(filp); - atomic_set(&tlstream_busy, 0); - return 0; -} - -/** - * kbasep_tlstream_timeline_header - prepare timeline header stream packet - * @stream_type: type of the stream that will carry header data - * @tp_desc: pointer to array with tracepoint descriptors - * @tp_count: number of descriptors in the given array - * - * Functions fills in information about tracepoints stored in body stream - * associated with this header stream. 
- */ -static void kbasep_tlstream_timeline_header( - enum tl_stream_type stream_type, - const struct tp_desc *tp_desc, - u32 tp_count) -{ - const u8 tv = KBASEP_TLSTREAM_VERSION; /* tlstream version */ - const u8 ps = sizeof(void *); /* pointer size */ - size_t msg_size = sizeof(tv) + sizeof(ps) + sizeof(tp_count); - char *buffer; - size_t pos = 0; - unsigned long flags; - unsigned int i; - - KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type); - KBASE_DEBUG_ASSERT(tp_desc); - - /* Calculate the size of the timeline message. */ - for (i = 0; i < tp_count; i++) { - msg_size += sizeof(tp_desc[i].id); - msg_size += - strnlen(tp_desc[i].id_str, STRLEN_MAX) + - sizeof(char) + sizeof(u32); - msg_size += - strnlen(tp_desc[i].name, STRLEN_MAX) + - sizeof(char) + sizeof(u32); - msg_size += - strnlen(tp_desc[i].arg_types, STRLEN_MAX) + - sizeof(char) + sizeof(u32); - msg_size += - strnlen(tp_desc[i].arg_names, STRLEN_MAX) + - sizeof(char) + sizeof(u32); - } - - KBASE_DEBUG_ASSERT(PACKET_SIZE - PACKET_HEADER_SIZE >= msg_size); - - buffer = kbasep_tlstream_msgbuf_acquire(stream_type, msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &tv, sizeof(tv)); - pos = kbasep_tlstream_write_bytes(buffer, pos, &ps, sizeof(ps)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &tp_count, sizeof(tp_count)); - - for (i = 0; i < tp_count; i++) { - pos = kbasep_tlstream_write_bytes( - buffer, pos, - &tp_desc[i].id, sizeof(tp_desc[i].id)); - pos = kbasep_tlstream_write_string( - buffer, pos, - tp_desc[i].id_str, msg_size - pos); - pos = kbasep_tlstream_write_string( - buffer, pos, - tp_desc[i].name, msg_size - pos); - pos = kbasep_tlstream_write_string( - buffer, pos, - tp_desc[i].arg_types, msg_size - pos); - pos = kbasep_tlstream_write_string( - buffer, pos, - tp_desc[i].arg_names, msg_size - pos); - } - - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(stream_type, flags); - - /* We don't expect any more data to 
be read in this stream. - * As header stream must be read before its associated body stream, - * make this packet visible to the user straightaway. */ - kbasep_tlstream_flush_stream(stream_type); -} - -/*****************************************************************************/ - -int kbase_tlstream_init(void) -{ - enum tl_stream_type i; - int rcode; - - /* Prepare stream structures. */ - for (i = 0; i < TL_STREAM_TYPE_COUNT; i++) { - tl_stream[i] = kmalloc(sizeof(**tl_stream), GFP_KERNEL); - if (!tl_stream[i]) - break; - kbasep_timeline_stream_init(tl_stream[i], i); - } - if (TL_STREAM_TYPE_COUNT > i) { - for (; i > 0; i--) { - kbasep_timeline_stream_term(tl_stream[i - 1]); - kfree(tl_stream[i - 1]); - } - return -ENOMEM; - } - - /* Initialize autoflush timer. */ - atomic_set(&autoflush_timer_active, 1); - setup_timer(&autoflush_timer, - kbasep_tlstream_autoflush_timer_callback, - 0); - rcode = mod_timer( - &autoflush_timer, - jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL)); - CSTD_UNUSED(rcode); - - return 0; -} - -void kbase_tlstream_term(void) -{ - enum tl_stream_type i; - - atomic_set(&autoflush_timer_active, 0); - del_timer_sync(&autoflush_timer); - - for (i = 0; i < TL_STREAM_TYPE_COUNT; i++) { - kbasep_timeline_stream_term(tl_stream[i]); - kfree(tl_stream[i]); - } -} - -int kbase_tlstream_acquire(struct kbase_context *kctx, int *fd) -{ - if (0 == atomic_cmpxchg(&tlstream_busy, 0, 1)) { - *fd = anon_inode_getfd( - "[mali_tlstream]", - &kbasep_tlstream_fops, - kctx, - O_RDONLY | O_CLOEXEC); - if (0 > *fd) { - atomic_set(&tlstream_busy, 0); - return *fd; - } - - /* Reset and initialize header streams. 
*/ - kbasep_timeline_stream_reset( - tl_stream[TL_STREAM_TYPE_OBJ_HEADER]); - kbasep_timeline_stream_reset( - tl_stream[TL_STREAM_TYPE_OBJ_SUMMARY]); - kbasep_timeline_stream_reset( - tl_stream[TL_STREAM_TYPE_AUX_HEADER]); - kbasep_tlstream_timeline_header( - TL_STREAM_TYPE_OBJ_HEADER, - tp_desc_obj, - ARRAY_SIZE(tp_desc_obj)); - kbasep_tlstream_timeline_header( - TL_STREAM_TYPE_AUX_HEADER, - tp_desc_aux, - ARRAY_SIZE(tp_desc_aux)); - } else { - *fd = -EBUSY; - } - - return 0; -} - -void kbase_tlstream_flush_streams(void) -{ - enum tl_stream_type stype; - - for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++) - kbasep_tlstream_flush_stream(stype); -} - -void kbase_tlstream_reset_body_streams(void) -{ - kbasep_timeline_stream_reset( - tl_stream[TL_STREAM_TYPE_OBJ]); - kbasep_timeline_stream_reset( - tl_stream[TL_STREAM_TYPE_AUX]); -} - -#if MALI_UNIT_TEST -void kbase_tlstream_stats(u32 *bytes_collected, u32 *bytes_generated) -{ - KBASE_DEBUG_ASSERT(bytes_collected); - KBASE_DEBUG_ASSERT(bytes_generated); - *bytes_collected = atomic_read(&tlstream_bytes_collected); - *bytes_generated = atomic_read(&tlstream_bytes_generated); -} -#endif /* MALI_UNIT_TEST */ - -/*****************************************************************************/ - -void kbase_tlstream_tl_summary_new_ctx(void *context, u32 nr) -{ - const u32 msg_id = KBASE_TL_NEW_CTX; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(nr); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ_SUMMARY, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &context, sizeof(context)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &nr, sizeof(nr)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - 
kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags); -} - -void kbase_tlstream_tl_summary_new_gpu(void *gpu, u32 id, u32 core_count) -{ - const u32 msg_id = KBASE_TL_NEW_GPU; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(gpu) + sizeof(id) + - sizeof(core_count); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ_SUMMARY, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &gpu, sizeof(gpu)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &id, sizeof(id)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &core_count, sizeof(core_count)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags); -} - -void kbase_tlstream_tl_summary_new_lpu(void *lpu, u32 nr, u32 fn) -{ - const u32 msg_id = KBASE_TL_NEW_LPU; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(lpu) + sizeof(nr) + - sizeof(fn); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ_SUMMARY, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &lpu, sizeof(lpu)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &nr, sizeof(nr)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &fn, sizeof(fn)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags); -} - -void kbase_tlstream_tl_summary_lifelink_lpu_gpu(void *lpu, void *gpu) -{ - const u32 msg_id = KBASE_TL_LIFELINK_LPU_GPU; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(lpu) + 
sizeof(gpu); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ_SUMMARY, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &lpu, sizeof(lpu)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &gpu, sizeof(gpu)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags); -} - -void kbase_tlstream_tl_summary_new_as(void *as, u32 nr) -{ - const u32 msg_id = KBASE_TL_NEW_AS; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(nr); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ_SUMMARY, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &as, sizeof(as)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &nr, sizeof(nr)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags); -} - -void kbase_tlstream_tl_summary_lifelink_as_gpu(void *as, void *gpu) -{ - const u32 msg_id = KBASE_TL_LIFELINK_AS_GPU; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(gpu); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ_SUMMARY, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &as, sizeof(as)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &gpu, sizeof(gpu)); - 
KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags); -} - -/*****************************************************************************/ - -void kbase_tlstream_tl_new_ctx(void *context, u32 nr) -{ - const u32 msg_id = KBASE_TL_NEW_CTX; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(nr); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &context, sizeof(context)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &nr, sizeof(nr)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_new_atom(void *atom, u32 nr) -{ - const u32 msg_id = KBASE_TL_NEW_ATOM; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(nr); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &atom, sizeof(atom)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &nr, sizeof(nr)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_del_ctx(void *context) -{ - const u32 msg_id = KBASE_TL_DEL_CTX; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(context); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - 
KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &context, sizeof(context)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_del_atom(void *atom) -{ - const u32 msg_id = KBASE_TL_DEL_ATOM; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(atom); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &atom, sizeof(atom)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_ret_gpu_ctx(void *gpu, void *context) -{ - const u32 msg_id = KBASE_TL_RET_GPU_CTX; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(gpu) + sizeof(context); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &gpu, sizeof(gpu)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &context, sizeof(context)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_ret_atom_ctx(void *atom, void *context) -{ - const u32 msg_id = KBASE_TL_RET_ATOM_CTX; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(context); - unsigned long flags; - char *buffer; - 
size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &atom, sizeof(atom)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &context, sizeof(context)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_ret_atom_lpu(void *atom, void *lpu) -{ - const u32 msg_id = KBASE_TL_RET_ATOM_LPU; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(lpu); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &atom, sizeof(atom)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &lpu, sizeof(lpu)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_nret_gpu_ctx(void *gpu, void *context) -{ - const u32 msg_id = KBASE_TL_NRET_GPU_CTX; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(gpu) + sizeof(context); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &gpu, sizeof(gpu)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &context, sizeof(context)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - 
kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_nret_atom_ctx(void *atom, void *context) -{ - const u32 msg_id = KBASE_TL_NRET_ATOM_CTX; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(context); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &atom, sizeof(atom)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &context, sizeof(context)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_nret_atom_lpu(void *atom, void *lpu) -{ - const u32 msg_id = KBASE_TL_NRET_ATOM_LPU; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(lpu); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &atom, sizeof(atom)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &lpu, sizeof(lpu)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_ret_as_ctx(void *as, void *ctx) -{ - const u32 msg_id = KBASE_TL_RET_AS_CTX; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(ctx); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, 
pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &as, sizeof(as)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &ctx, sizeof(ctx)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_nret_as_ctx(void *as, void *ctx) -{ - const u32 msg_id = KBASE_TL_NRET_AS_CTX; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(ctx); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &as, sizeof(as)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &ctx, sizeof(ctx)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_ret_atom_as(void *atom, void *as) -{ - const u32 msg_id = KBASE_TL_RET_ATOM_AS; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(as); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &atom, sizeof(atom)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &as, sizeof(as)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_nret_atom_as(void *atom, void *as) -{ - const u32 msg_id = KBASE_TL_NRET_ATOM_AS; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + 
sizeof(atom) + sizeof(as); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &atom, sizeof(atom)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &as, sizeof(as)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_attrib_atom_config( - void *atom, u64 jd, u64 affinity, u32 config) -{ - const u32 msg_id = KBASE_TL_ATTRIB_ATOM_CONFIG; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(atom) + - sizeof(jd) + sizeof(affinity) + sizeof(config); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &atom, sizeof(atom)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &jd, sizeof(jd)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &affinity, sizeof(affinity)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &config, sizeof(config)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_tl_attrib_as_config( - void *as, u64 transtab, u64 memattr, u64 transcfg) -{ - const u32 msg_id = KBASE_TL_ATTRIB_AS_CONFIG; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(as) + - sizeof(transtab) + sizeof(memattr) + sizeof(transcfg); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - 
KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &as, sizeof(as)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &transtab, sizeof(transtab)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &memattr, sizeof(memattr)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &transcfg, sizeof(transcfg)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -void kbase_tlstream_jd_gpu_soft_reset(void *gpu) -{ - const u32 msg_id = KBASE_JD_GPU_SOFT_RESET; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(gpu); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_OBJ, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &gpu, sizeof(gpu)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); -} - -/*****************************************************************************/ - -void kbase_tlstream_aux_pm_state(u32 core_type, u64 state) -{ - const u32 msg_id = KBASE_AUX_PM_STATE; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(core_type) + - sizeof(state); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_AUX, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &core_type, sizeof(core_type)); - pos = kbasep_tlstream_write_bytes(buffer, pos, &state, sizeof(state)); - 
KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags); -} - -void kbase_tlstream_aux_job_softstop(u32 js_id) -{ - const u32 msg_id = KBASE_AUX_JOB_SOFTSTOP; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(js_id); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_AUX, - msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes(buffer, pos, &js_id, sizeof(js_id)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags); -} - -void kbase_tlstream_aux_pagefault(u32 mmu_as, u32 page_count) -{ - const u32 msg_id = KBASE_AUX_PAGEFAULT; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(mmu_as) + - sizeof(page_count); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_AUX, msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes(buffer, pos, &mmu_as, sizeof(mmu_as)); - pos = kbasep_tlstream_write_bytes( - buffer, pos, &page_count, sizeof(page_count)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags); -} - -void kbase_tlstream_aux_pagesalloc(s64 page_count_change) -{ - const u32 msg_id = KBASE_AUX_PAGESALLOC; - const size_t msg_size = - sizeof(msg_id) + sizeof(u64) + sizeof(page_count_change); - unsigned long flags; - char *buffer; - size_t pos = 0; - - buffer = kbasep_tlstream_msgbuf_acquire( - TL_STREAM_TYPE_AUX, msg_size, &flags); - KBASE_DEBUG_ASSERT(buffer); - - pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); - pos = 
kbasep_tlstream_write_timestamp(buffer, pos); - pos = kbasep_tlstream_write_bytes( - buffer, pos, - &page_count_change, sizeof(page_count_change)); - KBASE_DEBUG_ASSERT(msg_size == pos); - - kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags); -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_tlstream.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_tlstream.h deleted file mode 100755 index 30171044ee1db..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_tlstream.h +++ /dev/null @@ -1,380 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#if !defined(_KBASE_TLSTREAM_H) -#define _KBASE_TLSTREAM_H - -#include - -/*****************************************************************************/ - -/** - * kbase_tlstream_init - initialize timeline infrastructure in kernel - * Return: zero on success, negative number on error - */ -int kbase_tlstream_init(void); - -/** - * kbase_tlstream_term - terminate timeline infrastructure in kernel - * - * Timeline need have to been previously enabled with kbase_tlstream_init(). - */ -void kbase_tlstream_term(void); - -/** - * kbase_tlstream_acquire - acquire timeline stream file descriptor - * @kctx: kernel common context - * @fd: timeline stream file descriptor - * - * This descriptor is meant to be used by userspace timeline to gain access to - * kernel timeline stream. This stream is later broadcasted by user space to the - * timeline client. 
- * Only one entity can own the descriptor at any given time. Descriptor shall be - * closed if unused. If descriptor cannot be obtained (i.e. when it is already - * being used) argument fd will contain negative value. - * - * Return: zero on success (this does not necessarily mean that stream - * descriptor could be returned), negative number on error - */ -int kbase_tlstream_acquire(struct kbase_context *kctx, int *fd); - -/** - * kbase_tlstream_flush_streams - flush timeline streams. - * - * Function will flush pending data in all timeline streams. - */ -void kbase_tlstream_flush_streams(void); - -/** - * kbase_tlstream_reset_body_streams - reset timeline body streams. - * - * Function will discard pending data in all timeline body streams. - */ -void kbase_tlstream_reset_body_streams(void); - -#if MALI_UNIT_TEST -/** - * kbase_tlstream_test - start timeline stream data generator - * @tpw_count: number of trace point writers in each context - * @msg_delay: time delay in milliseconds between trace points written by one - * writer - * @msg_count: number of trace points written by one writer - * @aux_msg: if non-zero aux messages will be included - * - * This test starts a requested number of asynchronous writers in both IRQ and - * thread context. Each writer will generate required number of test - * tracepoints (tracepoints with embedded information about writer that - * should be verified by user space reader). Tracepoints will be emitted in - * all timeline body streams. If aux_msg is non-zero writer will also - * generate not testable tracepoints (tracepoints without information about - * writer). These tracepoints are used to check correctness of remaining - * timeline message generating functions. Writer will wait requested time - * between generating another set of messages. This call blocks until all - * writers finish. 
- */ -void kbase_tlstream_test( - unsigned int tpw_count, - unsigned int msg_delay, - unsigned int msg_count, - int aux_msg); - -/** - * kbase_tlstream_stats - read timeline stream statistics - * @bytes_collected: will hold number of bytes read by the user - * @bytes_generated: will hold number of bytes generated by trace points - */ -void kbase_tlstream_stats(u32 *bytes_collected, u32 *bytes_generated); -#endif /* MALI_UNIT_TEST */ - -/*****************************************************************************/ - -/** - * kbase_tlstream_tl_summary_new_ctx - create context object in timeline - * summary - * @context: name of the context object - * @nr: context number - * - * Function emits a timeline message informing about context creation. Context - * is created with context number (its attribute), that can be used to link - * kbase context with userspace context. - * This message is directed to timeline summary stream. - */ -void kbase_tlstream_tl_summary_new_ctx(void *context, u32 nr); - -/** - * kbase_tlstream_tl_summary_new_gpu - create GPU object in timeline summary - * @gpu: name of the GPU object - * @id: id value of this GPU - * @core_count: number of cores this GPU hosts - * - * Function emits a timeline message informing about GPU creation. GPU is - * created with two attributes: id and core count. - * This message is directed to timeline summary stream. - */ -void kbase_tlstream_tl_summary_new_gpu(void *gpu, u32 id, u32 core_count); - -/** - * kbase_tlstream_tl_summary_new_lpu - create LPU object in timeline summary - * @lpu: name of the Logical Processing Unit object - * @nr: sequential number assigned to this LPU - * @fn: property describing this LPU's functional abilities - * - * Function emits a timeline message informing about LPU creation. LPU is - * created with two attributes: number linking this LPU with GPU's job slot - * and function bearing information about this LPU abilities. - * This message is directed to timeline summary stream. 
- */ -void kbase_tlstream_tl_summary_new_lpu(void *lpu, u32 nr, u32 fn); - -/** - * kbase_tlstream_tl_summary_lifelink_lpu_gpu - lifelink LPU object to GPU - * @lpu: name of the Logical Processing Unit object - * @gpu: name of the GPU object - * - * Function emits a timeline message informing that LPU object shall be deleted - * along with GPU object. - * This message is directed to timeline summary stream. - */ -void kbase_tlstream_tl_summary_lifelink_lpu_gpu(void *lpu, void *gpu); - -/** - * kbase_tlstream_tl_summary_new_as - create address space object in timeline summary - * @as: name of the address space object - * @nr: sequential number assigned to this address space - * - * Function emits a timeline message informing about address space creation. - * Address space is created with one attribute: number identifying this - * address space. - * This message is directed to timeline summary stream. - */ -void kbase_tlstream_tl_summary_new_as(void *as, u32 nr); - -/** - * kbase_tlstream_tl_summary_lifelink_as_gpu - lifelink address space object to GPU - * @as: name of the address space object - * @gpu: name of the GPU object - * - * Function emits a timeline message informing that address space object - * shall be deleted along with GPU object. - * This message is directed to timeline summary stream. - */ -void kbase_tlstream_tl_summary_lifelink_as_gpu(void *as, void *gpu); - -/** - * kbase_tlstream_tl_new_ctx - create context object in timeline - * @context: name of the context object - * @nr: context number - * - * Function emits a timeline message informing about context creation. Context - * is created with context number (its attribute), that can be used to link - * kbase context with userspace context. 
- */ -void kbase_tlstream_tl_new_ctx(void *context, u32 nr); - -/** - * kbase_tlstream_tl_new_atom - create atom object in timeline - * @atom: name of the atom object - * @nr: sequential number assigned to this atom - * - * Function emits a timeline message informing about atom creation. Atom is - * created with atom number (its attribute) that links it with actual work - * bucket id understood by hardware. - */ -void kbase_tlstream_tl_new_atom(void *atom, u32 nr); - -/** - * kbase_tlstream_tl_del_ctx - destroy context object in timeline - * @context: name of the context object - * - * Function emits a timeline message informing that context object ceased to - * exist. - */ -void kbase_tlstream_tl_del_ctx(void *context); - -/** - * kbase_tlstream_tl_del_atom - destroy atom object in timeline - * @atom: name of the atom object - * - * Function emits a timeline message informing that atom object ceased to - * exist. - */ -void kbase_tlstream_tl_del_atom(void *atom); - -/** - * kbase_tlstream_tl_ret_gpu_ctx - retain GPU by context - * @gpu: name of the GPU object - * @context: name of the context object - * - * Function emits a timeline message informing that GPU object is being held - * by context and must not be deleted unless it is released. - */ -void kbase_tlstream_tl_ret_gpu_ctx(void *gpu, void *context); - -/** - * kbase_tlstream_tl_ret_atom_ctx - retain atom by context - * @atom: name of the atom object - * @context: name of the context object - * - * Function emits a timeline message informing that atom object is being held - * by context and must not be deleted unless it is released. - */ -void kbase_tlstream_tl_ret_atom_ctx(void *atom, void *context); - -/** - * kbase_tlstream_tl_ret_atom_lpu - retain atom by LPU - * @atom: name of the atom object - * @lpu: name of the Logical Processing Unit object - * - * Function emits a timeline message informing that atom object is being held - * by LPU and must not be deleted unless it is released. 
- */ -void kbase_tlstream_tl_ret_atom_lpu(void *atom, void *lpu); - -/** - * kbase_tlstream_tl_nret_gpu_ctx - release GPU by context - * @gpu: name of the GPU object - * @context: name of the context object - * - * Function emits a timeline message informing that GPU object is being released - * by context. - */ -void kbase_tlstream_tl_nret_gpu_ctx(void *gpu, void *context); - -/** - * kbase_tlstream_tl_nret_atom_ctx - release atom by context - * @atom: name of the atom object - * @context: name of the context object - * - * Function emits a timeline message informing that atom object is being - * released by context. - */ -void kbase_tlstream_tl_nret_atom_ctx(void *atom, void *context); - -/** - * kbase_tlstream_tl_nret_atom_lpu - release atom by LPU - * @atom: name of the atom object - * @lpu: name of the Logical Processing Unit object - * - * Function emits a timeline message informing that atom object is being - * released by LPU. - */ -void kbase_tlstream_tl_nret_atom_lpu(void *atom, void *lpu); - -/** - * kbase_tlstream_tl_ret_as_ctx - lifelink address space object to context - * @as: name of the address space object - * @ctx: name of the context object - * - * Function emits a timeline message informing that address space object - * is being held by the context object. - */ -void kbase_tlstream_tl_ret_as_ctx(void *as, void *ctx); - -/** - * kbase_tlstream_tl_nret_as_ctx - release address space by context - * @as: name of the address space object - * @ctx: name of the context object - * - * Function emits a timeline message informing that address space object - * is being released by atom. - */ -void kbase_tlstream_tl_nret_as_ctx(void *as, void *ctx); - -/** - * kbase_tlstream_tl_ret_atom_as - retain atom by address space - * @atom: name of the atom object - * @as: name of the address space object - * - * Function emits a timeline message informing that atom object is being held - * by address space and must not be deleted unless it is released. 
- */ -void kbase_tlstream_tl_ret_atom_as(void *atom, void *as); - -/** - * kbase_tlstream_tl_nret_atom_as - release atom by address space - * @atom: name of the atom object - * @as: name of the address space object - * - * Function emits a timeline message informing that atom object is being - * released by address space. - */ -void kbase_tlstream_tl_nret_atom_as(void *atom, void *as); - -/** - * kbase_tlstream_tl_attrib_atom_config - atom job slot attributes - * @atom: name of the atom object - * @jd: job descriptor address - * @affinity: job affinity - * @config: job config - * - * Function emits a timeline message containing atom attributes. - */ -void kbase_tlstream_tl_attrib_atom_config( - void *atom, u64 jd, u64 affinity, u32 config); - -/** - * kbase_tlstream_tl_attrib_as_config - address space attributes - * @as: assigned address space - * @transtab: configuration of the TRANSTAB register - * @memattr: configuration of the MEMATTR register - * @transcfg: configuration of the TRANSCFG register (or zero if not present) - * - * Function emits a timeline message containing address space attributes. - */ -void kbase_tlstream_tl_attrib_as_config( - void *as, u64 transtab, u64 memattr, u64 transcfg); - -/** - * kbase_tlstream_jd_gpu_soft_reset - The GPU is being soft reset - * @gpu: name of the GPU object - * - * This imperative tracepoint is specific to job dumping. - * Function emits a timeline message indicating GPU soft reset. 
- */ -void kbase_tlstream_jd_gpu_soft_reset(void *gpu); - -/** - * kbase_tlstream_aux_pm_state - timeline message: power management state - * @core_type: core type (shader, tiler, l2 cache, l3 cache) - * @state: 64bits bitmask reporting power state of the cores (1-ON, 0-OFF) - */ -void kbase_tlstream_aux_pm_state(u32 core_type, u64 state); - -/** - * kbase_tlstream_aux_job_softstop - soft job stop occurred - * @js_id: job slot id - */ -void kbase_tlstream_aux_job_softstop(u32 js_id); - -/** - * kbase_tlstream_aux_pagefault - timeline message: MMU page fault event - * resulting in new pages being mapped - * @mmu_as: MMU address space number - * @page_count: number of currently used pages - */ -void kbase_tlstream_aux_pagefault(u32 mmu_as, u32 page_count); - -/** - * kbase_tlstream_aux_pagesalloc - timeline message: total number of allocated - * pages is changed - * @page_count_change: number of pages to be added or subtracted (according to - * the sign) - */ -void kbase_tlstream_aux_pagesalloc(s64 page_count_change); - -#endif /* _KBASE_TLSTREAM_H */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h deleted file mode 100755 index e2e0544208cea..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h +++ /dev/null @@ -1,264 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/* ***** IMPORTANT: THIS IS NOT A NORMAL HEADER FILE ***** - * ***** DO NOT INCLUDE DIRECTLY ***** - * ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */ - -/* - * The purpose of this header file is just to contain a list of trace code idenitifers - * - * Each identifier is wrapped in a macro, so that its string form and enum form can be created - * - * Each macro is separated with a comma, to allow insertion into an array initializer or enum definition block. - * - * This allows automatic creation of an enum and a corresponding array of strings - * - * Before #including, the includer MUST #define KBASE_TRACE_CODE_MAKE_CODE. - * After #including, the includer MUST #under KBASE_TRACE_CODE_MAKE_CODE. - * - * e.g.: - * #define KBASE_TRACE_CODE( X ) KBASE_TRACE_CODE_ ## X - * typedef enum - * { - * #define KBASE_TRACE_CODE_MAKE_CODE( X ) KBASE_TRACE_CODE( X ) - * #include "mali_kbase_trace_defs.h" - * #undef KBASE_TRACE_CODE_MAKE_CODE - * } kbase_trace_code; - * - * IMPORTANT: THIS FILE MUST NOT BE USED FOR ANY OTHER PURPOSE OTHER THAN THE ABOVE - * - * - * The use of the macro here is: - * - KBASE_TRACE_CODE_MAKE_CODE( X ) - * - * Which produces: - * - For an enum, KBASE_TRACE_CODE_X - * - For a string, "X" - * - * - * For example: - * - KBASE_TRACE_CODE_MAKE_CODE( JM_JOB_COMPLETE ) expands to: - * - KBASE_TRACE_CODE_JM_JOB_COMPLETE for the enum - * - "JM_JOB_COMPLETE" for the string - * - To use it to trace an event, do: - * - KBASE_TRACE_ADD( kbdev, JM_JOB_COMPLETE, subcode, kctx, uatom, val ); - */ - -#if 0 /* Dummy section to avoid breaking formatting */ -int dummy_array[] = { -#endif - -/* - * Core events - */ - /* no info_val, no gpu_addr, no atom */ - KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_DESTROY), - /* no info_val, no gpu_addr, no atom */ - KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_HWINSTR_TERM), - /* info_val == GPU_IRQ_STATUS register */ - KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ), - /* info_val == bits cleared */ - 
KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_CLEAR), - /* info_val == GPU_IRQ_STATUS register */ - KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_DONE), - KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_SOFT_RESET), - KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_HARD_RESET), - KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_CLEAR), - /* GPU addr==dump address */ - KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_SAMPLE), - KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_CLEAN_INV_CACHES), -/* - * Job Slot management events - */ - /* info_val==irq rawstat at start */ - KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ), - /* info_val==jobs processed */ - KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ_END), -/* In the following: - * - * - ctx is set if a corresponding job found (NULL otherwise, e.g. some soft-stop cases) - * - uatom==kernel-side mapped uatom address (for correlation with user-side) - */ - /* info_val==exit code; gpu_addr==chain gpuaddr */ - KBASE_TRACE_CODE_MAKE_CODE(JM_JOB_DONE), - /* gpu_addr==JS_HEAD_NEXT written, info_val==lower 32 bits of affinity */ - KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT), - /* gpu_addr is as follows: - * - If JS_STATUS active after soft-stop, val==gpu addr written to - * JS_HEAD on submit - * - otherwise gpu_addr==0 */ - KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP), - KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_0), - KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_1), - /* gpu_addr==JS_HEAD read */ - KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP), - /* gpu_addr==JS_HEAD read */ - KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_0), - /* gpu_addr==JS_HEAD read */ - KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_1), - /* gpu_addr==JS_TAIL read */ - KBASE_TRACE_CODE_MAKE_CODE(JM_UPDATE_HEAD), -/* gpu_addr is as follows: - * - If JS_STATUS active before soft-stop, val==JS_HEAD - * - otherwise gpu_addr==0 - */ - /* gpu_addr==JS_HEAD read */ - KBASE_TRACE_CODE_MAKE_CODE(JM_CHECK_HEAD), - KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS), - KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS_DONE), - /* info_val == is_scheduled */ - 
KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_NON_SCHEDULED), - /* info_val == is_scheduled */ - KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_SCHEDULED), - KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_DONE), - /* info_val == nr jobs submitted */ - KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_SOFT_OR_HARD_STOP), - /* gpu_addr==JS_HEAD_NEXT last written */ - KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_EVICT), - KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT_AFTER_RESET), - KBASE_TRACE_CODE_MAKE_CODE(JM_BEGIN_RESET_WORKER), - KBASE_TRACE_CODE_MAKE_CODE(JM_END_RESET_WORKER), -/* - * Job dispatch events - */ - /* gpu_addr==value to write into JS_HEAD */ - KBASE_TRACE_CODE_MAKE_CODE(JD_DONE), - /* gpu_addr==value to write into JS_HEAD */ - KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER), - /* gpu_addr==value to write into JS_HEAD */ - KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER_END), - /* gpu_addr==value to write into JS_HEAD */ - KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_TRY_RUN_NEXT_JOB), - /* gpu_addr==0, info_val==0, uatom==0 */ - KBASE_TRACE_CODE_MAKE_CODE(JD_ZAP_CONTEXT), - /* gpu_addr==value to write into JS_HEAD */ - KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL), - /* gpu_addr==value to write into JS_HEAD */ - KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL_WORKER), -/* - * Scheduler Core events - */ - KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX_NOLOCK), - /* gpu_addr==value to write into JS_HEAD */ - KBASE_TRACE_CODE_MAKE_CODE(JS_ADD_JOB), - /* gpu_addr==last value written/would be written to JS_HEAD */ - KBASE_TRACE_CODE_MAKE_CODE(JS_REMOVE_JOB), - KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX), - KBASE_TRACE_CODE_MAKE_CODE(JS_RELEASE_CTX), - KBASE_TRACE_CODE_MAKE_CODE(JS_TRY_SCHEDULE_HEAD_CTX), - /* gpu_addr==value to write into JS_HEAD */ - KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_TRY_RUN_NEXT_JOB), - /* gpu_addr==value to write into JS_HEAD */ - KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_RETRY_NEEDED), - /* kctx is the one being evicted, info_val == kctx to put in */ - KBASE_TRACE_CODE_MAKE_CODE(JS_FAST_START_EVICTS_CTX), - 
KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_SUBMIT_TO_BLOCKED), - /* info_val == lower 32 bits of affinity */ - KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_CURRENT), - /* info_val == lower 32 bits of affinity */ - KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_CORES_FAILED), - /* info_val == lower 32 bits of affinity */ - KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_INUSE_FAILED), - /* info_val == lower 32 bits of rechecked affinity */ - KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED), - /* info_val == lower 32 bits of rechecked affinity */ - KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED), - /* info_val == lower 32 bits of affinity */ - KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_AFFINITY_WOULD_VIOLATE), - /* info_val == the ctx attribute now on ctx */ - KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_CTX), - /* info_val == the ctx attribute now on runpool */ - KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_RUNPOOL), - /* info_val == the ctx attribute now off ctx */ - KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_CTX), - /* info_val == the ctx attribute now off runpool */ - KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_RUNPOOL), -/* - * Scheduler Policy events - */ - KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_INIT_CTX), - KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TERM_CTX), - /* info_val == whether it was evicted */ - KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TRY_EVICT_CTX), - KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_FOREACH_CTX_JOBS), - KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_CTX), - KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_HEAD_CTX), - KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_ADD_CTX), - KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_REMOVE_CTX), - KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB), - KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB_IRQ), - /* gpu_addr==JS_HEAD to write if the job were run */ - KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_JOB), - KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_START), - 
KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_END), -/* - * Power Management Events - */ - KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERING_UP), - KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERED_UP), - KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON), - KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_TILER), - KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_L2), - KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF), - KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_TILER), - KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_L2), - KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED), - KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_TILER), - KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_L2), - KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED), - KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED_TILER), - KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE), - KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE_TILER), - KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE), - KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE_TILER), - /* PM_DESIRED_REACHED: gpu_addr == pm.gpu_in_desired_state */ - KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED), - KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED_TILER), - KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_SHADER_INUSE), - KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_TILER_INUSE), - KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_SHADER_NEEDED), - KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_TILER_NEEDED), - KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_SHADER_INUSE), - KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_TILER_INUSE), - KBASE_TRACE_CODE_MAKE_CODE(PM_UNREQUEST_CHANGE_SHADER_NEEDED), - KBASE_TRACE_CODE_MAKE_CODE(PM_UNREQUEST_CHANGE_TILER_NEEDED), - KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_SHADER_NEEDED), - KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_TILER_NEEDED), - KBASE_TRACE_CODE_MAKE_CODE(PM_WAKE_WAITERS), - KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_ACTIVE), - KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_IDLE), - KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_ON), - KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_OFF), - /* 
info_val == policy number, or -1 for "Already changing" */ - KBASE_TRACE_CODE_MAKE_CODE(PM_SET_POLICY), - KBASE_TRACE_CODE_MAKE_CODE(PM_CA_SET_POLICY), - /* info_val == policy number */ - KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_INIT), - /* info_val == policy number */ - KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_TERM), -/* Unused code just to make it easier to not have a comma at the end. - * All other codes MUST come before this */ - KBASE_TRACE_CODE_MAKE_CODE(DUMMY) - -#if 0 /* Dummy section to avoid breaking formatting */ -}; -#endif - -/* ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.c deleted file mode 100755 index aac9858875ad4..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.c +++ /dev/null @@ -1,232 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#include -#include -#include - -#define CREATE_TRACE_POINTS - -#ifdef CONFIG_MALI_TRACE_TIMELINE -#include "mali_timeline.h" - -#include -#include - -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_atoms_in_flight); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_atom); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_slot_active); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_slot_action); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_power_active); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_l2_power_active); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_pm_event); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_slot_atom); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_pm_checktrans); -EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_context_active); - -struct kbase_trace_timeline_desc { - char *enum_str; - char *desc; - char *format; - char *format_desc; -}; - -static struct kbase_trace_timeline_desc kbase_trace_timeline_desc_table[] = { - #define KBASE_TIMELINE_TRACE_CODE(enum_val, desc, format, format_desc) { #enum_val, desc, format, format_desc } - #include "mali_kbase_trace_timeline_defs.h" - #undef KBASE_TIMELINE_TRACE_CODE -}; - -#define KBASE_NR_TRACE_CODES ARRAY_SIZE(kbase_trace_timeline_desc_table) - -static void *kbasep_trace_timeline_seq_start(struct seq_file *s, loff_t *pos) -{ - if (*pos >= KBASE_NR_TRACE_CODES) - return NULL; - - return &kbase_trace_timeline_desc_table[*pos]; -} - -static void kbasep_trace_timeline_seq_stop(struct seq_file *s, void *data) -{ -} - -static void *kbasep_trace_timeline_seq_next(struct seq_file *s, void *data, loff_t *pos) -{ - (*pos)++; - - if (*pos == KBASE_NR_TRACE_CODES) - return NULL; - - return &kbase_trace_timeline_desc_table[*pos]; -} - -static int kbasep_trace_timeline_seq_show(struct seq_file *s, void *data) -{ - struct kbase_trace_timeline_desc *trace_desc = data; - - seq_printf(s, "%s#%s#%s#%s\n", trace_desc->enum_str, trace_desc->desc, trace_desc->format, trace_desc->format_desc); - return 0; -} - - -static 
const struct seq_operations kbasep_trace_timeline_seq_ops = { - .start = kbasep_trace_timeline_seq_start, - .next = kbasep_trace_timeline_seq_next, - .stop = kbasep_trace_timeline_seq_stop, - .show = kbasep_trace_timeline_seq_show, -}; - -static int kbasep_trace_timeline_debugfs_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &kbasep_trace_timeline_seq_ops); -} - -static const struct file_operations kbasep_trace_timeline_debugfs_fops = { - .open = kbasep_trace_timeline_debugfs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release_private, -}; - -void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev) -{ - debugfs_create_file("mali_timeline_defs", - S_IRUGO, kbdev->mali_debugfs_directory, NULL, - &kbasep_trace_timeline_debugfs_fops); -} - -void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx, - struct kbase_jd_atom *katom, int js) -{ - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - if (kbdev->timeline.slot_atoms_submitted[js] > 0) { - KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 1); - } else { - base_atom_id atom_number = kbase_jd_atom_id(kctx, katom); - - KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 1); - KBASE_TIMELINE_JOB_START(kctx, js, atom_number); - } - ++kbdev->timeline.slot_atoms_submitted[js]; - - KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]); -} - -void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx, - struct kbase_jd_atom *katom, int js, - kbasep_js_atom_done_code done_code) -{ - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); - - if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT) { - KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 0); - } else { - /* Job finished in JS_HEAD */ - base_atom_id atom_number = kbase_jd_atom_id(kctx, katom); - - KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 0); - KBASE_TIMELINE_JOB_STOP(kctx, js, atom_number); - - /* see if we need to trace the job in JS_NEXT 
moving to JS_HEAD */ - if (kbase_backend_nr_atoms_submitted(kbdev, js)) { - struct kbase_jd_atom *next_katom; - struct kbase_context *next_kctx; - - /* Peek the next atom - note that the atom in JS_HEAD will already - * have been dequeued */ - next_katom = kbase_backend_inspect_head(kbdev, js); - WARN_ON(!next_katom); - next_kctx = next_katom->kctx; - KBASE_TIMELINE_JOB_START_NEXT(next_kctx, js, 0); - KBASE_TIMELINE_JOB_START_HEAD(next_kctx, js, 1); - KBASE_TIMELINE_JOB_START(next_kctx, js, kbase_jd_atom_id(next_kctx, next_katom)); - } - } - - --kbdev->timeline.slot_atoms_submitted[js]; - - KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]); -} - -void kbase_timeline_pm_send_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event_sent) -{ - int uid = 0; - int old_uid; - - /* If a producer already exists for the event, try to use their UID (multiple-producers) */ - uid = atomic_read(&kbdev->timeline.pm_event_uid[event_sent]); - old_uid = uid; - - /* Get a new non-zero UID if we don't have one yet */ - while (!uid) - uid = atomic_inc_return(&kbdev->timeline.pm_event_uid_counter); - - /* Try to use this UID */ - if (old_uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event_sent], old_uid, uid)) - /* If it changed, raced with another producer: we've lost this UID */ - uid = 0; - - KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_sent, uid); -} - -void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event) -{ - int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]); - - if (uid != 0) { - if (uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event], uid, 0)) - /* If it changed, raced with another consumer: we've lost this UID */ - uid = 0; - - KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid); - } -} - -void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event) -{ - int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]); - - if (uid 
!= atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event], uid, 0)) - /* If it changed, raced with another consumer: we've lost this UID */ - uid = 0; - - KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid); -} - -void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev) -{ - lockdep_assert_held(&kbdev->pm.power_change_lock); - /* Simply log the start of the transition */ - kbdev->timeline.l2_transitioning = true; - KBASE_TIMELINE_POWERING_L2(kbdev); -} - -void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev) -{ - lockdep_assert_held(&kbdev->pm.power_change_lock); - /* Simply log the end of the transition */ - if (kbdev->timeline.l2_transitioning) { - kbdev->timeline.l2_transitioning = false; - KBASE_TIMELINE_POWERED_L2(kbdev); - } -} - -#endif /* CONFIG_MALI_TRACE_TIMELINE */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.h deleted file mode 100755 index d92caf054804f..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.h +++ /dev/null @@ -1,358 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#if !defined(_KBASE_TRACE_TIMELINE_H) -#define _KBASE_TRACE_TIMELINE_H - -#ifdef CONFIG_MALI_TRACE_TIMELINE - -enum kbase_trace_timeline_code { - #define KBASE_TIMELINE_TRACE_CODE(enum_val, desc, format, format_desc) enum_val - #include "mali_kbase_trace_timeline_defs.h" - #undef KBASE_TIMELINE_TRACE_CODE -}; - -/** Initialize Timeline DebugFS entries */ -void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev); - -/* mali_timeline.h defines kernel tracepoints used by the KBASE_TIMELINE - * functions. - * Output is timestamped by either sched_clock() (default), local_clock(), or - * cpu_clock(), depending on /sys/kernel/debug/tracing/trace_clock */ -#include "mali_timeline.h" - -/* Trace number of atoms in flight for kctx (atoms either not completed, or in - process of being returned to user */ -#define KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, count) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_atoms_in_flight(ts.tv_sec, ts.tv_nsec, \ - (int)kctx->timeline.owner_tgid, \ - count); \ - } while (0) - -/* Trace atom_id being Ready to Run */ -#define KBASE_TIMELINE_ATOM_READY(kctx, atom_id) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_atom(ts.tv_sec, ts.tv_nsec, \ - CTX_FLOW_ATOM_READY, \ - (int)kctx->timeline.owner_tgid, \ - atom_id); \ - } while (0) - -/* Trace number of atoms submitted to job slot js - * - * NOTE: This uses a different tracepoint to the head/next/soft-stop actions, - * so that those actions can be filtered out separately from this - * - * This is because this is more useful, as we can use it to calculate general - * utilization easily and accurately */ -#define KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, count) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_gpu_slot_active(ts.tv_sec, ts.tv_nsec, \ - SW_SET_GPU_SLOT_ACTIVE, \ - (int)kctx->timeline.owner_tgid, \ - js, count); \ - } while (0) - - -/* Trace atoms present 
in JS_NEXT */ -#define KBASE_TIMELINE_JOB_START_NEXT(kctx, js, count) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \ - SW_SET_GPU_SLOT_NEXT, \ - (int)kctx->timeline.owner_tgid, \ - js, count); \ - } while (0) - -/* Trace atoms present in JS_HEAD */ -#define KBASE_TIMELINE_JOB_START_HEAD(kctx, js, count) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \ - SW_SET_GPU_SLOT_HEAD, \ - (int)kctx->timeline.owner_tgid, \ - js, count); \ - } while (0) - -/* Trace that a soft stop/evict from next is being attempted on a slot */ -#define KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, count) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \ - SW_SET_GPU_SLOT_STOPPING, \ - (kctx) ? (int)kctx->timeline.owner_tgid : 0, \ - js, count); \ - } while (0) - - - -/* Trace state of overall GPU power */ -#define KBASE_TIMELINE_GPU_POWER(kbdev, active) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \ - SW_SET_GPU_POWER_ACTIVE, active); \ - } while (0) - -/* Trace state of tiler power */ -#define KBASE_TIMELINE_POWER_TILER(kbdev, bitmap) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \ - SW_SET_GPU_POWER_TILER_ACTIVE, \ - hweight64(bitmap)); \ - } while (0) - -/* Trace number of shaders currently powered */ -#define KBASE_TIMELINE_POWER_SHADER(kbdev, bitmap) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \ - SW_SET_GPU_POWER_SHADER_ACTIVE, \ - hweight64(bitmap)); \ - } while (0) - -/* Trace state of L2 power */ -#define KBASE_TIMELINE_POWER_L2(kbdev, bitmap) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - 
trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \ - SW_SET_GPU_POWER_L2_ACTIVE, \ - hweight64(bitmap)); \ - } while (0) - -/* Trace state of L2 cache*/ -#define KBASE_TIMELINE_POWERING_L2(kbdev) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec, \ - SW_FLOW_GPU_POWER_L2_POWERING, \ - 1); \ - } while (0) - -#define KBASE_TIMELINE_POWERED_L2(kbdev) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec, \ - SW_FLOW_GPU_POWER_L2_ACTIVE, \ - 1); \ - } while (0) - -/* Trace kbase_pm_send_event message send */ -#define KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_type, pm_event_id) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_pm_event(ts.tv_sec, ts.tv_nsec, \ - SW_FLOW_PM_SEND_EVENT, \ - event_type, pm_event_id); \ - } while (0) - -/* Trace kbase_pm_worker message receive */ -#define KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event_type, pm_event_id) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_pm_event(ts.tv_sec, ts.tv_nsec, \ - SW_FLOW_PM_HANDLE_EVENT, \ - event_type, pm_event_id); \ - } while (0) - - -/* Trace atom_id starting in JS_HEAD */ -#define KBASE_TIMELINE_JOB_START(kctx, js, _consumerof_atom_number) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_slot_atom(ts.tv_sec, ts.tv_nsec, \ - HW_START_GPU_JOB_CHAIN_SW_APPROX, \ - (int)kctx->timeline.owner_tgid, \ - js, _consumerof_atom_number); \ - } while (0) - -/* Trace atom_id stopping on JS_HEAD */ -#define KBASE_TIMELINE_JOB_STOP(kctx, js, _producerof_atom_number_completed) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_slot_atom(ts.tv_sec, ts.tv_nsec, \ - HW_STOP_GPU_JOB_CHAIN_SW_APPROX, \ - (int)kctx->timeline.owner_tgid, \ - js, _producerof_atom_number_completed); \ - } while (0) - - -/** Trace beginning/end of a call to 
kbase_pm_check_transitions_nolock from a - * certin caller */ -#define KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_pm_checktrans(ts.tv_sec, ts.tv_nsec, \ - trace_code, 1); \ - } while (0) - -/* Trace number of contexts active */ -#define KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, count) \ - do { \ - struct timespec ts; \ - getrawmonotonic(&ts); \ - trace_mali_timeline_context_active(ts.tv_sec, ts.tv_nsec, \ - count); \ - } while (0) - - -/* NOTE: kbase_timeline_pm_cores_func() is in mali_kbase_pm_policy.c */ - -/** - * Trace that an atom is starting on a job slot - * - * The caller must be holding kbasep_js_device_data::runpool_irq::lock - */ -void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx, - struct kbase_jd_atom *katom, int js); - -/** - * Trace that an atom has done on a job slot - * - * 'Done' in this sense can occur either because: - * - the atom in JS_HEAD finished - * - the atom in JS_NEXT was evicted - * - * Whether the atom finished or was evicted is passed in @a done_code - * - * It is assumed that the atom has already been removed from the submit slot, - * with either: - * - kbasep_jm_dequeue_submit_slot() - * - kbasep_jm_dequeue_tail_submit_slot() - * - * The caller must be holding kbasep_js_device_data::runpool_irq::lock - */ -void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx, - struct kbase_jd_atom *katom, int js, - kbasep_js_atom_done_code done_code); - - -/** Trace a pm event starting */ -void kbase_timeline_pm_send_event(struct kbase_device *kbdev, - enum kbase_timeline_pm_event event_sent); - -/** Trace a pm event finishing */ -void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event); - -/** Check whether a pm event was present, and if so trace finishing it */ -void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum 
kbase_timeline_pm_event event); - -/** Trace L2 power-up start */ -void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev); - -/** Trace L2 power-up done */ -void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev); - -#else - -#define KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, count) CSTD_NOP() - -#define KBASE_TIMELINE_ATOM_READY(kctx, atom_id) CSTD_NOP() - -#define KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, count) CSTD_NOP() - -#define KBASE_TIMELINE_JOB_START_NEXT(kctx, js, count) CSTD_NOP() - -#define KBASE_TIMELINE_JOB_START_HEAD(kctx, js, count) CSTD_NOP() - -#define KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, count) CSTD_NOP() - -#define KBASE_TIMELINE_GPU_POWER(kbdev, active) CSTD_NOP() - -#define KBASE_TIMELINE_POWER_TILER(kbdev, bitmap) CSTD_NOP() - -#define KBASE_TIMELINE_POWER_SHADER(kbdev, bitmap) CSTD_NOP() - -#define KBASE_TIMELINE_POWER_L2(kbdev, active) CSTD_NOP() - -#define KBASE_TIMELINE_POWERING_L2(kbdev) CSTD_NOP() - -#define KBASE_TIMELINE_POWERED_L2(kbdev) CSTD_NOP() - -#define KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_type, pm_event_id) CSTD_NOP() - -#define KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event_type, pm_event_id) CSTD_NOP() - -#define KBASE_TIMELINE_JOB_START(kctx, js, _consumerof_atom_number) CSTD_NOP() - -#define KBASE_TIMELINE_JOB_STOP(kctx, js, _producerof_atom_number_completed) CSTD_NOP() - -#define KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code) CSTD_NOP() - -#define KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, count) CSTD_NOP() - - -static inline void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx, - struct kbase_jd_atom *katom, int js) -{ - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); -} - -static inline void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx, - struct kbase_jd_atom *katom, int js, - kbasep_js_atom_done_code done_code) -{ - lockdep_assert_held(&kbdev->js_data.runpool_irq.lock); -} - -static inline void 
kbase_timeline_pm_send_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event_sent) -{ -} - -static inline void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event) -{ -} - -static inline void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event) -{ -} - -static inline void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev) -{ -} - -static inline void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev) -{ -} -#endif /* CONFIG_MALI_TRACE_TIMELINE */ - -#endif /* _KBASE_TRACE_TIMELINE_H */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_timeline_defs.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_timeline_defs.h deleted file mode 100755 index 156a95a67f4ae..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_timeline_defs.h +++ /dev/null @@ -1,140 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -/* ***** IMPORTANT: THIS IS NOT A NORMAL HEADER FILE ***** - * ***** DO NOT INCLUDE DIRECTLY ***** - * ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */ - -/* - * Conventions on Event Names: - * - * - The prefix determines something about how the timeline should be - * displayed, and is split up into various parts, separated by underscores: - * - 'SW' and 'HW' as the first part will be used to determine whether a - * timeline is to do with Software or Hardware - effectively, separate - * 'channels' for Software and Hardware - * - 'START', 'STOP', 'ENTER', 'LEAVE' can be used in the second part, and - * signify related pairs of events - these are optional. - * - 'FLOW' indicates a generic event, which can use dependencies - * - This gives events such as: - * - 'SW_ENTER_FOO' - * - 'SW_LEAVE_FOO' - * - 'SW_FLOW_BAR_1' - * - 'SW_FLOW_BAR_2' - * - 'HW_START_BAZ' - * - 'HW_STOP_BAZ' - * - And an unadorned HW event: - * - 'HW_BAZ_FROZBOZ' - */ - -/* - * Conventions on parameter names: - * - anything with 'instance' in the name will have a separate timeline based - * on that instances. - * - underscored-prefixed parameters will by hidden by default on timelines - * - * Hence: - * - Different job slots have their own 'instance', based on the instance value - * - Per-context info (e.g. atoms on a context) have their own 'instance' - * (i.e. each context should be on a different timeline) - * - * Note that globally-shared resources can be tagged with a tgid, but we don't - * want an instance per context: - * - There's no point having separate Job Slot timelines for each context, that - * would be confusing - there's only really 3 job slots! - * - There's no point having separate Shader-powered timelines for each - * context, that would be confusing - all shader cores (whether it be 4, 8, - * etc) are shared in the system. 
- */ - - /* - * CTX events - */ - /* Separate timelines for each context 'instance'*/ - KBASE_TIMELINE_TRACE_CODE(CTX_SET_NR_ATOMS_IN_FLIGHT, "CTX: Atoms in flight", "%d,%d", "_instance_tgid,_value_number_of_atoms"), - KBASE_TIMELINE_TRACE_CODE(CTX_FLOW_ATOM_READY, "CTX: Atoms Ready to Run", "%d,%d,%d", "_instance_tgid,_consumerof_atom_number,_producerof_atom_number_ready"), - - /* - * SW Events - */ - /* Separate timelines for each slot 'instance' */ - KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_ACTIVE, "SW: GPU slot active", "%d,%d,%d", "_tgid,_instance_slot,_value_number_of_atoms"), - KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_NEXT, "SW: GPU atom in NEXT", "%d,%d,%d", "_tgid,_instance_slot,_value_is_an_atom_in_next"), - KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_HEAD, "SW: GPU atom in HEAD", "%d,%d,%d", "_tgid,_instance_slot,_value_is_an_atom_in_head"), - KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_STOPPING, "SW: Try Soft-Stop on GPU slot", "%d,%d,%d", "_tgid,_instance_slot,_value_is_slot_stopping"), - /* Shader and overall power is shared - can't have separate instances of - * it, just tagging with the context */ - KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_ACTIVE, "SW: GPU power active", "%d,%d", "_tgid,_value_is_power_active"), - KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_TILER_ACTIVE, "SW: GPU tiler powered", "%d,%d", "_tgid,_value_number_of_tilers"), - KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_SHADER_ACTIVE, "SW: GPU shaders powered", "%d,%d", "_tgid,_value_number_of_shaders"), - KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_L2_ACTIVE, "SW: GPU L2 powered", "%d,%d", "_tgid,_value_number_of_l2"), - - /* SW Power event messaging. 
_event_type is one from the kbase_pm_event enum */ - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_SEND_EVENT, "SW: PM Send Event", "%d,%d,%d", "_tgid,_event_type,_writerof_pm_event_id"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_HANDLE_EVENT, "SW: PM Handle Event", "%d,%d,%d", "_tgid,_event_type,_finalconsumerof_pm_event_id"), - /* SW L2 power events */ - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_GPU_POWER_L2_POWERING, "SW: GPU L2 powering", "%d,%d", "_tgid,_writerof_l2_transitioning"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_GPU_POWER_L2_ACTIVE, "SW: GPU L2 powering done", "%d,%d", "_tgid,_finalconsumerof_l2_transitioning"), - - KBASE_TIMELINE_TRACE_CODE(SW_SET_CONTEXT_ACTIVE, "SW: Context Active", "%d,%d", "_tgid,_value_active"), - - /* - * BEGIN: Significant SW Functions that call kbase_pm_check_transitions_nolock() - */ - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START, "SW: PM CheckTrans from kbase_pm_do_poweroff", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_poweroff"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END, "SW: PM CheckTrans from kbase_pm_do_poweroff", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_poweroff"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START, "SW: PM CheckTrans from kbase_pm_do_poweron", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_poweron"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END, "SW: PM CheckTrans from kbase_pm_do_poweron", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_poweron"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START, "SW: PM CheckTrans from kbase_gpu_interrupt", "%d,%d", "_tgid,_writerof_pm_checktrans_gpu_interrupt"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END, "SW: PM CheckTrans from kbase_gpu_interrupt", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_gpu_interrupt"), - - /* - * Significant Indirect callers of kbase_pm_check_transitions_nolock() - */ - /* kbase_pm_request_cores */ - 
KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_START, "SW: PM CheckTrans from kbase_pm_request_cores(shader)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_shader"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_END, "SW: PM CheckTrans from kbase_pm_request_cores(shader)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_shader"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_START, "SW: PM CheckTrans from kbase_pm_request_cores(tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_tiler"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_END, "SW: PM CheckTrans from kbase_pm_request_cores(tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_tiler"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_START, "SW: PM CheckTrans from kbase_pm_request_cores(shader+tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_shader_tiler"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_END, "SW: PM CheckTrans from kbase_pm_request_cores(shader+tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_shader_tiler"), - /* kbase_pm_release_cores */ - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_START, "SW: PM CheckTrans from kbase_pm_release_cores(shader)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_shader"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_END, "SW: PM CheckTrans from kbase_pm_release_cores(shader)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_shader"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_START, "SW: PM CheckTrans from kbase_pm_release_cores(tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_tiler"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_END, 
"SW: PM CheckTrans from kbase_pm_release_cores(tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_tiler"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_START, "SW: PM CheckTrans from kbase_pm_release_cores(shader+tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_shader_tiler"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_END, "SW: PM CheckTrans from kbase_pm_release_cores(shader+tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_shader_tiler"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START, "SW: PM CheckTrans from kbasep_pm_do_shader_poweroff_callback", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_shader_poweroff_callback"), - KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END, "SW: PM CheckTrans from kbasep_pm_do_shader_poweroff_callback", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_shader_poweroff_callback"), - /* - * END: SW Functions that call kbase_pm_check_transitions_nolock() - */ - - /* - * HW Events - */ - KBASE_TIMELINE_TRACE_CODE(HW_MMU_FAULT, -"HW: MMU Fault", "%d,%d,%d", "_tgid,fault_type,fault_stage,asid"), - KBASE_TIMELINE_TRACE_CODE(HW_START_GPU_JOB_CHAIN_SW_APPROX, -"HW: Job Chain start (SW approximated)", "%d,%d,%d", -"_tgid,job_slot,_consumerof_atom_number_ready"), - KBASE_TIMELINE_TRACE_CODE(HW_STOP_GPU_JOB_CHAIN_SW_APPROX, -"HW: Job Chain stop (SW approximated)", "%d,%d,%d", -"_tgid,job_slot,_producerof_atom_number_completed") diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_uku.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_uku.h deleted file mode 100755 index 8108677542aba..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_uku.h +++ /dev/null @@ -1,462 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef _KBASE_UKU_H_ -#define _KBASE_UKU_H_ - -#include "mali_uk.h" -#include "mali_base_kernel.h" - -/* This file needs to support being included from kernel and userside (which use different defines) */ -#if defined(CONFIG_MALI_ERROR_INJECT) || MALI_ERROR_INJECT_ON -#define SUPPORT_MALI_ERROR_INJECT -#endif /* defined(CONFIG_MALI_ERROR_INJECT) || MALI_ERROR_INJECT_ON */ -#if defined(CONFIG_MALI_NO_MALI) -#define SUPPORT_MALI_NO_MALI -#elif defined(MALI_NO_MALI) -#if MALI_NO_MALI -#define SUPPORT_MALI_NO_MALI -#endif -#endif - -#if defined(SUPPORT_MALI_NO_MALI) || defined(SUPPORT_MALI_ERROR_INJECT) -#include "backend/gpu/mali_kbase_model_dummy.h" -#endif - -#include "mali_kbase_gpuprops_types.h" - -#define BASE_UK_VERSION_MAJOR 10 -#define BASE_UK_VERSION_MINOR 0 - -struct kbase_uk_mem_alloc { - union uk_header header; - /* IN */ - u64 va_pages; - u64 commit_pages; - u64 extent; - /* IN/OUT */ - u64 flags; - /* OUT */ - u64 gpu_va; - u16 va_alignment; - u8 padding[6]; -}; - -struct kbase_uk_mem_free { - union uk_header header; - /* IN */ - u64 gpu_addr; - /* OUT */ -}; - -struct kbase_uk_mem_alias { - union uk_header header; - /* IN/OUT */ - u64 flags; - /* IN */ - u64 stride; - u64 nents; - union kbase_pointer ai; - /* OUT */ - u64 gpu_va; - u64 va_pages; -}; - -struct kbase_uk_mem_import { - union uk_header header; - /* IN */ - union kbase_pointer phandle; - u32 type; - u32 padding; - /* IN/OUT */ - u64 flags; - /* OUT */ - u64 gpu_va; - u64 va_pages; -}; - -struct kbase_uk_mem_flags_change { - union 
uk_header header; - /* IN */ - u64 gpu_va; - u64 flags; - u64 mask; -}; - -struct kbase_uk_job_submit { - union uk_header header; - /* IN */ - union kbase_pointer addr; - u32 nr_atoms; - u32 stride; /* bytes between atoms, i.e. sizeof(base_jd_atom_v2) */ - /* OUT */ -}; - -struct kbase_uk_post_term { - union uk_header header; -}; - -struct kbase_uk_sync_now { - union uk_header header; - - /* IN */ - struct base_syncset sset; - - /* OUT */ -}; - -struct kbase_uk_hwcnt_setup { - union uk_header header; - - /* IN */ - u64 dump_buffer; - u32 jm_bm; - u32 shader_bm; - u32 tiler_bm; - u32 unused_1; /* keep for backwards compatibility */ - u32 mmu_l2_bm; - u32 padding; - /* OUT */ -}; - -/** - * struct kbase_uk_hwcnt_reader_setup - User/Kernel space data exchange structure - * @header: UK structure header - * @buffer_count: requested number of dumping buffers - * @jm_bm: counters selection bitmask (JM) - * @shader_bm: counters selection bitmask (Shader) - * @tiler_bm: counters selection bitmask (Tiler) - * @mmu_l2_bm: counters selection bitmask (MMU_L2) - * @fd: dumping notification file descriptor - * - * This structure sets up HWC dumper/reader for this context. - * Multiple instances can be created for single context. 
- */ -struct kbase_uk_hwcnt_reader_setup { - union uk_header header; - - /* IN */ - u32 buffer_count; - u32 jm_bm; - u32 shader_bm; - u32 tiler_bm; - u32 mmu_l2_bm; - - /* OUT */ - s32 fd; -}; - -struct kbase_uk_hwcnt_dump { - union uk_header header; -}; - -struct kbase_uk_hwcnt_clear { - union uk_header header; -}; - -struct kbase_uk_fence_validate { - union uk_header header; - /* IN */ - s32 fd; - u32 padding; - /* OUT */ -}; - -struct kbase_uk_stream_create { - union uk_header header; - /* IN */ - char name[32]; - /* OUT */ - s32 fd; - u32 padding; -}; - -struct kbase_uk_gpuprops { - union uk_header header; - - /* IN */ - struct mali_base_gpu_props props; - /* OUT */ -}; - -struct kbase_uk_mem_query { - union uk_header header; - /* IN */ - u64 gpu_addr; -#define KBASE_MEM_QUERY_COMMIT_SIZE 1 -#define KBASE_MEM_QUERY_VA_SIZE 2 -#define KBASE_MEM_QUERY_FLAGS 3 - u64 query; - /* OUT */ - u64 value; -}; - -struct kbase_uk_mem_commit { - union uk_header header; - /* IN */ - u64 gpu_addr; - u64 pages; - /* OUT */ - u32 result_subcode; - u32 padding; -}; - -struct kbase_uk_find_cpu_offset { - union uk_header header; - /* IN */ - u64 gpu_addr; - u64 cpu_addr; - u64 size; - /* OUT */ - u64 offset; -}; - -#define KBASE_GET_VERSION_BUFFER_SIZE 64 -struct kbase_uk_get_ddk_version { - union uk_header header; - /* OUT */ - char version_buffer[KBASE_GET_VERSION_BUFFER_SIZE]; - u32 version_string_size; - u32 padding; -}; - -struct kbase_uk_disjoint_query { - union uk_header header; - /* OUT */ - u32 counter; - u32 padding; -}; - -struct kbase_uk_set_flags { - union uk_header header; - /* IN */ - u32 create_flags; - u32 padding; -}; - -#if MALI_UNIT_TEST -#define TEST_ADDR_COUNT 4 -#define KBASE_TEST_BUFFER_SIZE 128 -struct kbase_exported_test_data { - u64 test_addr[TEST_ADDR_COUNT]; /**< memory address */ - u32 test_addr_pages[TEST_ADDR_COUNT]; /**< memory size in pages */ - union kbase_pointer kctx; /**< base context created by process */ - union kbase_pointer mm; /**< pointer 
to process address space */ - u8 buffer1[KBASE_TEST_BUFFER_SIZE]; /**< unit test defined parameter */ - u8 buffer2[KBASE_TEST_BUFFER_SIZE]; /**< unit test defined parameter */ -}; - -struct kbase_uk_set_test_data { - union uk_header header; - /* IN */ - struct kbase_exported_test_data test_data; -}; - -#endif /* MALI_UNIT_TEST */ - -#ifdef SUPPORT_MALI_ERROR_INJECT -struct kbase_uk_error_params { - union uk_header header; - /* IN */ - struct kbase_error_params params; -}; -#endif /* SUPPORT_MALI_ERROR_INJECT */ - -#ifdef SUPPORT_MALI_NO_MALI -struct kbase_uk_model_control_params { - union uk_header header; - /* IN */ - struct kbase_model_control_params params; -}; -#endif /* SUPPORT_MALI_NO_MALI */ - -#define KBASE_MAXIMUM_EXT_RESOURCES 255 - -struct kbase_uk_ext_buff_kds_data { - union uk_header header; - union kbase_pointer external_resource; - union kbase_pointer file_descriptor; - u32 num_res; /* limited to KBASE_MAXIMUM_EXT_RESOURCES */ - u32 padding; -}; - -#ifdef BASE_LEGACY_UK8_SUPPORT -struct kbase_uk_keep_gpu_powered { - union uk_header header; - u32 enabled; - u32 padding; -}; -#endif /* BASE_LEGACY_UK8_SUPPORT */ - -struct kbase_uk_profiling_controls { - union uk_header header; - u32 profiling_controls[FBDUMP_CONTROL_MAX]; -}; - -struct kbase_uk_debugfs_mem_profile_add { - union uk_header header; - u32 len; - union kbase_pointer buf; -}; - -struct kbase_uk_context_id { - union uk_header header; - /* OUT */ - int id; -}; - -#if (defined(MALI_KTLSTREAM_ENABLED) && MALI_KTLSTREAM_ENABLED) || \ - defined(CONFIG_MALI_MIPE_ENABLED) -/** - * struct kbase_uk_tlstream_acquire - User/Kernel space data exchange structure - * @header: UK structure header - * @fd: timeline stream file descriptor - * - * This structure is used used when performing a call to acquire kernel side - * timeline stream file descriptor. 
- */ -struct kbase_uk_tlstream_acquire { - union uk_header header; - /* IN */ - /* OUT */ - s32 fd; -}; - -/** - * struct kbase_uk_tlstream_flush - User/Kernel space data exchange structure - * @header: UK structure header - * - * This structure is used when performing a call to flush kernel side - * timeline streams. - */ -struct kbase_uk_tlstream_flush { - union uk_header header; - /* IN */ - /* OUT */ -}; - -#if MALI_UNIT_TEST -/** - * struct kbase_uk_tlstream_acquire - User/Kernel space data exchange structure - * @header: UK structure header - * @tpw_count: number of trace point writers in each context - * @msg_delay: time delay between tracepoints from one writer in milliseconds - * @msg_count: number of trace points written by one writer - * @aux_msg: if non-zero aux messages will be included - * - * This structure is used when performing a call to start timeline stream test - * embedded in kernel. - */ -struct kbase_uk_tlstream_test { - union uk_header header; - /* IN */ - u32 tpw_count; - u32 msg_delay; - u32 msg_count; - u32 aux_msg; - /* OUT */ -}; - -/** - * struct kbase_uk_tlstream_acquire - User/Kernel space data exchange structure - * @header: UK structure header - * @bytes_collected: number of bytes read by user - * @bytes_generated: number of bytes generated by tracepoints - * - * This structure is used when performing a call to obtain timeline stream - * statistics. - */ -struct kbase_uk_tlstream_stats { - union uk_header header; /**< UK structure header. 
*/ - /* IN */ - /* OUT */ - u32 bytes_collected; - u32 bytes_generated; -}; -#endif /* MALI_UNIT_TEST */ -#endif /* MALI_KTLSTREAM_ENABLED */ - -enum kbase_uk_function_id { - KBASE_FUNC_MEM_ALLOC = (UK_FUNC_ID + 0), - KBASE_FUNC_MEM_IMPORT = (UK_FUNC_ID + 1), - KBASE_FUNC_MEM_COMMIT = (UK_FUNC_ID + 2), - KBASE_FUNC_MEM_QUERY = (UK_FUNC_ID + 3), - KBASE_FUNC_MEM_FREE = (UK_FUNC_ID + 4), - KBASE_FUNC_MEM_FLAGS_CHANGE = (UK_FUNC_ID + 5), - KBASE_FUNC_MEM_ALIAS = (UK_FUNC_ID + 6), - -#ifdef BASE_LEGACY_UK6_SUPPORT - KBASE_FUNC_JOB_SUBMIT_UK6 = (UK_FUNC_ID + 7), -#endif /* BASE_LEGACY_UK6_SUPPORT */ - - KBASE_FUNC_SYNC = (UK_FUNC_ID + 8), - - KBASE_FUNC_POST_TERM = (UK_FUNC_ID + 9), - - KBASE_FUNC_HWCNT_SETUP = (UK_FUNC_ID + 10), - KBASE_FUNC_HWCNT_DUMP = (UK_FUNC_ID + 11), - KBASE_FUNC_HWCNT_CLEAR = (UK_FUNC_ID + 12), - - KBASE_FUNC_GPU_PROPS_REG_DUMP = (UK_FUNC_ID + 14), - - KBASE_FUNC_FIND_CPU_OFFSET = (UK_FUNC_ID + 15), - - KBASE_FUNC_GET_VERSION = (UK_FUNC_ID + 16), - KBASE_FUNC_EXT_BUFFER_LOCK = (UK_FUNC_ID + 17), - KBASE_FUNC_SET_FLAGS = (UK_FUNC_ID + 18), - - KBASE_FUNC_SET_TEST_DATA = (UK_FUNC_ID + 19), - KBASE_FUNC_INJECT_ERROR = (UK_FUNC_ID + 20), - KBASE_FUNC_MODEL_CONTROL = (UK_FUNC_ID + 21), - -#ifdef BASE_LEGACY_UK8_SUPPORT - KBASE_FUNC_KEEP_GPU_POWERED = (UK_FUNC_ID + 22), -#endif /* BASE_LEGACY_UK8_SUPPORT */ - - KBASE_FUNC_FENCE_VALIDATE = (UK_FUNC_ID + 23), - KBASE_FUNC_STREAM_CREATE = (UK_FUNC_ID + 24), - KBASE_FUNC_GET_PROFILING_CONTROLS = (UK_FUNC_ID + 25), - KBASE_FUNC_SET_PROFILING_CONTROLS = (UK_FUNC_ID + 26), - /* to be used only for testing - * purposes, otherwise these controls - * are set through gator API */ - - KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD = (UK_FUNC_ID + 27), - KBASE_FUNC_JOB_SUBMIT = (UK_FUNC_ID + 28), - KBASE_FUNC_DISJOINT_QUERY = (UK_FUNC_ID + 29), - - KBASE_FUNC_GET_CONTEXT_ID = (UK_FUNC_ID + 31), - -#if (defined(MALI_KTLSTREAM_ENABLED) && MALI_KTLSTREAM_ENABLED) || \ - defined(CONFIG_MALI_MIPE_ENABLED) - 
KBASE_FUNC_TLSTREAM_ACQUIRE = (UK_FUNC_ID + 32), -#if MALI_UNIT_TEST - KBASE_FUNC_TLSTREAM_TEST = (UK_FUNC_ID + 33), - KBASE_FUNC_TLSTREAM_STATS = (UK_FUNC_ID + 34), -#endif /* MALI_UNIT_TEST */ - KBASE_FUNC_TLSTREAM_FLUSH = (UK_FUNC_ID + 35), -#endif /* MALI_KTLSTREAM_ENABLED */ - - KBASE_FUNC_HWCNT_READER_SETUP = (UK_FUNC_ID + 36), - - KBASE_FUNC_MAX -}; - -#endif /* _KBASE_UKU_H_ */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_utility.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_utility.c deleted file mode 100755 index be474ff874012..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_utility.c +++ /dev/null @@ -1,33 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#include - -bool kbasep_list_member_of(const struct list_head *base, struct list_head *entry) -{ - struct list_head *pos = base->next; - - while (pos != base) { - if (pos == entry) - return true; - - pos = pos->next; - } - return false; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_utility.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_utility.h deleted file mode 100755 index fd7252dab0de2..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_utility.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef _KBASE_UTILITY_H -#define _KBASE_UTILITY_H - -#ifndef _KBASE_H_ -#error "Don't include this file directly, use mali_kbase.h instead" -#endif - -/** Test whether the given list entry is a member of the given list. - * - * @param base The head of the list to be tested - * @param entry The list entry to be tested - * - * @return true if entry is a member of base - * false otherwise - */ -bool kbasep_list_member_of(const struct list_head *base, struct list_head *entry); - -#endif /* _KBASE_UTILITY_H */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_vinstr.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_vinstr.c deleted file mode 100755 index bfa8bfa0e14f5..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_vinstr.c +++ /dev/null @@ -1,1680 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -/*****************************************************************************/ - -/* Hwcnt reader API version */ -#define HWCNT_READER_API 1 - -/* The number of nanoseconds in a second. */ -#define NSECS_IN_SEC 1000000000ull /* ns */ - -/* The time resolution of dumping service. */ -#define DUMPING_RESOLUTION 500000ull /* ns */ - -/* The maximal supported number of dumping buffers. */ -#define MAX_BUFFER_COUNT 32 - -/* Size and number of hw counters blocks. */ -#define NR_CNT_BLOCKS_PER_GROUP 8 -#define NR_CNT_PER_BLOCK 64 -#define NR_BYTES_PER_CNT 4 -#define NR_BYTES_PER_HDR 16 -#define PRFCNT_EN_MASK_OFFSET 0x8 - -/*****************************************************************************/ - -enum { - SHADER_HWCNT_BM, - TILER_HWCNT_BM, - MMU_L2_HWCNT_BM, - JM_HWCNT_BM -}; - -/** - * struct kbase_vinstr_context - vinstr context per device - * @lock: protects the entire vinstr context - * @kbdev: pointer to kbase device - * @kctx: pointer to kbase context - * @vmap: vinstr vmap for mapping hwcnt dump buffer - * @gpu_va: GPU hwcnt dump buffer address - * @cpu_va: the CPU side mapping of the hwcnt dump buffer - * @dump_size: size of the dump buffer in bytes - * @bitmap: current set of counters monitored, not always in sync - * with hardware - * @reprogram: when true, reprogram hwcnt block with the new set of - * counters - * @suspended: when true, the context has been suspended - * @nclients: number of attached clients, pending or otherwise - * @waiting_clients: head of list of clients being periodically sampled - * @idle_clients: head of list of clients being idle - * @suspended_clients: head of list of clients being suspended - * @thread: periodic sampling thread - * @waitq: notification queue of sampling thread - * @request_pending: request for action for sampling thread - */ -struct 
kbase_vinstr_context { - struct mutex lock; - struct kbase_device *kbdev; - struct kbase_context *kctx; - - struct kbase_vmap_struct vmap; - u64 gpu_va; - void *cpu_va; - size_t dump_size; - u32 bitmap[4]; - bool reprogram; - bool suspended; - - u32 nclients; - struct list_head waiting_clients; - struct list_head idle_clients; - struct list_head suspended_clients; - - struct task_struct *thread; - wait_queue_head_t waitq; - atomic_t request_pending; -}; - -/** - * struct kbase_vinstr_client - a vinstr client attached to a vinstr context - * @vinstr_ctx: vinstr context client is attached to - * @list: node used to attach this client to list in vinstr context - * @buffer_count: number of buffers this client is using - * @event_mask: events this client reacts to - * @dump_size: size of one dump buffer in bytes - * @bitmap: bitmap request for JM, TILER, SHADER and MMU counters - * @legacy_buffer: userspace hwcnt dump buffer (legacy interface) - * @accum_buffer: temporary accumulation buffer for preserving counters - * @dump_time: next time this clients shall request hwcnt dump - * @dump_interval: interval between periodic hwcnt dumps - * @dump_buffers: kernel hwcnt dump buffers allocated by this client - * @dump_buffers_meta: metadata of dump buffers - * @meta_idx: index of metadata being accessed by userspace - * @read_idx: index of buffer read by userspace - * @write_idx: index of buffer being written by dumping service - * @waitq: client's notification queue - * @pending: when true, client has attached but hwcnt not yet updated - */ -struct kbase_vinstr_client { - struct kbase_vinstr_context *vinstr_ctx; - struct list_head list; - unsigned int buffer_count; - u32 event_mask; - size_t dump_size; - u32 bitmap[4]; - void __user *legacy_buffer; - void *accum_buffer; - u64 dump_time; - u32 dump_interval; - char *dump_buffers; - struct kbase_hwcnt_reader_metadata *dump_buffers_meta; - atomic_t meta_idx; - atomic_t read_idx; - atomic_t write_idx; - wait_queue_head_t waitq; 
- bool pending; -}; - -/** - * struct kbasep_vinstr_wake_up_timer - vinstr service thread wake up timer - * @hrtimer: high resolution timer - * @vinstr_ctx: vinstr context - */ -struct kbasep_vinstr_wake_up_timer { - struct hrtimer hrtimer; - struct kbase_vinstr_context *vinstr_ctx; -}; - -/*****************************************************************************/ - -static int kbasep_vinstr_service_task(void *data); - -static unsigned int kbasep_vinstr_hwcnt_reader_poll( - struct file *filp, - poll_table *wait); -static long kbasep_vinstr_hwcnt_reader_ioctl( - struct file *filp, - unsigned int cmd, - unsigned long arg); -static int kbasep_vinstr_hwcnt_reader_mmap( - struct file *filp, - struct vm_area_struct *vma); -static int kbasep_vinstr_hwcnt_reader_release( - struct inode *inode, - struct file *filp); - -/* The timeline stream file operations structure. */ -static const struct file_operations vinstr_client_fops = { - .poll = kbasep_vinstr_hwcnt_reader_poll, - .unlocked_ioctl = kbasep_vinstr_hwcnt_reader_ioctl, - .compat_ioctl = kbasep_vinstr_hwcnt_reader_ioctl, - .mmap = kbasep_vinstr_hwcnt_reader_mmap, - .release = kbasep_vinstr_hwcnt_reader_release, -}; - -/*****************************************************************************/ - -static int enable_hwcnt(struct kbase_vinstr_context *vinstr_ctx) -{ - struct kbase_uk_hwcnt_setup setup; - - setup.dump_buffer = vinstr_ctx->gpu_va; - setup.jm_bm = vinstr_ctx->bitmap[JM_HWCNT_BM]; - setup.tiler_bm = vinstr_ctx->bitmap[TILER_HWCNT_BM]; - setup.shader_bm = vinstr_ctx->bitmap[SHADER_HWCNT_BM]; - setup.mmu_l2_bm = vinstr_ctx->bitmap[MMU_L2_HWCNT_BM]; - - return kbase_instr_hwcnt_enable(vinstr_ctx->kctx, &setup); -} - -static void disable_hwcnt(struct kbase_vinstr_context *vinstr_ctx) -{ - kbase_instr_hwcnt_disable(vinstr_ctx->kctx); -} - -static int reprogram_hwcnt(struct kbase_vinstr_context *vinstr_ctx) -{ - disable_hwcnt(vinstr_ctx); - return enable_hwcnt(vinstr_ctx); -} - -static void 
hwcnt_bitmap_set(u32 dst[4], u32 src[4]) -{ - dst[JM_HWCNT_BM] = src[JM_HWCNT_BM]; - dst[TILER_HWCNT_BM] = src[TILER_HWCNT_BM]; - dst[SHADER_HWCNT_BM] = src[SHADER_HWCNT_BM]; - dst[MMU_L2_HWCNT_BM] = src[MMU_L2_HWCNT_BM]; -} - -static void hwcnt_bitmap_union(u32 dst[4], u32 src[4]) -{ - dst[JM_HWCNT_BM] |= src[JM_HWCNT_BM]; - dst[TILER_HWCNT_BM] |= src[TILER_HWCNT_BM]; - dst[SHADER_HWCNT_BM] |= src[SHADER_HWCNT_BM]; - dst[MMU_L2_HWCNT_BM] |= src[MMU_L2_HWCNT_BM]; -} - -static size_t kbasep_vinstr_dump_size(struct kbase_vinstr_context *vinstr_ctx) -{ - struct kbase_device *kbdev = vinstr_ctx->kctx->kbdev; - size_t dump_size; - - if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_V4)) { - u32 nr_cg; - - nr_cg = kbdev->gpu_props.num_core_groups; - dump_size = nr_cg * NR_CNT_BLOCKS_PER_GROUP * - NR_CNT_PER_BLOCK * - NR_BYTES_PER_CNT; - } else { - /* assume v5 for now */ - base_gpu_props *props = &kbdev->gpu_props.props; - u32 nr_l2 = props->l2_props.num_l2_slices; - u64 core_mask = props->coherency_info.group[0].core_mask; - u32 nr_blocks = fls64(core_mask); - - /* JM and tiler counter blocks are always present */ - dump_size = (2 + nr_l2 + nr_blocks) * - NR_CNT_PER_BLOCK * - NR_BYTES_PER_CNT; - } - return dump_size; -} - -static int kbasep_vinstr_map_kernel_dump_buffer( - struct kbase_vinstr_context *vinstr_ctx) -{ - struct kbase_va_region *reg; - struct kbase_context *kctx = vinstr_ctx->kctx; - u64 flags, nr_pages; - u16 va_align = 0; - - flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_WR; - vinstr_ctx->dump_size = kbasep_vinstr_dump_size(vinstr_ctx); - nr_pages = PFN_UP(vinstr_ctx->dump_size); - - reg = kbase_mem_alloc(kctx, nr_pages, nr_pages, 0, &flags, - &vinstr_ctx->gpu_va, &va_align); - if (!reg) - return -ENOMEM; - - vinstr_ctx->cpu_va = kbase_vmap( - kctx, - vinstr_ctx->gpu_va, - vinstr_ctx->dump_size, - &vinstr_ctx->vmap); - if (!vinstr_ctx->cpu_va) { - kbase_mem_free(kctx, vinstr_ctx->gpu_va); - return -ENOMEM; - } - - return 0; -} - -static void 
kbasep_vinstr_unmap_kernel_dump_buffer( - struct kbase_vinstr_context *vinstr_ctx) -{ - struct kbase_context *kctx = vinstr_ctx->kctx; - - kbase_vunmap(kctx, &vinstr_ctx->vmap); - kbase_mem_free(kctx, vinstr_ctx->gpu_va); -} - -/** - * kbasep_vinstr_create_kctx - create kernel context for vinstr - * @vinstr_ctx: vinstr context - * Return: zero on success - */ -static int kbasep_vinstr_create_kctx(struct kbase_vinstr_context *vinstr_ctx) -{ - int err; - - vinstr_ctx->kctx = kbase_create_context(vinstr_ctx->kbdev, true); - if (!vinstr_ctx->kctx) - return -ENOMEM; - - /* Map the master kernel dump buffer. The HW dumps the counters - * into this memory region. */ - err = kbasep_vinstr_map_kernel_dump_buffer(vinstr_ctx); - if (err) { - kbase_destroy_context(vinstr_ctx->kctx); - vinstr_ctx->kctx = NULL; - return err; - } - - err = enable_hwcnt(vinstr_ctx); - if (err) { - kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx); - kbase_destroy_context(vinstr_ctx->kctx); - vinstr_ctx->kctx = NULL; - return err; - } - - vinstr_ctx->thread = kthread_run( - kbasep_vinstr_service_task, - vinstr_ctx, - "mali_vinstr_service"); - if (!vinstr_ctx->thread) { - disable_hwcnt(vinstr_ctx); - kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx); - kbase_destroy_context(vinstr_ctx->kctx); - vinstr_ctx->kctx = NULL; - return -EFAULT; - } - - return 0; -} - -/** - * kbasep_vinstr_destroy_kctx - destroy vinstr's kernel context - * @vinstr_ctx: vinstr context - */ -static void kbasep_vinstr_destroy_kctx(struct kbase_vinstr_context *vinstr_ctx) -{ - /* Release hw counters dumping resources. 
*/ - vinstr_ctx->thread = NULL; - disable_hwcnt(vinstr_ctx); - kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx); - kbase_destroy_context(vinstr_ctx->kctx); - vinstr_ctx->kctx = NULL; -} - -/** - * kbasep_vinstr_attach_client - Attach a client to the vinstr core - * @vinstr_ctx: vinstr context - * @buffer_count: requested number of dump buffers - * @bitmap: bitmaps describing which counters should be enabled - * @argp: pointer where notification descriptor shall be stored - * - * Return: vinstr opaque client handle or NULL on failure - */ -static struct kbase_vinstr_client *kbasep_vinstr_attach_client( - struct kbase_vinstr_context *vinstr_ctx, u32 buffer_count, - u32 bitmap[4], void *argp) -{ - struct task_struct *thread = NULL; - struct kbase_vinstr_client *cli; - - KBASE_DEBUG_ASSERT(vinstr_ctx); - KBASE_DEBUG_ASSERT(argp); - KBASE_DEBUG_ASSERT(buffer_count >= 0); - KBASE_DEBUG_ASSERT(buffer_count <= MAX_BUFFER_COUNT); - KBASE_DEBUG_ASSERT(!(buffer_count & (buffer_count - 1))); - - cli = kzalloc(sizeof(*cli), GFP_KERNEL); - if (!cli) - return NULL; - - cli->vinstr_ctx = vinstr_ctx; - cli->buffer_count = buffer_count; - cli->event_mask = - (1 << BASE_HWCNT_READER_EVENT_MANUAL) | - (1 << BASE_HWCNT_READER_EVENT_PERIODIC); - cli->pending = true; - - hwcnt_bitmap_set(cli->bitmap, bitmap); - - mutex_lock(&vinstr_ctx->lock); - - hwcnt_bitmap_union(vinstr_ctx->bitmap, cli->bitmap); - vinstr_ctx->reprogram = true; - - /* If this is the first client, create the vinstr kbase - * context. This context is permanently resident until the - * last client exits. */ - if (!vinstr_ctx->nclients) { - hwcnt_bitmap_set(vinstr_ctx->bitmap, cli->bitmap); - if (kbasep_vinstr_create_kctx(vinstr_ctx) < 0) - goto error; - - vinstr_ctx->reprogram = false; - cli->pending = false; - } - - /* The GPU resets the counter block every time there is a request - * to dump it. We need a per client kernel buffer for accumulating - * the counters. 
*/ - cli->dump_size = kbasep_vinstr_dump_size(vinstr_ctx); - cli->accum_buffer = kzalloc(cli->dump_size, GFP_KERNEL); - if (!cli->accum_buffer) - goto error; - - /* Prepare buffers. */ - if (cli->buffer_count) { - int *fd = (int *)argp; - size_t tmp; - - /* Allocate area for buffers metadata storage. */ - tmp = sizeof(struct kbase_hwcnt_reader_metadata) * - cli->buffer_count; - cli->dump_buffers_meta = kmalloc(tmp, GFP_KERNEL); - if (!cli->dump_buffers_meta) - goto error; - - /* Allocate required number of dumping buffers. */ - cli->dump_buffers = (char *)__get_free_pages( - GFP_KERNEL, - get_order(cli->dump_size * cli->buffer_count)); - if (!cli->dump_buffers) - goto error; - - /* Create descriptor for user-kernel data exchange. */ - *fd = anon_inode_getfd( - "[mali_vinstr_desc]", - &vinstr_client_fops, - cli, - O_RDONLY | O_CLOEXEC); - if (0 > *fd) - goto error; - } else { - cli->legacy_buffer = (void __user *)argp; - } - - atomic_set(&cli->read_idx, 0); - atomic_set(&cli->meta_idx, 0); - atomic_set(&cli->write_idx, 0); - init_waitqueue_head(&cli->waitq); - - vinstr_ctx->nclients++; - list_add(&cli->list, &vinstr_ctx->idle_clients); - - mutex_unlock(&vinstr_ctx->lock); - - return cli; - -error: - kfree(cli->dump_buffers_meta); - if (cli->dump_buffers) - free_pages( - (unsigned long)cli->dump_buffers, - get_order(cli->dump_size * cli->buffer_count)); - kfree(cli->accum_buffer); - if (!vinstr_ctx->nclients && vinstr_ctx->kctx) { - thread = vinstr_ctx->thread; - kbasep_vinstr_destroy_kctx(vinstr_ctx); - } - kfree(cli); - - mutex_unlock(&vinstr_ctx->lock); - - /* Thread must be stopped after lock is released. 
*/ - if (thread) - kthread_stop(thread); - - return NULL; -} - -/** - * kbasep_vinstr_detach_client - Detach a client from the vinstr core - * @cli: Pointer to vinstr client - */ -static void kbasep_vinstr_detach_client(struct kbase_vinstr_client *cli) -{ - struct kbase_vinstr_context *vinstr_ctx; - struct kbase_vinstr_client *iter, *tmp; - struct task_struct *thread = NULL; - u32 zerobitmap[4] = { 0 }; - int cli_found = 0; - - KBASE_DEBUG_ASSERT(cli); - vinstr_ctx = cli->vinstr_ctx; - KBASE_DEBUG_ASSERT(vinstr_ctx); - - mutex_lock(&vinstr_ctx->lock); - - list_for_each_entry_safe(iter, tmp, &vinstr_ctx->idle_clients, list) { - if (iter == cli) { - vinstr_ctx->reprogram = true; - cli_found = 1; - list_del(&iter->list); - break; - } - } - if (!cli_found) { - list_for_each_entry_safe( - iter, tmp, &vinstr_ctx->waiting_clients, list) { - if (iter == cli) { - vinstr_ctx->reprogram = true; - cli_found = 1; - list_del(&iter->list); - break; - } - } - } - KBASE_DEBUG_ASSERT(cli_found); - - kfree(cli->dump_buffers_meta); - free_pages( - (unsigned long)cli->dump_buffers, - get_order(cli->dump_size * cli->buffer_count)); - kfree(cli->accum_buffer); - kfree(cli); - - vinstr_ctx->nclients--; - if (!vinstr_ctx->nclients) { - thread = vinstr_ctx->thread; - kbasep_vinstr_destroy_kctx(vinstr_ctx); - } - - /* Rebuild context bitmap now that the client has detached */ - hwcnt_bitmap_set(vinstr_ctx->bitmap, zerobitmap); - list_for_each_entry(iter, &vinstr_ctx->idle_clients, list) - hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap); - list_for_each_entry(iter, &vinstr_ctx->waiting_clients, list) - hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap); - - mutex_unlock(&vinstr_ctx->lock); - - /* Thread must be stopped after lock is released. 
*/ - if (thread) - kthread_stop(thread); -} - -/* Accumulate counters in the dump buffer */ -static void accum_dump_buffer(void *dst, void *src, size_t dump_size) -{ - size_t block_size = NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT; - u32 *d = dst; - u32 *s = src; - size_t i, j; - - for (i = 0; i < dump_size; i += block_size) { - /* skip over the header block */ - d += NR_BYTES_PER_HDR / sizeof(u32); - s += NR_BYTES_PER_HDR / sizeof(u32); - for (j = 0; j < (block_size - NR_BYTES_PER_HDR) / sizeof(u32); j++) { - /* saturate result if addition would result in wraparound */ - if (U32_MAX - *d < *s) - *d = U32_MAX; - else - *d += *s; - d++; - s++; - } - } -} - -/* This is the Midgard v4 patch function. It copies the headers for each - * of the defined blocks from the master kernel buffer and then patches up - * the performance counter enable mask for each of the blocks to exclude - * counters that were not requested by the client. */ -static void patch_dump_buffer_hdr_v4( - struct kbase_vinstr_context *vinstr_ctx, - struct kbase_vinstr_client *cli) -{ - u32 *mask; - u8 *dst = cli->accum_buffer; - u8 *src = vinstr_ctx->cpu_va; - u32 nr_cg = vinstr_ctx->kctx->kbdev->gpu_props.num_core_groups; - size_t i, group_size, group; - enum { - SC0_BASE = 0 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT, - SC1_BASE = 1 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT, - SC2_BASE = 2 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT, - SC3_BASE = 3 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT, - TILER_BASE = 4 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT, - MMU_L2_BASE = 5 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT, - JM_BASE = 7 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT - }; - - group_size = NR_CNT_BLOCKS_PER_GROUP * - NR_CNT_PER_BLOCK * - NR_BYTES_PER_CNT; - for (i = 0; i < nr_cg; i++) { - group = i * group_size; - /* copy shader core headers */ - memcpy(&dst[group + SC0_BASE], &src[group + SC0_BASE], - NR_BYTES_PER_HDR); - memcpy(&dst[group + SC1_BASE], &src[group + SC1_BASE], - NR_BYTES_PER_HDR); - memcpy(&dst[group + SC2_BASE], &src[group 
+ SC2_BASE], - NR_BYTES_PER_HDR); - memcpy(&dst[group + SC3_BASE], &src[group + SC3_BASE], - NR_BYTES_PER_HDR); - - /* copy tiler header */ - memcpy(&dst[group + TILER_BASE], &src[group + TILER_BASE], - NR_BYTES_PER_HDR); - - /* copy mmu header */ - memcpy(&dst[group + MMU_L2_BASE], &src[group + MMU_L2_BASE], - NR_BYTES_PER_HDR); - - /* copy job manager header */ - memcpy(&dst[group + JM_BASE], &src[group + JM_BASE], - NR_BYTES_PER_HDR); - - /* patch the shader core enable mask */ - mask = (u32 *)&dst[group + SC0_BASE + PRFCNT_EN_MASK_OFFSET]; - *mask &= cli->bitmap[SHADER_HWCNT_BM]; - mask = (u32 *)&dst[group + SC1_BASE + PRFCNT_EN_MASK_OFFSET]; - *mask &= cli->bitmap[SHADER_HWCNT_BM]; - mask = (u32 *)&dst[group + SC2_BASE + PRFCNT_EN_MASK_OFFSET]; - *mask &= cli->bitmap[SHADER_HWCNT_BM]; - mask = (u32 *)&dst[group + SC3_BASE + PRFCNT_EN_MASK_OFFSET]; - *mask &= cli->bitmap[SHADER_HWCNT_BM]; - - /* patch the tiler core enable mask */ - mask = (u32 *)&dst[group + TILER_BASE + PRFCNT_EN_MASK_OFFSET]; - *mask &= cli->bitmap[TILER_HWCNT_BM]; - - /* patch the mmu core enable mask */ - mask = (u32 *)&dst[group + MMU_L2_BASE + PRFCNT_EN_MASK_OFFSET]; - *mask &= cli->bitmap[MMU_L2_HWCNT_BM]; - - /* patch the job manager enable mask */ - mask = (u32 *)&dst[group + JM_BASE + PRFCNT_EN_MASK_OFFSET]; - *mask &= cli->bitmap[JM_HWCNT_BM]; - } -} - -/* This is the Midgard v5 patch function. It copies the headers for each - * of the defined blocks from the master kernel buffer and then patches up - * the performance counter enable mask for each of the blocks to exclude - * counters that were not requested by the client. 
*/ -static void patch_dump_buffer_hdr_v5( - struct kbase_vinstr_context *vinstr_ctx, - struct kbase_vinstr_client *cli) -{ - struct kbase_device *kbdev = vinstr_ctx->kctx->kbdev; - u32 i, nr_l2; - u64 core_mask; - u32 *mask; - u8 *dst = cli->accum_buffer; - u8 *src = vinstr_ctx->cpu_va; - size_t block_size = NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT; - - /* copy and patch job manager header */ - memcpy(dst, src, NR_BYTES_PER_HDR); - mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET]; - *mask &= cli->bitmap[JM_HWCNT_BM]; - dst += block_size; - src += block_size; - - /* copy and patch tiler header */ - memcpy(dst, src, NR_BYTES_PER_HDR); - mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET]; - *mask &= cli->bitmap[TILER_HWCNT_BM]; - dst += block_size; - src += block_size; - - /* copy and patch MMU/L2C headers */ - nr_l2 = kbdev->gpu_props.props.l2_props.num_l2_slices; - for (i = 0; i < nr_l2; i++) { - memcpy(dst, src, NR_BYTES_PER_HDR); - mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET]; - *mask &= cli->bitmap[MMU_L2_HWCNT_BM]; - dst += block_size; - src += block_size; - } - - /* copy and patch shader core headers */ - core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask; - while (0ull != core_mask) { - memcpy(dst, src, NR_BYTES_PER_HDR); - if (0ull != (core_mask & 1ull)) { - /* if block is not reserved update header */ - mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET]; - *mask &= cli->bitmap[SHADER_HWCNT_BM]; - } - dst += block_size; - src += block_size; - - core_mask >>= 1; - } -} - -/** - * accum_clients - accumulate dumped hw counters for all known clients - * @vinstr_ctx: vinstr context - */ -static void accum_clients(struct kbase_vinstr_context *vinstr_ctx) -{ - struct kbase_vinstr_client *iter; - int v4; - - v4 = kbase_hw_has_feature(vinstr_ctx->kbdev, BASE_HW_FEATURE_V4); - list_for_each_entry(iter, &vinstr_ctx->idle_clients, list) { - /* Don't bother accumulating clients whose hwcnt requests - * have not yet been honoured. 
*/ - if (iter->pending) - continue; - if (v4) - patch_dump_buffer_hdr_v4(vinstr_ctx, iter); - else - patch_dump_buffer_hdr_v5(vinstr_ctx, iter); - accum_dump_buffer( - iter->accum_buffer, - vinstr_ctx->cpu_va, - iter->dump_size); - } - list_for_each_entry(iter, &vinstr_ctx->waiting_clients, list) { - /* Don't bother accumulating clients whose hwcnt requests - * have not yet been honoured. */ - if (iter->pending) - continue; - if (v4) - patch_dump_buffer_hdr_v4(vinstr_ctx, iter); - else - patch_dump_buffer_hdr_v5(vinstr_ctx, iter); - accum_dump_buffer( - iter->accum_buffer, - vinstr_ctx->cpu_va, - iter->dump_size); - } -} - -/*****************************************************************************/ - -/** - * kbasep_vinstr_get_timestamp - return timestamp - * - * Function returns timestamp value based on raw monotonic timer. Value will - * wrap around zero in case of overflow. - * - * Return: timestamp value - */ -static u64 kbasep_vinstr_get_timestamp(void) -{ - struct timespec ts; - - getrawmonotonic(&ts); - return (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec; -} - -/** - * kbasep_vinstr_add_dump_request - register client's dumping request - * @cli: requesting client - * @waiting_clients: list of pending dumping requests - */ -static void kbasep_vinstr_add_dump_request( - struct kbase_vinstr_client *cli, - struct list_head *waiting_clients) -{ - struct kbase_vinstr_client *tmp; - - if (list_empty(waiting_clients)) { - list_add(&cli->list, waiting_clients); - return; - } - list_for_each_entry(tmp, waiting_clients, list) { - if (tmp->dump_time > cli->dump_time) { - list_add_tail(&cli->list, &tmp->list); - return; - } - } - list_add_tail(&cli->list, waiting_clients); -} - -/** - * kbasep_vinstr_collect_and_accumulate - collect hw counters via low level - * dump and accumulate them for known - * clients - * @vinstr_ctx: vinstr context - * @timestamp: pointer where collection timestamp will be recorded - * - * Return: zero on success - */ -static int 
kbasep_vinstr_collect_and_accumulate( - struct kbase_vinstr_context *vinstr_ctx, u64 *timestamp) -{ - int rcode; - - /* Request HW counters dump. - * Disable preemption to make dump timestamp more accurate. */ - preempt_disable(); - *timestamp = kbasep_vinstr_get_timestamp(); - rcode = kbase_instr_hwcnt_request_dump(vinstr_ctx->kctx); - preempt_enable(); - - if (!rcode) - rcode = kbase_instr_hwcnt_wait_for_dump(vinstr_ctx->kctx); - WARN_ON(rcode); - - /* Accumulate values of collected counters. */ - if (!rcode) - accum_clients(vinstr_ctx); - - return rcode; -} - -/** - * kbasep_vinstr_fill_dump_buffer - copy accumulated counters to empty kernel - * buffer - * @cli: requesting client - * @timestamp: timestamp when counters were collected - * @event_id: id of event that caused triggered counters collection - * - * Return: zero on success - */ -static int kbasep_vinstr_fill_dump_buffer( - struct kbase_vinstr_client *cli, u64 timestamp, - enum base_hwcnt_reader_event event_id) -{ - unsigned int write_idx = atomic_read(&cli->write_idx); - unsigned int read_idx = atomic_read(&cli->read_idx); - - struct kbase_hwcnt_reader_metadata *meta; - void *buffer; - - /* Check if there is a place to copy HWC block into. */ - if (write_idx - read_idx == cli->buffer_count) - return -1; - write_idx %= cli->buffer_count; - - /* Fill in dump buffer and its metadata. */ - buffer = &cli->dump_buffers[write_idx * cli->dump_size]; - meta = &cli->dump_buffers_meta[write_idx]; - meta->timestamp = timestamp; - meta->event_id = event_id; - meta->buffer_idx = write_idx; - memcpy(buffer, cli->accum_buffer, cli->dump_size); - return 0; -} - -/** - * kbasep_vinstr_fill_dump_buffer_legacy - copy accumulated counters to buffer - * allocated in userspace - * @cli: requesting client - * - * Return: zero on success - * - * This is part of legacy ioctl interface. 
- */ -static int kbasep_vinstr_fill_dump_buffer_legacy( - struct kbase_vinstr_client *cli) -{ - void __user *buffer = cli->legacy_buffer; - int rcode; - - /* Copy data to user buffer. */ - rcode = copy_to_user(buffer, cli->accum_buffer, cli->dump_size); - if (rcode) - pr_warn("error while copying buffer to user\n"); - return rcode; -} - -/** - * kbasep_vinstr_reprogram - reprogram hwcnt set collected by inst - * @vinstr_ctx: vinstr context - */ -static void kbasep_vinstr_reprogram( - struct kbase_vinstr_context *vinstr_ctx) -{ - if (vinstr_ctx->reprogram) { - struct kbase_vinstr_client *iter; - - if (!reprogram_hwcnt(vinstr_ctx)) { - vinstr_ctx->reprogram = false; - list_for_each_entry( - iter, - &vinstr_ctx->idle_clients, - list) - iter->pending = false; - list_for_each_entry( - iter, - &vinstr_ctx->waiting_clients, - list) - iter->pending = false; - } - } -} - -/** - * kbasep_vinstr_update_client - copy accumulated counters to user readable - * buffer and notify the user - * @cli: requesting client - * @timestamp: timestamp when counters were collected - * @event_id: id of event that caused triggered counters collection - * - * Return: zero on success - */ -static int kbasep_vinstr_update_client( - struct kbase_vinstr_client *cli, u64 timestamp, - enum base_hwcnt_reader_event event_id) -{ - int rcode = 0; - - /* Copy collected counters to user readable buffer. */ - if (cli->buffer_count) - rcode = kbasep_vinstr_fill_dump_buffer( - cli, timestamp, event_id); - else - rcode = kbasep_vinstr_fill_dump_buffer_legacy(cli); - - if (rcode) - goto exit; - - - /* Notify client. Make sure all changes to memory are visible. */ - wmb(); - atomic_inc(&cli->write_idx); - wake_up_interruptible(&cli->waitq); - - /* Prepare for next request. 
*/ - memset(cli->accum_buffer, 0, cli->dump_size); - -exit: - return rcode; -} - -/** - * kbasep_vinstr_wake_up_callback - vinstr wake up timer wake up function - * - * @hrtimer: high resolution timer - * - * Return: High resolution timer restart enum. - */ -static enum hrtimer_restart kbasep_vinstr_wake_up_callback( - struct hrtimer *hrtimer) -{ - struct kbasep_vinstr_wake_up_timer *timer = - container_of( - hrtimer, - struct kbasep_vinstr_wake_up_timer, - hrtimer); - - KBASE_DEBUG_ASSERT(timer); - - atomic_set(&timer->vinstr_ctx->request_pending, 1); - wake_up_all(&timer->vinstr_ctx->waitq); - - return HRTIMER_NORESTART; -} - -/** - * kbasep_vinstr_service_task - HWC dumping service thread - * - * @data: Pointer to vinstr context structure. - * - * Return: Always returns zero. - */ -static int kbasep_vinstr_service_task(void *data) -{ - struct kbase_vinstr_context *vinstr_ctx = data; - struct kbasep_vinstr_wake_up_timer timer; - - KBASE_DEBUG_ASSERT(vinstr_ctx); - - hrtimer_init(&timer.hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - timer.hrtimer.function = kbasep_vinstr_wake_up_callback; - timer.vinstr_ctx = vinstr_ctx; - - while (!kthread_should_stop()) { - struct kbase_vinstr_client *cli = NULL; - struct kbase_vinstr_client *tmp; - - u64 timestamp = kbasep_vinstr_get_timestamp(); - u64 dump_time = 0; - struct list_head expired_requests; - - /* Hold lock while performing operations on lists of clients. */ - mutex_lock(&vinstr_ctx->lock); - - /* Closing thread must not interact with client requests. */ - if (current == vinstr_ctx->thread) { - atomic_set(&vinstr_ctx->request_pending, 0); - - if (!list_empty(&vinstr_ctx->waiting_clients)) { - cli = list_first_entry( - &vinstr_ctx->waiting_clients, - struct kbase_vinstr_client, - list); - dump_time = cli->dump_time; - } - } - - if (!cli || ((s64)timestamp - (s64)dump_time < 0ll)) { - mutex_unlock(&vinstr_ctx->lock); - - /* Sleep until next dumping event or service request. 
*/ - if (cli) { - u64 diff = dump_time - timestamp; - - hrtimer_start( - &timer.hrtimer, - ns_to_ktime(diff), - HRTIMER_MODE_REL); - } - wait_event( - vinstr_ctx->waitq, - atomic_read( - &vinstr_ctx->request_pending) || - kthread_should_stop()); - hrtimer_cancel(&timer.hrtimer); - continue; - } - - kbasep_vinstr_collect_and_accumulate(vinstr_ctx, ×tamp); - - INIT_LIST_HEAD(&expired_requests); - - /* Find all expired requests. */ - list_for_each_entry_safe( - cli, - tmp, - &vinstr_ctx->waiting_clients, - list) { - s64 tdiff = - (s64)(timestamp + DUMPING_RESOLUTION) - - (s64)cli->dump_time; - if (tdiff >= 0ll) { - list_del(&cli->list); - list_add(&cli->list, &expired_requests); - } else { - break; - } - } - - /* Fill data for each request found. */ - list_for_each_entry_safe(cli, tmp, &expired_requests, list) { - /* Ensure that legacy buffer will not be used from - * this kthread context. */ - BUG_ON(0 == cli->buffer_count); - /* Expect only periodically sampled clients. */ - BUG_ON(0 == cli->dump_interval); - - kbasep_vinstr_update_client( - cli, - timestamp, - BASE_HWCNT_READER_EVENT_PERIODIC); - - /* Set new dumping time. Drop missed probing times. */ - do { - cli->dump_time += cli->dump_interval; - } while (cli->dump_time < timestamp); - - list_del(&cli->list); - kbasep_vinstr_add_dump_request( - cli, - &vinstr_ctx->waiting_clients); - } - - /* Reprogram counters set if required. 
*/ - kbasep_vinstr_reprogram(vinstr_ctx); - - mutex_unlock(&vinstr_ctx->lock); - } - - return 0; -} - -/*****************************************************************************/ - -/** - * kbasep_vinstr_hwcnt_reader_buffer_ready - check if client has ready buffers - * @cli: pointer to vinstr client structure - * - * Return: non-zero if client has at least one dumping buffer filled that was - * not notified to user yet - */ -static int kbasep_vinstr_hwcnt_reader_buffer_ready( - struct kbase_vinstr_client *cli) -{ - KBASE_DEBUG_ASSERT(cli); - return atomic_read(&cli->write_idx) != atomic_read(&cli->meta_idx); -} - -/** - * kbasep_vinstr_hwcnt_reader_ioctl_get_buffer - hwcnt reader's ioctl command - * @cli: pointer to vinstr client structure - * @buffer: pointer to userspace buffer - * @size: size of buffer - * - * Return: zero on success - */ -static long kbasep_vinstr_hwcnt_reader_ioctl_get_buffer( - struct kbase_vinstr_client *cli, void __user *buffer, - size_t size) -{ - unsigned int meta_idx = atomic_read(&cli->meta_idx); - unsigned int idx = meta_idx % cli->buffer_count; - - struct kbase_hwcnt_reader_metadata *meta = &cli->dump_buffers_meta[idx]; - - /* Metadata sanity check. */ - KBASE_DEBUG_ASSERT(idx == meta->buffer_idx); - - if (sizeof(struct kbase_hwcnt_reader_metadata) != size) - return -EINVAL; - - /* Check if there is any buffer available. */ - if (atomic_read(&cli->write_idx) == meta_idx) - return -EAGAIN; - - /* Check if previously taken buffer was put back. */ - if (atomic_read(&cli->read_idx) != meta_idx) - return -EBUSY; - - /* Copy next available buffer's metadata to user. 
*/ - if (copy_to_user(buffer, meta, size)) - return -EFAULT; - - atomic_inc(&cli->meta_idx); - - return 0; -} - -/** - * kbasep_vinstr_hwcnt_reader_ioctl_put_buffer - hwcnt reader's ioctl command - * @cli: pointer to vinstr client structure - * @buffer: pointer to userspace buffer - * @size: size of buffer - * - * Return: zero on success - */ -static long kbasep_vinstr_hwcnt_reader_ioctl_put_buffer( - struct kbase_vinstr_client *cli, void __user *buffer, - size_t size) -{ - unsigned int read_idx = atomic_read(&cli->read_idx); - unsigned int idx = read_idx % cli->buffer_count; - - struct kbase_hwcnt_reader_metadata meta; - - if (sizeof(struct kbase_hwcnt_reader_metadata) != size) - return -EINVAL; - - /* Check if any buffer was taken. */ - if (atomic_read(&cli->meta_idx) == read_idx) - return -EPERM; - - /* Check if correct buffer is put back. */ - if (copy_from_user(&meta, buffer, size)) - return -EFAULT; - if (idx != meta.buffer_idx) - return -EINVAL; - - atomic_inc(&cli->read_idx); - - return 0; -} - -/** - * kbasep_vinstr_hwcnt_reader_ioctl_set_interval - hwcnt reader's ioctl command - * @cli: pointer to vinstr client structure - * @interval: periodic dumping interval (disable periodic dumping if zero) - * - * Return: zero on success - */ -static long kbasep_vinstr_hwcnt_reader_ioctl_set_interval( - struct kbase_vinstr_client *cli, u32 interval) -{ - struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx; - - KBASE_DEBUG_ASSERT(vinstr_ctx); - - mutex_lock(&vinstr_ctx->lock); - - if (vinstr_ctx->suspended) { - mutex_unlock(&vinstr_ctx->lock); - return -EBUSY; - } - - list_del(&cli->list); - - cli->dump_interval = interval; - - /* If interval is non-zero, enable periodic dumping for this client. 
*/ - if (cli->dump_interval) { - if (DUMPING_RESOLUTION > cli->dump_interval) - cli->dump_interval = DUMPING_RESOLUTION; - cli->dump_time = - kbasep_vinstr_get_timestamp() + cli->dump_interval; - - kbasep_vinstr_add_dump_request( - cli, &vinstr_ctx->waiting_clients); - - atomic_set(&vinstr_ctx->request_pending, 1); - wake_up_all(&vinstr_ctx->waitq); - } else { - list_add(&cli->list, &vinstr_ctx->idle_clients); - } - - mutex_unlock(&vinstr_ctx->lock); - - return 0; -} - -/** - * kbasep_vinstr_hwcnt_reader_event_mask - return event mask for event id - * @event_id: id of event - * Return: event_mask or zero if event is not supported or maskable - */ -static u32 kbasep_vinstr_hwcnt_reader_event_mask( - enum base_hwcnt_reader_event event_id) -{ - u32 event_mask = 0; - - switch (event_id) { - case BASE_HWCNT_READER_EVENT_PREJOB: - case BASE_HWCNT_READER_EVENT_POSTJOB: - /* These event are maskable. */ - event_mask = (1 << event_id); - break; - - case BASE_HWCNT_READER_EVENT_MANUAL: - case BASE_HWCNT_READER_EVENT_PERIODIC: - /* These event are non-maskable. */ - default: - /* These event are not supported. 
*/ - break; - } - - return event_mask; -} - -/** - * kbasep_vinstr_hwcnt_reader_ioctl_enable_event - hwcnt reader's ioctl command - * @cli: pointer to vinstr client structure - * @event_id: id of event to enable - * - * Return: zero on success - */ -static long kbasep_vinstr_hwcnt_reader_ioctl_enable_event( - struct kbase_vinstr_client *cli, - enum base_hwcnt_reader_event event_id) -{ - struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx; - u32 event_mask; - - KBASE_DEBUG_ASSERT(vinstr_ctx); - - event_mask = kbasep_vinstr_hwcnt_reader_event_mask(event_id); - if (!event_mask) - return -EINVAL; - - mutex_lock(&vinstr_ctx->lock); - cli->event_mask |= event_mask; - mutex_unlock(&vinstr_ctx->lock); - - return 0; -} - -/** - * kbasep_vinstr_hwcnt_reader_ioctl_disable_event - hwcnt reader's ioctl command - * @cli: pointer to vinstr client structure - * @event_id: id of event to disable - * - * Return: zero on success - */ -static long kbasep_vinstr_hwcnt_reader_ioctl_disable_event( - struct kbase_vinstr_client *cli, - enum base_hwcnt_reader_event event_id) -{ - struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx; - u32 event_mask; - - KBASE_DEBUG_ASSERT(vinstr_ctx); - - event_mask = kbasep_vinstr_hwcnt_reader_event_mask(event_id); - if (!event_mask) - return -EINVAL; - - mutex_lock(&vinstr_ctx->lock); - cli->event_mask &= ~event_mask; - mutex_unlock(&vinstr_ctx->lock); - - return 0; -} - -/** - * kbasep_vinstr_hwcnt_reader_ioctl_get_hwver - hwcnt reader's ioctl command - * @cli: pointer to vinstr client structure - * @hwver: pointer to user buffer where hw version will be stored - * - * Return: zero on success - */ -static long kbasep_vinstr_hwcnt_reader_ioctl_get_hwver( - struct kbase_vinstr_client *cli, u32 __user *hwver) -{ - struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx; - u32 ver; - - KBASE_DEBUG_ASSERT(vinstr_ctx); - - ver = 4; - if (!kbase_hw_has_feature(vinstr_ctx->kbdev, BASE_HW_FEATURE_V4)) - ver = 5; - return put_user(ver, hwver); -} - 
-/** - * kbasep_vinstr_hwcnt_reader_ioctl - hwcnt reader's ioctl - * @filp: pointer to file structure - * @cmd: user command - * @arg: command's argument - * - * Return: zero on success - */ -static long kbasep_vinstr_hwcnt_reader_ioctl(struct file *filp, - unsigned int cmd, unsigned long arg) -{ - long rcode = 0; - struct kbase_vinstr_client *cli; - - KBASE_DEBUG_ASSERT(filp); - - cli = filp->private_data; - KBASE_DEBUG_ASSERT(cli); - - if (unlikely(KBASE_HWCNT_READER != _IOC_TYPE(cmd))) - return -EINVAL; - - switch (cmd) { - case KBASE_HWCNT_READER_GET_API_VERSION: - rcode = put_user(HWCNT_READER_API, (u32 __user *)arg); - break; - case KBASE_HWCNT_READER_GET_HWVER: - rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_hwver( - cli, (u32 __user *)arg); - break; - case KBASE_HWCNT_READER_GET_BUFFER_SIZE: - KBASE_DEBUG_ASSERT(cli->vinstr_ctx); - rcode = put_user( - (u32)cli->vinstr_ctx->dump_size, - (u32 __user *)arg); - break; - case KBASE_HWCNT_READER_DUMP: - rcode = kbase_vinstr_hwc_dump( - cli, BASE_HWCNT_READER_EVENT_MANUAL); - break; - case KBASE_HWCNT_READER_CLEAR: - rcode = kbase_vinstr_hwc_clear(cli); - break; - case KBASE_HWCNT_READER_GET_BUFFER: - rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_buffer( - cli, (void __user *)arg, _IOC_SIZE(cmd)); - break; - case KBASE_HWCNT_READER_PUT_BUFFER: - rcode = kbasep_vinstr_hwcnt_reader_ioctl_put_buffer( - cli, (void __user *)arg, _IOC_SIZE(cmd)); - break; - case KBASE_HWCNT_READER_SET_INTERVAL: - rcode = kbasep_vinstr_hwcnt_reader_ioctl_set_interval( - cli, (u32)arg); - break; - case KBASE_HWCNT_READER_ENABLE_EVENT: - rcode = kbasep_vinstr_hwcnt_reader_ioctl_enable_event( - cli, (enum base_hwcnt_reader_event)arg); - break; - case KBASE_HWCNT_READER_DISABLE_EVENT: - rcode = kbasep_vinstr_hwcnt_reader_ioctl_disable_event( - cli, (enum base_hwcnt_reader_event)arg); - break; - default: - rcode = -EINVAL; - break; - } - - return rcode; -} - -/** - * kbasep_vinstr_hwcnt_reader_poll - hwcnt reader's poll - * @filp: pointer to 
file structure - * @wait: pointer to poll table - * Return: POLLIN if data can be read without blocking, otherwise zero - */ -static unsigned int kbasep_vinstr_hwcnt_reader_poll(struct file *filp, - poll_table *wait) -{ - struct kbase_vinstr_client *cli; - - KBASE_DEBUG_ASSERT(filp); - KBASE_DEBUG_ASSERT(wait); - - cli = filp->private_data; - KBASE_DEBUG_ASSERT(cli); - - poll_wait(filp, &cli->waitq, wait); - if (kbasep_vinstr_hwcnt_reader_buffer_ready(cli)) - return POLLIN; - return 0; -} - -/** - * kbasep_vinstr_hwcnt_reader_mmap - hwcnt reader's mmap - * @filp: pointer to file structure - * @vma: pointer to vma structure - * Return: zero on success - */ -static int kbasep_vinstr_hwcnt_reader_mmap(struct file *filp, - struct vm_area_struct *vma) -{ - struct kbase_vinstr_client *cli; - size_t size; - - KBASE_DEBUG_ASSERT(filp); - KBASE_DEBUG_ASSERT(vma); - - cli = filp->private_data; - KBASE_DEBUG_ASSERT(cli); - - size = cli->buffer_count * cli->dump_size; - if (vma->vm_end - vma->vm_start > size) - return -ENOMEM; - - return remap_pfn_range( - vma, - vma->vm_start, - __pa((unsigned long)cli->dump_buffers) >> PAGE_SHIFT, - size, - vma->vm_page_prot); -} - -/** - * kbasep_vinstr_hwcnt_reader_release - hwcnt reader's release - * @inode: pointer to inode structure - * @filp: pointer to file structure - * Return always return zero - */ -static int kbasep_vinstr_hwcnt_reader_release(struct inode *inode, - struct file *filp) -{ - struct kbase_vinstr_client *cli; - - KBASE_DEBUG_ASSERT(inode); - KBASE_DEBUG_ASSERT(filp); - - cli = filp->private_data; - KBASE_DEBUG_ASSERT(cli); - - kbasep_vinstr_detach_client(cli); - return 0; -} - -/*****************************************************************************/ - -struct kbase_vinstr_context *kbase_vinstr_init(struct kbase_device *kbdev) -{ - struct kbase_vinstr_context *vinstr_ctx; - - vinstr_ctx = kzalloc(sizeof(*vinstr_ctx), GFP_KERNEL); - if (!vinstr_ctx) - return NULL; - - INIT_LIST_HEAD(&vinstr_ctx->idle_clients); - 
INIT_LIST_HEAD(&vinstr_ctx->waiting_clients); - mutex_init(&vinstr_ctx->lock); - vinstr_ctx->kbdev = kbdev; - vinstr_ctx->thread = NULL; - - atomic_set(&vinstr_ctx->request_pending, 0); - init_waitqueue_head(&vinstr_ctx->waitq); - - return vinstr_ctx; -} - -void kbase_vinstr_term(struct kbase_vinstr_context *vinstr_ctx) -{ - struct kbase_vinstr_client *cli; - - /* Stop service thread first. */ - if (vinstr_ctx->thread) - kthread_stop(vinstr_ctx->thread); - - while (1) { - struct list_head *list = &vinstr_ctx->idle_clients; - - if (list_empty(list)) { - list = &vinstr_ctx->waiting_clients; - if (list_empty(list)) - break; - } - - cli = list_first_entry(list, struct kbase_vinstr_client, list); - list_del(&cli->list); - kfree(cli->accum_buffer); - kfree(cli); - vinstr_ctx->nclients--; - } - KBASE_DEBUG_ASSERT(!vinstr_ctx->nclients); - if (vinstr_ctx->kctx) - kbasep_vinstr_destroy_kctx(vinstr_ctx); - kfree(vinstr_ctx); -} - -int kbase_vinstr_hwcnt_reader_setup(struct kbase_vinstr_context *vinstr_ctx, - struct kbase_uk_hwcnt_reader_setup *setup) -{ - struct kbase_vinstr_client *cli; - u32 bitmap[4]; - - KBASE_DEBUG_ASSERT(vinstr_ctx); - KBASE_DEBUG_ASSERT(setup); - KBASE_DEBUG_ASSERT(setup->buffer_count); - - bitmap[SHADER_HWCNT_BM] = setup->shader_bm; - bitmap[TILER_HWCNT_BM] = setup->tiler_bm; - bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm; - bitmap[JM_HWCNT_BM] = setup->jm_bm; - - cli = kbasep_vinstr_attach_client( - vinstr_ctx, - setup->buffer_count, - bitmap, - &setup->fd); - - if (!cli) - return -ENOMEM; - - return 0; -} - -int kbase_vinstr_legacy_hwc_setup( - struct kbase_vinstr_context *vinstr_ctx, - struct kbase_vinstr_client **cli, - struct kbase_uk_hwcnt_setup *setup) -{ - KBASE_DEBUG_ASSERT(vinstr_ctx); - KBASE_DEBUG_ASSERT(setup); - KBASE_DEBUG_ASSERT(cli); - - if (setup->dump_buffer) { - u32 bitmap[4]; - - bitmap[SHADER_HWCNT_BM] = setup->shader_bm; - bitmap[TILER_HWCNT_BM] = setup->tiler_bm; - bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm; - 
bitmap[JM_HWCNT_BM] = setup->jm_bm; - - if (*cli) - return -EBUSY; - - *cli = kbasep_vinstr_attach_client( - vinstr_ctx, - 0, - bitmap, - (void *)(long)setup->dump_buffer); - - if (!(*cli)) - return -ENOMEM; - } else { - if (!*cli) - return -EINVAL; - - kbasep_vinstr_detach_client(*cli); - *cli = NULL; - } - - return 0; -} - -int kbase_vinstr_hwc_dump(struct kbase_vinstr_client *cli, - enum base_hwcnt_reader_event event_id) -{ - int rcode = 0; - struct kbase_vinstr_context *vinstr_ctx; - u64 timestamp; - u32 event_mask; - - if (!cli) - return -EINVAL; - - vinstr_ctx = cli->vinstr_ctx; - KBASE_DEBUG_ASSERT(vinstr_ctx); - - KBASE_DEBUG_ASSERT(event_id < BASE_HWCNT_READER_EVENT_COUNT); - event_mask = 1 << event_id; - - mutex_lock(&vinstr_ctx->lock); - - if (vinstr_ctx->suspended) { - rcode = -EBUSY; - goto exit; - } - - if (event_mask & cli->event_mask) { - rcode = kbasep_vinstr_collect_and_accumulate( - vinstr_ctx, - ×tamp); - if (rcode) - goto exit; - - rcode = kbasep_vinstr_update_client(cli, timestamp, event_id); - if (rcode) - goto exit; - - kbasep_vinstr_reprogram(vinstr_ctx); - } - -exit: - mutex_unlock(&vinstr_ctx->lock); - - return rcode; -} - -int kbase_vinstr_hwc_clear(struct kbase_vinstr_client *cli) -{ - struct kbase_vinstr_context *vinstr_ctx; - int rcode; - u64 unused; - - if (!cli) - return -EINVAL; - - vinstr_ctx = cli->vinstr_ctx; - KBASE_DEBUG_ASSERT(vinstr_ctx); - - mutex_lock(&vinstr_ctx->lock); - - if (vinstr_ctx->suspended) { - rcode = -EBUSY; - goto exit; - } - - rcode = kbasep_vinstr_collect_and_accumulate(vinstr_ctx, &unused); - if (rcode) - goto exit; - rcode = kbase_instr_hwcnt_clear(vinstr_ctx->kctx); - if (rcode) - goto exit; - memset(cli->accum_buffer, 0, cli->dump_size); - - kbasep_vinstr_reprogram(vinstr_ctx); - -exit: - mutex_unlock(&vinstr_ctx->lock); - - return rcode; -} - -void kbase_vinstr_hwc_suspend(struct kbase_vinstr_context *vinstr_ctx) -{ - u64 unused; - - KBASE_DEBUG_ASSERT(vinstr_ctx); - - mutex_lock(&vinstr_ctx->lock); - 
kbasep_vinstr_collect_and_accumulate(vinstr_ctx, &unused); - vinstr_ctx->suspended = true; - vinstr_ctx->suspended_clients = vinstr_ctx->waiting_clients; - INIT_LIST_HEAD(&vinstr_ctx->waiting_clients); - mutex_unlock(&vinstr_ctx->lock); -} - -void kbase_vinstr_hwc_resume(struct kbase_vinstr_context *vinstr_ctx) -{ - KBASE_DEBUG_ASSERT(vinstr_ctx); - - mutex_lock(&vinstr_ctx->lock); - vinstr_ctx->suspended = false; - vinstr_ctx->waiting_clients = vinstr_ctx->suspended_clients; - vinstr_ctx->reprogram = true; - kbasep_vinstr_reprogram(vinstr_ctx); - atomic_set(&vinstr_ctx->request_pending, 1); - wake_up_all(&vinstr_ctx->waitq); - mutex_unlock(&vinstr_ctx->lock); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_vinstr.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_vinstr.h deleted file mode 100755 index 12340e5c647db..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_kbase_vinstr.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#ifndef _KBASE_VINSTR_H_ -#define _KBASE_VINSTR_H_ - -#include -#include - -/*****************************************************************************/ - -struct kbase_vinstr_context; -struct kbase_vinstr_client; - -/*****************************************************************************/ - -/** - * kbase_vinstr_init() - initialize the vinstr core - * @kbdev: kbase device - * - * Return: pointer to the vinstr context on success or NULL on failure - */ -struct kbase_vinstr_context *kbase_vinstr_init(struct kbase_device *kbdev); - -/** - * kbase_vinstr_term() - terminate the vinstr core - * @vinstr_ctx: vinstr context - */ -void kbase_vinstr_term(struct kbase_vinstr_context *vinstr_ctx); - -/** - * kbase_vinstr_hwcnt_reader_setup - configure hw counters reader - * @vinstr_ctx: vinstr context - * @setup: reader's configuration - * - * Return: zero on success - */ -int kbase_vinstr_hwcnt_reader_setup( - struct kbase_vinstr_context *vinstr_ctx, - struct kbase_uk_hwcnt_reader_setup *setup); - -/** - * kbase_vinstr_legacy_hwc_setup - configure hw counters for dumping - * @vinstr_ctx: vinstr context - * @cli: pointer where to store pointer to new vinstr client structure - * @setup: hwc configuration - * - * Return: zero on success - */ -int kbase_vinstr_legacy_hwc_setup( - struct kbase_vinstr_context *vinstr_ctx, - struct kbase_vinstr_client **cli, - struct kbase_uk_hwcnt_setup *setup); - -/** - * kbase_vinstr_hwc_dump - issue counter dump for vinstr client - * @cli: pointer to vinstr client - * @event_id: id of event that triggered hwcnt dump - * - * Return: zero on success - */ -int kbase_vinstr_hwc_dump( - struct kbase_vinstr_client *cli, - enum base_hwcnt_reader_event event_id); - -/** - * kbase_vinstr_hwc_clear - performs a reset of the hardware counters for - * a given kbase context - * @cli: pointer to vinstr client - * - * Return: zero on success - */ -int kbase_vinstr_hwc_clear(struct kbase_vinstr_client *cli); - -/** - * 
kbase_vinstr_hwc_suspend - suspends hardware counter collection for - * a given kbase context - * @vinstr_ctx: vinstr context - */ -void kbase_vinstr_hwc_suspend(struct kbase_vinstr_context *vinstr_ctx); - -/** - * kbase_vinstr_hwc_resume - resumes hardware counter collection for - * a given kbase context - * @vinstr_ctx: vinstr context - */ -void kbase_vinstr_hwc_resume(struct kbase_vinstr_context *vinstr_ctx); - -#endif /* _KBASE_VINSTR_H_ */ - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h deleted file mode 100755 index 5d6b4021d626f..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h +++ /dev/null @@ -1,201 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - -#if !defined(_TRACE_MALI_KBASE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_MALI_KBASE_H - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM mali - -#include - -DECLARE_EVENT_CLASS(mali_slot_template, - TP_PROTO(int jobslot, unsigned int info_val), - TP_ARGS(jobslot, info_val), - TP_STRUCT__entry( - __field(unsigned int, jobslot) - __field(unsigned int, info_val) - ), - TP_fast_assign( - __entry->jobslot = jobslot; - __entry->info_val = info_val; - ), - TP_printk("jobslot=%u info=%u", __entry->jobslot, __entry->info_val) -); - -#define DEFINE_MALI_SLOT_EVENT(name) \ -DEFINE_EVENT(mali_slot_template, mali_##name, \ - TP_PROTO(int jobslot, unsigned int info_val), \ - TP_ARGS(jobslot, info_val)) -DEFINE_MALI_SLOT_EVENT(JM_SUBMIT); -DEFINE_MALI_SLOT_EVENT(JM_JOB_DONE); -DEFINE_MALI_SLOT_EVENT(JM_UPDATE_HEAD); -DEFINE_MALI_SLOT_EVENT(JM_CHECK_HEAD); -DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP); -DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_0); -DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_1); -DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP); -DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_0); -DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_1); -DEFINE_MALI_SLOT_EVENT(JM_SLOT_SOFT_OR_HARD_STOP); -DEFINE_MALI_SLOT_EVENT(JM_SLOT_EVICT); -DEFINE_MALI_SLOT_EVENT(JM_BEGIN_RESET_WORKER); -DEFINE_MALI_SLOT_EVENT(JM_END_RESET_WORKER); -DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED); -DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_SUBMIT_TO_BLOCKED); -DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_CURRENT); -DEFINE_MALI_SLOT_EVENT(JD_DONE_TRY_RUN_NEXT_JOB); -DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_CORES_FAILED); -DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_INUSE_FAILED); -DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED); -DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_AFFINITY_WOULD_VIOLATE); -DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_TRY_RUN_NEXT_JOB); -DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_RETRY_NEEDED); -DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB); -DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB_IRQ); -#undef DEFINE_MALI_SLOT_EVENT 
- -DECLARE_EVENT_CLASS(mali_refcount_template, - TP_PROTO(int refcount, unsigned int info_val), - TP_ARGS(refcount, info_val), - TP_STRUCT__entry( - __field(unsigned int, refcount) - __field(unsigned int, info_val) - ), - TP_fast_assign( - __entry->refcount = refcount; - __entry->info_val = info_val; - ), - TP_printk("refcount=%u info=%u", __entry->refcount, __entry->info_val) -); - -#define DEFINE_MALI_REFCOUNT_EVENT(name) \ -DEFINE_EVENT(mali_refcount_template, mali_##name, \ - TP_PROTO(int refcount, unsigned int info_val), \ - TP_ARGS(refcount, info_val)) -DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX_NOLOCK); -DEFINE_MALI_REFCOUNT_EVENT(JS_ADD_JOB); -DEFINE_MALI_REFCOUNT_EVENT(JS_REMOVE_JOB); -DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX); -DEFINE_MALI_REFCOUNT_EVENT(JS_RELEASE_CTX); -DEFINE_MALI_REFCOUNT_EVENT(JS_TRY_SCHEDULE_HEAD_CTX); -DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_INIT_CTX); -DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TERM_CTX); -DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_ENQUEUE_CTX); -DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_DEQUEUE_HEAD_CTX); -DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TRY_EVICT_CTX); -DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_ADD_CTX); -DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_REMOVE_CTX); -DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_FOREACH_CTX_JOBS); -DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_ACTIVE); -DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_IDLE); -#undef DEFINE_MALI_REFCOUNT_EVENT - -DECLARE_EVENT_CLASS(mali_add_template, - TP_PROTO(int gpu_addr, unsigned int info_val), - TP_ARGS(gpu_addr, info_val), - TP_STRUCT__entry( - __field(unsigned int, gpu_addr) - __field(unsigned int, info_val) - ), - TP_fast_assign( - __entry->gpu_addr = gpu_addr; - __entry->info_val = info_val; - ), - TP_printk("gpu_addr=%u info=%u", __entry->gpu_addr, __entry->info_val) -); - -#define DEFINE_MALI_ADD_EVENT(name) \ -DEFINE_EVENT(mali_add_template, mali_##name, \ - TP_PROTO(int gpu_addr, unsigned int info_val), \ - TP_ARGS(gpu_addr, info_val)) -DEFINE_MALI_ADD_EVENT(CORE_CTX_DESTROY); 
-DEFINE_MALI_ADD_EVENT(CORE_CTX_HWINSTR_TERM); -DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ); -DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_CLEAR); -DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_DONE); -DEFINE_MALI_ADD_EVENT(CORE_GPU_SOFT_RESET); -DEFINE_MALI_ADD_EVENT(CORE_GPU_HARD_RESET); -DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_SAMPLE); -DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_CLEAR); -DEFINE_MALI_ADD_EVENT(CORE_GPU_CLEAN_INV_CACHES); -DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER); -DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER_END); -DEFINE_MALI_ADD_EVENT(JD_CANCEL_WORKER); -DEFINE_MALI_ADD_EVENT(JD_DONE); -DEFINE_MALI_ADD_EVENT(JD_CANCEL); -DEFINE_MALI_ADD_EVENT(JD_ZAP_CONTEXT); -DEFINE_MALI_ADD_EVENT(JM_IRQ); -DEFINE_MALI_ADD_EVENT(JM_IRQ_END); -DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS); -DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS_DONE); -DEFINE_MALI_ADD_EVENT(JM_ZAP_NON_SCHEDULED); -DEFINE_MALI_ADD_EVENT(JM_ZAP_SCHEDULED); -DEFINE_MALI_ADD_EVENT(JM_ZAP_DONE); -DEFINE_MALI_ADD_EVENT(JM_SUBMIT_AFTER_RESET); -DEFINE_MALI_ADD_EVENT(JM_JOB_COMPLETE); -DEFINE_MALI_ADD_EVENT(JS_FAST_START_EVICTS_CTX); -DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_RUNPOOL); -DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_RUNPOOL); -DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_CTX); -DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_CTX); -DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_END); -DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_START); -DEFINE_MALI_ADD_EVENT(JS_POLICY_ENQUEUE_JOB); -DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_DESIRED); -DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERING_UP); -DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERED_UP); -DEFINE_MALI_ADD_EVENT(PM_PWRON); -DEFINE_MALI_ADD_EVENT(PM_PWRON_TILER); -DEFINE_MALI_ADD_EVENT(PM_PWRON_L2); -DEFINE_MALI_ADD_EVENT(PM_PWROFF); -DEFINE_MALI_ADD_EVENT(PM_PWROFF_TILER); -DEFINE_MALI_ADD_EVENT(PM_PWROFF_L2); -DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED); -DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_TILER); -DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_L2); -DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED); 
-DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED_TILER); -DEFINE_MALI_ADD_EVENT(PM_UNREQUEST_CHANGE_SHADER_NEEDED); -DEFINE_MALI_ADD_EVENT(PM_REQUEST_CHANGE_SHADER_NEEDED); -DEFINE_MALI_ADD_EVENT(PM_REGISTER_CHANGE_SHADER_NEEDED); -DEFINE_MALI_ADD_EVENT(PM_REGISTER_CHANGE_SHADER_INUSE); -DEFINE_MALI_ADD_EVENT(PM_RELEASE_CHANGE_SHADER_INUSE); -DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE); -DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE_TILER); -DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE); -DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE_TILER); -DEFINE_MALI_ADD_EVENT(PM_GPU_ON); -DEFINE_MALI_ADD_EVENT(PM_GPU_OFF); -DEFINE_MALI_ADD_EVENT(PM_SET_POLICY); -DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_INIT); -DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_TERM); -DEFINE_MALI_ADD_EVENT(PM_CA_SET_POLICY); -DEFINE_MALI_ADD_EVENT(PM_WAKE_WAITERS); -#undef DEFINE_MALI_ADD_EVENT - -#endif /* _TRACE_MALI_KBASE_H */ - -#undef TRACE_INCLUDE_PATH -#undef linux -#define TRACE_INCLUDE_PATH . -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE mali_linux_kbase_trace - -/* This part must be outside protection */ -#include diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_linux_trace.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_linux_trace.h deleted file mode 100755 index fc3cf32ba4d28..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_linux_trace.h +++ /dev/null @@ -1,211 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#if !defined(_TRACE_MALI_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_MALI_H - -#include -#include - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM mali -#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) -#define TRACE_INCLUDE_FILE mali_linux_trace - -#define MALI_JOB_SLOTS_EVENT_CHANGED - -/** - * mali_job_slots_event - called from mali_kbase_core_linux.c - * @event_id: ORed together bitfields representing a type of event, made with the GATOR_MAKE_EVENT() macro. - */ -TRACE_EVENT(mali_job_slots_event, - TP_PROTO(unsigned int event_id, unsigned int tgid, unsigned int pid, - unsigned char job_id), - TP_ARGS(event_id, tgid, pid, job_id), - TP_STRUCT__entry( - __field(unsigned int, event_id) - __field(unsigned int, tgid) - __field(unsigned int, pid) - __field(unsigned char, job_id) - ), - TP_fast_assign( - __entry->event_id = event_id; - __entry->tgid = tgid; - __entry->pid = pid; - __entry->job_id = job_id; - ), - TP_printk("event=%u tgid=%u pid=%u job_id=%u", - __entry->event_id, __entry->tgid, __entry->pid, __entry->job_id) -); - -/** - * mali_pm_status - Called by mali_kbase_pm_driver.c - * @event_id: core type (shader, tiler, l2 cache) - * @value: 64bits bitmask reporting either power status of the cores (1-ON, 0-OFF) - */ -TRACE_EVENT(mali_pm_status, - TP_PROTO(unsigned int event_id, unsigned long long value), - TP_ARGS(event_id, value), - TP_STRUCT__entry( - __field(unsigned int, event_id) - __field(unsigned long long, value) - ), - TP_fast_assign( - __entry->event_id = event_id; - __entry->value = value; - ), - TP_printk("event %u = %llu", __entry->event_id, __entry->value) -); - -/** - * mali_pm_power_on - Called by mali_kbase_pm_driver.c - * @event_id: core type (shader, tiler, l2 cache) - * @value: 64bits bitmask reporting the cores to power up - */ -TRACE_EVENT(mali_pm_power_on, - TP_PROTO(unsigned int event_id, unsigned long long value), - TP_ARGS(event_id, value), - TP_STRUCT__entry( - __field(unsigned int, event_id) - 
__field(unsigned long long, value) - ), - TP_fast_assign( - __entry->event_id = event_id; - __entry->value = value; - ), - TP_printk("event %u = %llu", __entry->event_id, __entry->value) -); - -/** - * mali_pm_power_off - Called by mali_kbase_pm_driver.c - * @event_id: core type (shader, tiler, l2 cache) - * @value: 64bits bitmask reporting the cores to power down - */ -TRACE_EVENT(mali_pm_power_off, - TP_PROTO(unsigned int event_id, unsigned long long value), - TP_ARGS(event_id, value), - TP_STRUCT__entry( - __field(unsigned int, event_id) - __field(unsigned long long, value) - ), - TP_fast_assign( - __entry->event_id = event_id; - __entry->value = value; - ), - TP_printk("event %u = %llu", __entry->event_id, __entry->value) -); - -/** - * mali_page_fault_insert_pages - Called by page_fault_worker() - * it reports an MMU page fault resulting in new pages being mapped. - * @event_id: MMU address space number. - * @value: number of newly allocated pages - */ -TRACE_EVENT(mali_page_fault_insert_pages, - TP_PROTO(int event_id, unsigned long value), - TP_ARGS(event_id, value), - TP_STRUCT__entry( - __field(int, event_id) - __field(unsigned long, value) - ), - TP_fast_assign( - __entry->event_id = event_id; - __entry->value = value; - ), - TP_printk("event %d = %lu", __entry->event_id, __entry->value) -); - -/** - * mali_mmu_as_in_use - Called by assign_and_activate_kctx_addr_space() - * it reports that a certain MMU address space is in use now. - * @event_id: MMU address space number. - */ -TRACE_EVENT(mali_mmu_as_in_use, - TP_PROTO(int event_id), - TP_ARGS(event_id), - TP_STRUCT__entry( - __field(int, event_id) - ), - TP_fast_assign( - __entry->event_id = event_id; - ), - TP_printk("event=%d", __entry->event_id) -); - -/** - * mali_mmu_as_released - Called by kbasep_js_runpool_release_ctx_internal() - * it reports that a certain MMU address space has been released now. - * @event_id: MMU address space number. 
- */ -TRACE_EVENT(mali_mmu_as_released, - TP_PROTO(int event_id), - TP_ARGS(event_id), - TP_STRUCT__entry( - __field(int, event_id) - ), - TP_fast_assign( - __entry->event_id = event_id; - ), - TP_printk("event=%d", __entry->event_id) -); - -/** - * mali_total_alloc_pages_change - Called by kbase_atomic_add_pages() - * and by kbase_atomic_sub_pages() - * it reports that the total number of allocated pages is changed. - * @event_id: number of pages to be added or subtracted (according to the sign). - */ -TRACE_EVENT(mali_total_alloc_pages_change, - TP_PROTO(long long int event_id), - TP_ARGS(event_id), - TP_STRUCT__entry( - __field(long long int, event_id) - ), - TP_fast_assign( - __entry->event_id = event_id; - ), - TP_printk("event=%lld", __entry->event_id) -); - -/** - * mali_sw_counter - not currently used - * @event_id: counter id - */ -TRACE_EVENT(mali_sw_counter, - TP_PROTO(unsigned int event_id, signed long long value), - TP_ARGS(event_id, value), - TP_STRUCT__entry( - __field(int, event_id) - __field(long long, value) - ), - TP_fast_assign( - __entry->event_id = event_id; - __entry->value = value; - ), - TP_printk("event %d = %lld", __entry->event_id, __entry->value) -); - -#endif /* _TRACE_MALI_H */ - -#undef TRACE_INCLUDE_PATH -#undef linux -#define TRACE_INCLUDE_PATH . - -/* This part must be outside protection */ -#include diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_malisw.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_malisw.h deleted file mode 100755 index 99452933eab48..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_malisw.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * Kernel-wide include for common macros and types. - */ - -#ifndef _MALISW_H_ -#define _MALISW_H_ - -#include -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) -#define U8_MAX ((u8)~0U) -#define S8_MAX ((s8)(U8_MAX>>1)) -#define S8_MIN ((s8)(-S8_MAX - 1)) -#define U16_MAX ((u16)~0U) -#define S16_MAX ((s16)(U16_MAX>>1)) -#define S16_MIN ((s16)(-S16_MAX - 1)) -#define U32_MAX ((u32)~0U) -#define S32_MAX ((s32)(U32_MAX>>1)) -#define S32_MIN ((s32)(-S32_MAX - 1)) -#define U64_MAX ((u64)~0ULL) -#define S64_MAX ((s64)(U64_MAX>>1)) -#define S64_MIN ((s64)(-S64_MAX - 1)) -#endif /* LINUX_VERSION_CODE */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) -#define SIZE_MAX (~(size_t)0) -#endif /* LINUX_VERSION_CODE */ - -/** - * MIN - Return the lesser of two values. - * - * As a macro it may evaluate its arguments more than once. - * Refer to MAX macro for more details - */ -#define MIN(x, y) ((x) < (y) ? (x) : (y)) - -/** - * MAX - Return the greater of two values. - * - * As a macro it may evaluate its arguments more than once. - * If called on the same two arguments as MIN it is guaranteed to return - * the one that MIN didn't return. This is significant for types where not - * all values are comparable e.g. NaNs in floating-point types. But if you want - * to retrieve the min and max of two values, consider using a conditional swap - * instead. - */ -#define MAX(x, y) ((x) < (y) ? (y) : (x)) - -/** - * @hideinitializer - * Function-like macro for suppressing unused variable warnings. Where possible - * such variables should be removed; this macro is present for cases where we - * much support API backwards compatibility. - */ -#define CSTD_UNUSED(x) ((void)(x)) - -/** - * @hideinitializer - * Function-like macro for use where "no behavior" is desired. 
This is useful - * when compile time macros turn a function-like macro in to a no-op, but - * where having no statement is otherwise invalid. - */ -#define CSTD_NOP(...) ((void)#__VA_ARGS__) - -/** - * Function-like macro for converting a pointer in to a u64 for storing into - * an external data structure. This is commonly used when pairing a 32-bit - * CPU with a 64-bit peripheral, such as a Midgard GPU. C's type promotion - * is complex and a straight cast does not work reliably as pointers are - * often considered as signed. - */ -#define PTR_TO_U64(x) ((uint64_t)((uintptr_t)(x))) - -/** - * @hideinitializer - * Function-like macro for stringizing a single level macro. - * @code - * #define MY_MACRO 32 - * CSTD_STR1( MY_MACRO ) - * > "MY_MACRO" - * @endcode - */ -#define CSTD_STR1(x) #x - -/** - * @hideinitializer - * Function-like macro for stringizing a macro's value. This should not be used - * if the macro is defined in a way which may have no value; use the - * alternative @c CSTD_STR2N macro should be used instead. - * @code - * #define MY_MACRO 32 - * CSTD_STR2( MY_MACRO ) - * > "32" - * @endcode - */ -#define CSTD_STR2(x) CSTD_STR1(x) - -/** - * Specify an assertion value which is evaluated at compile time. Recommended - * usage is specification of a @c static @c INLINE function containing all of - * the assertions thus: - * - * @code - * static INLINE [module]_compile_time_assertions( void ) - * { - * COMPILE_TIME_ASSERT( sizeof(uintptr_t) == sizeof(intptr_t) ); - * } - * @endcode - * - * @note Use @c static not @c STATIC. We never want to turn off this @c static - * specification for testing purposes. 
- */ -#define CSTD_COMPILE_TIME_ASSERT(expr) \ - do { switch (0) { case 0: case (expr):; } } while (false) - -#endif /* _MALISW_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_midg_regmap.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_midg_regmap.h deleted file mode 100755 index c3def83dba244..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_midg_regmap.h +++ /dev/null @@ -1,546 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef _MIDGARD_REGMAP_H_ -#define _MIDGARD_REGMAP_H_ - -/* - * Begin Register Offsets - */ - -#define GPU_CONTROL_BASE 0x0000 -#define GPU_CONTROL_REG(r) (GPU_CONTROL_BASE + (r)) -#define GPU_ID 0x000 /* (RO) GPU and revision identifier */ -#define L2_FEATURES 0x004 /* (RO) Level 2 cache features */ -#define SUSPEND_SIZE 0x008 /* (RO) Fixed-function suspend buffer - size */ -#define TILER_FEATURES 0x00C /* (RO) Tiler Features */ -#define MEM_FEATURES 0x010 /* (RO) Memory system features */ -#define MMU_FEATURES 0x014 /* (RO) MMU features */ -#define AS_PRESENT 0x018 /* (RO) Address space slots present */ -#define JS_PRESENT 0x01C /* (RO) Job slots present */ -#define GPU_IRQ_RAWSTAT 0x020 /* (RW) */ -#define GPU_IRQ_CLEAR 0x024 /* (WO) */ -#define GPU_IRQ_MASK 0x028 /* (RW) */ -#define GPU_IRQ_STATUS 0x02C /* (RO) */ - -/* IRQ flags */ -#define GPU_FAULT (1 << 0) /* A GPU Fault has occurred */ -#define MULTIPLE_GPU_FAULTS (1 << 7) /* More than one GPU Fault occurred. 
*/ -#define RESET_COMPLETED (1 << 8) /* Set when a reset has completed. Intended to use with SOFT_RESET - commands which may take time. */ -#define POWER_CHANGED_SINGLE (1 << 9) /* Set when a single core has finished powering up or down. */ -#define POWER_CHANGED_ALL (1 << 10) /* Set when all cores have finished powering up or down - and the power manager is idle. */ - -#define PRFCNT_SAMPLE_COMPLETED (1 << 16) /* Set when a performance count sample has completed. */ -#define CLEAN_CACHES_COMPLETED (1 << 17) /* Set when a cache clean operation has completed. */ - -#define GPU_IRQ_REG_ALL (GPU_FAULT | MULTIPLE_GPU_FAULTS | RESET_COMPLETED \ - | POWER_CHANGED_ALL | PRFCNT_SAMPLE_COMPLETED) - -#define GPU_COMMAND 0x030 /* (WO) */ -#define GPU_STATUS 0x034 /* (RO) */ - - -#define GROUPS_L2_COHERENT (1 << 0) /* Cores groups are l2 coherent */ - -#define GPU_FAULTSTATUS 0x03C /* (RO) GPU exception type and fault status */ -#define GPU_FAULTADDRESS_LO 0x040 /* (RO) GPU exception fault address, low word */ -#define GPU_FAULTADDRESS_HI 0x044 /* (RO) GPU exception fault address, high word */ - -#define PWR_KEY 0x050 /* (WO) Power manager key register */ -#define PWR_OVERRIDE0 0x054 /* (RW) Power manager override settings */ -#define PWR_OVERRIDE1 0x058 /* (RW) Power manager override settings */ - -#define PRFCNT_BASE_LO 0x060 /* (RW) Performance counter memory region base address, low word */ -#define PRFCNT_BASE_HI 0x064 /* (RW) Performance counter memory region base address, high word */ -#define PRFCNT_CONFIG 0x068 /* (RW) Performance counter configuration */ -#define PRFCNT_JM_EN 0x06C /* (RW) Performance counter enable flags for Job Manager */ -#define PRFCNT_SHADER_EN 0x070 /* (RW) Performance counter enable flags for shader cores */ -#define PRFCNT_TILER_EN 0x074 /* (RW) Performance counter enable flags for tiler */ -#define PRFCNT_MMU_L2_EN 0x07C /* (RW) Performance counter enable flags for MMU/L2 cache */ - -#define CYCLE_COUNT_LO 0x090 /* (RO) Cycle counter, low 
word */ -#define CYCLE_COUNT_HI 0x094 /* (RO) Cycle counter, high word */ -#define TIMESTAMP_LO 0x098 /* (RO) Global time stamp counter, low word */ -#define TIMESTAMP_HI 0x09C /* (RO) Global time stamp counter, high word */ - -#define THREAD_MAX_THREADS 0x0A0 /* (RO) Maximum number of threads per core */ -#define THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */ -#define THREAD_MAX_BARRIER_SIZE 0x0A8 /* (RO) Maximum threads waiting at a barrier */ -#define THREAD_FEATURES 0x0AC /* (RO) Thread features */ - -#define TEXTURE_FEATURES_0 0x0B0 /* (RO) Support flags for indexed texture formats 0..31 */ -#define TEXTURE_FEATURES_1 0x0B4 /* (RO) Support flags for indexed texture formats 32..63 */ -#define TEXTURE_FEATURES_2 0x0B8 /* (RO) Support flags for indexed texture formats 64..95 */ - -#define TEXTURE_FEATURES_REG(n) GPU_CONTROL_REG(TEXTURE_FEATURES_0 + ((n) << 2)) - -#define JS0_FEATURES 0x0C0 /* (RO) Features of job slot 0 */ -#define JS1_FEATURES 0x0C4 /* (RO) Features of job slot 1 */ -#define JS2_FEATURES 0x0C8 /* (RO) Features of job slot 2 */ -#define JS3_FEATURES 0x0CC /* (RO) Features of job slot 3 */ -#define JS4_FEATURES 0x0D0 /* (RO) Features of job slot 4 */ -#define JS5_FEATURES 0x0D4 /* (RO) Features of job slot 5 */ -#define JS6_FEATURES 0x0D8 /* (RO) Features of job slot 6 */ -#define JS7_FEATURES 0x0DC /* (RO) Features of job slot 7 */ -#define JS8_FEATURES 0x0E0 /* (RO) Features of job slot 8 */ -#define JS9_FEATURES 0x0E4 /* (RO) Features of job slot 9 */ -#define JS10_FEATURES 0x0E8 /* (RO) Features of job slot 10 */ -#define JS11_FEATURES 0x0EC /* (RO) Features of job slot 11 */ -#define JS12_FEATURES 0x0F0 /* (RO) Features of job slot 12 */ -#define JS13_FEATURES 0x0F4 /* (RO) Features of job slot 13 */ -#define JS14_FEATURES 0x0F8 /* (RO) Features of job slot 14 */ -#define JS15_FEATURES 0x0FC /* (RO) Features of job slot 15 */ - -#define JS_FEATURES_REG(n) GPU_CONTROL_REG(JS0_FEATURES + ((n) << 2)) - -#define 
SHADER_PRESENT_LO 0x100 /* (RO) Shader core present bitmap, low word */ -#define SHADER_PRESENT_HI 0x104 /* (RO) Shader core present bitmap, high word */ - -#define TILER_PRESENT_LO 0x110 /* (RO) Tiler core present bitmap, low word */ -#define TILER_PRESENT_HI 0x114 /* (RO) Tiler core present bitmap, high word */ - -#define L2_PRESENT_LO 0x120 /* (RO) Level 2 cache present bitmap, low word */ -#define L2_PRESENT_HI 0x124 /* (RO) Level 2 cache present bitmap, high word */ - - -#define SHADER_READY_LO 0x140 /* (RO) Shader core ready bitmap, low word */ -#define SHADER_READY_HI 0x144 /* (RO) Shader core ready bitmap, high word */ - -#define TILER_READY_LO 0x150 /* (RO) Tiler core ready bitmap, low word */ -#define TILER_READY_HI 0x154 /* (RO) Tiler core ready bitmap, high word */ - -#define L2_READY_LO 0x160 /* (RO) Level 2 cache ready bitmap, low word */ -#define L2_READY_HI 0x164 /* (RO) Level 2 cache ready bitmap, high word */ - - -#define SHADER_PWRON_LO 0x180 /* (WO) Shader core power on bitmap, low word */ -#define SHADER_PWRON_HI 0x184 /* (WO) Shader core power on bitmap, high word */ - -#define TILER_PWRON_LO 0x190 /* (WO) Tiler core power on bitmap, low word */ -#define TILER_PWRON_HI 0x194 /* (WO) Tiler core power on bitmap, high word */ - -#define L2_PWRON_LO 0x1A0 /* (WO) Level 2 cache power on bitmap, low word */ -#define L2_PWRON_HI 0x1A4 /* (WO) Level 2 cache power on bitmap, high word */ - -#define SHADER_PWROFF_LO 0x1C0 /* (WO) Shader core power off bitmap, low word */ -#define SHADER_PWROFF_HI 0x1C4 /* (WO) Shader core power off bitmap, high word */ - -#define TILER_PWROFF_LO 0x1D0 /* (WO) Tiler core power off bitmap, low word */ -#define TILER_PWROFF_HI 0x1D4 /* (WO) Tiler core power off bitmap, high word */ - -#define L2_PWROFF_LO 0x1E0 /* (WO) Level 2 cache power off bitmap, low word */ -#define L2_PWROFF_HI 0x1E4 /* (WO) Level 2 cache power off bitmap, high word */ - -#define SHADER_PWRTRANS_LO 0x200 /* (RO) Shader core power transition bitmap, 
low word */ -#define SHADER_PWRTRANS_HI 0x204 /* (RO) Shader core power transition bitmap, high word */ - -#define TILER_PWRTRANS_LO 0x210 /* (RO) Tiler core power transition bitmap, low word */ -#define TILER_PWRTRANS_HI 0x214 /* (RO) Tiler core power transition bitmap, high word */ - -#define L2_PWRTRANS_LO 0x220 /* (RO) Level 2 cache power transition bitmap, low word */ -#define L2_PWRTRANS_HI 0x224 /* (RO) Level 2 cache power transition bitmap, high word */ - -#define SHADER_PWRACTIVE_LO 0x240 /* (RO) Shader core active bitmap, low word */ -#define SHADER_PWRACTIVE_HI 0x244 /* (RO) Shader core active bitmap, high word */ - -#define TILER_PWRACTIVE_LO 0x250 /* (RO) Tiler core active bitmap, low word */ -#define TILER_PWRACTIVE_HI 0x254 /* (RO) Tiler core active bitmap, high word */ - -#define L2_PWRACTIVE_LO 0x260 /* (RO) Level 2 cache active bitmap, low word */ -#define L2_PWRACTIVE_HI 0x264 /* (RO) Level 2 cache active bitmap, high word */ - - -#define JM_CONFIG 0xF00 /* (RW) Job Manager configuration register (Implementation specific register) */ -#define SHADER_CONFIG 0xF04 /* (RW) Shader core configuration settings (Implementation specific register) */ -#define TILER_CONFIG 0xF08 /* (RW) Tiler core configuration settings (Implementation specific register) */ -#define L2_MMU_CONFIG 0xF0C /* (RW) Configuration of the L2 cache and MMU (Implementation specific register) */ - -#define JOB_CONTROL_BASE 0x1000 - -#define JOB_CONTROL_REG(r) (JOB_CONTROL_BASE + (r)) - -#define JOB_IRQ_RAWSTAT 0x000 /* Raw interrupt status register */ -#define JOB_IRQ_CLEAR 0x004 /* Interrupt clear register */ -#define JOB_IRQ_MASK 0x008 /* Interrupt mask register */ -#define JOB_IRQ_STATUS 0x00C /* Interrupt status register */ -#define JOB_IRQ_JS_STATE 0x010 /* status==active and _next == busy snapshot from last JOB_IRQ_CLEAR */ -#define JOB_IRQ_THROTTLE 0x014 /* cycles to delay delivering an interrupt externally. 
The JOB_IRQ_STATUS is NOT affected by this, just the delivery of the interrupt. */ - -#define JOB_SLOT0 0x800 /* Configuration registers for job slot 0 */ -#define JOB_SLOT1 0x880 /* Configuration registers for job slot 1 */ -#define JOB_SLOT2 0x900 /* Configuration registers for job slot 2 */ -#define JOB_SLOT3 0x980 /* Configuration registers for job slot 3 */ -#define JOB_SLOT4 0xA00 /* Configuration registers for job slot 4 */ -#define JOB_SLOT5 0xA80 /* Configuration registers for job slot 5 */ -#define JOB_SLOT6 0xB00 /* Configuration registers for job slot 6 */ -#define JOB_SLOT7 0xB80 /* Configuration registers for job slot 7 */ -#define JOB_SLOT8 0xC00 /* Configuration registers for job slot 8 */ -#define JOB_SLOT9 0xC80 /* Configuration registers for job slot 9 */ -#define JOB_SLOT10 0xD00 /* Configuration registers for job slot 10 */ -#define JOB_SLOT11 0xD80 /* Configuration registers for job slot 11 */ -#define JOB_SLOT12 0xE00 /* Configuration registers for job slot 12 */ -#define JOB_SLOT13 0xE80 /* Configuration registers for job slot 13 */ -#define JOB_SLOT14 0xF00 /* Configuration registers for job slot 14 */ -#define JOB_SLOT15 0xF80 /* Configuration registers for job slot 15 */ - -#define JOB_SLOT_REG(n, r) (JOB_CONTROL_REG(JOB_SLOT0 + ((n) << 7)) + (r)) - -#define JS_HEAD_LO 0x00 /* (RO) Job queue head pointer for job slot n, low word */ -#define JS_HEAD_HI 0x04 /* (RO) Job queue head pointer for job slot n, high word */ -#define JS_TAIL_LO 0x08 /* (RO) Job queue tail pointer for job slot n, low word */ -#define JS_TAIL_HI 0x0C /* (RO) Job queue tail pointer for job slot n, high word */ -#define JS_AFFINITY_LO 0x10 /* (RO) Core affinity mask for job slot n, low word */ -#define JS_AFFINITY_HI 0x14 /* (RO) Core affinity mask for job slot n, high word */ -#define JS_CONFIG 0x18 /* (RO) Configuration settings for job slot n */ - -#define JS_COMMAND 0x20 /* (WO) Command register for job slot n */ -#define JS_STATUS 0x24 /* (RO) Status register for 
job slot n */ - -#define JS_HEAD_NEXT_LO 0x40 /* (RW) Next job queue head pointer for job slot n, low word */ -#define JS_HEAD_NEXT_HI 0x44 /* (RW) Next job queue head pointer for job slot n, high word */ - -#define JS_AFFINITY_NEXT_LO 0x50 /* (RW) Next core affinity mask for job slot n, low word */ -#define JS_AFFINITY_NEXT_HI 0x54 /* (RW) Next core affinity mask for job slot n, high word */ -#define JS_CONFIG_NEXT 0x58 /* (RW) Next configuration settings for job slot n */ - -#define JS_COMMAND_NEXT 0x60 /* (RW) Next command register for job slot n */ - - -#define MEMORY_MANAGEMENT_BASE 0x2000 -#define MMU_REG(r) (MEMORY_MANAGEMENT_BASE + (r)) - -#define MMU_IRQ_RAWSTAT 0x000 /* (RW) Raw interrupt status register */ -#define MMU_IRQ_CLEAR 0x004 /* (WO) Interrupt clear register */ -#define MMU_IRQ_MASK 0x008 /* (RW) Interrupt mask register */ -#define MMU_IRQ_STATUS 0x00C /* (RO) Interrupt status register */ - -#define MMU_AS0 0x400 /* Configuration registers for address space 0 */ -#define MMU_AS1 0x440 /* Configuration registers for address space 1 */ -#define MMU_AS2 0x480 /* Configuration registers for address space 2 */ -#define MMU_AS3 0x4C0 /* Configuration registers for address space 3 */ -#define MMU_AS4 0x500 /* Configuration registers for address space 4 */ -#define MMU_AS5 0x540 /* Configuration registers for address space 5 */ -#define MMU_AS6 0x580 /* Configuration registers for address space 6 */ -#define MMU_AS7 0x5C0 /* Configuration registers for address space 7 */ -#define MMU_AS8 0x600 /* Configuration registers for address space 8 */ -#define MMU_AS9 0x640 /* Configuration registers for address space 9 */ -#define MMU_AS10 0x680 /* Configuration registers for address space 10 */ -#define MMU_AS11 0x6C0 /* Configuration registers for address space 11 */ -#define MMU_AS12 0x700 /* Configuration registers for address space 12 */ -#define MMU_AS13 0x740 /* Configuration registers for address space 13 */ -#define MMU_AS14 0x780 /* Configuration 
registers for address space 14 */ -#define MMU_AS15 0x7C0 /* Configuration registers for address space 15 */ - -#define MMU_AS_REG(n, r) (MMU_REG(MMU_AS0 + ((n) << 6)) + (r)) - -#define AS_TRANSTAB_LO 0x00 /* (RW) Translation Table Base Address for address space n, low word */ -#define AS_TRANSTAB_HI 0x04 /* (RW) Translation Table Base Address for address space n, high word */ -#define AS_MEMATTR_LO 0x08 /* (RW) Memory attributes for address space n, low word. */ -#define AS_MEMATTR_HI 0x0C /* (RW) Memory attributes for address space n, high word. */ -#define AS_LOCKADDR_LO 0x10 /* (RW) Lock region address for address space n, low word */ -#define AS_LOCKADDR_HI 0x14 /* (RW) Lock region address for address space n, high word */ -#define AS_COMMAND 0x18 /* (WO) MMU command register for address space n */ -#define AS_FAULTSTATUS 0x1C /* (RO) MMU fault status register for address space n */ -#define AS_FAULTADDRESS_LO 0x20 /* (RO) Fault Address for address space n, low word */ -#define AS_FAULTADDRESS_HI 0x24 /* (RO) Fault Address for address space n, high word */ -#define AS_STATUS 0x28 /* (RO) Status flags for address space n */ - - - -/* End Register Offsets */ - -/* - * MMU_IRQ_RAWSTAT register values. Values are valid also for - MMU_IRQ_CLEAR, MMU_IRQ_MASK, MMU_IRQ_STATUS registers. 
- */ - -#define MMU_PAGE_FAULT_FLAGS 16 - -/* Macros returning a bitmask to retrieve page fault or bus error flags from - * MMU registers */ -#define MMU_PAGE_FAULT(n) (1UL << (n)) -#define MMU_BUS_ERROR(n) (1UL << ((n) + MMU_PAGE_FAULT_FLAGS)) - -/* - * Begin LPAE MMU TRANSTAB register values - */ -#define AS_TRANSTAB_LPAE_ADDR_SPACE_MASK 0xfffff000 -#define AS_TRANSTAB_LPAE_ADRMODE_UNMAPPED (0u << 0) -#define AS_TRANSTAB_LPAE_ADRMODE_IDENTITY (1u << 1) -#define AS_TRANSTAB_LPAE_ADRMODE_TABLE (3u << 0) -#define AS_TRANSTAB_LPAE_READ_INNER (1u << 2) -#define AS_TRANSTAB_LPAE_SHARE_OUTER (1u << 4) - -#define AS_TRANSTAB_LPAE_ADRMODE_MASK 0x00000003 - - -/* - * Begin MMU STATUS register values - */ -#define AS_STATUS_AS_ACTIVE 0x01 - -#define AS_FAULTSTATUS_EXCEPTION_CODE_MASK (0x7<<3) -#define AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT (0x0<<3) -#define AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT (0x1<<3) -#define AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT (0x2<<3) -#define AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG (0x3<<3) - - -#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3<<8) -#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1<<8) -#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2<<8) -#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3<<8) - - -/* - * Begin Command Values - */ - -/* JS_COMMAND register commands */ -#define JS_COMMAND_NOP 0x00 /* NOP Operation. Writing this value is ignored */ -#define JS_COMMAND_START 0x01 /* Start processing a job chain. 
Writing this value is ignored */ -#define JS_COMMAND_SOFT_STOP 0x02 /* Gently stop processing a job chain */ -#define JS_COMMAND_HARD_STOP 0x03 /* Rudely stop processing a job chain */ -#define JS_COMMAND_SOFT_STOP_0 0x04 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 0 */ -#define JS_COMMAND_HARD_STOP_0 0x05 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 0 */ -#define JS_COMMAND_SOFT_STOP_1 0x06 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 1 */ -#define JS_COMMAND_HARD_STOP_1 0x07 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 1 */ - -#define JS_COMMAND_MASK 0x07 /* Mask of bits currently in use by the HW */ - -/* AS_COMMAND register commands */ -#define AS_COMMAND_NOP 0x00 /* NOP Operation */ -#define AS_COMMAND_UPDATE 0x01 /* Broadcasts the values in AS_TRANSTAB and ASn_MEMATTR to all MMUs */ -#define AS_COMMAND_LOCK 0x02 /* Issue a lock region command to all MMUs */ -#define AS_COMMAND_UNLOCK 0x03 /* Issue a flush region command to all MMUs */ -#define AS_COMMAND_FLUSH 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs - (deprecated - only for use with T60x) */ -#define AS_COMMAND_FLUSH_PT 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs */ -#define AS_COMMAND_FLUSH_MEM 0x05 /* Wait for memory accesses to complete, flush all the L1s cache then - flush all L2 caches then issue a flush region command to all MMUs */ - -/* Possible values of JS_CONFIG and JS_CONFIG_NEXT registers */ -#define JS_CONFIG_START_FLUSH_NO_ACTION (0u << 0) -#define JS_CONFIG_START_FLUSH_CLEAN (1u << 8) -#define JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE (3u << 8) -#define JS_CONFIG_START_MMU (1u << 10) -#define JS_CONFIG_JOB_CHAIN_FLAG (1u << 11) -#define JS_CONFIG_END_FLUSH_NO_ACTION JS_CONFIG_START_FLUSH_NO_ACTION -#define JS_CONFIG_END_FLUSH_CLEAN (1u << 12) -#define JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE (3u << 12) -#define JS_CONFIG_THREAD_PRI(n) ((n) << 16) - -/* JS_STATUS register values */ - -/* NOTE: Please keep this values in sync with enum 
base_jd_event_code in mali_base_kernel.h. - * The values are separated to avoid dependency of userspace and kernel code. - */ - -/* Group of values representing the job status insead a particular fault */ -#define JS_STATUS_NO_EXCEPTION_BASE 0x00 -#define JS_STATUS_INTERRUPTED (JS_STATUS_NO_EXCEPTION_BASE + 0x02) /* 0x02 means INTERRUPTED */ -#define JS_STATUS_STOPPED (JS_STATUS_NO_EXCEPTION_BASE + 0x03) /* 0x03 means STOPPED */ -#define JS_STATUS_TERMINATED (JS_STATUS_NO_EXCEPTION_BASE + 0x04) /* 0x04 means TERMINATED */ - -/* General fault values */ -#define JS_STATUS_FAULT_BASE 0x40 -#define JS_STATUS_CONFIG_FAULT (JS_STATUS_FAULT_BASE) /* 0x40 means CONFIG FAULT */ -#define JS_STATUS_POWER_FAULT (JS_STATUS_FAULT_BASE + 0x01) /* 0x41 means POWER FAULT */ -#define JS_STATUS_READ_FAULT (JS_STATUS_FAULT_BASE + 0x02) /* 0x42 means READ FAULT */ -#define JS_STATUS_WRITE_FAULT (JS_STATUS_FAULT_BASE + 0x03) /* 0x43 means WRITE FAULT */ -#define JS_STATUS_AFFINITY_FAULT (JS_STATUS_FAULT_BASE + 0x04) /* 0x44 means AFFINITY FAULT */ -#define JS_STATUS_BUS_FAULT (JS_STATUS_FAULT_BASE + 0x08) /* 0x48 means BUS FAULT */ - -/* Instruction or data faults */ -#define JS_STATUS_INSTRUCTION_FAULT_BASE 0x50 -#define JS_STATUS_INSTR_INVALID_PC (JS_STATUS_INSTRUCTION_FAULT_BASE) /* 0x50 means INSTR INVALID PC */ -#define JS_STATUS_INSTR_INVALID_ENC (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x01) /* 0x51 means INSTR INVALID ENC */ -#define JS_STATUS_INSTR_TYPE_MISMATCH (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x02) /* 0x52 means INSTR TYPE MISMATCH */ -#define JS_STATUS_INSTR_OPERAND_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x03) /* 0x53 means INSTR OPERAND FAULT */ -#define JS_STATUS_INSTR_TLS_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x04) /* 0x54 means INSTR TLS FAULT */ -#define JS_STATUS_INSTR_BARRIER_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x05) /* 0x55 means INSTR BARRIER FAULT */ -#define JS_STATUS_INSTR_ALIGN_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x06) /* 0x56 means INSTR ALIGN 
FAULT */ -/* NOTE: No fault with 0x57 code defined in spec. */ -#define JS_STATUS_DATA_INVALID_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x08) /* 0x58 means DATA INVALID FAULT */ -#define JS_STATUS_TILE_RANGE_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x09) /* 0x59 means TILE RANGE FAULT */ -#define JS_STATUS_ADDRESS_RANGE_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x0A) /* 0x5A means ADDRESS RANGE FAULT */ - -/* Other faults */ -#define JS_STATUS_MEMORY_FAULT_BASE 0x60 -#define JS_STATUS_OUT_OF_MEMORY (JS_STATUS_MEMORY_FAULT_BASE) /* 0x60 means OUT OF MEMORY */ -#define JS_STATUS_UNKNOWN 0x7F /* 0x7F means UNKNOWN */ - -/* GPU_COMMAND values */ -#define GPU_COMMAND_NOP 0x00 /* No operation, nothing happens */ -#define GPU_COMMAND_SOFT_RESET 0x01 /* Stop all external bus interfaces, and then reset the entire GPU. */ -#define GPU_COMMAND_HARD_RESET 0x02 /* Immediately reset the entire GPU. */ -#define GPU_COMMAND_PRFCNT_CLEAR 0x03 /* Clear all performance counters, setting them all to zero. */ -#define GPU_COMMAND_PRFCNT_SAMPLE 0x04 /* Sample all performance counters, writing them out to memory */ -#define GPU_COMMAND_CYCLE_COUNT_START 0x05 /* Starts the cycle counter, and system timestamp propagation */ -#define GPU_COMMAND_CYCLE_COUNT_STOP 0x06 /* Stops the cycle counter, and system timestamp propagation */ -#define GPU_COMMAND_CLEAN_CACHES 0x07 /* Clean all caches */ -#define GPU_COMMAND_CLEAN_INV_CACHES 0x08 /* Clean and invalidate all caches */ - -/* End Command Values */ - -/* GPU_STATUS values */ -#define GPU_STATUS_PRFCNT_ACTIVE (1 << 2) /* Set if the performance counters are active. */ - -/* PRFCNT_CONFIG register values */ -#define PRFCNT_CONFIG_AS_SHIFT 4 /* address space bitmap starts from bit 4 of the register */ -#define PRFCNT_CONFIG_MODE_OFF 0 /* The performance counters are disabled. 
*/ -#define PRFCNT_CONFIG_MODE_MANUAL 1 /* The performance counters are enabled, but are only written out when a PRFCNT_SAMPLE command is issued using the GPU_COMMAND register. */ -#define PRFCNT_CONFIG_MODE_TILE 2 /* The performance counters are enabled, and are written out each time a tile finishes rendering. */ - -/* AS_MEMATTR values: */ - -/* Use GPU implementation-defined caching policy. */ -#define AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY 0x48ull -/* The attribute set to force all resources to be cached. */ -#define AS_MEMATTR_LPAE_FORCE_TO_CACHE_ALL 0x4Full -/* Inner write-alloc cache setup, no outer caching */ -#define AS_MEMATTR_LPAE_WRITE_ALLOC 0x4Dull -/* Set to implementation defined, outer caching */ -#define AS_MEMATTR_LPAE_OUTER_IMPL_DEF 0x88ull -/* Set to write back memory, outer caching */ -#define AS_MEMATTR_LPAE_OUTER_WA 0x8Dull - -/* Symbol for default MEMATTR to use */ - -/* Default is - HW implementation defined caching */ -#define AS_MEMATTR_INDEX_DEFAULT 0 -#define AS_MEMATTR_INDEX_DEFAULT_ACE 3 - -/* HW implementation defined caching */ -#define AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY 0 -/* Force cache on */ -#define AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL 1 -/* Write-alloc */ -#define AS_MEMATTR_INDEX_WRITE_ALLOC 2 -/* Outer coherent, inner implementation defined policy */ -#define AS_MEMATTR_INDEX_OUTER_IMPL_DEF 3 -/* Outer coherent, write alloc inner */ -#define AS_MEMATTR_INDEX_OUTER_WA 4 - -/* GPU_ID register */ -#define GPU_ID_VERSION_STATUS_SHIFT 0 -#define GPU_ID_VERSION_MINOR_SHIFT 4 -#define GPU_ID_VERSION_MAJOR_SHIFT 12 -#define GPU_ID_VERSION_PRODUCT_ID_SHIFT 16 -#define GPU_ID_VERSION_STATUS (0xF << GPU_ID_VERSION_STATUS_SHIFT) -#define GPU_ID_VERSION_MINOR (0xFF << GPU_ID_VERSION_MINOR_SHIFT) -#define GPU_ID_VERSION_MAJOR (0xF << GPU_ID_VERSION_MAJOR_SHIFT) -#define GPU_ID_VERSION_PRODUCT_ID (0xFFFF << GPU_ID_VERSION_PRODUCT_ID_SHIFT) - -/* Values for GPU_ID_VERSION_PRODUCT_ID bitfield */ -#define GPU_ID_PI_T60X 0x6956 -#define 
GPU_ID_PI_T62X 0x0620 -#define GPU_ID_PI_T76X 0x0750 -#define GPU_ID_PI_T72X 0x0720 -#define GPU_ID_PI_TFRX 0x0880 -#define GPU_ID_PI_T86X 0x0860 -#define GPU_ID_PI_T82X 0x0820 -#define GPU_ID_PI_T83X 0x0830 - -/* Values for GPU_ID_VERSION_STATUS field for PRODUCT_ID GPU_ID_PI_T60X */ -#define GPU_ID_S_15DEV0 0x1 -#define GPU_ID_S_EAC 0x2 - -/* Helper macro to create a GPU_ID assuming valid values for id, major, minor, status */ -#define GPU_ID_MAKE(id, major, minor, status) \ - (((id) << GPU_ID_VERSION_PRODUCT_ID_SHIFT) | \ - ((major) << GPU_ID_VERSION_MAJOR_SHIFT) | \ - ((minor) << GPU_ID_VERSION_MINOR_SHIFT) | \ - ((status) << GPU_ID_VERSION_STATUS_SHIFT)) - -/* End GPU_ID register */ - -/* JS_FEATURES register */ - -#define JS_FEATURE_NULL_JOB (1u << 1) -#define JS_FEATURE_SET_VALUE_JOB (1u << 2) -#define JS_FEATURE_CACHE_FLUSH_JOB (1u << 3) -#define JS_FEATURE_COMPUTE_JOB (1u << 4) -#define JS_FEATURE_VERTEX_JOB (1u << 5) -#define JS_FEATURE_GEOMETRY_JOB (1u << 6) -#define JS_FEATURE_TILER_JOB (1u << 7) -#define JS_FEATURE_FUSED_JOB (1u << 8) -#define JS_FEATURE_FRAGMENT_JOB (1u << 9) - -/* End JS_FEATURES register */ - -/* L2_MMU_CONFIG register */ -#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT (24) -#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT) -#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT) -#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT) -#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT) - -#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT (26) -#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT) -#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT) -#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT) -#define 
L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT) -/* End L2_MMU_CONFIG register */ - -/* THREAD_* registers */ - -/* THREAD_FEATURES IMPLEMENTATION_TECHNOLOGY values */ -#define IMPLEMENTATION_UNSPECIFIED 0 -#define IMPLEMENTATION_SILICON 1 -#define IMPLEMENTATION_FPGA 2 -#define IMPLEMENTATION_MODEL 3 - -/* Default values when registers are not supported by the implemented hardware */ -#define THREAD_MT_DEFAULT 256 -#define THREAD_MWS_DEFAULT 256 -#define THREAD_MBS_DEFAULT 256 -#define THREAD_MR_DEFAULT 1024 -#define THREAD_MTQ_DEFAULT 4 -#define THREAD_MTGS_DEFAULT 10 - -/* End THREAD_* registers */ - -/* COHERENCY_* values*/ -#define COHERENCY_ACE_LITE 0 -#define COHERENCY_ACE 1 -#define COHERENCY_NONE 31 -#define COHERENCY_FEATURE_BIT(x) (1 << (x)) -/* End COHERENCY_* values */ - -/* SHADER_CONFIG register */ - -#define SC_ALT_COUNTERS (1ul << 3) -#define SC_OVERRIDE_FWD_PIXEL_KILL (1ul << 4) -#define SC_SDC_DISABLE_OQ_DISCARD (1ul << 6) -#define SC_LS_PAUSEBUFFER_DISABLE (1ul << 16) -#define SC_ENABLE_TEXGRD_FLAGS (1ul << 25) -/* End SHADER_CONFIG register */ - -/* TILER_CONFIG register */ - -#define TC_CLOCK_GATE_OVERRIDE (1ul << 0) - -/* End TILER_CONFIG register */ - - -#endif /* _MIDGARD_REGMAP_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_timeline.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_timeline.h deleted file mode 100755 index c3563723cb633..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_timeline.h +++ /dev/null @@ -1,397 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM mali_timeline - -#if !defined(_MALI_TIMELINE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _MALI_TIMELINE_H - -#include - -TRACE_EVENT(mali_timeline_atoms_in_flight, - - TP_PROTO(u64 ts_sec, - u32 ts_nsec, - int tgid, - int count), - - TP_ARGS(ts_sec, - ts_nsec, - tgid, - count), - - TP_STRUCT__entry( - __field(u64, ts_sec) - __field(u32, ts_nsec) - __field(int, tgid) - __field(int, count) - ), - - TP_fast_assign( - __entry->ts_sec = ts_sec; - __entry->ts_nsec = ts_nsec; - __entry->tgid = tgid; - __entry->count = count; - ), - - TP_printk("%i,%i.%.9i,%i,%i", CTX_SET_NR_ATOMS_IN_FLIGHT, - (int)__entry->ts_sec, - (int)__entry->ts_nsec, - __entry->tgid, - __entry->count) -); - - -TRACE_EVENT(mali_timeline_atom, - - TP_PROTO(u64 ts_sec, - u32 ts_nsec, - int event_type, - int tgid, - int atom_id), - - TP_ARGS(ts_sec, - ts_nsec, - event_type, - tgid, - atom_id), - - TP_STRUCT__entry( - __field(u64, ts_sec) - __field(u32, ts_nsec) - __field(int, event_type) - __field(int, tgid) - __field(int, atom_id) - ), - - TP_fast_assign( - __entry->ts_sec = ts_sec; - __entry->ts_nsec = ts_nsec; - __entry->event_type = event_type; - __entry->tgid = tgid; - __entry->atom_id = atom_id; - ), - - TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type, - (int)__entry->ts_sec, - (int)__entry->ts_nsec, - __entry->tgid, - __entry->atom_id, - __entry->atom_id) -); - -TRACE_EVENT(mali_timeline_gpu_slot_active, - - TP_PROTO(u64 ts_sec, - u32 ts_nsec, - int event_type, - int tgid, - int js, - int count), - - TP_ARGS(ts_sec, - ts_nsec, - event_type, - tgid, - js, - count), - - TP_STRUCT__entry( - __field(u64, ts_sec) - __field(u32, ts_nsec) - __field(int, event_type) - __field(int, tgid) - __field(int, js) - __field(int, count) - ), - - 
TP_fast_assign( - __entry->ts_sec = ts_sec; - __entry->ts_nsec = ts_nsec; - __entry->event_type = event_type; - __entry->tgid = tgid; - __entry->js = js; - __entry->count = count; - ), - - TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type, - (int)__entry->ts_sec, - (int)__entry->ts_nsec, - __entry->tgid, - __entry->js, - __entry->count) -); - -TRACE_EVENT(mali_timeline_gpu_slot_action, - - TP_PROTO(u64 ts_sec, - u32 ts_nsec, - int event_type, - int tgid, - int js, - int count), - - TP_ARGS(ts_sec, - ts_nsec, - event_type, - tgid, - js, - count), - - TP_STRUCT__entry( - __field(u64, ts_sec) - __field(u32, ts_nsec) - __field(int, event_type) - __field(int, tgid) - __field(int, js) - __field(int, count) - ), - - TP_fast_assign( - __entry->ts_sec = ts_sec; - __entry->ts_nsec = ts_nsec; - __entry->event_type = event_type; - __entry->tgid = tgid; - __entry->js = js; - __entry->count = count; - ), - - TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type, - (int)__entry->ts_sec, - (int)__entry->ts_nsec, - __entry->tgid, - __entry->js, - __entry->count) -); - -TRACE_EVENT(mali_timeline_gpu_power_active, - - TP_PROTO(u64 ts_sec, - u32 ts_nsec, - int event_type, - int active), - - TP_ARGS(ts_sec, - ts_nsec, - event_type, - active), - - TP_STRUCT__entry( - __field(u64, ts_sec) - __field(u32, ts_nsec) - __field(int, event_type) - __field(int, active) - ), - - TP_fast_assign( - __entry->ts_sec = ts_sec; - __entry->ts_nsec = ts_nsec; - __entry->event_type = event_type; - __entry->active = active; - ), - - TP_printk("%i,%i.%.9i,0,%i", __entry->event_type, - (int)__entry->ts_sec, - (int)__entry->ts_nsec, - __entry->active) - -); - -TRACE_EVENT(mali_timeline_l2_power_active, - - TP_PROTO(u64 ts_sec, - u32 ts_nsec, - int event_type, - int state), - - TP_ARGS(ts_sec, - ts_nsec, - event_type, - state), - - TP_STRUCT__entry( - __field(u64, ts_sec) - __field(u32, ts_nsec) - __field(int, event_type) - __field(int, state) - ), - - TP_fast_assign( - __entry->ts_sec = ts_sec; - 
__entry->ts_nsec = ts_nsec; - __entry->event_type = event_type; - __entry->state = state; - ), - - TP_printk("%i,%i.%.9i,0,%i", __entry->event_type, - (int)__entry->ts_sec, - (int)__entry->ts_nsec, - __entry->state) - -); -TRACE_EVENT(mali_timeline_pm_event, - - TP_PROTO(u64 ts_sec, - u32 ts_nsec, - int event_type, - int pm_event_type, - unsigned int pm_event_id), - - TP_ARGS(ts_sec, - ts_nsec, - event_type, - pm_event_type, - pm_event_id), - - TP_STRUCT__entry( - __field(u64, ts_sec) - __field(u32, ts_nsec) - __field(int, event_type) - __field(int, pm_event_type) - __field(unsigned int, pm_event_id) - ), - - TP_fast_assign( - __entry->ts_sec = ts_sec; - __entry->ts_nsec = ts_nsec; - __entry->event_type = event_type; - __entry->pm_event_type = pm_event_type; - __entry->pm_event_id = pm_event_id; - ), - - TP_printk("%i,%i.%.9i,0,%i,%u", __entry->event_type, - (int)__entry->ts_sec, - (int)__entry->ts_nsec, - __entry->pm_event_type, __entry->pm_event_id) - -); - -TRACE_EVENT(mali_timeline_slot_atom, - - TP_PROTO(u64 ts_sec, - u32 ts_nsec, - int event_type, - int tgid, - int js, - int atom_id), - - TP_ARGS(ts_sec, - ts_nsec, - event_type, - tgid, - js, - atom_id), - - TP_STRUCT__entry( - __field(u64, ts_sec) - __field(u32, ts_nsec) - __field(int, event_type) - __field(int, tgid) - __field(int, js) - __field(int, atom_id) - ), - - TP_fast_assign( - __entry->ts_sec = ts_sec; - __entry->ts_nsec = ts_nsec; - __entry->event_type = event_type; - __entry->tgid = tgid; - __entry->js = js; - __entry->atom_id = atom_id; - ), - - TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type, - (int)__entry->ts_sec, - (int)__entry->ts_nsec, - __entry->tgid, - __entry->js, - __entry->atom_id) -); - -TRACE_EVENT(mali_timeline_pm_checktrans, - - TP_PROTO(u64 ts_sec, - u32 ts_nsec, - int trans_code, - int trans_id), - - TP_ARGS(ts_sec, - ts_nsec, - trans_code, - trans_id), - - TP_STRUCT__entry( - __field(u64, ts_sec) - __field(u32, ts_nsec) - __field(int, trans_code) - __field(int, trans_id) - 
), - - TP_fast_assign( - __entry->ts_sec = ts_sec; - __entry->ts_nsec = ts_nsec; - __entry->trans_code = trans_code; - __entry->trans_id = trans_id; - ), - - TP_printk("%i,%i.%.9i,0,%i", __entry->trans_code, - (int)__entry->ts_sec, - (int)__entry->ts_nsec, - __entry->trans_id) - -); - -TRACE_EVENT(mali_timeline_context_active, - - TP_PROTO(u64 ts_sec, - u32 ts_nsec, - int count), - - TP_ARGS(ts_sec, - ts_nsec, - count), - - TP_STRUCT__entry( - __field(u64, ts_sec) - __field(u32, ts_nsec) - __field(int, count) - ), - - TP_fast_assign( - __entry->ts_sec = ts_sec; - __entry->ts_nsec = ts_nsec; - __entry->count = count; - ), - - TP_printk("%i,%i.%.9i,0,%i", SW_SET_CONTEXT_ACTIVE, - (int)__entry->ts_sec, - (int)__entry->ts_nsec, - __entry->count) -); - - -#endif /* _MALI_TIMELINE_H */ - -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . - -/* This part must be outside protection */ -#include - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_uk.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_uk.h deleted file mode 100755 index 841d03fb5873b..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/mali_uk.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010, 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file mali_uk.h - * Types and definitions that are common across OSs for both the user - * and kernel side of the User-Kernel interface. 
- */ - -#ifndef _UK_H_ -#define _UK_H_ - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/** - * @addtogroup base_api - * @{ - */ - -/** - * @defgroup uk_api User-Kernel Interface API - * - * The User-Kernel Interface abstracts the communication mechanism between the user and kernel-side code of device - * drivers developed as part of the Midgard DDK. Currently that includes the Base driver and the UMP driver. - * - * It exposes an OS independent API to user-side code (UKU) which routes functions calls to an OS-independent - * kernel-side API (UKK) via an OS-specific communication mechanism. - * - * This API is internal to the Midgard DDK and is not exposed to any applications. - * - * @{ - */ - -/** - * These are identifiers for kernel-side drivers implementing a UK interface, aka UKK clients. The - * UK module maps this to an OS specific device name, e.g. "gpu_base" -> "GPU0:". Specify this - * identifier to select a UKK client to the uku_open() function. - * - * When a new UKK client driver is created a new identifier needs to be added to the uk_client_id - * enumeration and the uku_open() implemenation for the various OS ports need to be updated to - * provide a mapping of the identifier to the OS specific device name. - * - */ -enum uk_client_id { - /** - * Value used to identify the Base driver UK client. - */ - UK_CLIENT_MALI_T600_BASE, - - /** The number of uk clients supported. This must be the last member of the enum */ - UK_CLIENT_COUNT -}; - -/** - * Each function callable through the UK interface has a unique number. - * Functions provided by UK clients start from number UK_FUNC_ID. - * Numbers below UK_FUNC_ID are used for internal UK functions. - */ -enum uk_func { - UKP_FUNC_ID_CHECK_VERSION, /**< UKK Core internal function */ - /** - * Each UK client numbers the functions they provide starting from - * number UK_FUNC_ID. 
This number is then eventually assigned to the - * id field of the union uk_header structure when preparing to make a - * UK call. See your UK client for a list of their function numbers. - */ - UK_FUNC_ID = 512 -}; - -/** - * Arguments for a UK call are stored in a structure. This structure consists - * of a fixed size header and a payload. The header carries a 32-bit number - * identifying the UK function to be called (see uk_func). When the UKK client - * receives this header and executed the requested UK function, it will use - * the same header to store the result of the function in the form of a - * int return code. The size of this structure is such that the - * first member of the payload following the header can be accessed efficiently - * on a 32 and 64-bit kernel and the structure has the same size regardless - * of a 32 or 64-bit kernel. The uk_kernel_size_type type should be defined - * accordingly in the OS specific mali_uk_os.h header file. - */ -union uk_header { - /** - * 32-bit number identifying the UK function to be called. - * Also see uk_func. - */ - u32 id; - /** - * The int return code returned by the called UK function. - * See the specification of the particular UK function you are - * calling for the meaning of the error codes returned. All - * UK functions return 0 on success. - */ - u32 ret; - /* - * Used to ensure 64-bit alignment of this union. Do not remove. - * This field is used for padding and does not need to be initialized. - */ - u64 sizer; -}; - -/** - * This structure carries a 16-bit major and minor number and is sent along with an internal UK call - * used during uku_open to identify the versions of the UK module in use by the user-side and kernel-side. 
- */ -struct uku_version_check_args { - union uk_header header; - /**< UK call header */ - u16 major; - /**< This field carries the user-side major version on input and the kernel-side major version on output */ - u16 minor; - /**< This field carries the user-side minor version on input and the kernel-side minor version on output. */ - u8 padding[4]; -}; - -/** @} end group uk_api */ - -/** @} *//* end group base_api */ - -#ifdef __cplusplus -} -#endif /* __cplusplus */ -#endif /* _UK_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/Kbuild deleted file mode 100755 index 558657bbced90..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/Kbuild +++ /dev/null @@ -1,21 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY),y) -# remove begin and end quotes from the Kconfig string type - platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME)) - obj-y += $(platform_name)/ -endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/Kconfig b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/Kconfig deleted file mode 100755 index 8fb4e917c4fa3..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/Kconfig +++ /dev/null @@ -1,24 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. 
-# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - - -# Add your platform specific Kconfig file here -# -# "drivers/gpu/arm/midgard/platform/xxx/Kconfig" -# -# Where xxx is the platform name is the name set in MALI_PLATFORM_THIRDPARTY_NAME -# - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/Kbuild deleted file mode 100755 index b9a30da07aff2..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/Kbuild +++ /dev/null @@ -1,13 +0,0 @@ -# -# (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_clock.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_clock.c deleted file mode 100644 index 6ddf39f5de90c..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_clock.c +++ /dev/null @@ -1,371 +0,0 @@ -#include -#include -#include -#include -#include -#include "mali_scaling.h" -#include "mali_clock.h" - -#ifndef AML_CLK_LOCK_ERROR -#define AML_CLK_LOCK_ERROR 1 -#endif - -static unsigned gpu_dbg_level = 0; -module_param(gpu_dbg_level, uint, 0644); -MODULE_PARM_DESC(gpu_dbg_level, "gpu debug level"); - -#define gpu_dbg(level, fmt, arg...) \ - do { \ - if (gpu_dbg_level >= (level)) \ - printk("gpu_debug"fmt , ## arg); \ - } while (0) - -#define GPU_CLK_DBG(fmt, arg...) \ - do { \ - gpu_dbg(1, "line(%d), clk_cntl=0x%08x\n" fmt, __LINE__, mplt_read(HHI_MALI_CLK_CNTL), ## arg);\ - } while (0) - -//disable print -#define _dev_info(...) 
- -//static DEFINE_SPINLOCK(lock); -static mali_plat_info_t* pmali_plat = NULL; -//static u32 mali_extr_backup = 0; -//static u32 mali_extr_sample_backup = 0; -struct timeval start; -struct timeval end; -int mali_pm_statue = 0; - -int mali_clock_init_clk_tree(struct platform_device* pdev) -{ - mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[pmali_plat->def_clock]; - struct clk *clk_mali_0_parent = dvfs_tbl->clkp_handle; - struct clk *clk_mali_0 = pmali_plat->clk_mali_0; -#ifdef AML_CLK_LOCK_ERROR - struct clk *clk_mali_1 = pmali_plat->clk_mali_1; -#endif - struct clk *clk_mali = pmali_plat->clk_mali; - - clk_set_parent(clk_mali_0, clk_mali_0_parent); - - clk_prepare_enable(clk_mali_0); - - clk_set_parent(clk_mali, clk_mali_0); - -#ifdef AML_CLK_LOCK_ERROR - clk_set_parent(clk_mali_1, clk_mali_0_parent); - clk_prepare_enable(clk_mali_1); -#endif - - GPU_CLK_DBG("%s:enable(%d), %s:enable(%d)\n", - clk_mali_0->name, clk_mali_0->enable_count, - clk_mali_0_parent->name, clk_mali_0_parent->enable_count); - - return 0; -} - -int mali_clock_init(mali_plat_info_t *pdev) -{ - *pdev = *pdev; - return 0; -} - -int mali_clock_critical(critical_t critical, size_t param) -{ - int ret = 0; - - ret = critical(param); - - return ret; -} - -static int critical_clock_set(size_t param) -{ - int ret = 0; - unsigned int idx = param; - mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[idx]; - - struct clk *clk_mali_0 = pmali_plat->clk_mali_0; - struct clk *clk_mali_1 = pmali_plat->clk_mali_1; - struct clk *clk_mali_x = NULL; - struct clk *clk_mali_x_parent = NULL; - struct clk *clk_mali_x_old = NULL; - struct clk *clk_mali = pmali_plat->clk_mali; - unsigned long time_use=0; - - clk_mali_x_old = clk_get_parent(clk_mali); - - if (!clk_mali_x_old) { - printk("gpu: could not get clk_mali_x_old or clk_mali_x_old\n"); - return 0; - } - if (clk_mali_x_old == clk_mali_0) { - clk_mali_x = clk_mali_1; - } else if (clk_mali_x_old == clk_mali_1) { - clk_mali_x = clk_mali_0; 
- } else { - printk("gpu: unmatched clk_mali_x_old\n"); - return 0; - } - - GPU_CLK_DBG("idx=%d, clk_freq=%d\n", idx, dvfs_tbl->clk_freq); - clk_mali_x_parent = dvfs_tbl->clkp_handle; - if (!clk_mali_x_parent) { - printk("gpu: could not get clk_mali_x_parent\n"); - return 0; - } - - GPU_CLK_DBG(); - ret = clk_set_rate(clk_mali_x_parent, dvfs_tbl->clkp_freq); - GPU_CLK_DBG(); - ret = clk_set_parent(clk_mali_x, clk_mali_x_parent); - GPU_CLK_DBG(); - ret = clk_set_rate(clk_mali_x, dvfs_tbl->clk_freq); - GPU_CLK_DBG(); -#ifndef AML_CLK_LOCK_ERROR - ret = clk_prepare_enable(clk_mali_x); -#endif - GPU_CLK_DBG("new %s:enable(%d)\n", clk_mali_x->name, clk_mali_x->enable_count); - do_gettimeofday(&start); - udelay(1);// delay 10ns - do_gettimeofday(&end); - ret = clk_set_parent(clk_mali, clk_mali_x); - GPU_CLK_DBG(); - -#ifndef AML_CLK_LOCK_ERROR - clk_disable_unprepare(clk_mali_x_old); -#endif - GPU_CLK_DBG("old %s:enable(%d)\n", clk_mali_x_old->name, clk_mali_x_old->enable_count); - time_use = (end.tv_sec - start.tv_sec)*1000000 + end.tv_usec - start.tv_usec; - GPU_CLK_DBG("step 1, mali_mux use: %ld us\n", time_use); - - return 0; -} - -int mali_clock_set(unsigned int clock) -{ - return mali_clock_critical(critical_clock_set, (size_t)clock); -} - -void disable_clock(void) -{ - struct clk *clk_mali = pmali_plat->clk_mali; - struct clk *clk_mali_x = NULL; - - clk_mali_x = clk_get_parent(clk_mali); - GPU_CLK_DBG(); -#ifndef AML_CLK_LOCK_ERROR - clk_disable_unprepare(clk_mali_x); -#endif - GPU_CLK_DBG(); -} - -void enable_clock(void) -{ - struct clk *clk_mali = pmali_plat->clk_mali; - struct clk *clk_mali_x = NULL; - - clk_mali_x = clk_get_parent(clk_mali); - GPU_CLK_DBG(); -#ifndef AML_CLK_LOCK_ERROR - clk_prepare_enable(clk_mali_x); -#endif - GPU_CLK_DBG(); -} - -u32 get_mali_freq(u32 idx) -{ - if (!mali_pm_statue) { - return pmali_plat->clk_sample[idx]; - } else { - return 0; - } -} - -void set_str_src(u32 data) -{ - printk("gpu: %s, %s, %d\n", __FILE__, __func__, 
__LINE__); -} - -int mali_dt_info(struct platform_device *pdev, struct mali_plat_info_t *mpdata) -{ - struct device_node *gpu_dn = pdev->dev.of_node; - struct device_node *gpu_clk_dn; - phandle dvfs_clk_hdl; - mali_dvfs_threshold_table *dvfs_tbl = NULL; - uint32_t *clk_sample = NULL; - - struct property *prop; - const __be32 *p; - int length = 0, i = 0; - u32 u; - int ret = 0; - if (!gpu_dn) { - dev_notice(&pdev->dev, "gpu device node not right\n"); - return -ENODEV; - } - - ret = of_property_read_u32(gpu_dn,"num_of_pp", - &mpdata->cfg_pp); - if (ret) { - dev_notice(&pdev->dev, "set max pp to default 6\n"); - mpdata->cfg_pp = 6; - } - mpdata->scale_info.maxpp = mpdata->cfg_pp; - mpdata->maxpp_sysfs = mpdata->cfg_pp; - _dev_info(&pdev->dev, "max pp is %d\n", mpdata->scale_info.maxpp); - - ret = of_property_read_u32(gpu_dn,"min_pp", - &mpdata->cfg_min_pp); - if (ret) { - dev_notice(&pdev->dev, "set min pp to default 1\n"); - mpdata->cfg_min_pp = 1; - } - mpdata->scale_info.minpp = mpdata->cfg_min_pp; - _dev_info(&pdev->dev, "min pp is %d\n", mpdata->scale_info.minpp); - - ret = of_property_read_u32(gpu_dn,"min_clk", - &mpdata->cfg_min_clock); - if (ret) { - dev_notice(&pdev->dev, "set min clk default to 0\n"); - mpdata->cfg_min_clock = 0; - } - mpdata->scale_info.minclk = mpdata->cfg_min_clock; - _dev_info(&pdev->dev, "min clk is %d\n", mpdata->scale_info.minclk); - - mpdata->reg_base_hiubus = of_iomap(gpu_dn, 3); - _dev_info(&pdev->dev, "hiu io source 0x%p\n", mpdata->reg_base_hiubus); - - mpdata->reg_base_aobus = of_iomap(gpu_dn, 2); - _dev_info(&pdev->dev, "ao io source 0x%p\n", mpdata->reg_base_aobus); - - ret = of_property_read_u32(gpu_dn,"sc_mpp", - &mpdata->sc_mpp); - if (ret) { - dev_notice(&pdev->dev, "set pp used most of time default to %d\n", mpdata->cfg_pp); - mpdata->sc_mpp = mpdata->cfg_pp; - } - _dev_info(&pdev->dev, "num of pp used most of time %d\n", mpdata->sc_mpp); - - of_get_property(gpu_dn, "tbl", &length); - - length = length /sizeof(u32); - 
_dev_info(&pdev->dev, "clock dvfs cfg table size is %d\n", length); - - mpdata->dvfs_table = devm_kzalloc(&pdev->dev, - sizeof(struct mali_dvfs_threshold_table)*length, - GFP_KERNEL); - dvfs_tbl = mpdata->dvfs_table; - if (mpdata->dvfs_table == NULL) { - dev_err(&pdev->dev, "failed to alloc dvfs table\n"); - return -ENOMEM; - } - mpdata->clk_sample = devm_kzalloc(&pdev->dev, sizeof(u32)*length, GFP_KERNEL); - if (mpdata->clk_sample == NULL) { - dev_err(&pdev->dev, "failed to alloc clk_sample table\n"); - return -ENOMEM; - } - clk_sample = mpdata->clk_sample; - mpdata->dvfs_table_size = 0; - - of_property_for_each_u32(gpu_dn, "tbl", prop, p, u) { - dvfs_clk_hdl = (phandle) u; - gpu_clk_dn = of_find_node_by_phandle(dvfs_clk_hdl); - ret = of_property_read_u32(gpu_clk_dn,"clk_freq", &dvfs_tbl->clk_freq); - if (ret) { - dev_notice(&pdev->dev, "read clk_freq failed\n"); - } - ret = of_property_read_string(gpu_clk_dn,"clk_parent", - &dvfs_tbl->clk_parent); - if (ret) { - dev_notice(&pdev->dev, "read clk_parent failed\n"); - } - dvfs_tbl->clkp_handle = devm_clk_get(&pdev->dev, dvfs_tbl->clk_parent); - if (IS_ERR(dvfs_tbl->clkp_handle)) { - dev_notice(&pdev->dev, "failed to get %s's clock pointer\n", dvfs_tbl->clk_parent); - } - ret = of_property_read_u32(gpu_clk_dn,"clkp_freq", &dvfs_tbl->clkp_freq); - if (ret) { - dev_notice(&pdev->dev, "read clk_parent freq failed\n"); - } - ret = of_property_read_u32(gpu_clk_dn,"voltage", &dvfs_tbl->voltage); - if (ret) { - dev_notice(&pdev->dev, "read voltage failed\n"); - } - ret = of_property_read_u32(gpu_clk_dn,"keep_count", &dvfs_tbl->keep_count); - if (ret) { - dev_notice(&pdev->dev, "read keep_count failed\n"); - } - //downthreshold and upthreshold shall be u32 - ret = of_property_read_u32_array(gpu_clk_dn,"threshold", - &dvfs_tbl->downthreshold, 2); - if (ret) { - dev_notice(&pdev->dev, "read threshold failed\n"); - } - dvfs_tbl->freq_index = i; - - *clk_sample = dvfs_tbl->clk_freq / 1000000; - - dvfs_tbl ++; - clk_sample ++; - 
i++; - mpdata->dvfs_table_size ++; - } - dev_notice(&pdev->dev, "dvfs table is %d\n", mpdata->dvfs_table_size); - dev_notice(&pdev->dev, "dvfs table addr %p, ele size=%zd\n", - mpdata->dvfs_table, - sizeof(mpdata->dvfs_table[0])); - - ret = of_property_read_u32(gpu_dn,"max_clk", - &mpdata->cfg_clock); - if (ret) { - dev_notice(&pdev->dev, "max clk set %d\n", mpdata->dvfs_table_size-2); - mpdata->cfg_clock = mpdata->dvfs_table_size-2; - } - - mpdata->cfg_clock_bkup = mpdata->cfg_clock; - mpdata->maxclk_sysfs = mpdata->cfg_clock; - mpdata->scale_info.maxclk = mpdata->cfg_clock; - _dev_info(&pdev->dev, "max clk is %d\n", mpdata->scale_info.maxclk); - - ret = of_property_read_u32(gpu_dn,"turbo_clk", - &mpdata->turbo_clock); - if (ret) { - dev_notice(&pdev->dev, "turbo clk set to %d\n", mpdata->dvfs_table_size-1); - mpdata->turbo_clock = mpdata->dvfs_table_size-1; - } - _dev_info(&pdev->dev, "turbo clk is %d\n", mpdata->turbo_clock); - - ret = of_property_read_u32(gpu_dn,"def_clk", - &mpdata->def_clock); - if (ret) { - dev_notice(&pdev->dev, "default clk set to %d\n", mpdata->dvfs_table_size/2-1); - mpdata->def_clock = mpdata->dvfs_table_size/2 - 1; - } - _dev_info(&pdev->dev, "default clk is %d\n", mpdata->def_clock); - - dvfs_tbl = mpdata->dvfs_table; - clk_sample = mpdata->clk_sample; - for (i = 0; i< mpdata->dvfs_table_size; i++) { - _dev_info(&pdev->dev, "====================%d====================\n" - "clk_freq=%10d, clk_parent=%9s, voltage=%d, keep_count=%d, threshod=<%d %d>, clk_sample=%d\n", - i, - dvfs_tbl->clk_freq, dvfs_tbl->clk_parent, - dvfs_tbl->voltage, dvfs_tbl->keep_count, - dvfs_tbl->downthreshold, dvfs_tbl->upthreshold, *clk_sample); - dvfs_tbl ++; - clk_sample ++; - } - - mpdata->clk_mali = devm_clk_get(&pdev->dev, "clk_gpu"); - mpdata->clk_mali_0 = devm_clk_get(&pdev->dev, "clk_gpu_0"); - mpdata->clk_mali_1 = devm_clk_get(&pdev->dev, "clk_gpu_1"); - if (IS_ERR(mpdata->clk_mali) || IS_ERR(mpdata->clk_mali_0) || IS_ERR(mpdata->clk_mali_1)) { - 
dev_err(&pdev->dev, "failed to get clock pointer\n"); - return -EFAULT; - } - - pmali_plat = mpdata; - mpdata->pdev = pdev; - return 0; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_clock.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_clock.h deleted file mode 100644 index 9b8b39287a3f8..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_clock.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef __MALI_CLOCK_H__ -#define __MALI_CLOCK_H__ -#include -#include -#include -#include -#include - -#include -#include -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 29)) -#include -#endif - -#ifndef HHI_MALI_CLK_CNTL -#define HHI_MALI_CLK_CNTL 0x6C -#define mplt_read(r) readl((pmali_plat->reg_base_hiubus) + ((r)<<2)) -#define mplt_write(r, v) writel((v), ((pmali_plat->reg_base_hiubus) + ((r)<<2))) -#define mplt_setbits(r, m) mplt_write((r), (mplt_read(r) | (m))); -#define mplt_clrbits(r, m) mplt_write((r), (mplt_read(r) & (~(m)))); -#endif - -//extern int mali_clock_init(struct platform_device *dev); -int mali_clock_init_clk_tree(struct platform_device *pdev); - -typedef int (*critical_t)(size_t param); -int mali_clock_critical(critical_t critical, size_t param); - -int mali_clock_init(mali_plat_info_t*); -int mali_clock_set(unsigned int index); -void disable_clock(void); -void enable_clock(void); -u32 get_mali_freq(u32 idx); -void set_str_src(u32 data); -int mali_dt_info(struct platform_device *pdev, - struct mali_plat_info_t *mpdata); -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_devicetree.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_devicetree.c deleted file mode 100755 index 59c36b45a5c60..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_devicetree.c +++ /dev/null @@ -1,97 +0,0 @@ 
-/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifdef CONFIG_DEVFREQ_THERMAL -#include -#include - -#define FALLBACK_STATIC_TEMPERATURE 55000 - -static unsigned long t83x_static_power(unsigned long voltage) -{ -#if 0 - struct thermal_zone_device *tz; - unsigned long temperature, temp; - unsigned long temp_squared, temp_cubed, temp_scaling_factor; - const unsigned long coefficient = (410UL << 20) / (729000000UL >> 10); - const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10; - - tz = thermal_zone_get_zone_by_name("gpu"); - if (IS_ERR(tz)) { - pr_warn_ratelimited("Error getting gpu thermal zone (%ld), not yet ready?\n", - PTR_ERR(tz)); - temperature = FALLBACK_STATIC_TEMPERATURE; - } else { - int ret; - - ret = tz->ops->get_temp(tz, &temperature); - if (ret) { - pr_warn_ratelimited("Error reading temperature for gpu thermal zone: %d\n", - ret); - temperature = FALLBACK_STATIC_TEMPERATURE; - } - } - - /* Calculate the temperature scaling factor. To be applied to the - * voltage scaled power. - */ - temp = temperature / 1000; - temp_squared = temp * temp; - temp_cubed = temp_squared * temp; - temp_scaling_factor = - (2 * temp_cubed) - - (80 * temp_squared) - + (4700 * temp) - + 32000; - - return (((coefficient * voltage_cubed) >> 20) - * temp_scaling_factor) - / 1000000; -#else - return 0; -#endif -} - -static unsigned long t83x_dynamic_power(unsigned long freq, - unsigned long voltage) -{ - /* The inputs: freq (f) is in Hz, and voltage (v) in mV. 
- * The coefficient (c) is in mW/(MHz mV mV). - * - * This function calculates the dynamic power after this formula: - * Pdyn (mW) = c (mW/(MHz*mV*mV)) * v (mV) * v (mV) * f (MHz) - */ - const unsigned long v2 = (voltage * voltage) / 1000; /* m*(V*V) */ - const unsigned long f_mhz = freq / 1000000; /* MHz */ - const unsigned long coefficient = 3600; /* mW/(MHz*mV*mV) */ - - return (coefficient * v2 * f_mhz) / 1000000; /* mW */ -} - -struct devfreq_cooling_ops t83x_model_ops = { - .get_static_power = t83x_static_power, - .get_dynamic_power = t83x_dynamic_power, -}; - -#endif - -int kbase_platform_early_init(void) -{ - /* Nothing needed at this stage */ - return 0; -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h deleted file mode 100755 index adc052ca57c9c..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * Maximum frequency GPU will be clocked at. Given in kHz. - * This must be specified as there is no default value. - * - * Attached value: number in kHz - * Default value: NA - */ -#define GPU_FREQ_KHZ_MAX (750000) -/** - * Minimum frequency GPU will be clocked at. Given in kHz. - * This must be specified as there is no default value. 
- * - * Attached value: number in kHz - * Default value: NA - */ -#define GPU_FREQ_KHZ_MIN (100000) - -/** - * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock - * - * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func - * for the function prototype. - * - * Attached value: A kbase_cpu_clk_speed_func. - * Default Value: NA - */ -#define CPU_SPEED_FUNC (NULL) - -/** - * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock - * - * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func - * for the function prototype. - * - * Attached value: A kbase_gpu_clk_speed_func. - * Default Value: NA - */ -#define GPU_SPEED_FUNC (NULL) - -/** - * Power management configuration - * - * Attached value: pointer to @ref kbase_pm_callback_conf - * Default value: See @ref kbase_pm_callback_conf - */ -#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks) - -/** - * Platform specific configuration functions - * - * Attached value: pointer to @ref kbase_platform_funcs_conf - * Default value: See @ref kbase_platform_funcs_conf - */ -extern struct kbase_platform_funcs_conf dt_funcs_conf; -#define PLATFORM_FUNCS (&dt_funcs_conf) - -/** Power model for IPA - * - * Attached value: pointer to @ref mali_pa_model_ops - */ -#ifdef CONFIG_DEVFREQ_THERMAL -#define POWER_MODEL_CALLBACKS (&t83x_model_ops) -extern struct devfreq_cooling_ops t83x_model_ops; -#else -#define POWER_MODEL_CALLBACKS (NULL) -#endif -extern struct kbase_pm_callback_conf pm_callbacks; - -void mali_dev_freeze(void); -void mali_dev_restore(void); - -/** - * Secure mode switch - * - * Attached value: pointer to @ref kbase_secure_ops - */ -#define SECURE_CALLBACKS (NULL) diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_runtime_pm.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_runtime_pm.c deleted file mode 100755 index 3c9e65d39e5a8..0000000000000 --- 
a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_runtime_pm.c +++ /dev/null @@ -1,239 +0,0 @@ -/* - * - * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#include -#include -#include -#include -#include -#include -#include - -void *reg_base_hiubus = NULL; -u32 override_value_aml = 0; -static int first = 1; - -extern u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev, enum kbase_pm_core_type type); -static void Mali_pwr_on_with_kdev ( struct kbase_device *kbdev, uint32_t mask) -{ - uint32_t part1_done; - part1_done = 0; - Mali_WrReg(0x0, 0x0, 0x0000024, 0xffffffff); // clear interrupts - - if ((mask & 0x1) != 0 ) { - Mali_WrReg(0x0, 0x0, 0x00000190, 0xffffffff); // Power on all cores - Mali_WrReg(0x0, 0x0, 0x00000194, 0xffffffff); // Power on all cores - Mali_WrReg(0x0, 0x0, 0x000001a0, 0xffffffff); // Power on all cores - Mali_WrReg(0x0, 0x0, 0x000001a4, 0xffffffff); // Power on all cores - } - - if ( (mask >> 1) != 0 ) { - Mali_WrReg(0x0, 0x0, 0x00000180, mask >> 1); // Power on all cores - Mali_WrReg(0x0, 0x0, 0x00000184, 0x0); // Power on all cores - } - - if ( mask != 0 ) { - udelay(10); - part1_done = Mali_RdReg(0x0, 0x0, 0x0000020); - //printk("%s, %d\n", __func__, __LINE__); - while((part1_done ==0)) { part1_done = Mali_RdReg(0x0, 0x0, 0x00000020); udelay(10); } - //printk("Mali_pwr_on:gpu_irq : %x\n", part1_done); - Mali_WrReg(0x0, 0x0, 0x0000024, 0xffffffff); // clear interrupts - } -} - -static void gpu_power_main( struct kbase_device *kbdev) { - - 
Mali_pwr_on ( 0x7); - - printk("%s, %d\n", __func__, __LINE__); -} - -static int pm_callback_power_on(struct kbase_device *kbdev) -{ - int ret; - struct platform_device *pdev = to_platform_device(kbdev->dev); - struct resource *reg_res; - u64 core_ready; - u64 l2_ready; - u64 tiler_ready; - u32 value; - - //printk("20151013, %s, %d\n", __FILE__, __LINE__); - if (first == 0) goto ret; - - reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!reg_res) { - dev_err(kbdev->dev, "Invalid register resource\n"); - ret = -ENOENT; - } - //printk("%s, %d\n", __FILE__, __LINE__); - if (NULL == reg_base_hiubus) - reg_base_hiubus = ioremap(reg_res->start, resource_size(reg_res)); - - //printk("%s, %d\n", __FILE__, __LINE__); - if (NULL == reg_base_hiubus) { - dev_err(kbdev->dev, "Invalid register resource\n"); - ret = -ENOENT; - } - - //printk("%s, %d\n", __FILE__, __LINE__); - -//JOHNT - // Level reset mail - - // Level reset mail - //Wr(P_RESET2_MASK, ~(0x1<<14)); - //Wr(P_RESET2_LEVEL, ~(0x1<<14)); - - //Wr(P_RESET2_LEVEL, 0xffffffff); - //Wr(P_RESET0_LEVEL, 0xffffffff); - - value = Rd(RESET0_MASK); - value = value & (~(0x1<<20)); - //printk("line(%d), value=%x\n", __LINE__, value); - Wr(RESET0_MASK, value); - - value = Rd(RESET0_LEVEL); - value = value & (~(0x1<<20)); - //printk("line(%d), value=%x\n", __LINE__, value); - Wr(RESET0_LEVEL, value); -/////////////// - value = Rd(RESET2_MASK); - value = value & (~(0x1<<14)); - //printk("line(%d), value=%x\n", __LINE__, value); - Wr(RESET2_MASK, value); - - value = Rd(RESET2_LEVEL); - value = value & (~(0x1<<14)); - //printk("line(%d), value=%x\n", __LINE__, value); - Wr(RESET2_LEVEL, value); - - value = Rd(RESET0_LEVEL); - value = value | ((0x1<<20)); - //printk("line(%d), value=%x\n", __LINE__, value); - Wr(RESET0_LEVEL, value); - - value = Rd(RESET2_LEVEL); - value = value | ((0x1<<14)); - //printk("line(%d), value=%x\n", __LINE__, value); - Wr(RESET2_LEVEL, value); - - udelay(10); // OR POLL for reset done - - 
kbase_reg_write(kbdev, GPU_CONTROL_REG(PWR_KEY), 0x2968A819, NULL); - kbase_reg_write(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE1), 0xfff | (0x20<<16), NULL); - - gpu_power_main(kbdev); - //printk("set PWR_ORRIDE, reg=%p, reg_start=%llx, reg_size=%zx, reg_mapped=%p\n", - // kbdev->reg, kbdev->reg_start, kbdev->reg_size, reg_base_hiubus); - dev_dbg(kbdev->dev, "pm_callback_power_on %p\n", - (void *)kbdev->dev->pm_domain); - - first = 0; - //printk("%s, %d\n", __FILE__, __LINE__); -ret: - ret = pm_runtime_get_sync(kbdev->dev); - udelay(100); -#if 1 - - core_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER); - l2_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2); - tiler_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_TILER); - //printk("core_ready=%llx, l2_ready=%llx, tiler_ready=%llx\n", core_ready, l2_ready, tiler_ready); -#endif - dev_dbg(kbdev->dev, "pm_runtime_get returned %d\n", ret); - - return 1; -} - -static void pm_callback_power_off(struct kbase_device *kbdev) -{ - dev_dbg(kbdev->dev, "pm_callback_power_off\n"); - //printk("%s, %d\n", __FILE__, __LINE__); -#if 0 - iounmap(reg_base_hiubus); - reg_base_hiubus = NULL; -#endif - - pm_runtime_put_autosuspend(kbdev->dev); -} - -int kbase_device_runtime_init(struct kbase_device *kbdev) -{ - dev_dbg(kbdev->dev, "kbase_device_runtime_init\n"); - pm_runtime_enable(kbdev->dev); -#ifdef CONFIG_MALI_MIDGARD_DEBUG_SYS - { - int err = kbase_platform_create_sysfs_file(kbdev->dev); - - if (err) - return err; - } -#endif /* CONFIG_MALI_MIDGARD_DEBUG_SYS */ - return 0; -} - -void kbase_device_runtime_disable(struct kbase_device *kbdev) -{ - dev_dbg(kbdev->dev, "kbase_device_runtime_disable\n"); - pm_runtime_disable(kbdev->dev); -} - -static int pm_callback_runtime_on(struct kbase_device *kbdev) -{ - dev_dbg(kbdev->dev, "pm_callback_runtime_on\n"); - - return 0; -} - -static void pm_callback_runtime_off(struct kbase_device *kbdev) -{ - dev_dbg(kbdev->dev, "pm_callback_runtime_off\n"); -} - -static void 
pm_callback_resume(struct kbase_device *kbdev) -{ - int ret = pm_callback_runtime_on(kbdev); - - WARN_ON(ret); -} - -static void pm_callback_suspend(struct kbase_device *kbdev) -{ - pm_callback_runtime_off(kbdev); -} - -struct kbase_pm_callback_conf pm_callbacks = { - .power_on_callback = pm_callback_power_on, - .power_off_callback = pm_callback_power_off, - .power_suspend_callback = pm_callback_suspend, - .power_resume_callback = pm_callback_resume, -#ifdef KBASE_PM_RUNTIME - .power_runtime_init_callback = kbase_device_runtime_init, - .power_runtime_term_callback = kbase_device_runtime_disable, - .power_runtime_on_callback = pm_callback_runtime_on, - .power_runtime_off_callback = pm_callback_runtime_off, -#else /* KBASE_PM_RUNTIME */ - .power_runtime_init_callback = NULL, - .power_runtime_term_callback = NULL, - .power_runtime_on_callback = NULL, - .power_runtime_off_callback = NULL, -#endif /* KBASE_PM_RUNTIME */ -}; - - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_platform.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_platform.h deleted file mode 100644 index 41185d0249312..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_platform.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * mali_platform.h - * - * Created on: Nov 8, 2013 - * Author: amlogic - */ - -#include -#ifndef MALI_PLATFORM_H_ -#define MALI_PLATFORM_H_ - -extern u32 mali_gp_reset_fail; -extern u32 mali_core_timeout; - -#endif /* MALI_PLATFORM_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_scaling.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_scaling.h deleted file mode 100644 index dd2a53ba370f5..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_scaling.h +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (C) 2013 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained from Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -/** - * @file arm_core_scaling.h - * Example core scaling policy. - */ - -#ifndef __ARM_CORE_SCALING_H__ -#define __ARM_CORE_SCALING_H__ - -#include -#include -#include - -enum mali_scale_mode_t { - MALI_PP_SCALING = 0, - MALI_PP_FS_SCALING, - MALI_SCALING_DISABLE, - MALI_TURBO_MODE, - MALI_SCALING_MODE_MAX -}; - -typedef struct mali_dvfs_threshold_table { - uint32_t freq_index; - uint32_t voltage; - uint32_t keep_count; - uint32_t downthreshold; - uint32_t upthreshold; - uint32_t clk_freq; - const char *clk_parent; - struct clk *clkp_handle; - uint32_t clkp_freq; -} mali_dvfs_threshold_table; - -/** - * restrictions on frequency and number of pp. - */ -typedef struct mali_scale_info_t { - u32 minpp; - u32 maxpp; - u32 minclk; - u32 maxclk; -} mali_scale_info_t; - -/** - * Platform spesific data for meson chips. - */ -typedef struct mali_plat_info_t { - u32 cfg_pp; /* number of pp. */ - u32 cfg_min_pp; - u32 turbo_clock; /* reserved clock src. */ - u32 def_clock; /* gpu clock used most of time.*/ - u32 cfg_clock; /* max clock could be used.*/ - u32 cfg_clock_bkup; /* same as cfg_clock, for backup. */ - u32 cfg_min_clock; - - u32 sc_mpp; /* number of pp used most of time.*/ - u32 bst_gpu; /* threshold for boosting gpu. */ - u32 bst_pp; /* threshold for boosting PP. */ - - u32 *clk; - u32 *clk_sample; - u32 clk_len; - u32 have_switch; /* have clock gate switch or not. 
*/ - - mali_dvfs_threshold_table *dvfs_table; - u32 dvfs_table_size; - - mali_scale_info_t scale_info; - u32 maxclk_sysfs; - u32 maxpp_sysfs; - - /* set upper limit of pp or frequency, for THERMAL thermal or band width saving.*/ - u32 limit_on; - - /* for boost up gpu by user. */ - void (*plat_preheat)(void); - - struct platform_device *pdev; - void __iomem *reg_base_hiubus; - void __iomem *reg_base_aobus; - struct work_struct wq_work; - struct clk *clk_mali; - struct clk *clk_mali_0; - struct clk *clk_mali_1; -} mali_plat_info_t; -mali_plat_info_t* get_mali_plat_data(void); - -/** - * Initialize core scaling policy. - * - * @note The core scaling policy will assume that all PP cores are on initially. - * - * @param num_pp_cores Total number of PP cores. - */ -int mali_core_scaling_init(mali_plat_info_t*); - -/** - * Terminate core scaling policy. - */ -void mali_core_scaling_term(void); - -/** - * cancel and flush scaling job queue. - */ -void flush_scaling_job(void); - -/* get current state(pp, clk). */ -void get_mali_rt_clkpp(u32* clk, u32* pp); -u32 set_mali_rt_clkpp(u32 clk, u32 pp, u32 flush); -void revise_mali_rt(void); -/* get max gpu clk level of this chip*/ -int get_gpu_max_clk_level(void); - -/* get or set the scale mode. */ -u32 get_mali_schel_mode(void); -void set_mali_schel_mode(u32 mode); - -/* for frequency reporter in DS-5 streamline. */ -u32 get_current_frequency(void); -void mali_dev_freeze(void); -void mali_dev_restore(void); - -#endif /* __ARM_CORE_SCALING_H__ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/meson_main2.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/meson_main2.c deleted file mode 100644 index 7687f92bd6c29..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/meson_main2.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (C) 2010, 2012-2014 Amlogic Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * - */ - -/** - * @file mali_platform.c - * Platform specific Mali driver functions for: - * meson8m2 and the newer chip - */ -#include -#include -#include -#include -#include -#ifdef CONFIG_PM_RUNTIME -#include -#endif -#include -#include -#include - -#include -#include -#include -#include - -#include "mali_scaling.h" -#include "mali_clock.h" -#include "meson_main2.h" - -extern void mali_post_init(void); -struct kbase_device; -//static int gpu_dvfs_probe(struct platform_device *pdev) -int platform_dt_init_func(struct kbase_device *kbdev) -{ - struct device *dev = kbdev->dev; - struct platform_device *pdev = to_platform_device(dev); - - int err = -1; - - err = mali_meson_init_start(pdev); - mali_meson_init_finish(pdev); - mpgpu_class_init(); - mali_post_init(); - return err; -} - -//static int gpu_dvfs_remove(struct platform_device *pdev) -void platform_dt_term_func(struct kbase_device *kbdev) -{ - struct device *dev = kbdev->dev; - struct platform_device *pdev = to_platform_device(dev); - - printk("%s, %d\n", __FILE__, __LINE__); - - mpgpu_class_exit(); - mali_meson_uninit(pdev); - -} - -inline int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation, - u32 util_gl_share, u32 util_cl_share[2]) -{ - mali_gpu_utilization_callback(utilisation*255/100); - return 1; -} - -struct kbase_platform_funcs_conf dt_funcs_conf = { - .platform_init_func = platform_dt_init_func, - .platform_term_func = platform_dt_term_func, -}; -#if 0 -static const struct of_device_id gpu_dvfs_ids[] = { - { .compatible = "meson, gpu-dvfs-1.00.a" }, - { }, -}; -MODULE_DEVICE_TABLE(of, gpu_dvfs_ids); - -static struct platform_driver gpu_dvfs_driver = { - .driver = { - .name = "meson-gpu-dvfs", - .owner = THIS_MODULE, - 
.of_match_table = gpu_dvfs_ids, - }, - .probe = gpu_dvfs_probe, - .remove = gpu_dvfs_remove, -}; -module_platform_driver(gpu_dvfs_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Amlogic SH, MM"); -MODULE_DESCRIPTION("Driver for the Meson GPU dvfs"); -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/meson_main2.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/meson_main2.h deleted file mode 100644 index ca797524a2d02..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/meson_main2.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * mali_platform.h - * - * Created on: Nov 8, 2013 - * Author: amlogic - */ - -#ifndef MESON_MAIN_H_ -#define MESON_MAIN_H_ -#include -#include -#include -#ifdef CONFIG_PM_RUNTIME -#include -#endif -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 29)) -#include -#endif - -#include "mali_scaling.h" -#include "mali_clock.h" - -u32 set_max_mali_freq(u32 idx); -u32 get_max_mali_freq(void); - -int mali_meson_init_start(struct platform_device* ptr_plt_dev); -int mali_meson_init_finish(struct platform_device* ptr_plt_dev); -int mali_meson_uninit(struct platform_device* ptr_plt_dev); -int mpgpu_class_init(void); -void mpgpu_class_exit(void); -void mali_gpu_utilization_callback(int utilization_pp); - -#endif /* MESON_MAIN_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mpgpu.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mpgpu.c deleted file mode 100644 index 4c5e26fa11bb1..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/mpgpu.c +++ /dev/null @@ -1,351 +0,0 @@ -/******************************************************************* - * - * Copyright C 2013 by Amlogic, Inc. All Rights Reserved. 
- * - * Description: - * - * Author: Amlogic Software - * Created: 2010/4/1 19:46 - * - *******************************************************************/ -/* Standard Linux headers */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 29)) -#include -#include -#include -#endif - -#include "meson_main2.h" - -static ssize_t domain_stat_read(struct class *class, - struct class_attribute *attr, char *buf) -{ - unsigned int val; - mali_plat_info_t* pmali_plat = get_mali_plat_data(); - - val = readl(pmali_plat->reg_base_aobus + 0xf0) & 0xff; - return sprintf(buf, "%x\n", val>>4); - return 0; -} - -#define PREHEAT_CMD "preheat" -#define PLL2_CMD "mpl2" /* mpl2 [11] or [0xxxxxxx] */ -#define SCMPP_CMD "scmpp" /* scmpp [number of pp your want in most of time]. */ -#define BSTGPU_CMD "bstgpu" /* bstgpu [0-256] */ -#define BSTPP_CMD "bstpp" /* bstpp [0-256] */ -#define LIMIT_CMD "lmt" /* lmt [0 or 1] */ -#define MAX_TOKEN 20 -#define FULL_UTILIZATION 256 - -static ssize_t mpgpu_write(struct class *class, - struct class_attribute *attr, const char *buf, size_t count) -{ - char *pstart, *cprt = NULL; - u32 val = 0; - mali_plat_info_t* pmali_plat = get_mali_plat_data(); - - cprt = skip_spaces(buf); - pstart = strsep(&cprt," "); - if (strlen(pstart) < 1) - goto quit; - - if (!strncmp(pstart, PREHEAT_CMD, MAX_TOKEN)) { - if (pmali_plat->plat_preheat) { - pmali_plat->plat_preheat(); - } - } else if (!strncmp(pstart, PLL2_CMD, MAX_TOKEN)) { - int base = 10; - if ((strlen(cprt) > 2) && (cprt[0] == '0') && - (cprt[1] == 'x' || cprt[1] == 'X')) - base = 16; - if (kstrtouint(cprt, base, &val) <0) - goto quit; - if (val < 11) - pmali_plat->cfg_clock = pmali_plat->cfg_clock_bkup; - else - pmali_plat->cfg_clock = pmali_plat->turbo_clock; - pmali_plat->scale_info.maxclk = pmali_plat->cfg_clock; - set_str_src(val); - } else if (!strncmp(pstart, SCMPP_CMD, MAX_TOKEN)) { - if 
((kstrtouint(cprt, 10, &val) <0) || pmali_plat == NULL) - goto quit; - if ((val > 0) && (val < pmali_plat->cfg_pp)) { - pmali_plat->sc_mpp = val; - } - } else if (!strncmp(pstart, BSTGPU_CMD, MAX_TOKEN)) { - if ((kstrtouint(cprt, 10, &val) <0) || pmali_plat == NULL) - goto quit; - if ((val > 0) && (val < FULL_UTILIZATION)) { - pmali_plat->bst_gpu = val; - } - } else if (!strncmp(pstart, BSTPP_CMD, MAX_TOKEN)) { - if ((kstrtouint(cprt, 10, &val) <0) || pmali_plat == NULL) - goto quit; - if ((val > 0) && (val < FULL_UTILIZATION)) { - pmali_plat->bst_pp = val; - } - } else if (!strncmp(pstart, LIMIT_CMD, MAX_TOKEN)) { - if ((kstrtouint(cprt, 10, &val) <0) || pmali_plat == NULL) - goto quit; - - if (val < 2) { - pmali_plat->limit_on = val; - if (val == 0) { - pmali_plat->scale_info.maxclk = pmali_plat->cfg_clock; - pmali_plat->scale_info.maxpp = pmali_plat->cfg_pp; - revise_mali_rt(); - } - } - } -quit: - return count; -} - -static ssize_t scale_mode_read(struct class *class, - struct class_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", get_mali_schel_mode()); -} - -static ssize_t scale_mode_write(struct class *class, - struct class_attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned int val; - - ret = kstrtouint(buf, 10, &val); - if (0 != ret) - { - return -EINVAL; - } - - set_mali_schel_mode(val); - - return count; -} - -static ssize_t max_pp_read(struct class *class, - struct class_attribute *attr, char *buf) -{ - mali_plat_info_t* pmali_plat = get_mali_plat_data(); - printk("maxpp:%d, maxpp_sysfs:%d, total=%d\n", - pmali_plat->scale_info.maxpp, pmali_plat->maxpp_sysfs, - pmali_plat->cfg_pp); - return sprintf(buf, "%d\n", pmali_plat->cfg_pp); -} - -static ssize_t max_pp_write(struct class *class, - struct class_attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned int val; - mali_plat_info_t* pmali_plat; - mali_scale_info_t* pinfo; - - pmali_plat = get_mali_plat_data(); - pinfo = &pmali_plat->scale_info; - - ret 
= kstrtouint(buf, 10, &val); - if ((0 != ret) || (val > pmali_plat->cfg_pp) || (val < pinfo->minpp)) - return -EINVAL; - - pmali_plat->maxpp_sysfs = val; - pinfo->maxpp = val; - revise_mali_rt(); - - return count; -} - -static ssize_t min_pp_read(struct class *class, - struct class_attribute *attr, char *buf) -{ - mali_plat_info_t* pmali_plat = get_mali_plat_data(); - return sprintf(buf, "%d\n", pmali_plat->scale_info.minpp); -} - -static ssize_t min_pp_write(struct class *class, - struct class_attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned int val; - mali_plat_info_t* pmali_plat; - mali_scale_info_t* pinfo; - - pmali_plat = get_mali_plat_data(); - pinfo = &pmali_plat->scale_info; - - ret = kstrtouint(buf, 10, &val); - if ((0 != ret) || (val > pinfo->maxpp) || (val < 1)) - return -EINVAL; - - pinfo->minpp = val; - revise_mali_rt(); - - return count; -} - -static ssize_t max_freq_read(struct class *class, - struct class_attribute *attr, char *buf) -{ - mali_plat_info_t* pmali_plat = get_mali_plat_data(); - printk("maxclk:%d, maxclk_sys:%d, max gpu level=%d\n", - pmali_plat->scale_info.maxclk, pmali_plat->maxclk_sysfs, get_gpu_max_clk_level()); - return sprintf(buf, "%d\n", get_gpu_max_clk_level()); -} - -static ssize_t max_freq_write(struct class *class, - struct class_attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned int val; - mali_plat_info_t* pmali_plat; - mali_scale_info_t* pinfo; - - pmali_plat = get_mali_plat_data(); - pinfo = &pmali_plat->scale_info; - - ret = kstrtouint(buf, 10, &val); - if ((0 != ret) || (val > pmali_plat->cfg_clock) || (val < pinfo->minclk)) - return -EINVAL; - - pmali_plat->maxclk_sysfs = val; - pinfo->maxclk = val; - revise_mali_rt(); - - return count; -} - -static ssize_t min_freq_read(struct class *class, - struct class_attribute *attr, char *buf) -{ - mali_plat_info_t* pmali_plat = get_mali_plat_data(); - return sprintf(buf, "%d\n", pmali_plat->scale_info.minclk); -} - -static ssize_t 
min_freq_write(struct class *class, - struct class_attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned int val; - mali_plat_info_t* pmali_plat; - mali_scale_info_t* pinfo; - - pmali_plat = get_mali_plat_data(); - pinfo = &pmali_plat->scale_info; - - ret = kstrtouint(buf, 10, &val); - if ((0 != ret) || (val > pinfo->maxclk)) - return -EINVAL; - - pinfo->minclk = val; - revise_mali_rt(); - - return count; -} - -static ssize_t freq_read(struct class *class, - struct class_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", get_current_frequency()); -} - -static ssize_t freq_write(struct class *class, - struct class_attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned int val; - u32 clk, pp; - get_mali_rt_clkpp(&clk, &pp); - - ret = kstrtouint(buf, 10, &val); - if (0 != ret) - return -EINVAL; - - set_mali_rt_clkpp(val, pp, 1); - - return count; -} - -static ssize_t current_pp_read(struct class *class, - struct class_attribute *attr, char *buf) -{ - u32 clk, pp; - get_mali_rt_clkpp(&clk, &pp); - return sprintf(buf, "%d\n", pp); -} - -static ssize_t current_pp_write(struct class *class, - struct class_attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned int val; - u32 clk, pp; - - get_mali_rt_clkpp(&clk, &pp); - ret = kstrtouint(buf, 10, &val); - if (0 != ret) - { - return -EINVAL; - } - - ret = kstrtouint(buf, 10, &val); - if (0 != ret) - return -EINVAL; - - set_mali_rt_clkpp(clk, val, 1); - - return count; -} - -static struct class_attribute mali_class_attrs[] = { - __ATTR(domain_stat, 0644, domain_stat_read, NULL), - __ATTR(mpgpucmd, 0644, NULL, mpgpu_write), - __ATTR(scale_mode, 0644, scale_mode_read, scale_mode_write), - __ATTR(min_freq, 0644, min_freq_read, min_freq_write), - __ATTR(max_freq, 0644, max_freq_read, max_freq_write), - __ATTR(min_pp, 0644, min_pp_read, min_pp_write), - __ATTR(max_pp, 0644, max_pp_read, max_pp_write), - __ATTR(cur_freq, 0644, freq_read, freq_write), - __ATTR(cur_pp, 
0644, current_pp_read, current_pp_write), -}; - -static struct class mpgpu_class = { - .name = "mpgpu", -}; - -int mpgpu_class_init(void) -{ - int ret = 0; - int i; - int attr_num = ARRAY_SIZE(mali_class_attrs); - - ret = class_register(&mpgpu_class); - if (ret) { - printk(KERN_ERR "%s: class_register failed\n", __func__); - return ret; - } - for (i = 0; i< attr_num; i++) { - ret = class_create_file(&mpgpu_class, &mali_class_attrs[i]); - if (ret) { - printk(KERN_ERR "%d ST: class item failed to register\n", i); - } - } - return ret; -} - -void mpgpu_class_exit(void) -{ - class_unregister(&mpgpu_class); -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/platform_gx.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/platform_gx.c deleted file mode 100644 index 4fa23bfdcec01..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/platform_gx.c +++ /dev/null @@ -1,233 +0,0 @@ -/* - * platform.c - * - * clock source setting and resource config - * - * Created on: Dec 4, 2013 - * Author: amlogic - */ - -#include -#include -#include -#include -#include /* kernel module definitions */ -#include /* request_mem_region */ -#include -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 29)) -#include -#include -#include -#endif -#include -#ifdef CONFIG_GPU_THERMAL -#include -#include -#include -#endif - -#include "mali_scaling.h" -#include "mali_clock.h" -#include "meson_main2.h" - -/* - * For Meson 8 M2. - * - */ -static void mali_plat_preheat(void); -static mali_plat_info_t mali_plat_data = { - .bst_gpu = 210, /* threshold for boosting gpu. */ - .bst_pp = 160, /* threshold for boosting PP. 
*/ - .have_switch = 1, - .limit_on = 1, - .plat_preheat = mali_plat_preheat, -}; - -static void mali_plat_preheat(void) -{ -#ifndef CONFIG_MALI_DVFS - u32 pre_fs; - u32 clk, pp; - - if (get_mali_schel_mode() != MALI_PP_FS_SCALING) - return; - - get_mali_rt_clkpp(&clk, &pp); - pre_fs = mali_plat_data.def_clock + 1; - if (clk < pre_fs) - clk = pre_fs; - if (pp < mali_plat_data.sc_mpp) - pp = mali_plat_data.sc_mpp; - set_mali_rt_clkpp(clk, pp, 1); -#endif -} - -mali_plat_info_t* get_mali_plat_data(void) { - return &mali_plat_data; -} - -int get_mali_freq_level(int freq) -{ - int i = 0, level = -1; - int mali_freq_num; - - if (freq < 0) - return level; - - mali_freq_num = mali_plat_data.dvfs_table_size - 1; - if (freq <= mali_plat_data.clk_sample[0]) - level = mali_freq_num-1; - if (freq >= mali_plat_data.clk_sample[mali_freq_num - 1]) - level = 0; - for (i=0; i= mali_plat_data.clk_sample[i] && freq <= mali_plat_data.clk_sample[i + 1]) { - level = i; - level = mali_freq_num-level - 1; - } - } - return level; -} - -unsigned int get_mali_max_level(void) -{ - return mali_plat_data.dvfs_table_size - 1; -} - -int get_gpu_max_clk_level(void) -{ - return mali_plat_data.cfg_clock; -} - -#ifdef CONFIG_GPU_THERMAL -static void set_limit_mali_freq(u32 idx) -{ - if (mali_plat_data.limit_on == 0) - return; - if (idx > mali_plat_data.turbo_clock || idx < mali_plat_data.scale_info.minclk) - return; - if (idx > mali_plat_data.maxclk_sysfs) { - printk("idx > max freq\n"); - return; - } - mali_plat_data.scale_info.maxclk= idx; - revise_mali_rt(); -} - -static u32 get_limit_mali_freq(void) -{ - return mali_plat_data.scale_info.maxclk; -} - -static u32 get_mali_utilization(void) -{ -#ifndef MESON_DRV_BRING - return 55; -#else - return (_mali_ukk_utilization_pp() * 100) / 256; -#endif -} -#endif - -#ifdef CONFIG_GPU_THERMAL -static u32 set_limit_pp_num(u32 num) -{ - u32 ret = -1; - if (mali_plat_data.limit_on == 0) - goto quit; - if (num > mali_plat_data.cfg_pp || - num < 
mali_plat_data.scale_info.minpp) - goto quit; - - if (num > mali_plat_data.maxpp_sysfs) { - printk("pp > sysfs set pp\n"); - goto quit; - } - - mali_plat_data.scale_info.maxpp = num; - revise_mali_rt(); - ret = 0; -quit: - return ret; -} -static u32 mali_get_online_pp(void) -{ - unsigned int val; - mali_plat_info_t* pmali_plat = get_mali_plat_data(); - - val = readl(pmali_plat->reg_base_aobus + 0xf0) & 0xff; - if (val == 0x07) /* No pp is working */ - return 0; - -#ifndef MESON_DRV_BRING - return 2; -#else - return mali_executor_get_num_cores_enabled(); -#endif -} -#endif - -int mali_meson_init_start(struct platform_device* ptr_plt_dev) -{ - //dev_set_drvdata(&ptr_plt_dev->dev, &mali_plat_data); - mali_dt_info(ptr_plt_dev, &mali_plat_data); - mali_clock_init_clk_tree(ptr_plt_dev); - return 0; -} - -int mali_meson_init_finish(struct platform_device* ptr_plt_dev) -{ - if (mali_core_scaling_init(&mali_plat_data) < 0) - return -1; - return 0; -} - -int mali_meson_uninit(struct platform_device* ptr_plt_dev) -{ - mali_core_scaling_term(); - return 0; -} - -void mali_post_init(void) -{ -#ifdef CONFIG_GPU_THERMAL - int err; - struct gpufreq_cooling_device *gcdev = NULL; - struct gpucore_cooling_device *gccdev = NULL; - - gcdev = gpufreq_cooling_alloc(); - register_gpu_freq_info(get_current_frequency); - if (IS_ERR(gcdev)) - printk("malloc gpu cooling buffer error!!\n"); - else if (!gcdev) - printk("system does not enable thermal driver\n"); - else { - gcdev->get_gpu_freq_level = get_mali_freq_level; - gcdev->get_gpu_max_level = get_mali_max_level; - gcdev->set_gpu_freq_idx = set_limit_mali_freq; - gcdev->get_gpu_current_max_level = get_limit_mali_freq; - gcdev->get_gpu_freq = get_mali_freq; - gcdev->get_gpu_loading = get_mali_utilization; - gcdev->get_online_pp = mali_get_online_pp; - err = gpufreq_cooling_register(gcdev); - aml_thermal_min_update(gcdev->cool_dev); - if (err < 0) - printk("register GPU cooling error\n"); - printk("gpu cooling register okay with 
err=%d\n",err); - } - - gccdev = gpucore_cooling_alloc(); - if (IS_ERR(gccdev)) - printk("malloc gpu core cooling buffer error!!\n"); - else if (!gccdev) - printk("system does not enable thermal driver\n"); - else { - gccdev->max_gpu_core_num=mali_plat_data.cfg_pp; - gccdev->set_max_pp_num=set_limit_pp_num; - err = (int)gpucore_cooling_register(gccdev); - aml_thermal_min_update(gccdev->cool_dev); - if (err < 0) - printk("register GPU cooling error\n"); - printk("gpu core cooling register okay with err=%d\n",err); - } -#endif -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/scaling.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/scaling.c deleted file mode 100644 index c41d461ee4b76..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/devicetree/scaling.c +++ /dev/null @@ -1,551 +0,0 @@ -/* - * Copyright (C) 2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained from Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -/** - * @file arm_core_scaling.c - * Example core scaling policy. 
- */ - -#include -#include -#include - -#include -#define LOG_MALI_SCALING 1 -#include "meson_main2.h" -#include "mali_clock.h" - -static int currentStep; -#ifndef CONFIG_MALI_DVFS -static int num_cores_enabled; -static int lastStep; -static struct work_struct wq_work; -static mali_plat_info_t* pmali_plat = NULL; -#endif -static int scaling_mode = MALI_PP_FS_SCALING; -extern int mali_pm_statue; -//static int scaling_mode = MALI_SCALING_DISABLE; -//static int scaling_mode = MALI_PP_SCALING; - -static struct gp_pll_user_handle_s *gp_pll_user_gpu; -static int is_gp_pll_get; -static int is_gp_pll_put; - -static unsigned scaling_dbg_level = 0; -module_param(scaling_dbg_level, uint, 0644); -MODULE_PARM_DESC(scaling_dbg_level , "scaling debug level"); - -#define scalingdbg(level, fmt, arg...) \ - do { \ - if (scaling_dbg_level >= (level)) \ - printk(fmt , ## arg); \ - } while (0) - -#ifndef CONFIG_MALI_DVFS -static inline void mali_clk_exected(void) -{ - mali_dvfs_threshold_table * pdvfs = pmali_plat->dvfs_table; - uint32_t execStep = currentStep; - mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[currentStep]; - - if (0 == strcmp(dvfs_tbl->clk_parent, "gp0_pll")) { - gp_pll_request(gp_pll_user_gpu); - if (!is_gp_pll_get) { - //printk("not get pll\n"); - execStep = currentStep - 1; - } - } else { - //not get the gp pll, do need put - is_gp_pll_get = 0; - is_gp_pll_put = 0; - gp_pll_release(gp_pll_user_gpu); - } - - //if (pdvfs[currentStep].freq_index == pdvfs[lastStep].freq_index) return; - if (pdvfs[execStep].freq_index == pdvfs[lastStep].freq_index) { - return; - } - - //mali_dev_pause(); - mali_clock_set(pdvfs[execStep].freq_index); - //mali_dev_resume(); - lastStep = execStep; - if (is_gp_pll_put) { - //printk("release gp0 pll\n"); - gp_pll_release(gp_pll_user_gpu); - gp_pll_request(gp_pll_user_gpu); - is_gp_pll_get = 0; - is_gp_pll_put = 0; - } - -} -static int gp_pll_user_cb_gpu(struct gp_pll_user_handle_s *user, - int event) -{ - if (event == 
GP_PLL_USER_EVENT_GRANT) { - //printk("granted\n"); - is_gp_pll_get = 1; - is_gp_pll_put = 0; - schedule_work(&wq_work); - } else if (event == GP_PLL_USER_EVENT_YIELD) { - //printk("ask for yield\n"); - is_gp_pll_get = 0; - is_gp_pll_put = 1; - schedule_work(&wq_work); - } - - return 0; -} - -int mali_perf_set_num_pp_cores(int cores) -{ - cores = cores; - return 0; -} - -static void do_scaling(struct work_struct *work) -{ - mali_dvfs_threshold_table * pdvfs = pmali_plat->dvfs_table; - int err = mali_perf_set_num_pp_cores(num_cores_enabled); - if (err < 0) scalingdbg(1, "set pp failed"); - - scalingdbg(1, "set pp cores to %d\n", num_cores_enabled); - scalingdbg(1, "pdvfs[%d].freq_index=%d, pdvfs[%d].freq_index=%d\n", - currentStep, pdvfs[currentStep].freq_index, - lastStep, pdvfs[lastStep].freq_index); - mali_clk_exected(); -#ifdef CONFIG_MALI400_PROFILING - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | - MALI_PROFILING_EVENT_CHANNEL_GPU | - MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, - get_current_frequency(), - 0, 0, 0, 0); -#endif -} -#endif - -u32 revise_set_clk(u32 val, u32 flush) -{ - u32 ret = 0; -#ifndef CONFIG_MALI_DVFS - mali_scale_info_t* pinfo; - - pinfo = &pmali_plat->scale_info; - - if (val < pinfo->minclk) - val = pinfo->minclk; - else if (val > pinfo->maxclk) - val = pinfo->maxclk; - - if (val != currentStep) { - currentStep = val; - if (flush) - schedule_work(&wq_work); - else - ret = 1; - } -#endif - return ret; -} - -void get_mali_rt_clkpp(u32* clk, u32* pp) -{ -#ifndef CONFIG_MALI_DVFS - *clk = currentStep; - *pp = num_cores_enabled; -#endif -} - -u32 set_mali_rt_clkpp(u32 clk, u32 pp, u32 flush) -{ - u32 ret = 0; -#ifndef CONFIG_MALI_DVFS - mali_scale_info_t* pinfo; - u32 flush_work = 0; - - pinfo = &pmali_plat->scale_info; - if (clk < pinfo->minclk) - clk = pinfo->minclk; - else if (clk > pinfo->maxclk) - clk = pinfo->maxclk; - - if (clk != currentStep) { - currentStep = clk; - if (flush) - flush_work++; - else - 
ret = 1; - } - if (pp < pinfo->minpp) - pp = pinfo->minpp; - else if (pp > pinfo->maxpp) - pp = pinfo->maxpp; - - if (pp != num_cores_enabled) { - num_cores_enabled = pp; - if (flush) - flush_work++; - else - ret = 1; - } - - if (flush_work) - schedule_work(&wq_work); -#endif - return ret; -} - -void revise_mali_rt(void) -{ -#ifndef CONFIG_MALI_DVFS - set_mali_rt_clkpp(currentStep, num_cores_enabled, 1); -#endif -} - -void flush_scaling_job(void) -{ -#ifndef CONFIG_MALI_DVFS - cancel_work_sync(&wq_work); -#endif -} - -#ifndef CONFIG_MALI_DVFS -static u32 enable_one_core(void) -{ - scalingdbg(2, "meson: one more pp, curent has %d pp cores\n", num_cores_enabled + 1); - return set_mali_rt_clkpp(currentStep, num_cores_enabled + 1, 0); -} - -static u32 disable_one_core(void) -{ - scalingdbg(2, "meson: disable one pp, current has %d pp cores\n", num_cores_enabled - 1); - return set_mali_rt_clkpp(currentStep, num_cores_enabled - 1, 0); -} - -static u32 enable_max_num_cores(void) -{ - return set_mali_rt_clkpp(currentStep, pmali_plat->scale_info.maxpp, 0); -} - -static u32 enable_pp_cores(u32 val) -{ - scalingdbg(2, "meson: enable %d pp cores\n", val); - return set_mali_rt_clkpp(currentStep, val, 0); -} -#endif - -int mali_core_scaling_init(mali_plat_info_t *mali_plat) -{ -#ifndef CONFIG_MALI_DVFS - if (mali_plat == NULL) { - scalingdbg(2, " Mali platform data is NULL!!!\n"); - return -1; - } - - pmali_plat = mali_plat; - printk("mali_plat=%p\n", mali_plat); - num_cores_enabled = pmali_plat->sc_mpp; - gp_pll_user_gpu = gp_pll_user_register("gpu", 1, - gp_pll_user_cb_gpu); - //not get the gp pll, do need put - is_gp_pll_get = 0; - is_gp_pll_put = 0; - if (gp_pll_user_gpu == NULL) printk("register gp pll user for gpu failed\n"); - - currentStep = pmali_plat->def_clock; - lastStep = currentStep; - INIT_WORK(&wq_work, do_scaling); -#endif - return 0; - /* NOTE: Mali is not fully initialized at this point. 
*/ -} - -void mali_core_scaling_term(void) -{ -#ifndef CONFIG_MALI_DVFS - flush_scheduled_work(); - gp_pll_user_unregister(gp_pll_user_gpu); -#endif -} - -#ifndef CONFIG_MALI_DVFS -static u32 mali_threshold [] = { - 40, /* 40% */ - 50, /* 50% */ - 90, /* 90% */ -}; -#endif - -void mali_pp_scaling_update(int utilization_pp) -{ -#ifndef CONFIG_MALI_DVFS - int ret = 0; - - if (mali_threshold[2] < utilization_pp) - ret = enable_max_num_cores(); - else if (mali_threshold[1]< utilization_pp) - ret = enable_one_core(); - else if (0 < utilization_pp) - ret = disable_one_core(); - if (ret == 1) - schedule_work(&wq_work); -#endif -} - -#if LOG_MALI_SCALING -void trace_utilization(int utilization_gpu, u32 current_idx, u32 next, - u32 current_pp, u32 next_pp) -{ - char direction; - if (next > current_idx) - direction = '>'; - else if ((current_idx > pmali_plat->scale_info.minpp) && (next < current_idx)) - direction = '<'; - else - direction = '~'; - - scalingdbg(2, "[SCALING]%c (%3d-->%3d)@%3d{%3d - %3d}. pp:(%d-->%d)\n", - direction, - get_mali_freq(current_idx), - get_mali_freq(next), - utilization_gpu, - pmali_plat->dvfs_table[current_idx].downthreshold, - pmali_plat->dvfs_table[current_idx].upthreshold, - current_pp, next_pp); -} -#endif - -#ifndef CONFIG_MALI_DVFS -static int mali_stay_count = 0; -static void mali_decide_next_status(int utilization_pp, int* next_fs_idx, - int* pp_change_flag) -{ - u32 mali_up_limit, decided_fs_idx; - u32 ld_left, ld_right; - u32 ld_up, ld_down; - u32 change_mode; - - *pp_change_flag = 0; - change_mode = 0; - - scalingdbg(5, "line(%d), scaling_mode=%d, MALI_TURBO_MODE=%d, turbo=%d, maxclk=%d\n", - __LINE__, scaling_mode, MALI_TURBO_MODE, - pmali_plat->turbo_clock, pmali_plat->scale_info.maxclk); - - mali_up_limit = (scaling_mode == MALI_TURBO_MODE) ? 
- pmali_plat->turbo_clock : pmali_plat->scale_info.maxclk; - decided_fs_idx = currentStep; - - ld_up = pmali_plat->dvfs_table[currentStep].upthreshold; - ld_down = pmali_plat->dvfs_table[currentStep].downthreshold; - - scalingdbg(2, "utilization=%d, ld_up=%d\n ", utilization_pp, ld_up); - if (utilization_pp >= ld_up) { /* go up */ - - scalingdbg(2, "currentStep=%d, mali_up_limit=%d\n ", currentStep, mali_up_limit); - if (currentStep < mali_up_limit) { - change_mode = 1; - if ((currentStep < pmali_plat->def_clock) && (utilization_pp > pmali_plat->bst_gpu)) - decided_fs_idx = pmali_plat->def_clock; - else - decided_fs_idx++; - } - if ((utilization_pp >= ld_up) && - (num_cores_enabled < pmali_plat->scale_info.maxpp)) { - if ((num_cores_enabled < pmali_plat->sc_mpp) && (utilization_pp >= pmali_plat->bst_pp)) { - *pp_change_flag = 1; - change_mode = 1; - } else if (change_mode == 0) { - *pp_change_flag = 2; - change_mode = 1; - } - } -#if LOG_MALI_SCALING - scalingdbg(2, "[nexting..] [LD:%d]-> FS[CRNT:%d LMT:%d NEXT:%d] PP[NUM:%d LMT:%d MD:%d][F:%d]\n", - utilization_pp, currentStep, mali_up_limit, decided_fs_idx, - num_cores_enabled, pmali_plat->scale_info.maxpp, *pp_change_flag, change_mode); -#endif - } else if (utilization_pp <= ld_down) { /* go down */ - if (mali_stay_count > 0) { - *next_fs_idx = decided_fs_idx; - mali_stay_count--; - return; - } - - if (num_cores_enabled > pmali_plat->sc_mpp) { - change_mode = 1; - if (utilization_pp <= ld_down) { - ld_left = utilization_pp * num_cores_enabled; - ld_right = (pmali_plat->dvfs_table[currentStep].upthreshold) * - (num_cores_enabled - 1); - if (ld_left < ld_right) { - change_mode = 2; - } - } - } else if (currentStep > pmali_plat->scale_info.minclk) { - change_mode = 1; - } else if (num_cores_enabled > 1) { /* decrease PPS */ - if (utilization_pp <= ld_down) { - ld_left = utilization_pp * num_cores_enabled; - ld_right = (pmali_plat->dvfs_table[currentStep].upthreshold) * - (num_cores_enabled - 1); - scalingdbg(2, 
"ld_left=%d, ld_right=%d\n", ld_left, ld_right); - if (ld_left < ld_right) { - change_mode = 2; - } - } - } - - if (change_mode == 1) { - decided_fs_idx--; - } else if (change_mode == 2) { /* decrease PPS */ - *pp_change_flag = -1; - } - } - - if (decided_fs_idx < 0 ) { - printk("gpu debug, next index below 0\n"); - decided_fs_idx = 0; - } - if (decided_fs_idx > pmali_plat->scale_info.maxclk) { - decided_fs_idx = pmali_plat->scale_info.maxclk; - printk("gpu debug, next index above max-1, set to %d\n", decided_fs_idx); - } - - if (change_mode) - mali_stay_count = pmali_plat->dvfs_table[decided_fs_idx].keep_count; - - *next_fs_idx = decided_fs_idx; -} -#endif - -void mali_pp_fs_scaling_update(int utilization_pp) -{ -#ifndef CONFIG_MALI_DVFS - int ret = 0; - int pp_change_flag = 0; - u32 next_idx = 0; - -#if LOG_MALI_SCALING - u32 last_pp = num_cores_enabled; -#endif - mali_decide_next_status(utilization_pp, &next_idx, &pp_change_flag); - - if (pp_change_flag == 1) - ret = enable_pp_cores(pmali_plat->sc_mpp); - else if (pp_change_flag == 2) - ret = enable_one_core(); - else if (pp_change_flag == -1) { - ret = disable_one_core(); - } - -#if LOG_MALI_SCALING - if (pp_change_flag || (next_idx != currentStep)) - trace_utilization(utilization_pp, currentStep, next_idx, last_pp, num_cores_enabled); -#endif - - if (next_idx != currentStep) { - ret = 1; - currentStep = next_idx; - } - - if (ret == 1) - schedule_work(&wq_work); -#ifdef CONFIG_MALI400_PROFILING - else - _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | - MALI_PROFILING_EVENT_CHANNEL_GPU | - MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, - get_current_frequency(), - 0, 0, 0, 0); -#endif -#endif -} - -u32 get_mali_schel_mode(void) -{ - return scaling_mode; -} - -void set_mali_schel_mode(u32 mode) -{ -#ifndef CONFIG_MALI_DVFS - if (mode >= MALI_SCALING_MODE_MAX) - return; - scaling_mode = mode; - - //disable thermal in turbo mode - if (scaling_mode == MALI_TURBO_MODE) { - 
pmali_plat->limit_on = 0; - } else { - pmali_plat->limit_on = 1; - } - /* set default performance range. */ - pmali_plat->scale_info.minclk = pmali_plat->cfg_min_clock; - pmali_plat->scale_info.maxclk = pmali_plat->cfg_clock; - pmali_plat->scale_info.minpp = pmali_plat->cfg_min_pp; - pmali_plat->scale_info.maxpp = pmali_plat->cfg_pp; - - /* set current status and tune max freq */ - if (scaling_mode == MALI_PP_FS_SCALING) { - pmali_plat->scale_info.maxclk = pmali_plat->cfg_clock; - enable_pp_cores(pmali_plat->sc_mpp); - } else if (scaling_mode == MALI_SCALING_DISABLE) { - pmali_plat->scale_info.maxclk = pmali_plat->cfg_clock; - enable_max_num_cores(); - } else if (scaling_mode == MALI_TURBO_MODE) { - pmali_plat->scale_info.maxclk = pmali_plat->turbo_clock; - enable_max_num_cores(); - } - currentStep = pmali_plat->scale_info.maxclk; - schedule_work(&wq_work); -#endif -} - -u32 get_current_frequency(void) -{ - return get_mali_freq(currentStep); -} - -void mali_gpu_utilization_callback(int utilization_pp) -{ -#ifndef CONFIG_MALI_DVFS - if (mali_pm_statue) - return; - - switch (scaling_mode) { - case MALI_PP_FS_SCALING: - mali_pp_fs_scaling_update(utilization_pp); - break; - case MALI_PP_SCALING: - mali_pp_scaling_update(utilization_pp); - break; - default: - break; - } -#endif -} -static u32 clk_cntl_save = 0; -void mali_dev_freeze(void) -{ - clk_cntl_save = mplt_read(HHI_MALI_CLK_CNTL); -} - -void mali_dev_restore(void) -{ - - mplt_write(HHI_MALI_CLK_CNTL, clk_cntl_save); - if (pmali_plat && pmali_plat->pdev) { - mali_clock_init_clk_tree(pmali_plat->pdev); - } else { - printk("error: init clock failed, pmali_plat=%p, pmali_plat->pdev=%p\n", - pmali_plat, pmali_plat == NULL ? 
NULL: pmali_plat->pdev); - } -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/juno_soc/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/juno_soc/Kbuild deleted file mode 100755 index 5b6ef37bb714e..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/juno_soc/Kbuild +++ /dev/null @@ -1,19 +0,0 @@ -# -# (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - -obj-y += mali_kbase_config_juno_soc.o - - -obj-m += juno_mali_opp.o diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/juno_soc/juno_mali_opp.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/juno_soc/juno_mali_opp.c deleted file mode 100755 index ccfd8ccb01247..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/juno_soc/juno_mali_opp.c +++ /dev/null @@ -1,84 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#include -#include -#include -#include -#include -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) -#include -#else /* Linux >= 3.13 */ -/* In 3.13 the OPP include header file, types, and functions were all - * renamed. Use the old filename for the include, and define the new names to - * the old, when an old kernel is detected. - */ -#include -#define dev_pm_opp_add opp_add -#endif /* Linux >= 3.13 */ - - -static int init_juno_opps_from_scpi(struct device *dev) -{ - struct scpi_opp *sopp; - int i; - - /* Hard coded for Juno. 2 is GPU domain */ - sopp = scpi_dvfs_get_opps(2); - if (IS_ERR_OR_NULL(sopp)) - return PTR_ERR(sopp); - - for (i = 0; i < sopp->count; i++) { - struct scpi_opp_entry *e = &sopp->opp[i]; - - dev_info(dev, "Mali OPP from SCPI: %u Hz @ %u mV\n", - e->freq_hz, e->volt_mv); - - dev_pm_opp_add(dev, e->freq_hz, e->volt_mv * 1000); - } - - return 0; -} - -static int juno_setup_opps(void) -{ - struct device_node *np; - struct platform_device *pdev; - int err; - - np = of_find_node_by_name(NULL, "gpu"); - if (!np) { - pr_err("Failed to find DT entry for Mali\n"); - return -EFAULT; - } - - pdev = of_find_device_by_node(np); - if (!pdev) { - pr_err("Failed to find device for Mali\n"); - of_node_put(np); - return -EFAULT; - } - - err = init_juno_opps_from_scpi(&pdev->dev); - - of_node_put(np); - - return err; -} - -module_init(juno_setup_opps); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/juno_soc/mali_kbase_config_juno_soc.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/juno_soc/mali_kbase_config_juno_soc.c deleted file mode 100755 index 82669510e941b..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/juno_soc/mali_kbase_config_juno_soc.c +++ /dev/null @@ -1,199 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#include -#ifdef CONFIG_DEVFREQ_THERMAL -#include -#endif -#include -#include -#include -#include -#include - -/* Versatile Express (VE) Juno Development Platform */ - -#define HARD_RESET_AT_POWER_OFF 0 - -#ifndef CONFIG_OF -static struct kbase_io_resources io_resources = { - .job_irq_number = 65, - .mmu_irq_number = 66, - .gpu_irq_number = 64, - .io_memory_region = { - .start = 0x2D000000, - .end = 0x2D000000 + (4096 * 4) - 1} -}; -#endif - -static int pm_callback_power_on(struct kbase_device *kbdev) -{ - /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */ - return 1; -} - -static void pm_callback_power_off(struct kbase_device *kbdev) -{ -#if HARD_RESET_AT_POWER_OFF - /* Cause a GPU hard reset to test whether we have actually idled the GPU - * and that we properly reconfigure the GPU on power up. - * Usually this would be dangerous, but if the GPU is working correctly it should - * be completely safe as the GPU should not be active at this point. - * However this is disabled normally because it will most likely interfere with - * bus logging etc. 
- */ - KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0); - kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET); -#endif -} - -struct kbase_pm_callback_conf pm_callbacks = { - .power_on_callback = pm_callback_power_on, - .power_off_callback = pm_callback_power_off, - .power_suspend_callback = NULL, - .power_resume_callback = NULL -}; - -#ifdef CONFIG_DEVFREQ_THERMAL - -#define FALLBACK_STATIC_TEMPERATURE 55000 - -static unsigned long juno_model_static_power(unsigned long voltage) -{ - struct thermal_zone_device *tz; - unsigned long temperature, temp; - unsigned long temp_squared, temp_cubed, temp_scaling_factor; - const unsigned long coefficient = (410UL << 20) / (729000000UL >> 10); - const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10; - - tz = thermal_zone_get_zone_by_name("gpu"); - if (IS_ERR(tz)) { - pr_warn_ratelimited("Error getting gpu thermal zone (%ld), not yet ready?\n", - PTR_ERR(tz)); - temperature = FALLBACK_STATIC_TEMPERATURE; - } else { - int ret; - - ret = tz->ops->get_temp(tz, &temperature); - if (ret) { - pr_warn_ratelimited("Error reading temperature for gpu thermal zone: %d\n", - ret); - temperature = FALLBACK_STATIC_TEMPERATURE; - } - } - - /* Calculate the temperature scaling factor. To be applied to the - * voltage scaled power. - */ - temp = temperature / 1000; - temp_squared = temp * temp; - temp_cubed = temp_squared * temp; - temp_scaling_factor = - (2 * temp_cubed) - - (80 * temp_squared) - + (4700 * temp) - + 32000; - - return (((coefficient * voltage_cubed) >> 20) - * temp_scaling_factor) - / 1000000; -} - -static unsigned long juno_model_dynamic_power(unsigned long freq, - unsigned long voltage) -{ - /* The inputs: freq (f) is in Hz, and voltage (v) in mV. - * The coefficient (c) is in mW/(MHz mV mV). 
- * - * This function calculates the dynamic power after this formula: - * Pdyn (mW) = c (mW/(MHz*mV*mV)) * v (mV) * v (mV) * f (MHz) - */ - const unsigned long v2 = (voltage * voltage) / 1000; /* m*(V*V) */ - const unsigned long f_mhz = freq / 1000000; /* MHz */ - const unsigned long coefficient = 3600; /* mW/(MHz*mV*mV) */ - - return (coefficient * v2 * f_mhz) / 1000000; /* mW */ -} - -struct devfreq_cooling_ops juno_model_ops = { - .get_static_power = juno_model_static_power, - .get_dynamic_power = juno_model_dynamic_power, -}; - -#endif /* CONFIG_DEVFREQ_THERMAL */ - -/* - * Juno Secure Mode integration - */ - -/* SMC Function Numbers */ -#define JUNO_SMC_SECURE_ENABLE_FUNC 0xff06 -#define JUNO_SMC_SECURE_DISABLE_FUNC 0xff07 - -static int juno_secure_mode_enable(struct kbase_device *kbdev) -{ - u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; - - if (gpu_id == GPU_ID_MAKE(GPU_ID_PI_T62X, 0, 1, 0) && - kbdev->reg_start == 0x2d000000) { - /* T62X in SoC detected */ - u64 ret = kbase_invoke_smc(SMC_OEN_SIP, - JUNO_SMC_SECURE_ENABLE_FUNC, false, - 0, 0, 0); - return ret; - } - - return -EINVAL; /* Not supported */ -} - -static int juno_secure_mode_disable(struct kbase_device *kbdev) -{ - u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; - - if (gpu_id == GPU_ID_MAKE(GPU_ID_PI_T62X, 0, 1, 0) && - kbdev->reg_start == 0x2d000000) { - /* T62X in SoC detected */ - u64 ret = kbase_invoke_smc(SMC_OEN_SIP, - JUNO_SMC_SECURE_DISABLE_FUNC, false, - 0, 0, 0); - return ret; - } - - return -EINVAL; /* Not supported */ -} - -struct kbase_secure_ops juno_secure_ops = { - .secure_mode_enable = juno_secure_mode_enable, - .secure_mode_disable = juno_secure_mode_disable, -}; - -static struct kbase_platform_config versatile_platform_config = { -#ifndef CONFIG_OF - .io_resources = &io_resources -#endif -}; - -struct kbase_platform_config *kbase_get_platform_config(void) -{ - return &versatile_platform_config; -} - -int kbase_platform_early_init(void) -{ - /* Nothing needed at 
this stage */ - return 0; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/juno_soc/mali_kbase_config_platform.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/juno_soc/mali_kbase_config_platform.h deleted file mode 100755 index fa5e9e9a5b112..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/juno_soc/mali_kbase_config_platform.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * Maximum frequency GPU will be clocked at. Given in kHz. - * This must be specified as there is no default value. - * - * Attached value: number in kHz - * Default value: NA - */ -#define GPU_FREQ_KHZ_MAX 600000 -/** - * Minimum frequency GPU will be clocked at. Given in kHz. - * This must be specified as there is no default value. - * - * Attached value: number in kHz - * Default value: NA - */ -#define GPU_FREQ_KHZ_MIN 600000 - -/** - * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock - * - * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func - * for the function prototype. - * - * Attached value: A kbase_cpu_clk_speed_func. - * Default Value: NA - */ -#define CPU_SPEED_FUNC (&kbase_cpuprops_get_default_clock_speed) - -/** - * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock - * - * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func - * for the function prototype. - * - * Attached value: A kbase_gpu_clk_speed_func. 
- * Default Value: NA - */ -#define GPU_SPEED_FUNC (NULL) - -/** - * Power management configuration - * - * Attached value: pointer to @ref kbase_pm_callback_conf - * Default value: See @ref kbase_pm_callback_conf - */ -#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks) - -/** - * Platform specific configuration functions - * - * Attached value: pointer to @ref kbase_platform_funcs_conf - * Default value: See @ref kbase_platform_funcs_conf - */ -#define PLATFORM_FUNCS (NULL) - -/** Power model for IPA - * - * Attached value: pointer to @ref mali_pa_model_ops - */ -#ifdef CONFIG_DEVFREQ_THERMAL -#define POWER_MODEL_CALLBACKS (&juno_model_ops) -#else -#define POWER_MODEL_CALLBACKS (NULL) -#endif - -/** - * Secure mode switch - * - * Attached value: pointer to @ref kbase_secure_ops - */ -#define SECURE_CALLBACKS (&juno_secure_ops) - -extern struct kbase_pm_callback_conf pm_callbacks; -#ifdef CONFIG_DEVFREQ_THERMAL -extern struct devfreq_cooling_ops juno_model_ops; -#endif -extern struct kbase_secure_ops juno_secure_ops; diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/mali_kbase_platform_common.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/mali_kbase_platform_common.h deleted file mode 100755 index 7cb3be7f78cef..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/mali_kbase_platform_common.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/** - * @brief Entry point to transfer control to a platform for early initialization - * - * This function is called early on in the initialization during execution of - * @ref kbase_driver_init. - * - * @return Zero to indicate success non-zero for failure. - */ -int kbase_platform_early_init(void); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/mali_kbase_platform_fake.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/mali_kbase_platform_fake.h deleted file mode 100755 index 01f9dfce93cc0..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/mali_kbase_platform_fake.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifdef CONFIG_MALI_PLATFORM_FAKE - -/** - * kbase_platform_fake_register - Entry point for fake platform registration - * - * This function is called early on in the initialization during execution of - * kbase_driver_init. - * - * Return: 0 to indicate success, non-zero for failure. - */ -int kbase_platform_fake_register(void); - -/** - * kbase_platform_fake_unregister - Entry point for fake platform unregistration - * - * This function is called in the termination during execution of - * kbase_driver_exit. 
- */ -void kbase_platform_fake_unregister(void); - -#endif /* CONFIG_MALI_PLATFORM_FAKE */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/Kbuild deleted file mode 100755 index 084a156134363..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/Kbuild +++ /dev/null @@ -1,18 +0,0 @@ -# -# (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -obj-y += mali_kbase_config_vexpress.o -obj-y += mali_kbase_cpu_vexpress.o diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h deleted file mode 100755 index ac5060af6a7e6..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#include "mali_kbase_cpu_vexpress.h" - -/** - * Maximum frequency GPU will be clocked at. Given in kHz. - * This must be specified as there is no default value. - * - * Attached value: number in kHz - * Default value: NA - */ -#define GPU_FREQ_KHZ_MAX (5000) -/** - * Minimum frequency GPU will be clocked at. Given in kHz. - * This must be specified as there is no default value. - * - * Attached value: number in kHz - * Default value: NA - */ -#define GPU_FREQ_KHZ_MIN (5000) - -/** - * Values used for determining the GPU frequency based on the LogicTile type - * Used by the function kbase_get_platform_logic_tile_type - */ -#define VE_VIRTEX6_GPU_FREQ_MIN 5000 -#define VE_VIRTEX6_GPU_FREQ_MAX 5000 -#define VE_VIRTEX7_GPU_FREQ_MIN 40000 -#define VE_VIRTEX7_GPU_FREQ_MAX 40000 - -/** - * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock - * - * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func - * for the function prototype. - * - * Attached value: A kbase_cpu_clk_speed_func. - * Default Value: NA - */ -#define CPU_SPEED_FUNC (&kbase_get_vexpress_cpu_clock_speed) - -/** - * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock - * - * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func - * for the function prototype. - * - * Attached value: A kbase_gpu_clk_speed_func. 
- * Default Value: NA - */ -#define GPU_SPEED_FUNC (NULL) - -/** - * Power management configuration - * - * Attached value: pointer to @ref kbase_pm_callback_conf - * Default value: See @ref kbase_pm_callback_conf - */ -#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks) - -/** - * Platform specific configuration functions - * - * Attached value: pointer to @ref kbase_platform_funcs_conf - * Default value: See @ref kbase_platform_funcs_conf - */ -#define PLATFORM_FUNCS (NULL) - -/** Power model for IPA - * - * Attached value: pointer to @ref mali_pa_model_ops - */ -#define POWER_MODEL_CALLBACKS (NULL) - -/** - * Secure mode switch - * - * Attached value: pointer to @ref kbase_secure_ops - */ -#define SECURE_CALLBACKS (NULL) - -extern struct kbase_pm_callback_conf pm_callbacks; diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_vexpress.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_vexpress.c deleted file mode 100755 index 687b1a8c04319..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_vexpress.c +++ /dev/null @@ -1,85 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#include -#include -#include -#include -#include "mali_kbase_cpu_vexpress.h" -#include "mali_kbase_config_platform.h" - -#define HARD_RESET_AT_POWER_OFF 0 - -#ifndef CONFIG_OF -static struct kbase_io_resources io_resources = { - .job_irq_number = 68, - .mmu_irq_number = 69, - .gpu_irq_number = 70, - .io_memory_region = { - .start = 0xFC010000, - .end = 0xFC010000 + (4096 * 4) - 1 - } -}; -#endif /* CONFIG_OF */ - -static int pm_callback_power_on(struct kbase_device *kbdev) -{ - /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */ - return 1; -} - -static void pm_callback_power_off(struct kbase_device *kbdev) -{ -#if HARD_RESET_AT_POWER_OFF - /* Cause a GPU hard reset to test whether we have actually idled the GPU - * and that we properly reconfigure the GPU on power up. - * Usually this would be dangerous, but if the GPU is working correctly it should - * be completely safe as the GPU should not be active at this point. - * However this is disabled normally because it will most likely interfere with - * bus logging etc. 
- */ - KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0); - kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET); -#endif -} - -struct kbase_pm_callback_conf pm_callbacks = { - .power_on_callback = pm_callback_power_on, - .power_off_callback = pm_callback_power_off, - .power_suspend_callback = NULL, - .power_resume_callback = NULL -}; - -static struct kbase_platform_config versatile_platform_config = { -#ifndef CONFIG_OF - .io_resources = &io_resources -#endif -}; - -struct kbase_platform_config *kbase_get_platform_config(void) -{ - return &versatile_platform_config; -} - - -int kbase_platform_early_init(void) -{ - /* Nothing needed at this stage */ - return 0; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_cpu_vexpress.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_cpu_vexpress.c deleted file mode 100755 index 9bc51f1e2da82..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_cpu_vexpress.c +++ /dev/null @@ -1,210 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#include -#include -#include "mali_kbase_cpu_vexpress.h" - -#define HZ_IN_MHZ (1000000) - -#define CORETILE_EXPRESS_A9X4_SCC_START (0x100E2000) -#define MOTHERBOARD_SYS_CFG_START (0x10000000) -#define SYS_CFGDATA_OFFSET (0x000000A0) -#define SYS_CFGCTRL_OFFSET (0x000000A4) -#define SYS_CFGSTAT_OFFSET (0x000000A8) - -#define SYS_CFGCTRL_START_BIT_VALUE (1 << 31) -#define READ_REG_BIT_VALUE (0 << 30) -#define DCC_DEFAULT_BIT_VALUE (0 << 26) -#define SYS_CFG_OSC_FUNC_BIT_VALUE (1 << 20) -#define SITE_DEFAULT_BIT_VALUE (1 << 16) -#define BOARD_STACK_POS_DEFAULT_BIT_VALUE (0 << 12) -#define DEVICE_DEFAULT_BIT_VALUE (2 << 0) -#define SYS_CFG_COMPLETE_BIT_VALUE (1 << 0) -#define SYS_CFG_ERROR_BIT_VALUE (1 << 1) - -#define FEED_REG_BIT_MASK (0x0F) -#define FCLK_PA_DIVIDE_BIT_SHIFT (0x03) -#define FCLK_PB_DIVIDE_BIT_SHIFT (0x07) -#define FCLK_PC_DIVIDE_BIT_SHIFT (0x0B) -#define AXICLK_PA_DIVIDE_BIT_SHIFT (0x0F) -#define AXICLK_PB_DIVIDE_BIT_SHIFT (0x13) - -/* the following three values used for reading - * HBI value of the LogicTile daughterboard */ -#define VE_MOTHERBOARD_PERIPHERALS_SMB_CS7 (0x10000000) -#define VE_SYS_PROC_ID1_OFFSET (0x00000088) -#define VE_LOGIC_TILE_HBI_MASK (0x00000FFF) - -#define IS_SINGLE_BIT_SET(val, pos) (val&(1<> - FCLK_PA_DIVIDE_BIT_SHIFT); - /* CFGRW0[10:7] */ - pb_divide = ((reg_val & (FEED_REG_BIT_MASK << - FCLK_PB_DIVIDE_BIT_SHIFT)) >> - FCLK_PB_DIVIDE_BIT_SHIFT); - *cpu_clock = osc2_value * (pa_divide + 1) / (pb_divide + 1); - } else if (IS_SINGLE_BIT_SET(reg_val, 1)) { - /* CFGRW0[1] - CLKOC */ - /* CFGRW0[6:3] */ - pa_divide = ((reg_val & (FEED_REG_BIT_MASK << - FCLK_PA_DIVIDE_BIT_SHIFT)) >> - FCLK_PA_DIVIDE_BIT_SHIFT); - /* CFGRW0[14:11] */ - pc_divide = ((reg_val & (FEED_REG_BIT_MASK << - FCLK_PC_DIVIDE_BIT_SHIFT)) >> - FCLK_PC_DIVIDE_BIT_SHIFT); - *cpu_clock = osc2_value * (pa_divide + 1) / (pc_divide + 1); - } else if (IS_SINGLE_BIT_SET(reg_val, 2)) { - /* CFGRW0[2] - FACLK */ - /* CFGRW0[18:15] */ - pa_divide = 
((reg_val & (FEED_REG_BIT_MASK << - AXICLK_PA_DIVIDE_BIT_SHIFT)) >> - AXICLK_PA_DIVIDE_BIT_SHIFT); - /* CFGRW0[22:19] */ - pb_divide = ((reg_val & (FEED_REG_BIT_MASK << - AXICLK_PB_DIVIDE_BIT_SHIFT)) >> - AXICLK_PB_DIVIDE_BIT_SHIFT); - *cpu_clock = osc2_value * (pa_divide + 1) / (pb_divide + 1); - } else { - err = -EIO; - } - -set_reg_error: -ongoing_request: - raw_spin_unlock(&syscfg_lock); - *cpu_clock /= HZ_IN_MHZ; - - if (!err) - cpu_clock_speed = *cpu_clock; - - iounmap(scc_reg); - -scc_reg_map_failed: - iounmap(syscfg_reg); - -syscfg_reg_map_failed: - - return err; -} - -u32 kbase_get_platform_logic_tile_type(void) -{ - void __iomem *syscfg_reg = NULL; - u32 sys_procid1 = 0; - - syscfg_reg = ioremap(VE_MOTHERBOARD_PERIPHERALS_SMB_CS7 + VE_SYS_PROC_ID1_OFFSET, 4); - - sys_procid1 = (NULL != syscfg_reg) ? readl(syscfg_reg) : 0; - - return sys_procid1 & VE_LOGIC_TILE_HBI_MASK; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_cpu_vexpress.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_cpu_vexpress.h deleted file mode 100755 index ef9bfd7216189..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_cpu_vexpress.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef _KBASE_CPU_VEXPRESS_H_ -#define _KBASE_CPU_VEXPRESS_H_ - -/** - * Versatile Express implementation of @ref kbase_cpu_clk_speed_func. 
- */ -int kbase_get_vexpress_cpu_clock_speed(u32 *cpu_clock); - -/** - * kbase_get_platform_logic_tile_type - determines which LogicTile type - * is used by Versatile Express - * - * When platform_config build parameter is specified as vexpress, i.e., - * platform_config=vexpress, GPU frequency may vary dependent on the - * particular platform. The GPU frequency depends on the LogicTile type. - * - * This function is called by kbase_common_device_init to determine - * which LogicTile type is used by the platform by reading the HBI value - * of the daughterboard which holds the LogicTile: - * - * 0x192 HBI0192 Virtex-5 - * 0x217 HBI0217 Virtex-6 - * 0x247 HBI0247 Virtex-7 - * - * Return: HBI value of the logic tile daughterboard, zero if not accessible - */ -u32 kbase_get_platform_logic_tile_type(void); - -#endif /* _KBASE_CPU_VEXPRESS_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild deleted file mode 100755 index d9bfabc28b6bc..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild +++ /dev/null @@ -1,16 +0,0 @@ -# -# (C) COPYRIGHT 2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - -obj-y += mali_kbase_config_vexpress.o diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h deleted file mode 100755 index 11c320ecc1f75..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * Maximum frequency GPU will be clocked at. Given in kHz. - * This must be specified as there is no default value. - * - * Attached value: number in kHz - * Default value: NA - */ -#define GPU_FREQ_KHZ_MAX 5000 -/** - * Minimum frequency GPU will be clocked at. Given in kHz. - * This must be specified as there is no default value. - * - * Attached value: number in kHz - * Default value: NA - */ -#define GPU_FREQ_KHZ_MIN 5000 - -/** - * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock - * - * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func - * for the function prototype. - * - * Attached value: A kbase_cpu_clk_speed_func. - * Default Value: NA - */ -#define CPU_SPEED_FUNC (&kbase_cpuprops_get_default_clock_speed) - -/** - * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock - * - * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func - * for the function prototype. 
- * - * Attached value: A kbase_gpu_clk_speed_func. - * Default Value: NA - */ -#define GPU_SPEED_FUNC (NULL) - -/** - * Power management configuration - * - * Attached value: pointer to @ref kbase_pm_callback_conf - * Default value: See @ref kbase_pm_callback_conf - */ -#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks) - -/** - * Platform specific configuration functions - * - * Attached value: pointer to @ref kbase_platform_funcs_conf - * Default value: See @ref kbase_platform_funcs_conf - */ -#define PLATFORM_FUNCS (NULL) - -/** Power model for IPA - * - * Attached value: pointer to @ref mali_pa_model_ops - */ -#define POWER_MODEL_CALLBACKS (NULL) - -/** - * Secure mode switch - * - * Attached value: pointer to @ref kbase_secure_ops - */ -#define SECURE_CALLBACKS (NULL) - -extern struct kbase_pm_callback_conf pm_callbacks; diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c deleted file mode 100755 index 3ff0930fb4a3c..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c +++ /dev/null @@ -1,79 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -#include -#include -#include -#include - -#define HARD_RESET_AT_POWER_OFF 0 - -#ifndef CONFIG_OF -static struct kbase_io_resources io_resources = { - .job_irq_number = 68, - .mmu_irq_number = 69, - .gpu_irq_number = 70, - .io_memory_region = { - .start = 0x2f010000, - .end = 0x2f010000 + (4096 * 4) - 1} -}; -#endif - -static int pm_callback_power_on(struct kbase_device *kbdev) -{ - /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */ - return 1; -} - -static void pm_callback_power_off(struct kbase_device *kbdev) -{ -#if HARD_RESET_AT_POWER_OFF - /* Cause a GPU hard reset to test whether we have actually idled the GPU - * and that we properly reconfigure the GPU on power up. - * Usually this would be dangerous, but if the GPU is working correctly it should - * be completely safe as the GPU should not be active at this point. - * However this is disabled normally because it will most likely interfere with - * bus logging etc. 
- */ - KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0); - kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET); -#endif -} - -struct kbase_pm_callback_conf pm_callbacks = { - .power_on_callback = pm_callback_power_on, - .power_off_callback = pm_callback_power_off, - .power_suspend_callback = NULL, - .power_resume_callback = NULL -}; - -static struct kbase_platform_config versatile_platform_config = { -#ifndef CONFIG_OF - .io_resources = &io_resources -#endif -}; - -struct kbase_platform_config *kbase_get_platform_config(void) -{ - return &versatile_platform_config; -} - -int kbase_platform_early_init(void) -{ - /* Nothing needed at this stage */ - return 0; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/Kbuild deleted file mode 100755 index 0cb41ce8952d4..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/Kbuild +++ /dev/null @@ -1,18 +0,0 @@ -# -# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - - -obj-y += mali_kbase_config_vexpress.o -obj-y += mali_kbase_cpu_vexpress.o diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h deleted file mode 100755 index 9bc2985fef0f0..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#include "mali_kbase_cpu_vexpress.h" - -/** - * Maximum frequency GPU will be clocked at. Given in kHz. - * This must be specified as there is no default value. - * - * Attached value: number in kHz - * Default value: NA - */ -#define GPU_FREQ_KHZ_MAX 10000 -/** - * Minimum frequency GPU will be clocked at. Given in kHz. - * This must be specified as there is no default value. - * - * Attached value: number in kHz - * Default value: NA - */ -#define GPU_FREQ_KHZ_MIN 10000 - -/** - * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock - * - * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func - * for the function prototype. - * - * Attached value: A kbase_cpu_clk_speed_func. 
- * Default Value: NA - */ -#define CPU_SPEED_FUNC (&kbase_get_vexpress_cpu_clock_speed) - -/** - * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock - * - * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func - * for the function prototype. - * - * Attached value: A kbase_gpu_clk_speed_func. - * Default Value: NA - */ -#define GPU_SPEED_FUNC (NULL) - -/** - * Power management configuration - * - * Attached value: pointer to @ref kbase_pm_callback_conf - * Default value: See @ref kbase_pm_callback_conf - */ -#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks) - -/** - * Platform specific configuration functions - * - * Attached value: pointer to @ref kbase_platform_funcs_conf - * Default value: See @ref kbase_platform_funcs_conf - */ -#define PLATFORM_FUNCS (NULL) - -/** Power model for IPA - * - * Attached value: pointer to @ref mali_pa_model_ops - */ -#define POWER_MODEL_CALLBACKS (NULL) - -/** - * Secure mode switch - * - * Attached value: pointer to @ref kbase_secure_ops - */ -#define SECURE_CALLBACKS (NULL) - -extern struct kbase_pm_callback_conf pm_callbacks; diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c deleted file mode 100755 index 76ffe4a1e59ea..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c +++ /dev/null @@ -1,83 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#include -#include -#include -#include -#include "mali_kbase_cpu_vexpress.h" - -#define HARD_RESET_AT_POWER_OFF 0 - -#ifndef CONFIG_OF -static struct kbase_io_resources io_resources = { - .job_irq_number = 75, - .mmu_irq_number = 76, - .gpu_irq_number = 77, - .io_memory_region = { - .start = 0x2F000000, - .end = 0x2F000000 + (4096 * 4) - 1} -}; -#endif - -static int pm_callback_power_on(struct kbase_device *kbdev) -{ - /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */ - return 1; -} - -static void pm_callback_power_off(struct kbase_device *kbdev) -{ -#if HARD_RESET_AT_POWER_OFF - /* Cause a GPU hard reset to test whether we have actually idled the GPU - * and that we properly reconfigure the GPU on power up. - * Usually this would be dangerous, but if the GPU is working correctly it should - * be completely safe as the GPU should not be active at this point. - * However this is disabled normally because it will most likely interfere with - * bus logging etc. 
- */ - KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0); - kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET); -#endif -} - -struct kbase_pm_callback_conf pm_callbacks = { - .power_on_callback = pm_callback_power_on, - .power_off_callback = pm_callback_power_off, - .power_suspend_callback = NULL, - .power_resume_callback = NULL -}; - -static struct kbase_platform_config versatile_platform_config = { -#ifndef CONFIG_OF - .io_resources = &io_resources -#endif -}; - -struct kbase_platform_config *kbase_get_platform_config(void) -{ - return &versatile_platform_config; -} - -int kbase_platform_early_init(void) -{ - /* Nothing needed at this stage */ - return 0; -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c deleted file mode 100755 index 816dff49835f3..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c +++ /dev/null @@ -1,71 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#include -#include -#include "mali_kbase_cpu_vexpress.h" - -#define HZ_IN_MHZ (1000000) - -#define CORETILE_EXPRESS_A9X4_SCC_START (0x100E2000) -#define MOTHERBOARD_SYS_CFG_START (0x10000000) -#define SYS_CFGDATA_OFFSET (0x000000A0) -#define SYS_CFGCTRL_OFFSET (0x000000A4) -#define SYS_CFGSTAT_OFFSET (0x000000A8) - -#define SYS_CFGCTRL_START_BIT_VALUE (1 << 31) -#define READ_REG_BIT_VALUE (0 << 30) -#define DCC_DEFAULT_BIT_VALUE (0 << 26) -#define SYS_CFG_OSC_FUNC_BIT_VALUE (1 << 20) -#define SITE_DEFAULT_BIT_VALUE (1 << 16) -#define BOARD_STACK_POS_DEFAULT_BIT_VALUE (0 << 12) -#define DEVICE_DEFAULT_BIT_VALUE (2 << 0) -#define SYS_CFG_COMPLETE_BIT_VALUE (1 << 0) -#define SYS_CFG_ERROR_BIT_VALUE (1 << 1) - -#define FEED_REG_BIT_MASK (0x0F) -#define FCLK_PA_DIVIDE_BIT_SHIFT (0x03) -#define FCLK_PB_DIVIDE_BIT_SHIFT (0x07) -#define FCLK_PC_DIVIDE_BIT_SHIFT (0x0B) -#define AXICLK_PA_DIVIDE_BIT_SHIFT (0x0F) -#define AXICLK_PB_DIVIDE_BIT_SHIFT (0x13) - -#define IS_SINGLE_BIT_SET(val, pos) (val&(1< - -/** - * @addtogroup uk_api User-Kernel Interface API - * @{ - */ - -/** - * @addtogroup uk_api_kernel UKK (Kernel side) - * @{ - */ - -/** - * Internal OS specific data structure associated with each UKK session. Part - * of a ukk_session object. - */ -typedef struct ukkp_session { - int dummy; /**< No internal OS specific data at this time */ -} ukkp_session; - -/** @} end group uk_api_kernel */ - -/** @} end group uk_api */ - -#endif /* _UKK_OS_H__ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/sconscript deleted file mode 100755 index 7e41a438ac539..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/midgard/sconscript +++ /dev/null @@ -1,134 +0,0 @@ -# -# (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. 
-# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - -import os -import re -import sys -Import('env') - -if Glob('tests/sconscript'): - SConscript( 'tests/sconscript' ) - -mock_test = 0 - -if env['v'] != '1': - env['MAKECOMSTR'] = '[MAKE] ${SOURCE.dir}' - -# Fake platform is a transient solution for GPL drivers running in kernel that does not provide configuration via platform data. -# For such kernels fake_platform_device should be set to 1. For kernels providing platform data fake_platform_device should be set to 0. -if env['platform_config']=='devicetree': - fake_platform_device = 0 -else: - fake_platform_device = 1 - -# Source files required for kbase. 
-kbase_src = [Glob('#kernel/drivers/gpu/arm/midgard/*.c'), - Glob('#kernel/drivers/gpu/arm/midgard/*.c'), - Glob('#kernel/drivers/gpu/arm/midgard/backend/*/*.c'), - Glob('#kernel/drivers/gpu/arm/midgard/backend/*/*.h'), - Glob('#kernel/drivers/gpu/arm/midgard/platform/%s/*.c' % (env['platform_config'])), - Glob('#kernel/drivers/gpu/arm/midgard/*.h'), - Glob('#kernel/drivers/gpu/arm/midgard/*.h'), - Glob('#kernel/drivers/gpu/arm/midgard/Makefile', - Glob('#kernel/drivers/gpu/arm/midgard/K*')) - ] - -kbase_src += [Glob('#kernel/drivers/gpu/arm/midgard/internal/*/*.c'), - Glob('#kernel/drivers/gpu/arm/midgard/internal/*/*/*.c')] - -if Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock') and env['unit'] == '1': - kbase_src += [Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock/*.c')] - mock_test = 1 - -# we need platform config for GPL version using fake platform -if fake_platform_device==1: - # Check if we are compiling for PBX - linux_config_file = os.path.normpath(os.environ['KDIR']) + '/.config' - search_term = '^[\ ]*CONFIG_MACH_REALVIEW_PBX[\ ]*=[\ ]*y' - REALVIEW_PBX = 0 - for line in open(linux_config_file, 'r'): - if re.search(search_term, line): - REALVIEW_PBX = 1 - break - if REALVIEW_PBX == 1 and (env['platform_config'] == 'vexpress' or env['platform_config'] == 'vexpress_6xvirtex7_10mhz'): - sys.stderr.write("WARNING: Building for a PBX kernel but with platform_config=vexpress*\n") - # if the file platform config file is in the tpip directory then use that, otherwise use the default config directory - if Glob('#kernel/drivers/gpu/arm/midgard/config/tpip/*%s.c' % (env['platform_config'])): - kbase_src += Glob('#kernel/drivers/gpu/arm/midgard/config/tpip/*%s.c' % (env['platform_config'])) - else: - kbase_src += Glob('#kernel/drivers/gpu/arm/midgard/config/*%s.c' % (env['platform_config'])) - -# Note: cleaning via the Linux kernel build system does not yet work -if env.GetOption('clean') : - env.Execute(Action("make clean", '[clean] 
kbase')) - cmd = env.Command(['$STATIC_LIB_PATH/mali_kbase.ko', '$STATIC_LIB_PATH/mali_platform_fake.ko'], kbase_src, []) -else: - if env['os'] == 'android': - env['android'] = 1 - else: - env['android'] = 0 - - if env['unit'] == '1': - env['kernel_test'] = 1 - else: - env['kernel_test'] = 0 - - #Extract environment options, note the trailing spaces are important - env_options = \ - "PLATFORM=${platform} " +\ - "MALI_ERROR_INJECT_ON=${error_inject} " +\ - "MALI_ANDROID=${android} " +\ - "MALI_KERNEL_TEST_API=${kernel_test} " +\ - "MALI_UNIT_TEST=${unit} " +\ - "MALI_RELEASE_NAME=\"${mali_release_name}\" "+\ - "MALI_MOCK_TEST=%s " % mock_test +\ - "MALI_CUSTOMER_RELEASE=${release} " +\ - "MALI_INSTRUMENTATION_LEVEL=${instr} " +\ - "MALI_COVERAGE=${coverage} " +\ - "MALI_BUS_LOG=${buslog} " - - make_action_start = "cd ${SOURCE.dir} && make -j%d " % GetOption('num_jobs') - make_action_end = "%s && cp mali_kbase.ko $STATIC_LIB_PATH/mali_kbase.ko" % env.kernel_get_config_defines(fake_platform_device) - make_action = make_action_start + env_options + make_action_end - - makeAction=Action(make_action, '$MAKECOMSTR') - cmd = env.Command('$STATIC_LIB_PATH/mali_kbase.ko', kbase_src, [makeAction]) - -# Add a dependency on kds.ko. -# Only necessary when KDS is not built into the kernel. -# -if env['os'] != 'android': - linux_config_file = os.path.normpath(os.environ['KDIR']) + '/.config' - search_term = '^[\ ]*CONFIG_KDS[\ ]*=[\ ]*y' - kds_in_kernel = 0 - for line in open(linux_config_file, 'r'): - if re.search(search_term, line): - # KDS in kernel. 
- kds_in_kernel = 1 - if not kds_in_kernel: - env.Depends('$STATIC_LIB_PATH/mali_kbase.ko', '$STATIC_LIB_PATH/kds.ko') - -# need Module.symvers from ump.ko build -if int(env['ump']) == 1: - env.Depends('$STATIC_LIB_PATH/mali_kbase.ko', '$STATIC_LIB_PATH/ump.ko') - -if Glob('internal/sconsfrag'): - execfile('internal/sconsfrag') - get_internal(env) - -env.ProgTarget('kbase', cmd) - -env.AppendUnique(BASE=['cutils_linked_list']) diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/sconscript deleted file mode 100755 index 135113c664012..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/arm/sconscript +++ /dev/null @@ -1,22 +0,0 @@ -# -# (C) COPYRIGHT 2015 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - -import glob - - -SConscript('midgard/sconscript') - -if glob.glob('kmim/sconscript'): - SConscript('kmim/sconscript') diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/Kbuild b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/Kbuild deleted file mode 100755 index f10d58c70dff2..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/Kbuild +++ /dev/null @@ -1,28 +0,0 @@ -# -# (C) COPYRIGHT ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. 
-# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - -pl111_drm-y += pl111_drm_device.o \ - pl111_drm_connector.o \ - pl111_drm_crtc.o \ - pl111_drm_cursor.o \ - pl111_drm_dma_buf.o \ - pl111_drm_encoder.o \ - pl111_drm_fb.o \ - pl111_drm_gem.o \ - pl111_drm_pl111.o \ - pl111_drm_platform.o \ - pl111_drm_suspend.o \ - pl111_drm_vma.o - -obj-$(CONFIG_DRM_PL111) += pl111_drm.o diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/Kconfig deleted file mode 100755 index 60b465c56c513..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/Kconfig +++ /dev/null @@ -1,23 +0,0 @@ -# -# (C) COPYRIGHT ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - -config DRM_PL111 - tristate "DRM Support for PL111 CLCD Controller" - depends on DRM - select DRM_KMS_HELPER - select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE - help - Choose this option for DRM support for the PL111 CLCD controller. - If M is selected the module will be called pl111_drm. - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/Makefile deleted file mode 100755 index 2869f587266b9..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/Makefile +++ /dev/null @@ -1,32 +0,0 @@ -# -# (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved. 
-# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - -# linux build system bootstrap for out-of-tree module - -# default to building for the host -ARCH ?= $(shell uname -m) - -ifeq ($(KDIR),) -$(error Must specify KDIR to point to the kernel to target)) -endif - -all: pl111_drm - -pl111_drm: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../include" CONFIG_DMA_SHARED_BUFFER_USES_KDS=y CONFIG_DRM_PL111=m - -clean: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_clcd_ext.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_clcd_ext.h deleted file mode 100755 index d3e0086ff0241..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_clcd_ext.h +++ /dev/null @@ -1,95 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - -/** - * pl111_clcd_ext.h - * Extended CLCD register definitions - */ - -#ifndef PL111_CLCD_EXT_H_ -#define PL111_CLCD_EXT_H_ - -/* - * PL111 cursor register definitions not defined in the kernel's clcd header. 
- * - * TODO MIDEGL-1718: move to include/linux/amba/clcd.h - */ - -#define CLCD_CRSR_IMAGE 0x00000800 -#define CLCD_CRSR_IMAGE_MAX_WORDS 256 -#define CLCD_CRSR_IMAGE_WORDS_PER_LINE 4 -#define CLCD_CRSR_IMAGE_PIXELS_PER_WORD 16 - -#define CLCD_CRSR_LBBP_COLOR_MASK 0x00000003 -#define CLCD_CRSR_LBBP_BACKGROUND 0x0 -#define CLCD_CRSR_LBBP_FOREGROUND 0x1 -#define CLCD_CRSR_LBBP_TRANSPARENT 0x2 -#define CLCD_CRSR_LBBP_INVERSE 0x3 - - -#define CLCD_CRSR_CTRL 0x00000c00 -#define CLCD_CRSR_CONFIG 0x00000c04 -#define CLCD_CRSR_PALETTE_0 0x00000c08 -#define CLCD_CRSR_PALETTE_1 0x00000c0c -#define CLCD_CRSR_XY 0x00000c10 -#define CLCD_CRSR_CLIP 0x00000c14 -#define CLCD_CRSR_IMSC 0x00000c20 -#define CLCD_CRSR_ICR 0x00000c24 -#define CLCD_CRSR_RIS 0x00000c28 -#define CLCD_MIS 0x00000c2c - -#define CRSR_CTRL_CRSR_ON (1 << 0) -#define CRSR_CTRL_CRSR_MAX 3 -#define CRSR_CTRL_CRSR_NUM_SHIFT 4 -#define CRSR_CTRL_CRSR_NUM_MASK \ - (CRSR_CTRL_CRSR_MAX << CRSR_CTRL_CRSR_NUM_SHIFT) -#define CRSR_CTRL_CURSOR_0 0 -#define CRSR_CTRL_CURSOR_1 1 -#define CRSR_CTRL_CURSOR_2 2 -#define CRSR_CTRL_CURSOR_3 3 - -#define CRSR_CONFIG_CRSR_SIZE (1 << 0) -#define CRSR_CONFIG_CRSR_FRAME_SYNC (1 << 1) - -#define CRSR_PALETTE_RED_SHIFT 0 -#define CRSR_PALETTE_GREEN_SHIFT 8 -#define CRSR_PALETTE_BLUE_SHIFT 16 - -#define CRSR_PALETTE_RED_MASK 0x000000ff -#define CRSR_PALETTE_GREEN_MASK 0x0000ff00 -#define CRSR_PALETTE_BLUE_MASK 0x00ff0000 -#define CRSR_PALETTE_MASK (~0xff000000) - -#define CRSR_XY_MASK 0x000003ff -#define CRSR_XY_X_SHIFT 0 -#define CRSR_XY_Y_SHIFT 16 - -#define CRSR_XY_X_MASK CRSR_XY_MASK -#define CRSR_XY_Y_MASK (CRSR_XY_MASK << CRSR_XY_Y_SHIFT) - -#define CRSR_CLIP_MASK 0x3f -#define CRSR_CLIP_X_SHIFT 0 -#define CRSR_CLIP_Y_SHIFT 8 - -#define CRSR_CLIP_X_MASK CRSR_CLIP_MASK -#define CRSR_CLIP_Y_MASK (CRSR_CLIP_MASK << CRSR_CLIP_Y_SHIFT) - -#define CRSR_IMSC_CRSR_IM (1<<0) -#define CRSR_ICR_CRSR_IC (1<<0) -#define CRSR_RIS_CRSR_RIS (1<<0) -#define CRSR_MIS_CRSR_MIS (1<<0) - -#endif /* 
PL111_CLCD_EXT_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm.h deleted file mode 100755 index 64d87b60ff6db..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm.h +++ /dev/null @@ -1,270 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -#ifndef _PL111_DRM_H_ -#define _PL111_DRM_H_ - -#define DRIVER_AUTHOR "ARM Ltd." -#define DRIVER_NAME "pl111_drm" -#define DRIVER_DESC "DRM module for PL111" -#define DRIVER_LICENCE "GPL" -#define DRIVER_ALIAS "platform:pl111_drm" -#define DRIVER_DATE "20101111" -#define DRIVER_VERSION "0.2" -#define DRIVER_MAJOR 2 -#define DRIVER_MINOR 1 -#define DRIVER_PATCHLEVEL 1 - -/* - * Number of flips allowed in flight at any one time. Any more flips requested - * beyond this value will cause the caller to block until earlier flips have - * completed. - * - * For performance reasons, this must be greater than the number of buffers - * used in the rendering pipeline. Note that the rendering pipeline can contain - * different types of buffer, e.g.: - * - 2 final framebuffers - * - >2 geometry buffers for GPU use-cases - * - >2 vertex buffers for GPU use-cases - * - * For example, a system using 5 geometry buffers could have 5 flips in flight, - * and so NR_FLIPS_IN_FLIGHT_THRESHOLD must be 5 or greater. 
- * - * Whilst there may be more intermediate buffers (such as vertex/geometry) than - * final framebuffers, KDS is used to ensure that GPU rendering waits for the - * next off-screen buffer, so it doesn't overwrite an on-screen buffer and - * produce tearing. - */ - -/* - * Here, we choose a conservative value. A lower value is most likely - * suitable for GPU use-cases. - */ -#define NR_FLIPS_IN_FLIGHT_THRESHOLD 16 - -#define CLCD_IRQ_NEXTBASE_UPDATE (1u<<2) - -struct pl111_drm_flip_resource; - -struct pl111_gem_bo_dma { - dma_addr_t fb_dev_addr; - void *fb_cpu_addr; -}; - -struct pl111_gem_bo_shm { - struct page **pages; - dma_addr_t *dma_addrs; -}; - -struct pl111_gem_bo { - struct drm_gem_object gem_object; - u32 type; - union { - struct pl111_gem_bo_dma dma; - struct pl111_gem_bo_shm shm; - } backing_data; - struct sg_table *sgt; -}; - -extern struct pl111_drm_dev_private priv; - -struct pl111_drm_framebuffer { - struct drm_framebuffer fb; - struct pl111_gem_bo *bo; -}; - -struct pl111_drm_flip_resource { -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - /* This is the kds set associated to the dma_buf we want to flip */ - struct kds_resource_set *kds_res_set; -#endif - struct drm_framebuffer *fb; - struct drm_crtc *crtc; - struct list_head link; - bool page_flip; - struct drm_pending_vblank_event *event; -}; - -struct pl111_drm_crtc { - struct drm_crtc crtc; - int crtc_index; - -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - /* This protects "old_kds_res_set" and "displaying_fb" */ - spinlock_t current_displaying_lock; - /* - * When a buffer is displayed its associated kds resource - * will be obtained and stored here. Every time a buffer - * flip is completed this old kds set is released and assigned - * the kds set of the new buffer. - */ - struct kds_resource_set *old_kds_res_set; - /* - * Stores which frame buffer is currently being displayed by - * this CRTC or NULL if nothing is being displayed. 
It is used - * to tell whether we need to obtain a set of kds resources for - * exported buffer objects. - */ - struct drm_framebuffer *displaying_fb; -#endif - struct drm_display_mode *new_mode; - struct drm_display_mode *current_mode; - int last_bpp; - - /* - * This spinlock protects "update_queue", "current_update_res" - * and calls to do_flip_to_res() which updates the CLCD base - * registers. - */ - spinlock_t base_update_lock; - /* - * The resource that caused a base address update. Only one can be - * pending, hence it's != NULL if there's a pending update - */ - struct pl111_drm_flip_resource *current_update_res; - /* Queue of things waiting to update the base address */ - struct list_head update_queue; - - void (*show_framebuffer_cb)(struct pl111_drm_flip_resource *flip_res, - struct drm_framebuffer *fb); -}; - -struct pl111_drm_connector { - struct drm_connector connector; -}; - -struct pl111_drm_encoder { - struct drm_encoder encoder; -}; - -struct pl111_drm_dev_private { - struct pl111_drm_crtc *pl111_crtc; - - struct amba_device *amba_dev; - unsigned long mmio_start; - __u32 mmio_len; - void *regs; - struct clk *clk; -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - struct kds_callback kds_cb; - struct kds_callback kds_obtain_current_cb; -#endif - /* - * Number of flips that were started in show_framebuffer_on_crtc(), - * but haven't completed yet - because we do deferred flipping - */ - atomic_t nr_flips_in_flight; - wait_queue_head_t wait_for_flips; - - /* - * Used to prevent race between pl111_dma_buf_release and - * drm_gem_prime_handle_to_fd - */ - struct mutex export_dma_buf_lock; - - uint32_t number_crtcs; - - /* Cache for flip resources used to avoid kmalloc on each page flip */ - struct kmem_cache *page_flip_slab; -}; - -enum pl111_cursor_size { - CURSOR_32X32, - CURSOR_64X64 -}; - -enum pl111_cursor_sync { - CURSOR_SYNC_NONE, - CURSOR_SYNC_VSYNC -}; - - -/** - * Buffer allocation function which is more flexible than dumb_create(), - * it allows 
passing driver specific flags to control the kind of buffer - * to be allocated. - */ -int pl111_drm_gem_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); - -/****** TODO MIDEGL-1718: this should be moved to uapi/include/drm/pl111_drm.h ********/ - -/* - * Parameters for different buffer objects: - * bit [0]: backing storage - * (0 -> SHM) - * (1 -> DMA) - * bit [2:1]: kind of mapping - * (0x0 -> uncached) - * (0x1 -> write combine) - * (0x2 -> cached) - */ -#define PL111_BOT_MASK (0x7) -#define PL111_BOT_SHM (0x0 << 0) -#define PL111_BOT_DMA (0x1 << 0) -#define PL111_BOT_UNCACHED (0x0 << 1) -#define PL111_BOT_WC (0x1 << 1) -#define PL111_BOT_CACHED (0x2 << 1) - -/** - * User-desired buffer creation information structure. - * - * @size: user-desired memory allocation size. - * - this size value would be page-aligned internally. - * @flags: user request for setting memory type or cache attributes as a bit op - * - PL111_BOT_DMA / PL111_BOT_SHM - * - PL111_BOT_UNCACHED / PL111_BOT_WC / PL111_BOT_CACHED - * @handle: returned a handle to created gem object. - * - this handle will be set by gem module of kernel side. 
- */ -struct drm_pl111_gem_create { - uint32_t height; - uint32_t width; - uint32_t bpp; - uint32_t flags; - /* handle, pitch, size will be returned */ - uint32_t handle; - uint32_t pitch; - uint64_t size; -}; - -#define DRM_PL111_GEM_CREATE 0x00 - -#define DRM_IOCTL_PL111_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ - DRM_PL111_GEM_CREATE, struct drm_pl111_gem_create) -/****************************************************************************/ - -#define PL111_FB_FROM_FRAMEBUFFER(drm_fb) \ - (container_of(drm_fb, struct pl111_drm_framebuffer, fb)) - -#define PL111_BO_FROM_FRAMEBUFFER(drm_fb) \ - (container_of(drm_fb, struct pl111_drm_framebuffer, fb)->bo) - -#define PL111_BO_FROM_GEM(gem_obj) \ - container_of(gem_obj, struct pl111_gem_bo, gem_object) - -#define to_pl111_crtc(x) container_of(x, struct pl111_drm_crtc, crtc) - -#define PL111_ENCODER_FROM_ENCODER(x) \ - container_of(x, struct pl111_drm_encoder, encoder) - -#define PL111_CONNECTOR_FROM_CONNECTOR(x) \ - container_of(x, struct pl111_drm_connector, connector) - -#include "pl111_drm_funcs.h" - -#endif /* _PL111_DRM_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_connector.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_connector.c deleted file mode 100755 index c7c3a226a868b..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_connector.c +++ /dev/null @@ -1,170 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/** - * pl111_drm_connector.c - * Implementation of the connector functions for PL111 DRM - */ -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "pl111_drm.h" - - -static struct { - int w, h, type; -} pl111_drm_modes[] = { - { 640, 480, DRM_MODE_TYPE_PREFERRED}, - { 800, 600, 0}, - {1024, 768, 0}, - { -1, -1, -1} -}; - -void pl111_connector_destroy(struct drm_connector *connector) -{ - struct pl111_drm_connector *pl111_connector = - PL111_CONNECTOR_FROM_CONNECTOR(connector); - - DRM_DEBUG_KMS("DRM %s on connector=%p\n", __func__, connector); - - drm_sysfs_connector_remove(connector); - drm_connector_cleanup(connector); - kfree(pl111_connector); -} - -enum drm_connector_status pl111_connector_detect(struct drm_connector - *connector, bool force) -{ - DRM_DEBUG_KMS("DRM %s on connector=%p\n", __func__, connector); - return connector_status_connected; -} - -void pl111_connector_dpms(struct drm_connector *connector, int mode) -{ - DRM_DEBUG_KMS("DRM %s on connector=%p\n", __func__, connector); -} - -struct drm_encoder * -pl111_connector_helper_best_encoder(struct drm_connector *connector) -{ - DRM_DEBUG_KMS("DRM %s on connector=%p\n", __func__, connector); - - if (connector->encoder != NULL) { - return connector->encoder; /* Return attached encoder */ - } else { - /* - * If there is no attached encoder we choose the best candidate - * from the list. - * For PL111 there is only one encoder so we return the first - * one we find. - * Other h/w would require a suitable criterion below. - */ - struct drm_encoder *encoder = NULL; - struct drm_device *dev = connector->dev; - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - head) { - if (1) { /* criterion ? 
*/ - break; - } - } - return encoder; /* return best candidate encoder */ - } -} - -int pl111_connector_helper_get_modes(struct drm_connector *connector) -{ - int i = 0; - int count = 0; - - DRM_DEBUG_KMS("DRM %s on connector=%p\n", __func__, connector); - - while (pl111_drm_modes[i].w != -1) { - struct drm_display_mode *mode = - drm_mode_find_dmt(connector->dev, - pl111_drm_modes[i].w, - pl111_drm_modes[i].h, - 60 -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) - , false -#endif - ); - - if (mode != NULL) { - mode->type |= pl111_drm_modes[i].type; - drm_mode_probed_add(connector, mode); - count++; - } - - i++; - } - - DRM_DEBUG_KMS("found %d modes\n", count); - - return count; -} - -int pl111_connector_helper_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - DRM_DEBUG_KMS("DRM %s on connector=%p\n", __func__, connector); - return MODE_OK; -} - -const struct drm_connector_funcs connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = pl111_connector_destroy, - .detect = pl111_connector_detect, - .dpms = pl111_connector_dpms, -}; - -const struct drm_connector_helper_funcs connector_helper_funcs = { - .get_modes = pl111_connector_helper_get_modes, - .mode_valid = pl111_connector_helper_mode_valid, - .best_encoder = pl111_connector_helper_best_encoder, -}; - -struct pl111_drm_connector *pl111_connector_create(struct drm_device *dev) -{ - struct pl111_drm_connector *pl111_connector; - - pl111_connector = kzalloc(sizeof(struct pl111_drm_connector), - GFP_KERNEL); - - if (pl111_connector == NULL) { - pr_err("Failed to allocated pl111_drm_connector\n"); - return NULL; - } - - drm_connector_init(dev, &pl111_connector->connector, &connector_funcs, - DRM_MODE_CONNECTOR_DVII); - - drm_connector_helper_add(&pl111_connector->connector, - &connector_helper_funcs); - - drm_sysfs_connector_add(&pl111_connector->connector); - - return pl111_connector; -} - diff --git 
a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_crtc.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_crtc.c deleted file mode 100755 index ede07ff9fc01c..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_crtc.c +++ /dev/null @@ -1,449 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * pl111_drm_crtc.c - * Implementation of the CRTC functions for PL111 DRM - */ -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "pl111_drm.h" - -static int pl111_crtc_num; - -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) -#define export_dma_buf export_dma_buf -#else -#define export_dma_buf dma_buf -#endif - -void pl111_common_irq(struct pl111_drm_crtc *pl111_crtc) -{ - struct drm_device *dev = pl111_crtc->crtc.dev; - struct pl111_drm_flip_resource *old_flip_res; - struct pl111_gem_bo *bo; - unsigned long irq_flags; - int flips_in_flight; -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - unsigned long flags; -#endif - - spin_lock_irqsave(&pl111_crtc->base_update_lock, irq_flags); - - /* - * Cache the flip resource that caused the IRQ since it will be - * dispatched later. Early return if the IRQ isn't associated to - * a base register update. - * - * TODO MIDBASE-2790: disable IRQs when a flip is not pending. 
- */ - old_flip_res = pl111_crtc->current_update_res; - if (!old_flip_res) { - spin_unlock_irqrestore(&pl111_crtc->base_update_lock, irq_flags); - return; - } - pl111_crtc->current_update_res = NULL; - - /* Prepare the next flip (if any) of the queue as soon as possible. */ - if (!list_empty(&pl111_crtc->update_queue)) { - struct pl111_drm_flip_resource *flip_res; - /* Remove the head of the list */ - flip_res = list_first_entry(&pl111_crtc->update_queue, - struct pl111_drm_flip_resource, link); - list_del(&flip_res->link); - do_flip_to_res(flip_res); - /* - * current_update_res will be set, so guarentees that - * another flip_res coming in gets queued instead of - * handled immediately - */ - } - spin_unlock_irqrestore(&pl111_crtc->base_update_lock, irq_flags); - - /* Finalize properly the flip that caused the IRQ */ - DRM_DEBUG_KMS("DRM Finalizing old_flip_res=%p\n", old_flip_res); - - bo = PL111_BO_FROM_FRAMEBUFFER(old_flip_res->fb); -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags); - release_kds_resource_and_display(old_flip_res); - spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, flags); -#endif - /* Release DMA buffer on this flip */ - - if (bo->gem_object.export_dma_buf != NULL) - dma_buf_put(bo->gem_object.export_dma_buf); - - drm_handle_vblank(dev, pl111_crtc->crtc_index); - - /* Wake up any processes waiting for page flip event */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) - if (old_flip_res->event) { - spin_lock_bh(&dev->event_lock); - drm_send_vblank_event(dev, pl111_crtc->crtc_index, - old_flip_res->event); - spin_unlock_bh(&dev->event_lock); - } -#else - if (old_flip_res->event) { - struct drm_pending_vblank_event *e = old_flip_res->event; - struct timeval now; - unsigned int seq; - - DRM_DEBUG_KMS("%s: wake up page flip event (%p)\n", __func__, - old_flip_res->event); - - spin_lock_bh(&dev->event_lock); - seq = drm_vblank_count_and_time(dev, pl111_crtc->crtc_index, - 
&now); - e->pipe = pl111_crtc->crtc_index; - e->event.sequence = seq; - e->event.tv_sec = now.tv_sec; - e->event.tv_usec = now.tv_usec; - - list_add_tail(&e->base.link, - &e->base.file_priv->event_list); - - wake_up_interruptible(&e->base.file_priv->event_wait); - spin_unlock_bh(&dev->event_lock); - } -#endif - - drm_vblank_put(dev, pl111_crtc->crtc_index); - - /* - * workqueue.c:process_one_work(): - * "It is permissible to free the struct work_struct from - * inside the function that is called from it" - */ - kmem_cache_free(priv.page_flip_slab, old_flip_res); - - flips_in_flight = atomic_dec_return(&priv.nr_flips_in_flight); - if (flips_in_flight == 0 || - flips_in_flight == (NR_FLIPS_IN_FLIGHT_THRESHOLD - 1)) - wake_up(&priv.wait_for_flips); - - DRM_DEBUG_KMS("DRM release flip_res=%p\n", old_flip_res); -} - -void show_framebuffer_on_crtc_cb(void *cb1, void *cb2) -{ - struct pl111_drm_flip_resource *flip_res = cb1; - struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(flip_res->crtc); - - pl111_crtc->show_framebuffer_cb(cb1, cb2); -} - -int show_framebuffer_on_crtc(struct drm_crtc *crtc, - struct drm_framebuffer *fb, bool page_flip, - struct drm_pending_vblank_event *event) -{ - struct pl111_gem_bo *bo; - struct pl111_drm_flip_resource *flip_res; - int flips_in_flight; - int old_flips_in_flight; - -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0)) - crtc->fb = fb; -#else - crtc->primary->fb = fb; -#endif - - bo = PL111_BO_FROM_FRAMEBUFFER(fb); - if (bo == NULL) { - DRM_DEBUG_KMS("Failed to get pl111_gem_bo object\n"); - return -EINVAL; - } - - /* If this is a full modeset, wait for all outstanding flips to complete - * before continuing. This avoids unnecessary complication from being - * able to queue up multiple modesets and queues of mixed modesets and - * page flips. - * - * Modesets should be uncommon and will not be performant anyway, so - * making them synchronous should have negligible performance impact. 
- */ - if (!page_flip) { - int ret = wait_event_killable(priv.wait_for_flips, - atomic_read(&priv.nr_flips_in_flight) == 0); - if (ret) - return ret; - } - - /* - * There can be more 'early display' flips in flight than there are - * buffers, and there is (currently) no explicit bound on the number of - * flips. Hence, we need a new allocation for each one. - * - * Note: this could be optimized down if we knew a bound on the flips, - * since an application can only have so many buffers in flight to be - * useful/not hog all the memory - */ - flip_res = kmem_cache_alloc(priv.page_flip_slab, GFP_KERNEL); - if (flip_res == NULL) { - pr_err("kmem_cache_alloc failed to alloc - flip ignored\n"); - return -ENOMEM; - } - - /* - * increment flips in flight, whilst blocking when we reach - * NR_FLIPS_IN_FLIGHT_THRESHOLD - */ - do { - /* - * Note: use of assign-and-then-compare in the condition to set - * flips_in_flight - */ - int ret = wait_event_killable(priv.wait_for_flips, - (flips_in_flight = - atomic_read(&priv.nr_flips_in_flight)) - < NR_FLIPS_IN_FLIGHT_THRESHOLD); - if (ret != 0) { - kmem_cache_free(priv.page_flip_slab, flip_res); - return ret; - } - - old_flips_in_flight = atomic_cmpxchg(&priv.nr_flips_in_flight, - flips_in_flight, flips_in_flight + 1); - } while (old_flips_in_flight != flips_in_flight); - - flip_res->fb = fb; - flip_res->crtc = crtc; - flip_res->page_flip = page_flip; - flip_res->event = event; - INIT_LIST_HEAD(&flip_res->link); - DRM_DEBUG_KMS("DRM alloc flip_res=%p\n", flip_res); -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - if (bo->gem_object.export_dma_buf != NULL) { - struct dma_buf *buf = bo->gem_object.export_dma_buf; - unsigned long shared[1] = { 0 }; - struct kds_resource *resource_list[1] = { - get_dma_buf_kds_resource(buf) }; - int err; - - get_dma_buf(buf); - DRM_DEBUG_KMS("Got dma_buf %p\n", buf); - - /* Wait for the KDS resource associated with this buffer */ - err = kds_async_waitall(&flip_res->kds_res_set, - &priv.kds_cb, flip_res, 
fb, 1, shared, - resource_list); - BUG_ON(err); - } else { - struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); - - DRM_DEBUG_KMS("No dma_buf for this flip\n"); - - /* No dma-buf attached so just call the callback directly */ - flip_res->kds_res_set = NULL; - pl111_crtc->show_framebuffer_cb(flip_res, fb); - } -#else - if (bo->gem_object.export_dma_buf != NULL) { - struct dma_buf *buf = bo->gem_object.export_dma_buf; - - get_dma_buf(buf); - DRM_DEBUG_KMS("Got dma_buf %p\n", buf); - } else { - DRM_DEBUG_KMS("No dma_buf for this flip\n"); - } - - /* No dma-buf attached to this so just call the callback directly */ - { - struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); - pl111_crtc->show_framebuffer_cb(flip_res, fb); - } -#endif - - /* For the same reasons as the wait at the start of this function, - * wait for the modeset to complete before continuing. - */ - if (!page_flip) { - int ret = wait_event_killable(priv.wait_for_flips, - flips_in_flight == 0); - if (ret) - return ret; - } - - return 0; -} - -int pl111_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) - struct drm_pending_vblank_event *event) -#else - struct drm_pending_vblank_event *event, - uint32_t flags) -#endif -{ - DRM_DEBUG_KMS("%s: crtc=%p, fb=%p, event=%p\n", - __func__, crtc, fb, event); - return show_framebuffer_on_crtc(crtc, fb, true, event); -} - -int pl111_crtc_helper_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, struct drm_framebuffer *old_fb) -{ - int ret; - struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); - struct drm_display_mode *duplicated_mode; - - DRM_DEBUG_KMS("DRM crtc_helper_mode_set, x=%d y=%d bpp=%d\n", - adjusted_mode->hdisplay, adjusted_mode->vdisplay, -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0)) - crtc->fb->bits_per_pixel); -#else - crtc->primary->fb->bits_per_pixel); -#endif - - duplicated_mode = 
drm_mode_duplicate(crtc->dev, adjusted_mode); - if (!duplicated_mode) - return -ENOMEM; - - pl111_crtc->new_mode = duplicated_mode; -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0)) - ret = show_framebuffer_on_crtc(crtc, crtc->fb, false, NULL); -#else - ret = show_framebuffer_on_crtc(crtc, crtc->primary->fb, false, NULL); -#endif - if (ret != 0) { - pl111_crtc->new_mode = pl111_crtc->current_mode; - drm_mode_destroy(crtc->dev, duplicated_mode); - } - - return ret; -} - -void pl111_crtc_helper_prepare(struct drm_crtc *crtc) -{ - DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc); -} - -void pl111_crtc_helper_commit(struct drm_crtc *crtc) -{ - DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc); -} - -bool pl111_crtc_helper_mode_fixup(struct drm_crtc *crtc, -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) - const struct drm_display_mode *mode, -#else - struct drm_display_mode *mode, -#endif - struct drm_display_mode *adjusted_mode) -{ - DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc); - -#ifdef CONFIG_ARCH_VEXPRESS - /* - * 1024x768 with more than 16 bits per pixel may not work - * correctly on Versatile Express due to bandwidth issues - */ - if (mode->hdisplay == 1024 && mode->vdisplay == 768 && -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 14, 0)) - crtc->fb->bits_per_pixel > 16) { -#else - crtc->primary->fb->bits_per_pixel > 16) { -#endif - DRM_INFO("*WARNING* 1024x768 at > 16 bpp may suffer corruption\n"); - } -#endif - - return true; -} - -void pl111_crtc_helper_disable(struct drm_crtc *crtc) -{ - int ret; - - DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc); - - /* don't disable crtc until no flips in flight as irq will be disabled */ - ret = wait_event_killable(priv.wait_for_flips, atomic_read(&priv.nr_flips_in_flight) == 0); - if(ret) { - pr_err("pl111_crtc_helper_disable failed\n"); - return; - } - clcd_disable(crtc); -} - -void pl111_crtc_destroy(struct drm_crtc *crtc) -{ - struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); - - 
DRM_DEBUG_KMS("DRM %s on crtc=%p\n", __func__, crtc); - - drm_crtc_cleanup(crtc); - kfree(pl111_crtc); -} - -const struct drm_crtc_funcs crtc_funcs = { - .cursor_set = pl111_crtc_cursor_set, - .cursor_move = pl111_crtc_cursor_move, - .set_config = drm_crtc_helper_set_config, - .page_flip = pl111_crtc_page_flip, - .destroy = pl111_crtc_destroy -}; - -const struct drm_crtc_helper_funcs crtc_helper_funcs = { - .mode_set = pl111_crtc_helper_mode_set, - .prepare = pl111_crtc_helper_prepare, - .commit = pl111_crtc_helper_commit, - .mode_fixup = pl111_crtc_helper_mode_fixup, - .disable = pl111_crtc_helper_disable, -}; - -struct pl111_drm_crtc *pl111_crtc_create(struct drm_device *dev) -{ - struct pl111_drm_crtc *pl111_crtc; - - pl111_crtc = kzalloc(sizeof(struct pl111_drm_crtc), GFP_KERNEL); - if (pl111_crtc == NULL) { - pr_err("Failed to allocated pl111_drm_crtc\n"); - return NULL; - } - - drm_crtc_init(dev, &pl111_crtc->crtc, &crtc_funcs); - drm_crtc_helper_add(&pl111_crtc->crtc, &crtc_helper_funcs); - - pl111_crtc->crtc_index = pl111_crtc_num; - pl111_crtc_num++; - pl111_crtc->crtc.enabled = 0; - pl111_crtc->last_bpp = 0; - pl111_crtc->current_update_res = NULL; -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - pl111_crtc->displaying_fb = NULL; - pl111_crtc->old_kds_res_set = NULL; - spin_lock_init(&pl111_crtc->current_displaying_lock); -#endif - pl111_crtc->show_framebuffer_cb = show_framebuffer_on_crtc_cb_internal; - INIT_LIST_HEAD(&pl111_crtc->update_queue); - spin_lock_init(&pl111_crtc->base_update_lock); - - return pl111_crtc; -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_cursor.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_cursor.c deleted file mode 100755 index 4bf20fec24640..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_cursor.c +++ /dev/null @@ -1,331 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * pl111_drm_cursor.c - * Implementation of cursor functions for PL111 DRM - */ -#include -#include -#include -#include -#include -#include - -#include -#include -#include "pl111_clcd_ext.h" -#include "pl111_drm.h" - -#define PL111_MAX_CURSOR_WIDTH (64) -#define PL111_MAX_CURSOR_HEIGHT (64) - -#define ARGB_2_LBBP_BINARY_THRESHOLD (1 << 7) -#define ARGB_ALPHA_SHIFT 24 -#define ARGB_ALPHA_MASK (0xff << ARGB_ALPHA_SHIFT) -#define ARGB_RED_SHIFT 16 -#define ARGB_RED_MASK (0xff << ARGB_RED_SHIFT) -#define ARGB_GREEN_SHIFT 8 -#define ARGB_GREEN_MASK (0xff << ARGB_GREEN_SHIFT) -#define ARGB_BLUE_SHIFT 0 -#define ARGB_BLUE_MASK (0xff << ARGB_BLUE_SHIFT) - - -void pl111_set_cursor_size(enum pl111_cursor_size size) -{ - u32 reg_data = readl(priv.regs + CLCD_CRSR_CONFIG); - - if (size == CURSOR_64X64) - reg_data |= CRSR_CONFIG_CRSR_SIZE; - else - reg_data &= ~CRSR_CONFIG_CRSR_SIZE; - - writel(reg_data, priv.regs + CLCD_CRSR_CONFIG); -} - -void pl111_set_cursor_sync(enum pl111_cursor_sync sync) -{ - u32 reg_data = readl(priv.regs + CLCD_CRSR_CONFIG); - - if (sync == CURSOR_SYNC_VSYNC) - reg_data |= CRSR_CONFIG_CRSR_FRAME_SYNC; - else - reg_data &= ~CRSR_CONFIG_CRSR_FRAME_SYNC; - - writel(reg_data, priv.regs + CLCD_CRSR_CONFIG); -} - -void pl111_set_cursor(u32 cursor) -{ - u32 reg_data = readl(priv.regs + CLCD_CRSR_CTRL); - - reg_data &= ~(CRSR_CTRL_CRSR_MAX << CRSR_CTRL_CRSR_NUM_SHIFT); - reg_data |= (cursor & CRSR_CTRL_CRSR_MAX) << CRSR_CTRL_CRSR_NUM_SHIFT; - - writel(reg_data, priv.regs + CLCD_CRSR_CTRL); -} - 
-void pl111_set_cursor_enable(bool enable) -{ - u32 reg_data = readl(priv.regs + CLCD_CRSR_CTRL); - - if (enable) - reg_data |= CRSR_CTRL_CRSR_ON; - else - reg_data &= ~CRSR_CTRL_CRSR_ON; - - writel(reg_data, priv.regs + CLCD_CRSR_CTRL); -} - -void pl111_set_cursor_position(u32 x, u32 y) -{ - u32 reg_data = (x & CRSR_XY_MASK) | - ((y & CRSR_XY_MASK) << CRSR_XY_Y_SHIFT); - - writel(reg_data, priv.regs + CLCD_CRSR_XY); -} - -void pl111_set_cursor_clipping(u32 x, u32 y) -{ - u32 reg_data; - - /* - * Do not allow setting clipping values larger than - * the cursor size since the cursor is already fully hidden - * when x,y = PL111_MAX_CURSOR_WIDTH. - */ - if (x > PL111_MAX_CURSOR_WIDTH) - x = PL111_MAX_CURSOR_WIDTH; - if (y > PL111_MAX_CURSOR_WIDTH) - y = PL111_MAX_CURSOR_WIDTH; - - reg_data = (x & CRSR_CLIP_MASK) | - ((y & CRSR_CLIP_MASK) << CRSR_CLIP_Y_SHIFT); - - writel(reg_data, priv.regs + CLCD_CRSR_CLIP); -} - -void pl111_set_cursor_palette(u32 color0, u32 color1) -{ - writel(color0 & CRSR_PALETTE_MASK, priv.regs + CLCD_CRSR_PALETTE_0); - writel(color1 & CRSR_PALETTE_MASK, priv.regs + CLCD_CRSR_PALETTE_1); -} - -void pl111_cursor_enable(void) -{ - pl111_set_cursor_sync(CURSOR_SYNC_VSYNC); - pl111_set_cursor_size(CURSOR_64X64); - pl111_set_cursor_palette(0x0, 0x00ffffff); - pl111_set_cursor_enable(true); -} - -void pl111_cursor_disable(void) -{ - pl111_set_cursor_enable(false); -} - -/* shift required to locate pixel into the correct position in - * a cursor LBBP word, indexed by x mod 16. 
- */ -static const unsigned char -x_mod_16_to_value_shift[CLCD_CRSR_IMAGE_PIXELS_PER_WORD] = { - 6, 4, 2, 0, 14, 12, 10, 8, 22, 20, 18, 16, 30, 28, 26, 24 -}; - -/* Pack the pixel value into its correct position in the buffer as specified - * for LBBP */ -static inline void -set_lbbp_pixel(uint32_t *buffer, unsigned int x, unsigned int y, - uint32_t value) -{ - u32 *cursor_ram = priv.regs + CLCD_CRSR_IMAGE; - uint32_t shift; - uint32_t data; - - shift = x_mod_16_to_value_shift[x % CLCD_CRSR_IMAGE_PIXELS_PER_WORD]; - - /* Get the word containing this pixel */ - cursor_ram = cursor_ram + (x >> CLCD_CRSR_IMAGE_WORDS_PER_LINE) + (y << 2); - - /* Update pixel in cursor RAM */ - data = readl(cursor_ram); - data &= ~(CLCD_CRSR_LBBP_COLOR_MASK << shift); - data |= value << shift; - writel(data, cursor_ram); -} - -static u32 pl111_argb_to_lbbp(u32 argb_pix) -{ - u32 lbbp_pix = CLCD_CRSR_LBBP_TRANSPARENT; - u32 alpha = (argb_pix & ARGB_ALPHA_MASK) >> ARGB_ALPHA_SHIFT; - u32 red = (argb_pix & ARGB_RED_MASK) >> ARGB_RED_SHIFT; - u32 green = (argb_pix & ARGB_GREEN_MASK) >> ARGB_GREEN_SHIFT; - u32 blue = (argb_pix & ARGB_BLUE_MASK) >> ARGB_BLUE_SHIFT; - - /* - * Converting from 8 pixel transparency to binary transparency - * it's the best we can achieve. - */ - if (alpha & ARGB_2_LBBP_BINARY_THRESHOLD) { - u32 gray, max, min; - - /* - * Convert to gray using the lightness method: - * gray = [max(R,G,B) + min(R,G,B)]/2 - */ - min = min(red, green); - min = min(min, blue); - max = max(red, green); - max = max(max, blue); - gray = (min + max) >> 1; /* divide by 2 */ - /* Apply binary threshold to the gray value calculated */ - if (gray & ARGB_2_LBBP_BINARY_THRESHOLD) - lbbp_pix = CLCD_CRSR_LBBP_FOREGROUND; - else - lbbp_pix = CLCD_CRSR_LBBP_BACKGROUND; - } - - return lbbp_pix; -} - -/* - * The PL111 hardware cursor supports only LBBP which is a 2bpp format but - * the cursor format from userspace is ARGB8888 so we need to convert - * to LBBP here. 
- */ -static void pl111_set_cursor_image(u32 *data) -{ -#ifdef ARGB_LBBP_CONVERSION_DEBUG - /* Add 1 on width to insert trailing NULL */ - char string_cursor[PL111_MAX_CURSOR_WIDTH + 1]; -#endif /* ARGB_LBBP_CONVERSION_DEBUG */ - unsigned int x; - unsigned int y; - - for (y = 0; y < PL111_MAX_CURSOR_HEIGHT; y++) { - for (x = 0; x < PL111_MAX_CURSOR_WIDTH; x++) { - u32 value = pl111_argb_to_lbbp(*data); - -#ifdef ARGB_LBBP_CONVERSION_DEBUG - if (value == CLCD_CRSR_LBBP_TRANSPARENT) - string_cursor[x] = 'T'; - else if (value == CLCD_CRSR_LBBP_FOREGROUND) - string_cursor[x] = 'F'; - else if (value == CLCD_CRSR_LBBP_INVERSE) - string_cursor[x] = 'I'; - else - string_cursor[x] = 'B'; - -#endif /* ARGB_LBBP_CONVERSION_DEBUG */ - set_lbbp_pixel(data, x, y, value); - ++data; - } -#ifdef ARGB_LBBP_CONVERSION_DEBUG - string_cursor[PL111_MAX_CURSOR_WIDTH] = '\0'; - DRM_INFO("%s\n", string_cursor); -#endif /* ARGB_LBBP_CONVERSION_DEBUG */ - } -} - -int pl111_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file_priv, - uint32_t handle, - uint32_t width, - uint32_t height) -{ - struct drm_gem_object *obj; - struct pl111_gem_bo *bo; - - DRM_DEBUG_KMS("handle = %u, width = %u, height = %u\n", - handle, width, height); - - if (!handle) { - pl111_cursor_disable(); - return 0; - } - - if ((width != PL111_MAX_CURSOR_WIDTH) || - (height != PL111_MAX_CURSOR_HEIGHT)) - return -EINVAL; - - obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); - if (!obj) { - DRM_ERROR("Cannot find cursor object for handle = %d\n", - handle); - return -ENOENT; - } - - /* - * We expect a PL111_MAX_CURSOR_WIDTH x PL111_MAX_CURSOR_HEIGHT - * ARGB888 buffer object in the input. 
- * - */ - if (obj->size < (PL111_MAX_CURSOR_WIDTH * PL111_MAX_CURSOR_HEIGHT * 4)) { - DRM_ERROR("Cannot set cursor with an obj size = %d\n", - obj->size); - drm_gem_object_unreference_unlocked(obj); - return -EINVAL; - } - - bo = PL111_BO_FROM_GEM(obj); - if (!(bo->type & PL111_BOT_DMA)) { - DRM_ERROR("Tried to set cursor with non DMA backed obj = %p\n", - obj); - drm_gem_object_unreference_unlocked(obj); - return -EINVAL; - } - - pl111_set_cursor_image(bo->backing_data.dma.fb_cpu_addr); - - /* - * Since we copy the contents of the buffer to the HW cursor internal - * memory this GEM object is not needed anymore. - */ - drm_gem_object_unreference_unlocked(obj); - - pl111_cursor_enable(); - - return 0; -} - -int pl111_crtc_cursor_move(struct drm_crtc *crtc, - int x, int y) -{ - int x_clip = 0; - int y_clip = 0; - - DRM_DEBUG("x %d y %d\n", x, y); - - /* - * The cursor image is clipped automatically at the screen limits when - * it extends beyond the screen image to the right or bottom but - * we must clip it using pl111 HW features for negative values. - */ - if (x < 0) { - x_clip = -x; - x = 0; - } - if (y < 0) { - y_clip = -y; - y = 0; - } - - pl111_set_cursor_clipping(x_clip, y_clip); - pl111_set_cursor_position(x, y); - - return 0; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_device.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_device.c deleted file mode 100755 index af92498784f30..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_device.c +++ /dev/null @@ -1,336 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * pl111_drm_device.c - * Implementation of the Linux device driver entrypoints for PL111 DRM - */ -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "pl111_drm.h" - -struct pl111_drm_dev_private priv; - -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS -static void initial_kds_obtained(void *cb1, void *cb2) -{ - wait_queue_head_t *wait = (wait_queue_head_t *) cb1; - bool *cb_has_called = (bool *) cb2; - - *cb_has_called = true; - wake_up(wait); -} - -/* Must be called from within current_displaying_lock spinlock */ -void release_kds_resource_and_display(struct pl111_drm_flip_resource *flip_res) -{ - struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(flip_res->crtc); - pl111_crtc->displaying_fb = flip_res->fb; - - /* Release the previous buffer */ - if (pl111_crtc->old_kds_res_set != NULL) { - /* - * Can flip to the same buffer, but must not release the current - * resource set - */ - BUG_ON(pl111_crtc->old_kds_res_set == flip_res->kds_res_set); - kds_resource_set_release(&pl111_crtc->old_kds_res_set); - } - /* Record the current buffer, to release on the next buffer flip */ - pl111_crtc->old_kds_res_set = flip_res->kds_res_set; -} -#endif - -void pl111_drm_preclose(struct drm_device *dev, struct drm_file *file_priv) -{ - DRM_DEBUG_KMS("DRM %s on dev=%p\n", __func__, dev); -} - -void pl111_drm_lastclose(struct drm_device *dev) -{ - DRM_DEBUG_KMS("DRM %s on dev=%p\n", __func__, dev); -} - -/* - * pl111 does not have a proper HW counter for vblank IRQs so enable_vblank - * and disable_vblank are just no op callbacks. 
- */ -static int pl111_enable_vblank(struct drm_device *dev, int crtc) -{ - DRM_DEBUG_KMS("%s: dev=%p, crtc=%d", __func__, dev, crtc); - return 0; -} - -static void pl111_disable_vblank(struct drm_device *dev, int crtc) -{ - DRM_DEBUG_KMS("%s: dev=%p, crtc=%d", __func__, dev, crtc); -} - -struct drm_mode_config_funcs mode_config_funcs = { - .fb_create = pl111_fb_create, -}; - -static int pl111_modeset_init(struct drm_device *dev) -{ - struct drm_mode_config *mode_config; - struct pl111_drm_dev_private *priv = dev->dev_private; - struct pl111_drm_connector *pl111_connector; - struct pl111_drm_encoder *pl111_encoder; - int ret = 0; - - if (priv == NULL) - return -EINVAL; - - drm_mode_config_init(dev); - mode_config = &dev->mode_config; - mode_config->funcs = &mode_config_funcs; - mode_config->min_width = 1; - mode_config->max_width = 1024; - mode_config->min_height = 1; - mode_config->max_height = 768; - - priv->pl111_crtc = pl111_crtc_create(dev); - if (priv->pl111_crtc == NULL) { - pr_err("Failed to create pl111_drm_crtc\n"); - ret = -ENOMEM; - goto out_config; - } - - priv->number_crtcs = 1; - - pl111_connector = pl111_connector_create(dev); - if (pl111_connector == NULL) { - pr_err("Failed to create pl111_drm_connector\n"); - ret = -ENOMEM; - goto out_config; - } - - pl111_encoder = pl111_encoder_create(dev, 1); - if (pl111_encoder == NULL) { - pr_err("Failed to create pl111_drm_encoder\n"); - ret = -ENOMEM; - goto out_config; - } - - ret = drm_mode_connector_attach_encoder(&pl111_connector->connector, - &pl111_encoder->encoder); - if (ret != 0) { - DRM_ERROR("Failed to attach encoder\n"); - goto out_config; - } - - pl111_connector->connector.encoder = &pl111_encoder->encoder; - - goto finish; - -out_config: - drm_mode_config_cleanup(dev); -finish: - DRM_DEBUG("%s returned %d\n", __func__, ret); - return ret; -} - -static void pl111_modeset_fini(struct drm_device *dev) -{ - drm_mode_config_cleanup(dev); -} - -static int pl111_drm_load(struct drm_device *dev, 
unsigned long chipset) -{ - int ret = 0; - - pr_info("DRM %s\n", __func__); - - mutex_init(&priv.export_dma_buf_lock); - atomic_set(&priv.nr_flips_in_flight, 0); - init_waitqueue_head(&priv.wait_for_flips); -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - ret = kds_callback_init(&priv.kds_cb, 1, show_framebuffer_on_crtc_cb); - if (ret != 0) { - pr_err("Failed to initialise KDS callback\n"); - goto finish; - } - - ret = kds_callback_init(&priv.kds_obtain_current_cb, 1, - initial_kds_obtained); - if (ret != 0) { - pr_err("Failed to init KDS obtain callback\n"); - kds_callback_term(&priv.kds_cb); - goto finish; - } -#endif - - /* Create a cache for page flips */ - priv.page_flip_slab = kmem_cache_create("page flip slab", - sizeof(struct pl111_drm_flip_resource), 0, 0, NULL); - if (priv.page_flip_slab == NULL) { - DRM_ERROR("Failed to create slab\n"); - ret = -ENOMEM; - goto out_kds_callbacks; - } - - dev->dev_private = &priv; - - ret = pl111_modeset_init(dev); - if (ret != 0) { - pr_err("Failed to init modeset\n"); - goto out_slab; - } - - ret = pl111_device_init(dev); - if (ret != 0) { - DRM_ERROR("Failed to init MMIO and IRQ\n"); - goto out_modeset; - } - - ret = drm_vblank_init(dev, 1); - if (ret != 0) { - DRM_ERROR("Failed to init vblank\n"); - goto out_vblank; - } - -#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) - platform_set_drvdata(dev->platformdev, dev); -#endif - - goto finish; - -out_vblank: - pl111_device_fini(dev); -out_modeset: - pl111_modeset_fini(dev); -out_slab: - kmem_cache_destroy(priv.page_flip_slab); -out_kds_callbacks: -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - kds_callback_term(&priv.kds_obtain_current_cb); - kds_callback_term(&priv.kds_cb); -#endif -finish: - DRM_DEBUG_KMS("pl111_drm_load returned %d\n", ret); - return ret; -} - -static int pl111_drm_unload(struct drm_device *dev) -{ - pr_info("DRM %s\n", __func__); - - kmem_cache_destroy(priv.page_flip_slab); - - drm_vblank_cleanup(dev); - pl111_modeset_fini(dev); - pl111_device_fini(dev); - 
-#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - kds_callback_term(&priv.kds_obtain_current_cb); - kds_callback_term(&priv.kds_cb); -#endif - return 0; -} - -static struct vm_operations_struct pl111_gem_vm_ops = { - .fault = pl111_gem_fault, -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -#else - .open = pl111_gem_vm_open, - .close = pl111_gem_vm_close, -#endif -}; - -static const struct file_operations drm_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - .mmap = pl111_gem_mmap, - .poll = drm_poll, - .read = drm_read, -}; - -static struct drm_ioctl_desc pl111_ioctls[] = { - DRM_IOCTL_DEF_DRV(PL111_GEM_CREATE, pl111_drm_gem_create_ioctl, - DRM_CONTROL_ALLOW | DRM_UNLOCKED), -}; - -static struct drm_driver driver = { - .driver_features = - DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, - .load = pl111_drm_load, - .unload = pl111_drm_unload, - .context_dtor = NULL, - .preclose = pl111_drm_preclose, - .lastclose = pl111_drm_lastclose, - .suspend = pl111_drm_suspend, - .resume = pl111_drm_resume, - .get_vblank_counter = drm_vblank_count, - .enable_vblank = pl111_enable_vblank, - .disable_vblank = pl111_disable_vblank, - .ioctls = pl111_ioctls, - .fops = &drm_fops, - .name = DRIVER_NAME, - .desc = DRIVER_DESC, - .date = DRIVER_DATE, - .major = DRIVER_MAJOR, - .minor = DRIVER_MINOR, - .patchlevel = DRIVER_PATCHLEVEL, - .dumb_create = pl111_dumb_create, - .dumb_destroy = pl111_dumb_destroy, - .dumb_map_offset = pl111_dumb_map_offset, - .gem_free_object = pl111_gem_free_object, - .gem_vm_ops = &pl111_gem_vm_ops, - .prime_handle_to_fd = &pl111_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = &pl111_gem_prime_export, - .gem_prime_import = &pl111_gem_prime_import, -}; - -int pl111_drm_init(struct platform_device *dev) -{ - int ret; - pr_info("DRM %s\n", __func__); - pr_info("PL111 DRM initialize, driver name: %s, 
version %d.%d\n", - DRIVER_NAME, DRIVER_MAJOR, DRIVER_MINOR); - driver.num_ioctls = ARRAY_SIZE(pl111_ioctls); - ret = 0; -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 15, 0)) - driver.kdriver.platform_device = dev; -#endif - return drm_platform_init(&driver, dev); - -} - -void pl111_drm_exit(struct platform_device *dev) -{ - pr_info("DRM %s\n", __func__); - -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 13, 0)) - drm_platform_exit(&driver, dev); -#else - drm_put_dev(platform_get_drvdata(dev)); -#endif -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_dma_buf.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_dma_buf.c deleted file mode 100755 index 1131f46b27dfd..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_dma_buf.c +++ /dev/null @@ -1,625 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/** - * pl111_drm_dma_buf.c - * Implementation of the dma_buf functions for PL111 DRM - */ -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "pl111_drm.h" - -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) -#define export_dma_buf export_dma_buf -#else -#define export_dma_buf dma_buf -#endif - -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS -static void obtain_kds_if_currently_displayed(struct drm_device *dev, - struct pl111_gem_bo *bo, - struct dma_buf *dma_buf) -{ - unsigned long shared[1] = { 0 }; - struct kds_resource *resource_list[1]; - struct kds_resource_set *kds_res_set; - struct drm_crtc *crtc; - bool cb_has_called = false; - unsigned long flags; - int err; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); - - DRM_DEBUG_KMS("Obtaining initial KDS res for bo:%p dma_buf:%p\n", - bo, dma_buf); - - resource_list[0] = get_dma_buf_kds_resource(dma_buf); - get_dma_buf(dma_buf); - - /* - * Can't use kds_waitall(), because kbase will be let through due to - * locked ignore' - */ - err = kds_async_waitall(&kds_res_set, - &priv.kds_obtain_current_cb, &wake, - &cb_has_called, 1, shared, resource_list); - BUG_ON(err); - wait_event(wake, cb_has_called == true); - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); - spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags); - if (pl111_crtc->displaying_fb) { - struct pl111_drm_framebuffer *pl111_fb; - struct drm_framebuffer *fb = pl111_crtc->displaying_fb; - - pl111_fb = PL111_FB_FROM_FRAMEBUFFER(fb); - - if (pl111_fb->bo == bo) { - DRM_DEBUG_KMS("Initial KDS resource for bo %p", bo); - DRM_DEBUG_KMS(" is being displayed, keeping\n"); - /* There shouldn't be a previous buffer to release */ - BUG_ON(pl111_crtc->old_kds_res_set); - - if (kds_res_set == NULL) { - err = kds_async_waitall(&kds_res_set, - &priv.kds_obtain_current_cb, - &wake, &cb_has_called, - 1, shared, resource_list); - BUG_ON(err); 
- wait_event(wake, cb_has_called == true); - } - - /* Current buffer will need releasing on next flip */ - pl111_crtc->old_kds_res_set = kds_res_set; - - /* - * Clear kds_res_set, so a new kds_res_set is allocated - * for additional CRTCs - */ - kds_res_set = NULL; - } - } - spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, flags); - } - - /* kds_res_set will be NULL here if any CRTCs are displaying fb */ - if (kds_res_set != NULL) { - DRM_DEBUG_KMS("Initial KDS resource for bo %p", bo); - DRM_DEBUG_KMS(" not being displayed, discarding\n"); - /* They're not being displayed, release them */ - kds_resource_set_release(&kds_res_set); - } - - dma_buf_put(dma_buf); -} -#endif - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) - -static int pl111_dma_buf_mmap(struct dma_buf *buffer, - struct vm_area_struct *vma) -{ - struct drm_gem_object *obj = buffer->priv; - struct pl111_gem_bo *bo = PL111_BO_FROM_GEM(obj); - struct drm_device *dev = obj->dev; - int ret; - - DRM_DEBUG_KMS("DRM %s on dma_buf=%p\n", __func__, buffer); - - mutex_lock(&dev->struct_mutex); - ret = drm_gem_mmap_obj(obj, obj->size, vma); - mutex_unlock(&dev->struct_mutex); - if (ret) - return ret; - - return pl111_bo_mmap(obj, bo, vma, buffer->size); -} - -#else - -static int pl111_dma_buf_mmap(struct dma_buf *buffer, - struct vm_area_struct *vma) -{ - struct drm_gem_object *obj = buffer->priv; - struct pl111_gem_bo *bo = PL111_BO_FROM_GEM(obj); - struct drm_device *dev = obj->dev; - - DRM_DEBUG_KMS("DRM %s on dma_buf=%p\n", __func__, buffer); - - mutex_lock(&dev->struct_mutex); - - /* Check for valid size. 
*/ - if (obj->size < vma->vm_end - vma->vm_start) - return -EINVAL; - - BUG_ON(!dev->driver->gem_vm_ops); - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) - vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; -#else - vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND; -#endif - - vma->vm_ops = dev->driver->gem_vm_ops; - vma->vm_private_data = obj; - - /* Take a ref for this mapping of the object, so that the fault - * handler can dereference the mmap offset's pointer to the object. - * This reference is cleaned up by the corresponding vm_close - * (which should happen whether the vma was created by this call, or - * by a vm_open due to mremap or partial unmap or whatever). - */ - drm_gem_object_reference(obj); - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) - pl111_drm_vm_open_locked(dev, vma); -#else - drm_vm_open_locked(dev, vma); -#endif - - mutex_unlock(&dev->struct_mutex); - - return pl111_bo_mmap(obj, bo, vma, buffer->size); -} - -#endif /* KERNEL_VERSION */ - -static void pl111_dma_buf_release(struct dma_buf *buf) -{ - /* - * Need to release the dma_buf's reference on the gem object it was - * exported from, and also clear the gem object's export_dma_buf - * pointer to this dma_buf as it no longer exists - */ - struct drm_gem_object *obj = (struct drm_gem_object *)buf->priv; - struct pl111_gem_bo *bo; -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - struct drm_crtc *crtc; - unsigned long flags; -#endif - bo = PL111_BO_FROM_GEM(obj); - - DRM_DEBUG_KMS("Releasing dma_buf %p, drm_gem_obj=%p\n", buf, obj); - -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - list_for_each_entry(crtc, &bo->gem_object.dev->mode_config.crtc_list, - head) { - struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); - spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags); - if (pl111_crtc->displaying_fb) { - struct pl111_drm_framebuffer *pl111_fb; - struct drm_framebuffer *fb = pl111_crtc->displaying_fb; - - pl111_fb = 
PL111_FB_FROM_FRAMEBUFFER(fb); - if (pl111_fb->bo == bo) { - kds_resource_set_release(&pl111_crtc->old_kds_res_set); - pl111_crtc->old_kds_res_set = NULL; - } - } - spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, flags); - } -#endif - mutex_lock(&priv.export_dma_buf_lock); - - obj->export_dma_buf = NULL; - drm_gem_object_unreference_unlocked(obj); - - mutex_unlock(&priv.export_dma_buf_lock); -} - -static int pl111_dma_buf_attach(struct dma_buf *buf, struct device *dev, - struct dma_buf_attachment *attach) -{ - DRM_DEBUG_KMS("Attaching dma_buf %p to device %p attach=%p\n", buf, - dev, attach); - - attach->priv = dev; - - return 0; -} - -static void pl111_dma_buf_detach(struct dma_buf *buf, - struct dma_buf_attachment *attach) -{ - DRM_DEBUG_KMS("Detaching dma_buf %p attach=%p\n", attach->dmabuf, - attach); -} - -/* Heavily from exynos_drm_dmabuf.c */ -static struct sg_table *pl111_dma_buf_map_dma_buf(struct dma_buf_attachment - *attach, - enum dma_data_direction - direction) -{ - struct drm_gem_object *obj = attach->dmabuf->priv; - struct pl111_gem_bo *bo = PL111_BO_FROM_GEM(obj); - struct drm_device *dev = obj->dev; - int size, n_pages, nents; - struct scatterlist *s, *sg; - struct sg_table *sgt; - int ret, i; - - DRM_DEBUG_KMS("Mapping dma_buf %p from attach=%p (bo=%p)\n", attach->dmabuf, - attach, bo); - - /* - * Nothing to do, if we are trying to map a dmabuf that has been imported. - * Just return the existing sgt. 
- */ - if (obj->import_attach) { - BUG_ON(!bo->sgt); - return bo->sgt; - } - - size = obj->size; - n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; - - if (bo->type & PL111_BOT_DMA) { - sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); - if (!sgt) { - DRM_ERROR("Failed to allocate sg_table\n"); - return ERR_PTR(-ENOMEM); - } - - ret = sg_alloc_table(sgt, 1, GFP_KERNEL); - if (ret < 0) { - DRM_ERROR("Failed to allocate page table\n"); - return ERR_PTR(-ENOMEM); - } - sg_dma_len(sgt->sgl) = size; - /* We use DMA coherent mappings for PL111_BOT_DMA so we must - * use the virtual address returned at buffer allocation */ - sg_set_buf(sgt->sgl, bo->backing_data.dma.fb_cpu_addr, size); - sg_dma_address(sgt->sgl) = bo->backing_data.dma.fb_dev_addr; - } else { /* PL111_BOT_SHM */ - struct page **pages; - int pg = 0; - - mutex_lock(&dev->struct_mutex); - pages = get_pages(obj); - if (IS_ERR(pages)) { - dev_err(obj->dev->dev, "could not get pages: %ld\n", - PTR_ERR(pages)); - return ERR_CAST(pages); - } - sgt = drm_prime_pages_to_sg(pages, n_pages); - if (sgt == NULL) - return ERR_PTR(-ENOMEM); - - pl111_gem_sync_to_dma(bo); - - /* - * At this point the pages have been dma-mapped by either - * get_pages() for non cached maps or pl111_gem_sync_to_dma() - * for cached. So the physical addresses can be assigned - * to the sg entries. - * drm_prime_pages_to_sg() may have combined contiguous pages - * into chunks so we assign the physical address of the first - * page of a chunk to the chunk and check that the physical - * addresses of the rest of the pages in that chunk are also - * contiguous. 
- */ - sg = sgt->sgl; - nents = sgt->nents; - - for_each_sg(sg, s, nents, i) { - int j, n_pages_in_chunk = sg_dma_len(s) >> PAGE_SHIFT; - - sg_dma_address(s) = bo->backing_data.shm.dma_addrs[pg]; - - for (j = pg+1; j < pg+n_pages_in_chunk; j++) { - BUG_ON(bo->backing_data.shm.dma_addrs[j] != - bo->backing_data.shm.dma_addrs[j-1]+PAGE_SIZE); - } - - pg += n_pages_in_chunk; - } - - mutex_unlock(&dev->struct_mutex); - } - bo->sgt = sgt; - return sgt; -} - -static void pl111_dma_buf_unmap_dma_buf(struct dma_buf_attachment *attach, - struct sg_table *sgt, - enum dma_data_direction direction) -{ - struct drm_gem_object *obj = attach->dmabuf->priv; - struct pl111_gem_bo *bo = PL111_BO_FROM_GEM(obj); - - DRM_DEBUG_KMS("Unmapping dma_buf %p from attach=%p (bo=%p)\n", attach->dmabuf, - attach, bo); - - sg_free_table(sgt); - kfree(sgt); - bo->sgt = NULL; -} - -/* - * There isn't any operation here that can sleep or fail so this callback can - * be used for both kmap and kmap_atomic implementations. - */ -static void *pl111_dma_buf_kmap(struct dma_buf *dma_buf, unsigned long pageno) -{ - struct pl111_gem_bo *bo = dma_buf->priv; - void *vaddr = NULL; - - /* Make sure we cannot access outside the memory range */ - if (((pageno + 1) << PAGE_SHIFT) > bo->gem_object.size) - return NULL; - - if (bo->type & PL111_BOT_DMA) { - vaddr = (bo->backing_data.dma.fb_cpu_addr + - (pageno << PAGE_SHIFT)); - } else { - vaddr = page_address(bo->backing_data.shm.pages[pageno]); - } - - return vaddr; -} - -/* - * Find a scatterlist that starts in "start" and has "len" - * or return a NULL dma_handle. 
- */ -static dma_addr_t pl111_find_matching_sg(struct sg_table *sgt, size_t start, - size_t len) -{ - struct scatterlist *sg; - unsigned int count; - size_t size = 0; - dma_addr_t dma_handle = 0; - - /* Find a scatterlist that starts in "start" and has "len" - * or return error */ - for_each_sg(sgt->sgl, sg, sgt->nents, count) { - if ((size == start) && (len == sg_dma_len(sg))) { - dma_handle = sg_dma_address(sg); - break; - } - size += sg_dma_len(sg); - } - return dma_handle; -} - -static int pl111_dma_buf_begin_cpu(struct dma_buf *dma_buf, - size_t start, size_t len, - enum dma_data_direction dir) -{ - struct pl111_gem_bo *bo = dma_buf->priv; - struct sg_table *sgt = bo->sgt; - dma_addr_t dma_handle; - - if ((start + len) > bo->gem_object.size) - return -EINVAL; - - if (!(bo->type & PL111_BOT_SHM)) { - struct device *dev = bo->gem_object.dev->dev; - - dma_handle = pl111_find_matching_sg(sgt, start, len); - if (!dma_handle) - return -EINVAL; - - dma_sync_single_range_for_cpu(dev, dma_handle, 0, len, dir); - } - /* PL111_BOT_DMA uses coherents mappings, no need to sync */ - return 0; -} - -static void pl111_dma_buf_end_cpu(struct dma_buf *dma_buf, - size_t start, size_t len, - enum dma_data_direction dir) -{ - struct pl111_gem_bo *bo = dma_buf->priv; - struct sg_table *sgt = bo->sgt; - dma_addr_t dma_handle; - - if ((start + len) > bo->gem_object.size) - return; - - if (!(bo->type & PL111_BOT_DMA)) { - struct device *dev = bo->gem_object.dev->dev; - - dma_handle = pl111_find_matching_sg(sgt, start, len); - if (!dma_handle) - return; - - dma_sync_single_range_for_device(dev, dma_handle, 0, len, dir); - } - /* PL111_BOT_DMA uses coherents mappings, no need to sync */ -} - -static struct dma_buf_ops pl111_dma_buf_ops = { - .release = &pl111_dma_buf_release, - .attach = &pl111_dma_buf_attach, - .detach = &pl111_dma_buf_detach, - .map_dma_buf = &pl111_dma_buf_map_dma_buf, - .unmap_dma_buf = &pl111_dma_buf_unmap_dma_buf, - .kmap_atomic = &pl111_dma_buf_kmap, - .kmap = 
&pl111_dma_buf_kmap, - .begin_cpu_access = &pl111_dma_buf_begin_cpu, - .end_cpu_access = &pl111_dma_buf_end_cpu, - .mmap = &pl111_dma_buf_mmap, -}; - -struct drm_gem_object *pl111_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) -{ - struct dma_buf_attachment *attachment; - struct drm_gem_object *obj; - struct pl111_gem_bo *bo; - struct scatterlist *sgl; - struct sg_table *sgt; - dma_addr_t cont_phys; - int ret = 0; - int i; - - DRM_DEBUG_KMS("DRM %s on dev=%p dma_buf=%p\n", __func__, dev, dma_buf); - - /* is this one of own objects? */ - if (dma_buf->ops == &pl111_dma_buf_ops) { - obj = dma_buf->priv; - /* is it from our device? */ - if (obj->dev == dev) { - /* - * Importing dmabuf exported from our own gem increases - * refcount on gem itself instead of f_count of dmabuf. - */ - drm_gem_object_reference(obj); - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) - /* before v3.10.0 we assume the caller has taken a ref on the dma_buf - * we don't want it for self-imported buffers so drop it here */ - dma_buf_put(dma_buf); -#endif - - return obj; - } - } - - attachment = dma_buf_attach(dma_buf, dev->dev); - if (IS_ERR(attachment)) - return ERR_CAST(attachment); - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) - /* from 3.10.0 we assume the caller has not taken a ref so we take one here */ - get_dma_buf(dma_buf); -#endif - - sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL); - if (IS_ERR_OR_NULL(sgt)) { - ret = PTR_ERR(sgt); - goto err_buf_detach; - } - - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (!bo) { - DRM_ERROR("%s: failed to allocate buffer object.\n", __func__); - ret = -ENOMEM; - goto err_unmap_attach; - } - - /* Find out whether the buffer is contiguous or not */ - sgl = sgt->sgl; - cont_phys = sg_phys(sgl); - bo->type |= PL111_BOT_DMA; - for_each_sg(sgt->sgl, sgl, sgt->nents, i) { - dma_addr_t real_phys = sg_phys(sgl); - if (real_phys != cont_phys) { - bo->type &= ~PL111_BOT_DMA; - break; - } - cont_phys += (PAGE_SIZE 
- sgl->offset); - } - -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) - ret = drm_gem_private_object_init(dev, &bo->gem_object, - dma_buf->size); - if (ret != 0) { - DRM_ERROR("DRM could not import DMA GEM obj\n"); - goto err_free_buffer; - } -#else - drm_gem_private_object_init(dev, &bo->gem_object, dma_buf->size); -#endif - - if (bo->type & PL111_BOT_DMA) { - bo->backing_data.dma.fb_cpu_addr = sg_virt(sgt->sgl); - bo->backing_data.dma.fb_dev_addr = sg_phys(sgt->sgl); - DRM_DEBUG_KMS("DRM %s pl111_gem_bo=%p, contiguous import\n", __func__, bo); - } else { /* PL111_BOT_SHM */ - DRM_DEBUG_KMS("DRM %s pl111_gem_bo=%p, non contiguous import\n", __func__, bo); - } - - bo->gem_object.import_attach = attachment; - bo->sgt = sgt; - - return &bo->gem_object; - -err_free_buffer: - kfree(bo); - bo = NULL; -err_unmap_attach: - dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL); -err_buf_detach: - dma_buf_detach(dma_buf, attachment); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) - /* from 3.10.0 we will have taken a ref so drop it here */ - dma_buf_put(dma_buf); -#endif - return ERR_PTR(ret); -} - -struct dma_buf *pl111_gem_prime_export(struct drm_device *dev, - struct drm_gem_object *obj, int flags) -{ - struct dma_buf *new_buf; - struct pl111_gem_bo *bo; - size_t size; - - DRM_DEBUG("DRM %s on dev=%p drm_gem_obj=%p\n", __func__, dev, obj); - size = obj->size; - - new_buf = dma_buf_export(obj /*priv */ , &pl111_dma_buf_ops, size, - flags | O_RDWR); - bo = PL111_BO_FROM_GEM(new_buf->priv); - - /* - * bo->gem_object.export_dma_buf not setup until after gem_prime_export - * finishes - */ - -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - /* - * Ensure that we hold the kds resource if it's the currently - * displayed buffer. 
- */ - obtain_kds_if_currently_displayed(dev, bo, new_buf); -#endif - - DRM_DEBUG("Created dma_buf %p\n", new_buf); - - return new_buf; -} - -int pl111_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, - uint32_t handle, uint32_t flags, int *prime_fd) -{ - int result; - /* - * This will re-use any existing exports, and calls - * driver->gem_prime_export to do the first export when needed - */ - DRM_DEBUG_KMS("DRM %s on file_priv=%p, handle=0x%.8x\n", __func__, - file_priv, handle); - - mutex_lock(&priv.export_dma_buf_lock); - result = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, - prime_fd); - mutex_unlock(&priv.export_dma_buf_lock); - - return result; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_encoder.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_encoder.c deleted file mode 100755 index 78c91c081068f..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_encoder.c +++ /dev/null @@ -1,107 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/** - * pl111_drm_encoder.c - * Implementation of the encoder functions for PL111 DRM - */ -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "pl111_drm.h" - -bool pl111_encoder_helper_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - DRM_DEBUG_KMS("DRM %s on encoder=%p\n", __func__, encoder); - return true; -} - -void pl111_encoder_helper_prepare(struct drm_encoder *encoder) -{ - DRM_DEBUG_KMS("DRM %s on encoder=%p\n", __func__, encoder); -} - -void pl111_encoder_helper_commit(struct drm_encoder *encoder) -{ - DRM_DEBUG_KMS("DRM %s on encoder=%p\n", __func__, encoder); -} - -void pl111_encoder_helper_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - DRM_DEBUG_KMS("DRM %s on encoder=%p\n", __func__, encoder); -} - -void pl111_encoder_helper_disable(struct drm_encoder *encoder) -{ - DRM_DEBUG_KMS("DRM %s on encoder=%p\n", __func__, encoder); -} - -void pl111_encoder_destroy(struct drm_encoder *encoder) -{ - struct pl111_drm_encoder *pl111_encoder = - PL111_ENCODER_FROM_ENCODER(encoder); - - DRM_DEBUG_KMS("DRM %s on encoder=%p\n", __func__, encoder); - - drm_encoder_cleanup(encoder); - kfree(pl111_encoder); -} - -const struct drm_encoder_funcs encoder_funcs = { - .destroy = pl111_encoder_destroy, -}; - -const struct drm_encoder_helper_funcs encoder_helper_funcs = { - .mode_fixup = pl111_encoder_helper_mode_fixup, - .prepare = pl111_encoder_helper_prepare, - .commit = pl111_encoder_helper_commit, - .mode_set = pl111_encoder_helper_mode_set, - .disable = pl111_encoder_helper_disable, -}; - -struct pl111_drm_encoder *pl111_encoder_create(struct drm_device *dev, - int possible_crtcs) -{ - struct pl111_drm_encoder *pl111_encoder; - - pl111_encoder = kzalloc(sizeof(struct pl111_drm_encoder), GFP_KERNEL); - if (pl111_encoder == NULL) { - pr_err("Failed to allocated 
pl111_drm_encoder\n"); - return NULL; - } - - drm_encoder_init(dev, &pl111_encoder->encoder, &encoder_funcs, - DRM_MODE_ENCODER_DAC); - - drm_encoder_helper_add(&pl111_encoder->encoder, &encoder_helper_funcs); - - pl111_encoder->encoder.possible_crtcs = possible_crtcs; - - return pl111_encoder; -} - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_fb.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_fb.c deleted file mode 100755 index f575c9e5f7c1d..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_fb.c +++ /dev/null @@ -1,202 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * pl111_drm_fb.c - * Implementation of the framebuffer functions for PL111 DRM - */ -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include "pl111_drm.h" - -static void pl111_fb_destroy(struct drm_framebuffer *framebuffer) -{ - struct pl111_drm_framebuffer *pl111_fb; -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - struct drm_crtc *crtc; - unsigned long flags; -#endif - DRM_DEBUG_KMS("Destroying framebuffer 0x%p...\n", framebuffer); - - pl111_fb = PL111_FB_FROM_FRAMEBUFFER(framebuffer); - - /* - * Because flips are deferred, wait for all previous flips to complete - */ - wait_event(priv.wait_for_flips, - atomic_read(&priv.nr_flips_in_flight) == 0); -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - /* - * Release KDS resources if it's currently being displayed. 
Only occurs - * when the last framebuffer is destroyed. - */ - list_for_each_entry(crtc, &framebuffer->dev->mode_config.crtc_list, - head) { - struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); - spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags); - if (pl111_crtc->displaying_fb == framebuffer) { - /* Release the current buffers */ - if (pl111_crtc->old_kds_res_set != NULL) { - DRM_DEBUG_KMS("Releasing KDS resources for "); - DRM_DEBUG_KMS("displayed 0x%p\n", framebuffer); - kds_resource_set_release( - &pl111_crtc->old_kds_res_set); - } - pl111_crtc->old_kds_res_set = NULL; - } - spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, - flags); - } -#endif - drm_framebuffer_cleanup(framebuffer); - - if ((pl111_fb->bo != NULL) && (&pl111_fb->bo->gem_object != NULL)) - drm_gem_object_unreference_unlocked(&pl111_fb->bo->gem_object); - - kfree(pl111_fb); - - DRM_DEBUG_KMS("Destroyed framebuffer 0x%p\n", framebuffer); -} - -static int pl111_fb_create_handle(struct drm_framebuffer *fb, - struct drm_file *file_priv, - unsigned int *handle) -{ - struct pl111_gem_bo *bo = PL111_BO_FROM_FRAMEBUFFER(fb); - DRM_DEBUG_KMS("DRM %s on fb=%p\n", __func__, fb); - - if (bo == NULL) - return -EINVAL; - - return drm_gem_handle_create(file_priv, &bo->gem_object, handle); -} - -const struct drm_framebuffer_funcs fb_funcs = { - .destroy = pl111_fb_destroy, - .create_handle = pl111_fb_create_handle, -}; - -struct drm_framebuffer *pl111_fb_create(struct drm_device *dev, - struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd) -{ - struct pl111_drm_framebuffer *pl111_fb = NULL; - struct drm_framebuffer *fb = NULL; - struct drm_gem_object *gem_obj; - struct pl111_gem_bo *bo; - int err = 0; - size_t min_size; - int bpp; - int depth; - - pr_info("DRM %s\n", __func__); - gem_obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); - if (gem_obj == NULL) { - DRM_ERROR("Could not get gem obj from handle to create fb\n"); - err = -ENOENT; - goto error; - 
} - - bo = PL111_BO_FROM_GEM(gem_obj); - drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp); - - if (mode_cmd->pitches[0] < mode_cmd->width * (bpp >> 3)) { - DRM_ERROR("bad pitch %u for plane 0\n", mode_cmd->pitches[0]); - err = -EINVAL; - goto error; - } - - min_size = (mode_cmd->height - 1) * mode_cmd->pitches[0] - + mode_cmd->width * (bpp >> 3); - - if (bo->gem_object.size < min_size) { - DRM_ERROR("gem obj size < min size\n"); - err = -EINVAL; - goto error; - } - - /* We can't scan out SHM so we can't create an fb for it */ - if (!(bo->type & PL111_BOT_DMA)) { - DRM_ERROR("Can't create FB for non-scanout buffer\n"); - err = -EINVAL; - goto error; - } - - switch ((char)(mode_cmd->pixel_format & 0xFF)) { - case 'Y': - case 'U': - case 'V': - case 'N': - case 'T': - DRM_ERROR("YUV formats not supported\n"); - err = -EINVAL; - goto error; - } - - pl111_fb = kzalloc(sizeof(struct pl111_drm_framebuffer), GFP_KERNEL); - if (pl111_fb == NULL) { - DRM_ERROR("Could not allocate pl111_drm_framebuffer\n"); - err = -ENOMEM; - goto error; - } - fb = &pl111_fb->fb; - - err = drm_framebuffer_init(dev, fb, &fb_funcs); - if (err) { - DRM_ERROR("drm_framebuffer_init failed\n"); - kfree(fb); - fb = NULL; - goto error; - } - - drm_helper_mode_fill_fb_struct(fb, mode_cmd); - - /* The only framebuffer formats supported by pl111 - * are 16 bpp or 32 bpp with 24 bit depth. 
- * See clcd_enable() - */ - if (!((fb->bits_per_pixel == 16) || - (fb->bits_per_pixel == 32 && fb->depth == 24))) { - DRM_DEBUG_KMS("unsupported pixel format bpp=%d, depth=%d\n", fb->bits_per_pixel, fb->depth); - drm_framebuffer_cleanup(fb); - kfree(fb); - fb = NULL; - err = -EINVAL; - goto error; - } - - pl111_fb->bo = bo; - - DRM_DEBUG_KMS("Created fb 0x%p with gem_obj 0x%p physaddr=0x%.8x\n", - fb, gem_obj, bo->backing_data.dma.fb_dev_addr); - - return fb; - -error: - if (gem_obj != NULL) - drm_gem_object_unreference_unlocked(gem_obj); - - return ERR_PTR(err); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_funcs.h b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_funcs.h deleted file mode 100755 index 494baa0d057d4..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_funcs.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/** - * pl111_drm_funcs.h - * Function prototypes for PL111 DRM - */ - -#ifndef PL111_DRM_FUNCS_H_ -#define PL111_DRM_FUNCS_H_ - -/* Platform Initialisation */ -int pl111_drm_init(struct platform_device *dev); -void pl111_drm_exit(struct platform_device *dev); - -/* KDS Callbacks */ -void show_framebuffer_on_crtc_cb(void *cb1, void *cb2); -void release_kds_resource_and_display(struct pl111_drm_flip_resource *flip_res); - -/* CRTC Functions */ -struct pl111_drm_crtc *pl111_crtc_create(struct drm_device *dev); -struct pl111_drm_crtc *pl111_crtc_dummy_create(struct drm_device *dev); -void pl111_crtc_destroy(struct drm_crtc *crtc); - -bool pl111_crtc_is_fb_currently_displayed(struct drm_device *dev, - struct drm_framebuffer *fb); - -int show_framebuffer_on_crtc(struct drm_crtc *crtc, - struct drm_framebuffer *fb, bool page_flip, - struct drm_pending_vblank_event *event); - -/* Common IRQ handler */ -void pl111_common_irq(struct pl111_drm_crtc *pl111_crtc); - -int pl111_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file_priv, - uint32_t handle, - uint32_t width, - uint32_t height); -int pl111_crtc_cursor_move(struct drm_crtc *crtc, - int x, int y); - -/* Connector Functions */ -struct pl111_drm_connector *pl111_connector_create(struct drm_device *dev); -void pl111_connector_destroy(struct drm_connector *connector); -struct pl111_drm_connector *pl111_connector_dummy_create(struct drm_device - *dev); - -/* Encoder Functions */ -struct pl111_drm_encoder *pl111_encoder_create(struct drm_device *dev, - int possible_crtcs); -struct pl111_drm_encoder *pl111_encoder_dummy_create(struct drm_device *dev, - int possible_crtcs); -void pl111_encoder_destroy(struct drm_encoder *encoder); - -/* Frame Buffer Functions */ -struct drm_framebuffer *pl111_fb_create(struct drm_device *dev, - struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd); - -/* VMA Functions */ -int pl111_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); -int 
pl111_gem_mmap(struct file *file_priv, struct vm_area_struct *vma); -struct page **get_pages(struct drm_gem_object *obj); -void put_pages(struct drm_gem_object *obj, struct page **pages); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) -void pl111_drm_vm_open_locked(struct drm_device *dev, - struct vm_area_struct *vma); -void pl111_gem_vm_open(struct vm_area_struct *vma); -void pl111_gem_vm_close(struct vm_area_struct *vma); -#endif - -/* Suspend Functions */ -int pl111_drm_resume(struct drm_device *dev); -int pl111_drm_suspend(struct drm_device *dev, pm_message_t state); - -/* GEM Functions */ -int pl111_dumb_create(struct drm_file *file_priv, - struct drm_device *dev, - struct drm_mode_create_dumb *args); -int pl111_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle); -int pl111_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle, - uint64_t *offset); -void pl111_gem_free_object(struct drm_gem_object *obj); - -int pl111_bo_mmap(struct drm_gem_object *obj, struct pl111_gem_bo *bo, - struct vm_area_struct *vma, size_t size); -void pl111_gem_sync_to_cpu(struct pl111_gem_bo *bo, int pgoff); -void pl111_gem_sync_to_dma(struct pl111_gem_bo *bo); - -/* DMA BUF Functions */ -struct drm_gem_object *pl111_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf); -int pl111_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, - uint32_t handle, uint32_t flags, int *prime_fd); -struct dma_buf *pl111_gem_prime_export(struct drm_device *dev, - struct drm_gem_object *obj, int flags); - -/* Pl111 Functions */ -void show_framebuffer_on_crtc_cb_internal(struct pl111_drm_flip_resource - *flip_res, struct drm_framebuffer *fb); -int clcd_disable(struct drm_crtc *crtc); -void do_flip_to_res(struct pl111_drm_flip_resource *flip_res); -int pl111_amba_probe(struct amba_device *dev, const struct amba_id *id); -int pl111_amba_remove(struct amba_device *dev); - -int pl111_device_init(struct 
drm_device *dev); -void pl111_device_fini(struct drm_device *dev); - -void pl111_convert_drm_mode_to_timing(struct drm_display_mode *mode, - struct clcd_regs *timing); -void pl111_convert_timing_to_drm_mode(struct clcd_regs *timing, - struct drm_display_mode *mode); -#endif /* PL111_DRM_FUNCS_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_gem.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_gem.c deleted file mode 100755 index 13fb256056935..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_gem.c +++ /dev/null @@ -1,476 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/** - * pl111_drm_gem.c - * Implementation of the GEM functions for PL111 DRM - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "pl111_drm.h" - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) -#include -#endif - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) -#include -#endif - -void pl111_gem_free_object(struct drm_gem_object *obj) -{ - struct pl111_gem_bo *bo; - struct drm_device *dev = obj->dev; - DRM_DEBUG_KMS("DRM %s on drm_gem_object=%p\n", __func__, obj); - - bo = PL111_BO_FROM_GEM(obj); - - if (obj->import_attach) - drm_prime_gem_destroy(obj, bo->sgt); - -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) - if (obj->map_list.map != NULL) - drm_gem_free_mmap_offset(obj); -#else - drm_gem_free_mmap_offset(obj); -#endif - /* - * Only free the backing memory if the object has not been imported. - * If it has been imported, the exporter is in charge to free that - * once dmabuf's refcount becomes 0. - */ - if (obj->import_attach) - goto imported_out; - - if (bo->type & PL111_BOT_DMA) { - dma_free_writecombine(dev->dev, obj->size, - bo->backing_data.dma.fb_cpu_addr, - bo->backing_data.dma.fb_dev_addr); - } else if (bo->backing_data.shm.pages != NULL) { - put_pages(obj, bo->backing_data.shm.pages); - } - -imported_out: - drm_gem_object_release(obj); - - kfree(bo); - - DRM_DEBUG_KMS("Destroyed dumb_bo handle 0x%p\n", bo); -} - -static int pl111_gem_object_create(struct drm_device *dev, u64 size, - u32 flags, struct drm_file *file_priv, - u32 *handle) -{ - int ret = 0; - struct pl111_gem_bo *bo = NULL; - - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (bo == NULL) { - ret = -ENOMEM; - goto finish; - } - - bo->type = flags; - -#ifndef ARCH_HAS_SG_CHAIN - /* - * If the ARCH can't chain we can't have non-contiguous allocs larger - * than a single sg can hold. 
- * In this case we fall back to using contiguous memory - */ - if (!(bo->type & PL111_BOT_DMA)) { - long unsigned int n_pages = - PAGE_ALIGN(size) >> PAGE_SHIFT; - if (n_pages > SG_MAX_SINGLE_ALLOC) { - bo->type |= PL111_BOT_DMA; - /* - * Non-contiguous allocation request changed to - * contigous - */ - DRM_INFO("non-contig alloc to contig %lu > %lu pages.", - n_pages, SG_MAX_SINGLE_ALLOC); - } - } -#endif - if (bo->type & PL111_BOT_DMA) { - /* scanout compatible - use physically contiguous buffer */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - bo->backing_data.dma.fb_cpu_addr = - dma_alloc_attrs(dev->dev, size, - &bo->backing_data.dma.fb_dev_addr, - GFP_KERNEL, - &attrs); - if (bo->backing_data.dma.fb_cpu_addr == NULL) { - DRM_ERROR("dma_alloc_attrs failed\n"); - ret = -ENOMEM; - goto free_bo; - } -#else - bo->backing_data.dma.fb_cpu_addr = - dma_alloc_writecombine(dev->dev, size, - &bo->backing_data.dma.fb_dev_addr, - GFP_KERNEL); - if (bo->backing_data.dma.fb_cpu_addr == NULL) { - DRM_ERROR("dma_alloc_writecombine failed\n"); - ret = -ENOMEM; - goto free_bo; - } -#endif - -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) - ret = drm_gem_private_object_init(dev, &bo->gem_object, - size); - if (ret != 0) { - DRM_ERROR("DRM could not initialise GEM object\n"); - goto free_dma; - } -#else - drm_gem_private_object_init(dev, &bo->gem_object, size); -#endif - - } else { /* PL111_BOT_SHM */ - /* not scanout compatible - use SHM backed object */ - ret = drm_gem_object_init(dev, &bo->gem_object, size); - if (ret != 0) { - DRM_ERROR("DRM could not init SHM backed GEM obj\n"); - ret = -ENOMEM; - goto free_bo; - } - DRM_DEBUG_KMS("Num bytes: %d\n", bo->gem_object.size); - } - - DRM_DEBUG("s=%llu, flags=0x%x, %s 0x%.8lx, type=%d\n", - size, flags, - (bo->type & PL111_BOT_DMA) ? "physaddr" : "shared page array", - (bo->type & PL111_BOT_DMA) ? 
- (unsigned long)bo->backing_data.dma.fb_dev_addr: - (unsigned long)bo->backing_data.shm.pages, - bo->type); - - ret = drm_gem_handle_create(file_priv, &bo->gem_object, handle); - if (ret != 0) { - DRM_ERROR("DRM failed to create GEM handle\n"); - goto obj_release; - } - - /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(&bo->gem_object); - - return 0; - -obj_release: - drm_gem_object_release(&bo->gem_object); -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) -free_dma: -#endif - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) - if (bo->type & PL111_BOT_DMA) { - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - dma_free_attrs(dev->dev, size, - bo->backing_data.dma.fb_cpu_addr, - bo->backing_data.dma.fb_dev_addr, - &attrs); - } -#else - if (bo->type & PL111_BOT_DMA) - dma_free_writecombine(dev->dev, size, - bo->backing_data.dma.fb_cpu_addr, - bo->backing_data.dma.fb_dev_addr); -#endif -free_bo: - kfree(bo); -finish: - return ret; -} - -int pl111_drm_gem_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_pl111_gem_create *args = data; - uint32_t bytes_pp; - - /* Round bpp up, to allow for case where bpp<8 */ - bytes_pp = args->bpp >> 3; - if (args->bpp & ((1 << 3) - 1)) - bytes_pp++; - - if (args->flags & ~PL111_BOT_MASK) { - DRM_ERROR("wrong flags: 0x%x\n", args->flags); - return -EINVAL; - } - - args->pitch = ALIGN(args->width * bytes_pp, 64); - args->size = PAGE_ALIGN(args->pitch * args->height); - - DRM_DEBUG_KMS("gem_create w=%d h=%d p=%d bpp=%d b=%d s=%llu f=0x%x\n", - args->width, args->height, args->pitch, args->bpp, - bytes_pp, args->size, args->flags); - - return pl111_gem_object_create(dev, args->size, args->flags, file_priv, - &args->handle); -} - -int pl111_dumb_create(struct drm_file *file_priv, - struct drm_device *dev, struct drm_mode_create_dumb *args) -{ - uint32_t bytes_pp; - - /* Round bpp up, to allow for case where bpp<8 
*/ - bytes_pp = args->bpp >> 3; - if (args->bpp & ((1 << 3) - 1)) - bytes_pp++; - - if (args->flags) { - DRM_ERROR("flags must be zero: 0x%x\n", args->flags); - return -EINVAL; - } - - args->pitch = ALIGN(args->width * bytes_pp, 64); - args->size = PAGE_ALIGN(args->pitch * args->height); - - DRM_DEBUG_KMS("dumb_create w=%d h=%d p=%d bpp=%d b=%d s=%llu f=0x%x\n", - args->width, args->height, args->pitch, args->bpp, - bytes_pp, args->size, args->flags); - - return pl111_gem_object_create(dev, args->size, - PL111_BOT_DMA | PL111_BOT_UNCACHED, - file_priv, &args->handle); -} - -int pl111_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, - uint32_t handle) -{ - DRM_DEBUG_KMS("DRM %s on file_priv=%p handle=0x%.8x\n", __func__, - file_priv, handle); - return drm_gem_handle_delete(file_priv, handle); -} - -int pl111_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle, - uint64_t *offset) -{ - struct drm_gem_object *obj; - int ret = 0; - DRM_DEBUG_KMS("DRM %s on file_priv=%p handle=0x%.8x\n", __func__, - file_priv, handle); - - /* GEM does all our handle to object mapping */ - obj = drm_gem_object_lookup(dev, file_priv, handle); - if (obj == NULL) { - ret = -ENOENT; - goto fail; - } - -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) - if (obj->map_list.map == NULL) { - ret = drm_gem_create_mmap_offset(obj); - if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); - goto fail; - } - } -#else - ret = drm_gem_create_mmap_offset(obj); - if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); - goto fail; - } -#endif - -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 11, 0)) - *offset = (uint64_t) obj->map_list.hash.key << PAGE_SHIFT; -#else - *offset = drm_vma_node_offset_addr(&obj->vma_node); - DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); -#endif - - drm_gem_object_unreference_unlocked(obj); -fail: - return ret; -} - -/* sync the buffer for DMA access */ -void pl111_gem_sync_to_dma(struct pl111_gem_bo 
*bo) -{ - struct drm_device *dev = bo->gem_object.dev; - - if (!(bo->type & PL111_BOT_DMA) && (bo->type & PL111_BOT_CACHED)) { - int i, npages = bo->gem_object.size >> PAGE_SHIFT; - struct page **pages = bo->backing_data.shm.pages; - bool dirty = false; - - for (i = 0; i < npages; i++) { - if (!bo->backing_data.shm.dma_addrs[i]) { - DRM_DEBUG("%s: dma map page=%d bo=%p\n", __func__, i, bo); - bo->backing_data.shm.dma_addrs[i] = - dma_map_page(dev->dev, pages[i], 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); - dirty = true; - } - } - - if (dirty) { - DRM_DEBUG("%s: zap ptes (and flush cache) bo=%p\n", __func__, bo); - /* - * TODO MIDEGL-1813 - * - * Use flush_cache_page() and outer_flush_range() to - * flush only the user space mappings of the dirty pages - */ - flush_cache_all(); - outer_flush_all(); - unmap_mapping_range(bo->gem_object.filp->f_mapping, 0, - bo->gem_object.size, 1); - } - } -} - -void pl111_gem_sync_to_cpu(struct pl111_gem_bo *bo, int pgoff) -{ - struct drm_device *dev = bo->gem_object.dev; - - /* - * TODO MIDEGL-1808 - * - * The following check was meant to detect if the CPU is trying to access - * a buffer that is currently mapped for DMA accesses, which is illegal - * as described by the DMA-API. - * - * However, some of our tests are trying to do that, which triggers the message - * below and avoids dma-unmapping the pages not to annoy the DMA device but that - * leads to the test failing because of caches not being properly flushed. 
- */ - - /* - if (bo->sgt) { - DRM_ERROR("%s: the CPU is trying to access a dma-mapped buffer\n", __func__); - return; - } - */ - - if (!(bo->type & PL111_BOT_DMA) && (bo->type & PL111_BOT_CACHED) && - bo->backing_data.shm.dma_addrs[pgoff]) { - DRM_DEBUG("%s: unmap bo=%p (s=%d), paddr=%08x\n", - __func__, bo, bo->gem_object.size, - bo->backing_data.shm.dma_addrs[pgoff]); - dma_unmap_page(dev->dev, bo->backing_data.shm.dma_addrs[pgoff], - PAGE_SIZE, DMA_BIDIRECTIONAL); - bo->backing_data.shm.dma_addrs[pgoff] = 0; - } -} - -/* Based on omapdrm driver */ -int pl111_bo_mmap(struct drm_gem_object *obj, struct pl111_gem_bo *bo, - struct vm_area_struct *vma, size_t size) -{ - DRM_DEBUG("DRM %s on drm_gem_object=%p, pl111_gem_bo=%p type=%08x\n", - __func__, obj, bo, bo->type); - - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags |= VM_MIXEDMAP; - - if (bo->type & PL111_BOT_WC) { - vma->vm_page_prot = - pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); - } else if (bo->type & PL111_BOT_CACHED) { - /* - * Objects that do not have a filp (DMA backed) can't be - * mapped as cached now. Write-combine should be enough. - */ - if (WARN_ON(!obj->filp)) - return -EINVAL; - - /* - * As explained in Documentation/dma-buf-sharing.txt - * we need this trick so that we can manually zap ptes - * in order to fake coherency. 
- */ - fput(vma->vm_file); - get_file(obj->filp); - vma->vm_file = obj->filp; - - vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - } else { /* PL111_BOT_UNCACHED */ - vma->vm_page_prot = - pgprot_noncached(vm_get_page_prot(vma->vm_flags)); - } - return 0; -} - -int pl111_gem_mmap(struct file *file_priv, struct vm_area_struct *vma) -{ - int ret; - struct drm_file *priv = file_priv->private_data; - struct drm_device *dev = priv->minor->dev; -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 13, 0)) - struct drm_gem_mm *mm = dev->mm_private; - struct drm_hash_item *hash; - struct drm_local_map *map = NULL; -#else - struct drm_vma_offset_node *node; -#endif - struct drm_gem_object *obj; - struct pl111_gem_bo *bo; - - DRM_DEBUG_KMS("DRM %s\n", __func__); - -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 13, 0)) - drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash); - map = drm_hash_entry(hash, struct drm_map_list, hash)->map; - obj = map->handle; -#else - node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, - vma->vm_pgoff, - vma_pages(vma)); - obj = container_of(node, struct drm_gem_object, vma_node); -#endif - bo = PL111_BO_FROM_GEM(obj); - - DRM_DEBUG_KMS("DRM %s on pl111_gem_bo %p bo->type 0x%08x\n", __func__, bo, bo->type); - - /* for an imported buffer we let the exporter handle the mmap */ - if (obj->import_attach) - return dma_buf_mmap(obj->import_attach->dmabuf, vma, 0); - - ret = drm_gem_mmap(file_priv, vma); - if (ret < 0) { - DRM_ERROR("failed to mmap\n"); - return ret; - } - - /* Our page fault handler uses the page offset calculated from the vma, - * so we need to remove the gem cookie offset specified in the call. 
- */ - vma->vm_pgoff = 0; - - return pl111_bo_mmap(obj, bo, vma, vma->vm_end - vma->vm_start); -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_pl111.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_pl111.c deleted file mode 100755 index 1d613d0592a33..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_pl111.c +++ /dev/null @@ -1,417 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * pl111_drm_pl111.c - * PL111 specific functions for PL111 DRM - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pl111_drm.h" - -/* This can't be called from IRQ context, due to clk_get() and board->enable */ -static int clcd_enable(struct drm_framebuffer *fb) -{ - __u32 cntl; - struct clcd_board *board; - - pr_info("DRM %s\n", __func__); - - clk_prepare_enable(priv.clk); - - /* Enable and Power Up */ - cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDPWR | CNTL_LCDVCOMP(1); - DRM_DEBUG_KMS("fb->bits_per_pixel = %d\n", fb->bits_per_pixel); - if (fb->bits_per_pixel == 16) - cntl |= CNTL_LCDBPP16_565; - else if (fb->bits_per_pixel == 32 && fb->depth == 24) - cntl |= CNTL_LCDBPP24; - else - BUG_ON(1); - - cntl |= CNTL_BGR; - - writel(cntl, priv.regs + CLCD_PL111_CNTL); - - if (priv.amba_dev->dev.platform_data) { - board = priv.amba_dev->dev.platform_data; - - if (board->enable) - board->enable(NULL); - } - - /* Enable Interrupts */ - writel(CLCD_IRQ_NEXTBASE_UPDATE, priv.regs + 
CLCD_PL111_IENB); - - return 0; -} - -int clcd_disable(struct drm_crtc *crtc) -{ - struct clcd_board *board; - struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc); -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - unsigned long flags; -#endif - - pr_info("DRM %s\n", __func__); - - /* Disable Interrupts */ - writel(0x00000000, priv.regs + CLCD_PL111_IENB); - - if (priv.amba_dev->dev.platform_data) { - board = priv.amba_dev->dev.platform_data; - - if (board->disable) - board->disable(NULL); - } - - /* Disable and Power Down */ - writel(0, priv.regs + CLCD_PL111_CNTL); - - /* Disable clock */ - clk_disable_unprepare(priv.clk); - - pl111_crtc->last_bpp = 0; -#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS - spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags); - /* Release the previous buffers */ - if (pl111_crtc->old_kds_res_set != NULL) - kds_resource_set_release(&pl111_crtc->old_kds_res_set); - - pl111_crtc->old_kds_res_set = NULL; - spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, flags); -#endif - return 0; -} - -/* - * To avoid a possible race where "pl111_crtc->current_update_res" has - * been updated (non NULL) but the corresponding scanout buffer has not been - * written to the base registers we must always call this function holding - * the "base_update_lock" spinlock with IRQs disabled (spin_lock_irqsave()). - */ -void do_flip_to_res(struct pl111_drm_flip_resource *flip_res) -{ - struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(flip_res->crtc); - struct drm_framebuffer *fb; - struct pl111_gem_bo *bo; - size_t min_size; - fb = flip_res->fb; - bo = PL111_BO_FROM_FRAMEBUFFER(fb); - - - - min_size = (fb->height - 1) * fb->pitches[0] - + fb->width * (fb->bits_per_pixel >> 3); - - BUG_ON(bo->gem_object.size < min_size); - - /* Don't even attempt PL111_BOT_SHM, it's not contiguous */ - BUG_ON(bo->type != PL111_BOT_DMA); - - /* - * Note the buffer for releasing after IRQ, and don't allow any more - * updates until then. 
- * - * This clcd controller latches the new address on next vsync. Address - * latching is indicated by CLCD_IRQ_NEXTBASE_UPDATE, and so we must - * wait for that before releasing the previous buffer's kds - * resources. Otherwise, we'll allow writers to write to the old buffer - * whilst it is still being displayed - */ - pl111_crtc->current_update_res = flip_res; - - DRM_DEBUG_KMS("Displaying fb 0x%p, dumb_bo 0x%p, physaddr %.8x\n", - fb, bo, bo->backing_data.dma.fb_dev_addr); - - if (drm_vblank_get(pl111_crtc->crtc.dev, pl111_crtc->crtc_index) < 0) - DRM_ERROR("Could not get vblank reference for crtc %d\n", - pl111_crtc->crtc_index); - - /* Set the scanout buffer */ - writel(bo->backing_data.dma.fb_dev_addr, priv.regs + CLCD_UBAS); - writel(bo->backing_data.dma.fb_dev_addr + - ((fb->height - 1) * fb->pitches[0]), priv.regs + CLCD_LBAS); -} - -void -show_framebuffer_on_crtc_cb_internal(struct pl111_drm_flip_resource *flip_res, - struct drm_framebuffer *fb) -{ - unsigned long irq_flags; - struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(flip_res->crtc); - - spin_lock_irqsave(&pl111_crtc->base_update_lock, irq_flags); - if (list_empty(&pl111_crtc->update_queue) && - !pl111_crtc->current_update_res) { - do_flip_to_res(flip_res); - } else { - /* - * Enqueue the update to occur on a future IRQ - * This only happens on triple-or-greater buffering - */ - DRM_DEBUG_KMS("Deferring 3+ buffered flip to fb %p to IRQ\n", - fb); - list_add_tail(&flip_res->link, &pl111_crtc->update_queue); - } - spin_unlock_irqrestore(&pl111_crtc->base_update_lock, irq_flags); - - if (!flip_res->page_flip && (pl111_crtc->last_bpp == 0 || - pl111_crtc->last_bpp != fb->bits_per_pixel || - !drm_mode_equal(pl111_crtc->new_mode, - pl111_crtc->current_mode))) { - struct clcd_regs timing; - - pl111_convert_drm_mode_to_timing(pl111_crtc->new_mode, &timing); - - DRM_DEBUG_KMS("Set timing: %08X:%08X:%08X:%08X clk=%ldHz\n", - timing.tim0, timing.tim1, timing.tim2, - timing.tim3, timing.pixclock); - - /* 
This is the actual mode setting part */ - clk_set_rate(priv.clk, timing.pixclock); - - writel(timing.tim0, priv.regs + CLCD_TIM0); - writel(timing.tim1, priv.regs + CLCD_TIM1); - writel(timing.tim2, priv.regs + CLCD_TIM2); - writel(timing.tim3, priv.regs + CLCD_TIM3); - - clcd_enable(fb); - pl111_crtc->last_bpp = fb->bits_per_pixel; - } - - if (!flip_res->page_flip) { - drm_mode_destroy(flip_res->crtc->dev, pl111_crtc->current_mode); - pl111_crtc->current_mode = pl111_crtc->new_mode; - pl111_crtc->new_mode = NULL; - } - - BUG_ON(!pl111_crtc->current_mode); - - /* - * If IRQs weren't enabled before, they are now. This will eventually - * cause flip_res to be released via pl111_common_irq, which updates - * every time the Base Address is latched (i.e. every frame, regardless - * of whether we update the base address or not) - */ -} - -irqreturn_t pl111_irq(int irq, void *data) -{ - u32 irq_stat; - struct pl111_drm_crtc *pl111_crtc = priv.pl111_crtc; - - irq_stat = readl(priv.regs + CLCD_PL111_MIS); - - if (!irq_stat) - return IRQ_NONE; - - if (irq_stat & CLCD_IRQ_NEXTBASE_UPDATE) - pl111_common_irq(pl111_crtc); - - /* Clear the interrupt once done */ - writel(irq_stat, priv.regs + CLCD_PL111_ICR); - - return IRQ_HANDLED; -} - -int pl111_device_init(struct drm_device *dev) -{ - struct pl111_drm_dev_private *priv = dev->dev_private; - int ret; - - if (priv == NULL) { - pr_err("%s no private data\n", __func__); - return -EINVAL; - } - - if (priv->amba_dev == NULL) { - pr_err("%s no amba device found\n", __func__); - return -EINVAL; - } - - /* set up MMIO for register access */ - priv->mmio_start = priv->amba_dev->res.start; - priv->mmio_len = resource_size(&priv->amba_dev->res); - - DRM_DEBUG_KMS("mmio_start=%lu, mmio_len=%u\n", priv->mmio_start, - priv->mmio_len); - - priv->regs = ioremap(priv->mmio_start, priv->mmio_len); - if (priv->regs == NULL) { - pr_err("%s failed mmio\n", __func__); - return -EINVAL; - } - - /* turn off interrupts */ - writel(0, priv->regs + 
CLCD_PL111_IENB); - - ret = request_irq(priv->amba_dev->irq[0], pl111_irq, 0, - "pl111_irq_handler", NULL); - if (ret != 0) { - pr_err("%s failed %d\n", __func__, ret); - goto out_mmio; - } - - goto finish; - -out_mmio: - iounmap(priv->regs); -finish: - DRM_DEBUG_KMS("pl111_device_init returned %d\n", ret); - return ret; -} - -void pl111_device_fini(struct drm_device *dev) -{ - struct pl111_drm_dev_private *priv = dev->dev_private; - u32 cntl; - - if (priv == NULL || priv->regs == NULL) - return; - - free_irq(priv->amba_dev->irq[0], NULL); - - cntl = readl(priv->regs + CLCD_PL111_CNTL); - - cntl &= ~CNTL_LCDEN; - writel(cntl, priv->regs + CLCD_PL111_CNTL); - - cntl &= ~CNTL_LCDPWR; - writel(cntl, priv->regs + CLCD_PL111_CNTL); - - iounmap(priv->regs); -} - -int pl111_amba_probe(struct amba_device *dev, const struct amba_id *id) -{ - struct clcd_board *board = dev->dev.platform_data; - int ret; - pr_info("DRM %s\n", __func__); - - if (!board) - dev_warn(&dev->dev, "board data not available\n"); - - ret = amba_request_regions(dev, NULL); - if (ret != 0) { - DRM_ERROR("CLCD: unable to reserve regs region\n"); - goto out; - } - - priv.amba_dev = dev; - - priv.clk = clk_get(&priv.amba_dev->dev, NULL); - if (IS_ERR(priv.clk)) { - DRM_ERROR("CLCD: unable to get clk.\n"); - ret = PTR_ERR(priv.clk); - goto clk_err; - } - - return 0; - -clk_err: - amba_release_regions(dev); -out: - return ret; -} - -int pl111_amba_remove(struct amba_device *dev) -{ - DRM_DEBUG_KMS("DRM %s\n", __func__); - - clk_put(priv.clk); - - amba_release_regions(dev); - - priv.amba_dev = NULL; - - return 0; -} - -void pl111_convert_drm_mode_to_timing(struct drm_display_mode *mode, - struct clcd_regs *timing) -{ - unsigned int ppl, hsw, hfp, hbp; - unsigned int lpp, vsw, vfp, vbp; - unsigned int cpl; - - memset(timing, 0, sizeof(struct clcd_regs)); - - ppl = (mode->hdisplay / 16) - 1; - hsw = mode->hsync_end - mode->hsync_start - 1; - hfp = mode->hsync_start - mode->hdisplay - 1; - hbp = mode->htotal - 
mode->hsync_end - 1; - - lpp = mode->vdisplay - 1; - vsw = mode->vsync_end - mode->vsync_start - 1; - vfp = mode->vsync_start - mode->vdisplay; - vbp = mode->vtotal - mode->vsync_end; - - cpl = mode->hdisplay - 1; - - timing->tim0 = (ppl << 2) | (hsw << 8) | (hfp << 16) | (hbp << 24); - timing->tim1 = lpp | (vsw << 10) | (vfp << 16) | (vbp << 24); - timing->tim2 = TIM2_IVS | TIM2_IHS | TIM2_IPC | TIM2_BCD | (cpl << 16); - timing->tim3 = 0; - - timing->pixclock = mode->clock * 1000; -} - -void pl111_convert_timing_to_drm_mode(struct clcd_regs *timing, - struct drm_display_mode *mode) -{ - unsigned int ppl, hsw, hfp, hbp; - unsigned int lpp, vsw, vfp, vbp; - - ppl = (timing->tim0 >> 2) & 0x3f; - hsw = (timing->tim0 >> 8) & 0xff; - hfp = (timing->tim0 >> 16) & 0xff; - hbp = (timing->tim0 >> 24) & 0xff; - - lpp = timing->tim1 & 0x3ff; - vsw = (timing->tim1 >> 10) & 0x3f; - vfp = (timing->tim1 >> 16) & 0xff; - vbp = (timing->tim1 >> 24) & 0xff; - - mode->hdisplay = (ppl + 1) * 16; - mode->hsync_start = ((ppl + 1) * 16) + hfp + 1; - mode->hsync_end = ((ppl + 1) * 16) + hfp + hsw + 2; - mode->htotal = ((ppl + 1) * 16) + hfp + hsw + hbp + 3; - mode->hskew = 0; - - mode->vdisplay = lpp + 1; - mode->vsync_start = lpp + vfp + 1; - mode->vsync_end = lpp + vfp + vsw + 2; - mode->vtotal = lpp + vfp + vsw + vbp + 2; - - mode->flags = 0; - - mode->width_mm = 0; - mode->height_mm = 0; - - mode->clock = timing->pixclock / 1000; - mode->hsync = timing->pixclock / mode->htotal; - mode->vrefresh = mode->hsync / mode->vtotal; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_platform.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_platform.c deleted file mode 100755 index 9d5ec0cb1751c..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_platform.c +++ /dev/null @@ -1,151 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. 
- * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * pl111_drm_platform.c - * Implementation of the Linux platform device entrypoints for PL111 DRM - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "pl111_drm.h" - -static int pl111_platform_drm_suspend(struct platform_device *dev, - pm_message_t state) -{ - pr_info("DRM %s\n", __func__); - return 0; -} - -static int pl111_platform_drm_resume(struct platform_device *dev) -{ - pr_info("DRM %s\n", __func__); - return 0; -} - -int pl111_platform_drm_probe(struct platform_device *dev) -{ - pr_info("DRM %s\n", __func__); - return pl111_drm_init(dev); -} - -static int pl111_platform_drm_remove(struct platform_device *dev) -{ - pr_info("DRM %s\n", __func__); - pl111_drm_exit(dev); - - return 0; -} - -static struct amba_id pl111_id_table[] = { - { - .id = 0x00041110, - .mask = 0x000ffffe, - }, - {0, 0}, -}; - -static struct amba_driver pl111_amba_driver = { - .drv = { - .name = "clcd-pl11x", - }, - .probe = pl111_amba_probe, - .remove = pl111_amba_remove, - .id_table = pl111_id_table, -}; - -static struct platform_driver platform_drm_driver = { - .probe = pl111_platform_drm_probe, - .remove = pl111_platform_drm_remove, - .suspend = pl111_platform_drm_suspend, - .resume = pl111_platform_drm_resume, - .driver = { - .owner = THIS_MODULE, - .name = DRIVER_NAME, - }, -}; - -static const struct platform_device_info pl111_drm_pdevinfo = { - .name = DRIVER_NAME, - .id = -1, - .dma_mask = ~0UL -}; - -static struct platform_device *pl111_drm_device; - 
-static int __init pl111_platform_drm_init(void) -{ - int ret; - - pr_info("DRM %s\n", __func__); - - pl111_drm_device = platform_device_register_full(&pl111_drm_pdevinfo); - if (pl111_drm_device == NULL) { - pr_err("DRM platform_device_register_full() failed\n"); - return -ENOMEM; - } - - ret = amba_driver_register(&pl111_amba_driver); - if (ret != 0) { - pr_err("DRM amba_driver_register() failed %d\n", ret); - goto err_amba_reg; - } - - ret = platform_driver_register(&platform_drm_driver); - if (ret != 0) { - pr_err("DRM platform_driver_register() failed %d\n", ret); - goto err_pdrv_reg; - } - - return 0; - -err_pdrv_reg: - amba_driver_unregister(&pl111_amba_driver); -err_amba_reg: - platform_device_unregister(pl111_drm_device); - - return ret; -} - -static void __exit pl111_platform_drm_exit(void) -{ - pr_info("DRM %s\n", __func__); - - platform_device_unregister(pl111_drm_device); - amba_driver_unregister(&pl111_amba_driver); - platform_driver_unregister(&platform_drm_driver); -} - -#ifdef MODULE -module_init(pl111_platform_drm_init); -#else -late_initcall(pl111_platform_drm_init); -#endif -module_exit(pl111_platform_drm_exit); - -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_VERSION(DRIVER_VERSION); -MODULE_AUTHOR(DRIVER_AUTHOR); -MODULE_LICENSE(DRIVER_LICENCE); -MODULE_ALIAS(DRIVER_ALIAS); diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_suspend.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_suspend.c deleted file mode 100755 index d033566a57971..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_suspend.c +++ /dev/null @@ -1,43 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. 
- * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - -/** - * pl111_drm_suspend.c - * Implementation of the suspend/resume functions for PL111 DRM - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "pl111_drm.h" - -int pl111_drm_suspend(struct drm_device *dev, pm_message_t state) -{ - pr_info("DRM %s\n", __func__); - return 0; -} - -int pl111_drm_resume(struct drm_device *dev) -{ - pr_info("DRM %s\n", __func__); - return 0; -} diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_vma.c b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_vma.c deleted file mode 100755 index ff602efd6d668..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/pl111_drm_vma.c +++ /dev/null @@ -1,308 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - -/** - * pl111_drm_vma.c - * Implementation of the VM functions for PL111 DRM - */ -#include -#include -#include -#include -#include -#include - -#include -#include -#include "pl111_drm.h" - -/* BEGIN drivers/staging/omapdrm/omap_gem_helpers.c */ -/** - * drm_gem_put_pages - helper to free backing pages for a GEM object - * @obj: obj in question - * @pages: pages to free - */ -static void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, - bool dirty, bool accessed) -{ - int i, npages; - struct pl111_gem_bo *bo; - npages = obj->size >> PAGE_SHIFT; - bo = PL111_BO_FROM_GEM(obj); - for (i = 0; i < npages; i++) { - if (dirty) - set_page_dirty(pages[i]); - if (accessed) - mark_page_accessed(pages[i]); - /* Undo the reference we took when populating the table */ - page_cache_release(pages[i]); - } - drm_free_large(pages); -} - -void put_pages(struct drm_gem_object *obj, struct page **pages) -{ - int i, npages; - struct pl111_gem_bo *bo; - npages = obj->size >> PAGE_SHIFT; - bo = PL111_BO_FROM_GEM(obj); - _drm_gem_put_pages(obj, pages, true, true); - if (bo->backing_data.shm.dma_addrs) { - for (i = 0; i < npages; i++) { - /* Filter pages unmapped because of CPU accesses */ - if (!bo->backing_data.shm.dma_addrs[i]) - continue; - if (!dma_mapping_error(obj->dev->dev, - bo->backing_data.shm.dma_addrs[i])) { - dma_unmap_page(obj->dev->dev, - bo->backing_data.shm.dma_addrs[i], - PAGE_SIZE, - DMA_BIDIRECTIONAL); - } - } - kfree(bo->backing_data.shm.dma_addrs); - bo->backing_data.shm.dma_addrs = NULL; - } -} - -/** - * drm_gem_get_pages - helper to allocate backing pages for a GEM object - * @obj: obj in question - * @gfpmask: gfp mask of requested pages - */ -static struct page **_drm_gem_get_pages(struct drm_gem_object *obj, - gfp_t gfpmask) -{ - struct inode *inode; - struct address_space *mapping; - struct page *p, **pages; - int i, npages; - - /* This is the shared memory object that backs the GEM resource */ - inode = 
obj->filp->f_path.dentry->d_inode; - mapping = inode->i_mapping; - - npages = obj->size >> PAGE_SHIFT; - - pages = drm_malloc_ab(npages, sizeof(struct page *)); - if (pages == NULL) - return ERR_PTR(-ENOMEM); - - gfpmask |= mapping_gfp_mask(mapping); - - for (i = 0; i < npages; i++) { - p = shmem_read_mapping_page_gfp(mapping, i, gfpmask); - if (IS_ERR(p)) - goto fail; - pages[i] = p; - - /* - * There is a hypothetical issue w/ drivers that require - * buffer memory in the low 4GB.. if the pages are un- - * pinned, and swapped out, they can end up swapped back - * in above 4GB. If pages are already in memory, then - * shmem_read_mapping_page_gfp will ignore the gfpmask, - * even if the already in-memory page disobeys the mask. - * - * It is only a theoretical issue today, because none of - * the devices with this limitation can be populated with - * enough memory to trigger the issue. But this BUG_ON() - * is here as a reminder in case the problem with - * shmem_read_mapping_page_gfp() isn't solved by the time - * it does become a real issue. 
- * - * See this thread: http://lkml.org/lkml/2011/7/11/238 - */ - BUG_ON((gfpmask & __GFP_DMA32) && - (page_to_pfn(p) >= 0x00100000UL)); - } - - return pages; - -fail: - while (i--) - page_cache_release(pages[i]); - - drm_free_large(pages); - return ERR_PTR(PTR_ERR(p)); -} - -struct page **get_pages(struct drm_gem_object *obj) -{ - struct pl111_gem_bo *bo; - bo = PL111_BO_FROM_GEM(obj); - - if (bo->backing_data.shm.pages == NULL) { - struct page **p; - int npages = obj->size >> PAGE_SHIFT; - int i; - - p = _drm_gem_get_pages(obj, GFP_KERNEL); - if (IS_ERR(p)) - return ERR_PTR(-ENOMEM); - - bo->backing_data.shm.pages = p; - - if (bo->backing_data.shm.dma_addrs == NULL) { - bo->backing_data.shm.dma_addrs = - kzalloc(npages * sizeof(dma_addr_t), - GFP_KERNEL); - if (bo->backing_data.shm.dma_addrs == NULL) - goto error_out; - } - - if (!(bo->type & PL111_BOT_CACHED)) { - for (i = 0; i < npages; ++i) { - bo->backing_data.shm.dma_addrs[i] = - dma_map_page(obj->dev->dev, p[i], 0, PAGE_SIZE, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(obj->dev->dev, - bo->backing_data.shm.dma_addrs[i])) - goto error_out; - } - } - } - - return bo->backing_data.shm.pages; - -error_out: - put_pages(obj, bo->backing_data.shm.pages); - bo->backing_data.shm.pages = NULL; - return ERR_PTR(-ENOMEM); -} - -/* END drivers/staging/omapdrm/omap_gem_helpers.c */ -int pl111_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -{ - struct page **pages; - unsigned long pfn; - struct drm_gem_object *obj = vma->vm_private_data; - struct pl111_gem_bo *bo = PL111_BO_FROM_GEM(obj); - struct drm_device *dev = obj->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - - /* - * Our mmap calls setup a valid vma->vm_pgoff - * so we can use vmf->pgoff - */ - - if (bo->type & PL111_BOT_DMA) { - pfn = (bo->backing_data.dma.fb_dev_addr >> PAGE_SHIFT) + - vmf->pgoff; - } else { /* PL111_BOT_SHM */ - pages = get_pages(obj); - if (IS_ERR(pages)) { - dev_err(obj->dev->dev, - "could not get pages: %ld\n", 
PTR_ERR(pages)); - ret = PTR_ERR(pages); - goto error; - } - pfn = page_to_pfn(pages[vmf->pgoff]); - pl111_gem_sync_to_cpu(bo, vmf->pgoff); - } - - DRM_DEBUG("bo=%p physaddr=0x%.8x for offset 0x%x\n", - bo, PFN_PHYS(pfn), PFN_PHYS(vmf->pgoff)); - - ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); - -error: - mutex_unlock(&dev->struct_mutex); - - switch (ret) { - case 0: - case -ERESTARTSYS: - case -EINTR: - case -EBUSY: - return VM_FAULT_NOPAGE; - case -ENOMEM: - return VM_FAULT_OOM; - default: - return VM_FAULT_SIGBUS; - } -} - -/* - * The core drm_vm_ functions in kernel 3.4 are not ready - * to handle dma_buf cases where vma->vm_file->private_data - * cannot be accessed to get the device. - * - * We use these functions from 3.5 instead where the device - * pointer is passed explicitly. - * - * However they aren't exported from the kernel until 3.10 - */ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) -void pl111_drm_vm_open_locked(struct drm_device *dev, - struct vm_area_struct *vma) -{ - struct drm_vma_entry *vma_entry; - - DRM_DEBUG("0x%08lx,0x%08lx\n", - vma->vm_start, vma->vm_end - vma->vm_start); - atomic_inc(&dev->vma_count); - - vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL); - if (vma_entry) { - vma_entry->vma = vma; - vma_entry->pid = current->pid; - list_add(&vma_entry->head, &dev->vmalist); - } -} - -void pl111_drm_vm_close_locked(struct drm_device *dev, - struct vm_area_struct *vma) -{ - struct drm_vma_entry *pt, *temp; - - DRM_DEBUG("0x%08lx,0x%08lx\n", - vma->vm_start, vma->vm_end - vma->vm_start); - atomic_dec(&dev->vma_count); - - list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { - if (pt->vma == vma) { - list_del(&pt->head); - kfree(pt); - break; - } - } -} - -void pl111_gem_vm_open(struct vm_area_struct *vma) -{ - struct drm_gem_object *obj = vma->vm_private_data; - - drm_gem_object_reference(obj); - - mutex_lock(&obj->dev->struct_mutex); - pl111_drm_vm_open_locked(obj->dev, vma); - 
mutex_unlock(&obj->dev->struct_mutex); -} - -void pl111_gem_vm_close(struct vm_area_struct *vma) -{ - struct drm_gem_object *obj = vma->vm_private_data; - struct drm_device *dev = obj->dev; - - mutex_lock(&dev->struct_mutex); - pl111_drm_vm_close_locked(obj->dev, vma); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); -} -#endif diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/sconscript deleted file mode 100755 index c5011a7d07c12..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/pl111/sconscript +++ /dev/null @@ -1,52 +0,0 @@ -# -# (C) COPYRIGHT 2010-2013, 2015 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - -import os -import shutil -Import('env') - -# Generate a build environment for the integration tests, taking a copy of the top-level build environment -# (env) as a start. 
-drm_env = env.Clone() - -# Xorg uses C++ style comments and 'inline' keyword -if '-std=c89' in drm_env['CFLAGS']: - drm_env['CFLAGS'].remove('-std=c89') - -# X11 generates a lot of warnings -if '-Werror' in drm_env['CCFLAGS']: - drm_env['CCFLAGS'].remove('-Werror') - -#remove the 'lib'prefix -drm_env['LIBPREFIX'] = '' - -src = Glob('*.c') - -if drm_env.GetOption('clean') : - drm_env.Execute(Action("make clean", 'clean [pl111]')) - cmd = drm_env.Command('$STATIC_LIB_PATH/mali_drm.ko', src, []) -else: - # The target is mali_drm.ko, built from the source in pl111_drm/pl111, via the action makeAction - # mali_drm.ko will be copied to $STATIC_LIB_PATH after being built by the standard Linux - # kernel build system, after which it can be installed to the directory specified if - # "libs_install" is set; this is done by LibTarget. - makeAction=Action("cd ${SOURCE.dir} && make MALI_DEBUG=${debug} && cp pl111_drm.ko $STATIC_LIB_PATH/mali_drm.ko", '$MAKECOMSTR') - cmd = drm_env.Command('$STATIC_LIB_PATH/mali_drm.ko', src, [makeAction]) - -# need Module.symvers from drm.ko -#drm_env.Depends('$STATIC_LIB_PATH/pl111_drm.ko', '$STATIC_LIB_PATH/drm.ko') - -env.ProgTarget('x11', cmd) - diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/sconscript deleted file mode 100755 index d030e7129eee5..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/drm/sconscript +++ /dev/null @@ -1,23 +0,0 @@ -# -# (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - -Import('env') - -if 'x11' in env['winsys']: - # pl111_drm isn't released so only try to build it if it's there - if Glob('pl111/sconscript') and (env['platform_config'] == 'vexpress' or env['platform_config'] == 'vexpress_6xvirtex7_10mhz'): - SConscript('pl111/sconscript') - -#SConscript('dummy_drm/sconscript') diff --git a/drivers/gpu/arm/t83x/kernel/drivers/gpu/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/gpu/sconscript deleted file mode 100755 index 729dbda13c197..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/gpu/sconscript +++ /dev/null @@ -1,21 +0,0 @@ -# -# (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. -# -# - - - - -SConscript('arm/sconscript') - -if Glob('drm/sconscript'): - SConscript('drm/sconscript') diff --git a/drivers/gpu/arm/t83x/kernel/drivers/sconscript b/drivers/gpu/arm/t83x/kernel/drivers/sconscript deleted file mode 100755 index dd6a637ccacd5..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/drivers/sconscript +++ /dev/null @@ -1,23 +0,0 @@ -# -# (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - - -if Glob('base/sconscript'): - SConscript('base/sconscript') - -if Glob('video/sconscript'): - SConscript('video/sconscript') - -SConscript('gpu/sconscript') diff --git a/drivers/gpu/arm/t83x/kernel/include/linux/dma-buf-test-exporter.h b/drivers/gpu/arm/t83x/kernel/include/linux/dma-buf-test-exporter.h deleted file mode 100755 index 98984ba6032f7..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/include/linux/dma-buf-test-exporter.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - -#ifndef _LINUX_DMA_BUF_TEST_EXPORTER_H_ -#define _LINUX_DMA_BUF_TEST_EXPORTER_H_ - -#include -#include - -#define DMA_BUF_TE_VER_MAJOR 1 -#define DMA_BUF_TE_VER_MINOR 0 -#define DMA_BUF_TE_ENQ 0x642d7465 -#define DMA_BUF_TE_ACK 0x68692100 - -struct dma_buf_te_ioctl_version -{ - int op; /**< Must be set to DMA_BUF_TE_ENQ by client, driver will set it to DMA_BUF_TE_ACK */ - int major; /**< Major version */ - int minor; /**< Minor version */ -}; - -struct dma_buf_te_ioctl_alloc -{ - __u64 size; /* size of buffer to allocate, in pages */ -}; - -struct dma_buf_te_ioctl_status -{ - /* in */ - int fd; /* the dma_buf to query, only dma_buf objects exported by this driver is supported */ - /* out */ - int attached_devices; /* number of devices attached (active 'dma_buf_attach's) */ - int device_mappings; /* number of device mappings (active 'dma_buf_map_attachment's) */ - int cpu_mappings; /* number of cpu mappings (active 'mmap's) */ -}; - -struct 
dma_buf_te_ioctl_set_failing -{ - /* in */ - int fd; /* the dma_buf to set failure mode for, only dma_buf objects exported by this driver is supported */ - - /* zero = no fail injection, non-zero = inject failure */ - int fail_attach; - int fail_map; - int fail_mmap; -}; - -struct dma_buf_te_ioctl_fill -{ - int fd; - unsigned int value; -}; - -#define DMA_BUF_TE_IOCTL_BASE 'E' -/* Below all returning 0 if successful or -errcode except DMA_BUF_TE_ALLOC which will return fd or -errcode */ -#define DMA_BUF_TE_VERSION _IOR(DMA_BUF_TE_IOCTL_BASE, 0x00, struct dma_buf_te_ioctl_version) -#define DMA_BUF_TE_ALLOC _IOR(DMA_BUF_TE_IOCTL_BASE, 0x01, struct dma_buf_te_ioctl_alloc) -#define DMA_BUF_TE_QUERY _IOR(DMA_BUF_TE_IOCTL_BASE, 0x02, struct dma_buf_te_ioctl_status) -#define DMA_BUF_TE_SET_FAILING _IOW(DMA_BUF_TE_IOCTL_BASE, 0x03, struct dma_buf_te_ioctl_set_failing) -#define DMA_BUF_TE_ALLOC_CONT _IOR(DMA_BUF_TE_IOCTL_BASE, 0x04, struct dma_buf_te_ioctl_alloc) -#define DMA_BUF_TE_FILL _IOR(DMA_BUF_TE_IOCTL_BASE, 0x05, struct dma_buf_te_ioctl_fill) - -#endif /* _LINUX_DMA_BUF_TEST_EXPORTER_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/include/linux/kds.h b/drivers/gpu/arm/t83x/kernel/include/linux/kds.h deleted file mode 100755 index 1346edae04ccf..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/include/linux/kds.h +++ /dev/null @@ -1,173 +0,0 @@ -/* - * - * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. 
- * - */ - - - - - -#ifndef _KDS_H_ -#define _KDS_H_ - -#include -#include - -#define KDS_WAIT_BLOCKING (ULONG_MAX) - -struct kds_resource_set; - -typedef void (*kds_callback_fn) (void *callback_parameter, void *callback_extra_parameter); - -struct kds_callback -{ - kds_callback_fn user_cb; /* real cb */ - int direct; /* do direct or queued call? */ - struct workqueue_struct *wq; -}; - -struct kds_link -{ - struct kds_resource_set *parent; - struct list_head link; - unsigned long state; -}; - -struct kds_resource -{ - struct kds_link waiters; -}; - -/* callback API */ - -/* Initialize a callback object. - * - * Typically created per context or per hw resource. - * - * Callbacks can be performed directly if no nested locking can - * happen in the client. - * - * Nested locking can occur when a lock is held during the kds_async_waitall or - * kds_resource_set_release call. If the callback needs to take the same lock - * nested locking will happen. - * - * If nested locking could happen non-direct callbacks can be requested. - * Callbacks will then be called asynchronous to the triggering call. - */ -int kds_callback_init(struct kds_callback *cb, int direct, kds_callback_fn user_cb); - -/* Terminate the use of a callback object. - * - * If the callback object was set up as non-direct - * any pending callbacks will be flushed first. - * Note that to avoid a deadlock the lock callbacks needs - * can't be held when a callback object is terminated. - */ -void kds_callback_term(struct kds_callback *cb); - - -/* resource object API */ - -/* initialize a resource handle for a shared resource */ -void kds_resource_init(struct kds_resource * const resource); - -/* - * Will return 0 on success. - * If the resource is being used or waited -EBUSY is returned. - * The caller should NOT try to terminate a resource that could still have clients. - * After the function returns the resource is no longer known by kds. 
- */ -int kds_resource_term(struct kds_resource *resource); - -/* Asynchronous wait for a set of resources. - * Callback will be called when all resources are available. - * If all the resources was available the callback will be called before kds_async_waitall returns. - * So one must not hold any locks the callback code-flow can take when calling kds_async_waitall. - * Caller considered to own/use the resources until \a kds_rset_release is called. - * exclusive_access_bitmap is a bitmap where a high bit means exclusive access while a low bit means shared access. - * Use the Linux __set_bit API, where the index of the buffer to control is used as the bit index. - * - * Standard Linux error return value. - */ -int kds_async_waitall( - struct kds_resource_set ** const pprset, - struct kds_callback *cb, - void *callback_parameter, - void *callback_extra_parameter, - int number_resources, - unsigned long *exclusive_access_bitmap, - struct kds_resource **resource_list); - -/* Synchronous wait for a set of resources. - * Function will return when one of these have happened: - * - all resources have been obtained - * - timeout lapsed while waiting - * - a signal was received while waiting - * - * To wait without a timeout, specify KDS_WAIT_BLOCKING for \a jifies_timeout, otherwise - * the timeout in jiffies. A zero timeout attempts to obtain all resources and returns - * immediately with a timeout if all resources could not be obtained. - * - * Caller considered to own/use the resources when the function returns. - * Caller must release the resources using \a kds_rset_release. - * - * Calling this function while holding already locked resources or other locking primitives is dangerous. - * One must if this is needed decide on a lock order of the resources and/or the other locking primitives - * and always take the resources/locking primitives in the specific order. - * - * Use the ERR_PTR framework to decode the return value. 
- * NULL = time out - * If IS_ERR then PTR_ERR gives: - * ERESTARTSYS = signal received, retry call after signal - * all other values = internal error, lock failed - * Other values = successful wait, now the owner, must call kds_resource_set_release - */ -struct kds_resource_set *kds_waitall( - int number_resources, - unsigned long *exclusive_access_bitmap, - struct kds_resource **resource_list, - unsigned long jifies_timeout); - -/* Release resources after use. - * Caller must handle that other async callbacks will trigger, - * so must avoid holding any locks a callback will take. - * - * The function takes a pointer to your poiner to handle a race - * between a cancelation and a completion. - * - * If the caller can't guarantee that a race can't occur then - * the passed in pointer must be the same in both call paths - * to allow kds to manage the potential race. - */ -void kds_resource_set_release(struct kds_resource_set **pprset); - -/* Release resources after use and wait callbacks to complete. - * Caller must handle that other async callbacks will trigger, - * so must avoid holding any locks a callback will take. - * - * The function takes a pointer to your poiner to handle a race - * between a cancelation and a completion. - * - * If the caller can't guarantee that a race can't occur then - * the passed in pointer must be the same in both call paths - * to allow kds to manage the potential race. - * - * This should be used to cancel waits which are pending on a kds - * resource. - * - * It is a bug to call this from atomic contexts and from within - * a kds callback that now owns the kds_rseource. 
- */ - -void kds_resource_set_release_sync(struct kds_resource_set **pprset); -#endif /* _KDS_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/include/linux/ump-common.h b/drivers/gpu/arm/t83x/kernel/include/linux/ump-common.h deleted file mode 100755 index 0015bda6ffc79..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/include/linux/ump-common.h +++ /dev/null @@ -1,252 +0,0 @@ -/* - * - * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file ump-common.h - * - * This file contains some common enum values used both in both the user and kernel space side of UMP. - */ - -#ifndef _UMP_COMMON_H_ -#define _UMP_COMMON_H_ - -#ifdef __cplusplus -extern "C" -{ -#endif - -#ifdef __KERNEL__ -#include -#else -#include -#endif - -#define UMP_UINT32_MAX (4294967295U) -#define UMP_UINT64_MAX (18446744073709551615ULL) - -#ifdef __GNUC__ - #define CHECK_RESULT __attribute__((__warn_unused_result__)) - #define INLINE __inline__ -#else - #define CHECK_RESULT - #define INLINE __inline -#endif - -#ifndef STATIC - #define STATIC static -#endif - -/** - * Values to identify major and minor version of UMP - */ -#define UMP_VERSION_MAJOR 2 -#define UMP_VERSION_MINOR 0 - -/** - * Typedef for a secure ID, a system wide identifier for UMP memory buffers. - */ -typedef int32_t ump_secure_id; - -/** - * Value to indicate an invalid secure Id. - */ -#define UMP_INVALID_SECURE_ID ((ump_secure_id)0) - -/** - * UMP error codes. 
- */ -typedef enum -{ - UMP_OK = 0, /**< indicates success */ - UMP_ERROR = 1 /**< indicates failure */ -} ump_result; - -/** - * Allocation flag bits. - * - * ump_allocate accepts zero or more flags to specify the type of memory to allocate and how to expose it to devices. - * - * For each supported device there are 4 flags to control access permissions and give usage characteristic hints to optimize the allocation/mapping. - * They are; - * @li @a UMP_PROT__RD read permission - * @li @a UMP_PROT__WR write permission - * @li @a UMP_HINT__RD read often - * @li @a UMP_HINT__WR written often - * - * 5 devices are currently supported, with a device being the CPU itself. - * The other 4 devices will be mapped to real devices per SoC design. - * They are just named W,X,Y,Z by UMP as it has no knowledge of their real names/identifiers. - * As an example device W could be a camera device while device Z could be an ARM GPU device, leaving X and Y unused. - * - * 2 additional flags control the allocation; - * @li @a UMP_CONSTRAINT_PHYSICALLY_LINEAR the allocation must be physical linear. Typical for devices without an MMU and no IOMMU to help it. - * @li @a UMP_PROT_SHAREABLE the allocation can be shared with other processes on the system. Without this flag the returned allocation won't be resolvable in other processes. - * - * All UMP allocation are growable unless they're @a UMP_PROT_SHAREABLE. - * The hint bits should be used to indicate the access pattern so the driver can select the most optimal memory type and cache settings based on the what the system supports. - */ -typedef enum -{ - /* Generic helpers */ - UMP_PROT_DEVICE_RD = (1u << 0), - UMP_PROT_DEVICE_WR = (1u << 1), - UMP_HINT_DEVICE_RD = (1u << 2), - UMP_HINT_DEVICE_WR = (1u << 3), - UMP_DEVICE_MASK = 0xF, - UMP_DEVICE_CPU_SHIFT = 0, - UMP_DEVICE_W_SHIFT = 4, - UMP_DEVICE_X_SHIFT = 8, - UMP_DEVICE_Y_SHIFT = 12, - UMP_DEVICE_Z_SHIFT = 16, - - /* CPU protection and hints. 
*/ - UMP_PROT_CPU_RD = (1u << 0), - UMP_PROT_CPU_WR = (1u << 1), - UMP_HINT_CPU_RD = (1u << 2), - UMP_HINT_CPU_WR = (1u << 3), - - /* device W */ - UMP_PROT_W_RD = (1u << 4), - UMP_PROT_W_WR = (1u << 5), - UMP_HINT_W_RD = (1u << 6), - UMP_HINT_W_WR = (1u << 7), - - /* device X */ - UMP_PROT_X_RD = (1u << 8), - UMP_PROT_X_WR = (1u << 9), - UMP_HINT_X_RD = (1u << 10), - UMP_HINT_X_WR = (1u << 11), - - /* device Y */ - UMP_PROT_Y_RD = (1u << 12), - UMP_PROT_Y_WR = (1u << 13), - UMP_HINT_Y_RD = (1u << 14), - UMP_HINT_Y_WR = (1u << 15), - - /* device Z */ - UMP_PROT_Z_RD = (1u << 16), - UMP_PROT_Z_WR = (1u << 17), - UMP_HINT_Z_RD = (1u << 18), - UMP_HINT_Z_WR = (1u << 19), - - /* 20-26 reserved for future use */ - UMPP_ALLOCBITS_UNUSED = (0x7Fu << 20), - /** Allocations marked as @ UMP_CONSTRAINT_UNCACHED won't be mapped as cached by the cpu */ - UMP_CONSTRAINT_UNCACHED = (1u << 27), - /** Require 32-bit physically addressable memory */ - UMP_CONSTRAINT_32BIT_ADDRESSABLE = (1u << 28), - /** For devices without an MMU and with no IOMMU assistance. */ - UMP_CONSTRAINT_PHYSICALLY_LINEAR = (1u << 29), - /** Shareable must be set to allow the allocation to be used by other processes, the default is non-shared */ - UMP_PROT_SHAREABLE = (1u << 30) - /* (1u << 31) should not be used to ensure compiler portability */ -} ump_allocation_bits; - -/** - * ump_allocation_bits combination argument type. - * - * Type used to pass zero or more bits from the @ref ump_allocation_bits enum - */ -typedef uint32_t ump_alloc_flags; - - -/** - * Default allocation flags for UMP v1 compatible allocations. - */ -#define UMP_V1_API_DEFAULT_ALLOCATION_FLAGS UMP_PROT_CPU_RD | UMP_PROT_CPU_WR | \ - UMP_PROT_W_RD | UMP_PROT_W_WR | \ - UMP_PROT_X_RD | UMP_PROT_X_WR | \ - UMP_PROT_Y_RD | UMP_PROT_Y_WR | \ - UMP_PROT_Z_RD | UMP_PROT_Z_WR | \ - UMP_PROT_SHAREABLE | \ - UMP_CONSTRAINT_32BIT_ADDRESSABLE - -/** - * CPU cache sync operations. 
- * - * Cache synchronization operations to pass to @ref ump_cpu_msync_now - */ -enum -{ - /** - * Cleans any dirty cache lines to main memory, but the data will still be available in the cache. - * After a clean the contents of memory is considered to be "owned" by the device. - * */ - UMP_MSYNC_CLEAN = 1, - - /** Cleans any dirty cache lines to main memory and Ejects all lines from the cache. - * After an clean&invalidate the contents of memory is considered to be "owned" by the CPU. - * Any subsequent access will fetch data from main memory. - * - * @note Due to CPUs doing speculative prefetching a UMP_MSYNC_CLEAN_AND_INVALIDATE must be done before and after interacting with hardware. - * */ - UMP_MSYNC_CLEAN_AND_INVALIDATE - -}; - -typedef uint32_t ump_cpu_msync_op; - -/** - * Memory import types supported. - * If new import types are added they will appear here. - * They must be added before UMPP_EXTERNAL_MEM_COUNT and - * must be assigned an explicit sequantial number. - * - * @li UMP_EXTERNAL_MEM_TYPE_ION - Import an ION allocation - * Takes a int* (pointer to a file descriptor) - * Another ION reference is taken which is released on the final ump_release - */ -enum ump_external_memory_type -{ - UMPP_EXTERNAL_MEM_TYPE_UNUSED = 0, /* reserve type 0 */ - UMP_EXTERNAL_MEM_TYPE_ION = 1, - UMPP_EXTERNAL_MEM_COUNT -}; - -/** @name UMP v1 API - * - *@{ - */ - -/** - * Allocation constraints. - * - * Allocation flags to pass @ref ump_ref_drv_allocate - * - * UMP v1 API only. - */ -typedef enum -{ - /** the allocation is mapped as noncached. */ - UMP_REF_DRV_CONSTRAINT_NONE = 0, - /** not supported. */ - UMP_REF_DRV_CONSTRAINT_PHYSICALLY_LINEAR = 1, - /** the allocation is mapped as cached by the cpu. 
*/ - UMP_REF_DRV_CONSTRAINT_USE_CACHE = 4 -} ump_alloc_constraints; - -/* @} */ - - -#ifdef __cplusplus -} -#endif - - -#endif /* _UMP_COMMON_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/include/linux/ump-import.h b/drivers/gpu/arm/t83x/kernel/include/linux/ump-import.h deleted file mode 100755 index 89ce727d4f877..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/include/linux/ump-import.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * - * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef _UMP_IMPORT_H_ -#define _UMP_IMPORT_H_ - -#include -#include - -/** - * UMP import module info. - * Contains information about the Linux module providing the import module, - * used to block unloading of the Linux module while imported memory exists. - * Lists the functions implementing the UMP import functions. - */ -struct ump_import_handler -{ - /** - * Linux module of the import handler - */ - struct module * linux_module; - - /** - * UMP session usage begin. - * - * Called when a UMP session first is bound to the handler. - * Typically used to set up any import module specific per-session data. - * The function returns a pointer to this data in the output pointer custom_session_data - * which will be passed to \a session_end and \a import. - * - * Note: custom_session_data must be set to non-NULL if successful. - * If no pointer is needed set it a magic value to validate instead. 
- * - * @param[out] custom_session_data Pointer to a generic pointer where any data can be stored - * @return 0 on success, error code if the session could not be initiated. - */ - int (*session_begin)(void ** custom_session_data); - - /** - * UMP session usage end. - * - * Called when a UMP session is no longer using the handler. - * Only called if @a session_begin returned OK. - * - * @param[in] custom_session_data The value set by the session_begin handler - */ - void (*session_end)(void * custom_session_data); - - /** - * Import request. - * - * Called when a client has asked to import a resource of the type the import module was installed for. - * Only called if @a session_begin returned OK. - * - * The requested flags must be verified to be valid to apply to the imported memory. - * If not valid return UMP_DD_INVALID_MEMORY_HANDLE. - * If the flags are found to be valid call \a ump_dd_create_from_phys_blocks_64 to create a handle. - * - * @param[in] custom_session_data The value set by the session_begin handler - * @param[in] phandle Pointer to the handle to import - * @param flags The requested UMPv2 flags to assign to the imported handle - * @return UMP_DD_INVALID_MEMORY_HANDLE if the import failed, a valid ump handle on success - */ - ump_dd_handle (*import)(void * custom_session_data, void * phandle, ump_alloc_flags flags); -}; - -/** - * Import module registration. - * Registers a ump_import_handler structure for a memory type. - * @param type Type of the memory to register a handler for - * @param[in] handler Handler strcture to install - * @return 0 on success, a Linux error code on failure - */ -int ump_import_module_register(enum ump_external_memory_type type, struct ump_import_handler * handler); - -/** - * Import module deregistration. - * Uninstalls the handler for the given memory type. 
- * @param type Type of the memory to unregister the handler for - */ -void ump_import_module_unregister(enum ump_external_memory_type type); - -#endif /* _UMP_IMPORT_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/include/linux/ump-ioctl.h b/drivers/gpu/arm/t83x/kernel/include/linux/ump-ioctl.h deleted file mode 100755 index 451403e7c1ee9..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/include/linux/ump-ioctl.h +++ /dev/null @@ -1,152 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2013 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -#ifndef _UMP_IOCTL_H_ -#define _UMP_IOCTL_H - -#include - -/* - * The order and size of the members of these have been chosen so the structures look the same in 32-bit and 64-bit builds. - * If any changes are done build the ump_struct_size_checker test for 32-bit and 64-bit targets. Both must compile successfully to commit. - */ - -/** 32/64-bit neutral way to represent pointers */ -typedef union ump_pointer -{ - void * value; /**< client should store their pointers here */ - uint32_t compat_value; /**< 64-bit kernels should fetch value here when handling 32-bit clients */ - uint64_t sizer; /**< Force 64-bit storage for all clients regardless */ -} ump_pointer; - -/** - * UMP allocation request. 
- * Used when performing ump_allocate - */ -typedef struct ump_k_allocate -{ - uint64_t size; /**< [in] Size in bytes to allocate */ - ump_secure_id secure_id; /**< [out] Secure ID of allocation on success */ - ump_alloc_flags alloc_flags; /**< [in] Flags to use when allocating */ -} ump_k_allocate; - -/** - * UMP size query request. - * Used when performing ump_size_get - */ -typedef struct ump_k_sizequery -{ - uint64_t size; /**< [out] Size of allocation */ - ump_secure_id secure_id; /**< [in] ID of allocation to query the size of */ - uint32_t padding; /* don't remove */ -} ump_k_sizequery; - -/** - * UMP cache synchronization request. - * Used when performing ump_cpu_msync_now - */ -typedef struct ump_k_msync -{ - ump_pointer mapped_ptr; /**< [in] CPU VA to perform cache operation on */ - ump_secure_id secure_id; /**< [in] ID of allocation to perform cache operation on */ - ump_cpu_msync_op cache_operation; /**< [in] Cache operation to perform */ - uint64_t size; /**< [in] Size in bytes of the range to synchronize */ -} ump_k_msync; - -/** - * UMP memory retain request. - * Used when performing ump_retain - */ -typedef struct ump_k_retain -{ - ump_secure_id secure_id; /**< [in] ID of allocation to retain a reference to */ - uint32_t padding; /* don't remove */ -} ump_k_retain; - -/** - * UMP memory release request. - * Used when performing ump_release - */ -typedef struct ump_k_release -{ - ump_secure_id secure_id; /**< [in] ID of allocation to release a reference to */ - uint32_t padding; /* don't remove */ -} ump_k_release; - -typedef struct ump_k_import -{ - ump_pointer phandle; /**< [in] Pointer to handle to import */ - uint32_t type; /**< [in] Type of handle to import */ - ump_alloc_flags alloc_flags; /**< [in] Flags to assign to the imported memory */ - ump_secure_id secure_id; /**< [out] UMP ID representing the imported memory */ - uint32_t padding; /* don't remove */ -} ump_k_import; - -/** - * UMP allocation flags request. 
- * Used when performing umpp_get_allocation_flags - * - * used only by v1 API - */ -typedef struct ump_k_allocation_flags -{ - ump_secure_id secure_id; /**< [in] Secure ID of allocation on success */ - ump_alloc_flags alloc_flags; /**< [out] Flags to use when allocating */ -} ump_k_allocation_flags; - -#define UMP_CALL_MAX_SIZE 512 -/* - * Ioctl definitions - */ - -/* Use '~' as magic number */ - -#define UMP_IOC_MAGIC '~' - -#define UMP_FUNC_ALLOCATE _IOWR(UMP_IOC_MAGIC, 1, ump_k_allocate) -#define UMP_FUNC_SIZEQUERY _IOWR(UMP_IOC_MAGIC, 2, ump_k_sizequery) -#define UMP_FUNC_MSYNC _IOWR(UMP_IOC_MAGIC, 3, ump_k_msync) -#define UMP_FUNC_RETAIN _IOW(UMP_IOC_MAGIC, 4, ump_k_retain) -#define UMP_FUNC_RELEASE _IOW(UMP_IOC_MAGIC, 5, ump_k_release) -#define UMP_FUNC_ALLOCATION_FLAGS_GET _IOWR(UMP_IOC_MAGIC, 6, ump_k_allocation_flags) -#define UMP_FUNC_IMPORT _IOWR(UMP_IOC_MAGIC, 7, ump_k_import) - -/*max ioctl sequential number*/ -#define UMP_IOC_MAXNR 7 - -/* 15 bits for the UMP ID (allowing 32768 IDs) */ -#define UMP_LINUX_ID_BITS 15 -#define UMP_LINUX_ID_MASK ((1ULL << UMP_LINUX_ID_BITS) - 1ULL) - -/* 64-bit (really 52 bits) encoding: 15 bits for the ID, 37 bits for the offset */ -#define UMP_LINUX_OFFSET_BITS_64 37 -#define UMP_LINUX_OFFSET_MASK_64 ((1ULL << UMP_LINUX_OFFSET_BITS_64)-1) -/* 32-bit encoding: 15 bits for the ID, 17 bits for the offset */ -#define UMP_LINUX_OFFSET_BITS_32 17 -#define UMP_LINUX_OFFSET_MASK_32 ((1ULL << UMP_LINUX_OFFSET_BITS_32)-1) - -#if __SIZEOF_LONG__ == 8 -#define UMP_LINUX_OFFSET_BITS UMP_LINUX_OFFSET_BITS_64 -#define UMP_LINUX_OFFSET_MASK UMP_LINUX_OFFSET_MASK_64 -#else -#define UMP_LINUX_OFFSET_BITS UMP_LINUX_OFFSET_BITS_32 -#define UMP_LINUX_OFFSET_MASK UMP_LINUX_OFFSET_MASK_32 -#endif - -#endif /* _UMP_IOCTL_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/include/linux/ump.h b/drivers/gpu/arm/t83x/kernel/include/linux/ump.h deleted file mode 100755 index 16f9c39f69c15..0000000000000 --- 
a/drivers/gpu/arm/t83x/kernel/include/linux/ump.h +++ /dev/null @@ -1,481 +0,0 @@ -/* - * - * (C) COPYRIGHT 2008-2013, 2015 ARM Limited. All rights reserved. - * - * This program is free software and is provided to you under the terms of the - * GNU General Public License version 2 as published by the Free Software - * Foundation, and any use by you of this program is subject to the terms - * of such GNU licence. - * - * A copy of the licence is included with the program, and can also be obtained - * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ - - - - - -/** - * @file - * - * This file contains the kernel space part of the UMP API. - * - */ - -#ifndef _UMP_KERNEL_INTERFACE_H_ -#define _UMP_KERNEL_INTERFACE_H_ - -/** - * @addtogroup ump_api - * @{ - */ - -/** @defgroup ump_kernel_space_api UMP Kernel Space API - * @{ */ - -/** - * External representation of a UMP handle in kernel space. - */ -typedef void * ump_dd_handle; - -#ifdef CONFIG_KDS -#include -#endif - -#include - -#define UMP_KERNEL_API_EXPORT - -#if defined(__KERNEL__) -#include -#else -#include -#endif - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** - * Value to indicate an invalid UMP memory handle. - */ -#define UMP_DD_INVALID_MEMORY_HANDLE ((ump_dd_handle)0) - -/** - * Struct used to describe a physical block used by UMP memory - */ -typedef struct ump_dd_physical_block_64 -{ - uint64_t addr; /**< The physical address of the block */ - uint64_t size; /**< The length of the block, in bytes, typically page aligned */ -} ump_dd_physical_block_64; - -/** - * Security filter hook. 
- * - * Each allocation can have a security filter attached to it.@n - * The hook receives - * @li the secure ID - * @li a handle to the allocation - * @li the callback_data argument provided to @ref ump_dd_allocate_64 or @ref ump_dd_create_from_phys_blocks_64 - * - * The hook must return @a true to indicate that access to the handle is allowed or @n - * @a false to state that no access is permitted.@n - * This hook is guaranteed to be called in the context of the requesting process/address space. - * - * The arguments provided to the hook are; - * @li the secure ID - * @li handle to the allocation - * @li the callback_data set when registering the hook - * - * Return value; - * @li @a true to permit access - * @li @a false to deny access - */ -typedef bool (*ump_dd_security_filter)(ump_secure_id, ump_dd_handle, void *); - -/** - * Final release notify hook. - * - * Allocations can have a hook attached to them which is called when the last reference to the allocation is released. - * No reference count manipulation is allowed on the provided handle, just property querying (ID get, size get, phys block get). - * This is similar to finalizers in OO languages. - * - * The arguments provided to the hook are; - * * handle to the allocation - * * the callback_data set when registering the hook - */ -typedef void (*ump_dd_final_release_callback)(const ump_dd_handle, void *); - -/** - * Allocate a buffer. - * The lifetime of the allocation is controlled by a reference count. - * The reference count of the returned buffer is set to 1. - * The memory will be freed once the reference count reaches 0. - * Use @ref ump_dd_retain and @ref ump_dd_release to control the reference count. - * @param size Number of bytes to allocate. Will be padded up to a multiple of the page size. - * @param flags Bit-wise OR of zero or more of the allocation flag bits. 
- * @param[in] filter_func Pointer to a function which will be called when an allocation is required from a - * secure id before the allocation itself is returned to user-space. - * NULL permitted if no need for a callback. - * @param[in] final_release_func Pointer to a function which will be called when the last reference is removed, - * just before the allocation is freed. NULL permitted if no need for a callback. - * @param[in] callback_data An opaque pointer which will be provided to @a filter_func and @a final_release_func - * @return Handle to the new allocation, or @a UMP_DD_INVALID_MEMORY_HANDLE on allocation failure. - */ -UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_allocate_64(uint64_t size, ump_alloc_flags flags, ump_dd_security_filter filter_func, ump_dd_final_release_callback final_release_func, void* callback_data); - - -/** - * Allocation bits getter. - * Retrieves the allocation flags used when instantiating the given handle. - * Just a copy of the flag given to @ref ump_dd_allocate_64 and @ref ump_dd_create_from_phys_blocks_64 - * @param mem The handle retrieve the bits for - * @return The allocation bits used to instantiate the allocation - */ -UMP_KERNEL_API_EXPORT ump_alloc_flags ump_dd_allocation_flags_get(const ump_dd_handle mem); - - -/** - * Retrieves the secure ID for the specified UMP memory. - * - * This identifier is unique across the entire system, and uniquely identifies - * the specified UMP memory allocation. This identifier can later be used through the - * @ref ump_dd_from_secure_id or - * @ref ump_from_secure_id - * functions in order to access this UMP memory, for instance from another process (if shared of course). - * Unless the allocation was marked as shared the returned ID will only be resolvable in the same process as did the allocation. - * - * Calling on an @a UMP_DD_INVALID_MEMORY_HANDLE will result in undefined behavior. - * Debug builds will assert on this. 
- * - * @note There is a user space equivalent function called @ref ump_secure_id_get - * - * @see ump_dd_from_secure_id - * @see ump_from_secure_id - * @see ump_secure_id_get - * - * @param mem Handle to UMP memory. - * - * @return Returns the secure ID for the specified UMP memory. - */ -UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(const ump_dd_handle mem); - -#ifdef CONFIG_KDS -/** - * Retrieve the KDS resource for the specified UMP memory. - * - * The KDS resource should be used to synchronize access to the UMP allocation. - * See the KDS API for how to do that. - * - * @param mem Handle to the UMP memory to query. - * @return Pointer to the KDS resource controlling access to the UMP memory. - */ -UMP_KERNEL_API_EXPORT struct kds_resource * ump_dd_kds_resource_get(const ump_dd_handle mem); -#endif - -/** - * Retrieves a handle to allocated UMP memory. - * - * The usage of UMP memory is reference counted, so this will increment the reference - * count by one for the specified UMP memory. - * Use @ref ump_dd_release when there is no longer any - * use for the retrieved handle. - * - * If called on an non-shared allocation and this is a different process @a UMP_DD_INVALID_MEMORY_HANDLE will be returned. - * - * Calling on an @a UMP_INVALID_SECURE_ID will return @a UMP_DD_INVALID_MEMORY_HANDLE - * - * @note There is a user space equivalent function called @ref ump_from_secure_id - * - * @see ump_dd_release - * @see ump_from_secure_id - * - * @param secure_id The secure ID of the UMP memory to open, that can be retrieved using the @ref ump_secure_id_get function. - * - * @return @a UMP_DD_INVALID_MEMORY_HANDLE indicates failure, otherwise a valid handle is returned. - */ -UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_from_secure_id(ump_secure_id secure_id); - - -/** - * Retrieves all physical memory block information for specified UMP memory. - * - * This function can be used by other device drivers in order to create MMU tables. 
- * This function will return a pointer to an array of @ref ump_dd_physical_block_64 in @a pArray and the number of array elements in @a pCount - * - * Calling on an @a UMP_DD_INVALID_MEMORY_HANDLE results in undefined behavior. - * Debug builds will assert on this. - * - * @param mem Handle to UMP memory. - * @param[out] pCount Pointer to where to store the number of items in the returned array - * @param[out] pArray Pointer to where to store a pointer to the physical blocks array - */ -UMP_KERNEL_API_EXPORT void ump_dd_phys_blocks_get_64(const ump_dd_handle mem, uint64_t * const pCount, const ump_dd_physical_block_64 ** const pArray); - -/** - * Retrieves the actual size of the specified UMP memory. - * - * The size is reported in bytes, and is typically page aligned. - * - * Calling on an @a UMP_DD_INVALID_MEMORY_HANDLE results in undefined behavior. - * Debug builds will assert on this. - * - * @note There is a user space equivalent function called @ref ump_size_get - * - * @see ump_size_get - * - * @param mem Handle to UMP memory. - * - * @return Returns the allocated size of the specified UMP memory, in bytes. - */ -UMP_KERNEL_API_EXPORT uint64_t ump_dd_size_get_64(const ump_dd_handle mem); - - -/** - * Adds an extra reference to the specified UMP memory allocation. - * - * The function @ref ump_dd_release must then be used - * to release each copy of the UMP memory handle. - * - * Calling on an @a UMP_DD_INVALID_MEMORY_HANDLE results in undefined behavior. - * Debug builds will assert on this. - * - * @note You are not required to call @ref ump_dd_retain - * for UMP handles returned from - * @ref ump_dd_from_secure_id, - * because these handles are already reference counted by this function. - * - * @note There is a user space equivalent function called @ref ump_retain - * - * @see ump_retain - * - * @param mem Handle to UMP memory. - * @return 0 indicates success, any other value indicates failure. 
- */ -UMP_KERNEL_API_EXPORT int ump_dd_retain(ump_dd_handle mem); - - -/** - * Releases a reference from the specified UMP memory. - * - * This function must be called once for every reference to the UMP memory handle. - * When the last reference is released, all resources associated with this UMP memory - * handle are freed. - * - * One can only call ump_release when matched with a successful ump_dd_retain, ump_dd_allocate_64 or ump_dd_from_secure_id - * If called on an @a UMP_DD_INVALID_MEMORY_HANDLE the function will early out. - * - * @note There is a user space equivalent function called @ref ump_release - * - * @see ump_release - * - * @param mem Handle to UMP memory. - */ -UMP_KERNEL_API_EXPORT void ump_dd_release(ump_dd_handle mem); - -/** - * Create an ump allocation handle based on externally managed memory. - * Used to wrap an existing allocation as an UMP memory handle. - * Once wrapped the memory acts just like a normal allocation coming from @ref ump_dd_allocate_64. - * The only exception is that the freed physical memory is not put into the pool of free memory, but instead considered returned to the caller once @a final_release_func returns. - * The blocks array will be copied, so no need to hold on to it after this function returns. - * @param[in] blocks Array of @ref ump_dd_physical_block_64 - * @param num_blocks Number of elements in the array pointed to by @a blocks - * @param flags Allocation flags to mark the handle with - * @param[in] filter_func Pointer to a function which will be called when an allocation is required from a secure id before the allocation itself is returned to user-space. - * NULL permitted if no need for a callback. - * @param[in] final_release_func Pointer to a function which will be called when the last reference is removed, just before the allocation is freed. NULL permitted if no need for a callback. 
- * @param[in] callback_data An opaque pointer which will be provided to @a filter_func and @a final_release_func - * @return Handle to the UMP allocation handle created, or @a UMP_DD_INVALID_MEMORY_HANDLE if no such handle could be created. - */ -UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_create_from_phys_blocks_64(const ump_dd_physical_block_64 * blocks, uint64_t num_blocks, ump_alloc_flags flags, ump_dd_security_filter filter_func, ump_dd_final_release_callback final_release_func, void* callback_data); - - -/** @name UMP v1 API - * Functions provided to support compatibility with UMP v1 API - * - *@{ - */ - -/** - * Value to indicate an invalid UMP memory handle. - */ -#define UMP_DD_HANDLE_INVALID UMP_DD_INVALID_MEMORY_HANDLE - -/** - * UMP error codes for kernel space. - */ -typedef enum -{ - UMP_DD_SUCCESS, /**< indicates success */ - UMP_DD_INVALID /**< indicates failure */ -} ump_dd_status_code; - - -/** - * Struct used to describe a physical block used by UMP memory - */ -typedef struct ump_dd_physical_block -{ - unsigned long addr; /**< The physical address of the block */ - unsigned long size; /**< The length of the block, typically page aligned */ -} ump_dd_physical_block; - - -/** - * Retrieves a handle to allocated UMP memory. - * - * The usage of UMP memory is reference counted, so this will increment the reference - * count by one for the specified UMP memory. - * Use @ref ump_dd_reference_release "ump_dd_reference_release" when there is no longer any - * use for the retrieved handle. - * - * @note There is a user space equivalent function called @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id" - * - * @see ump_dd_reference_release - * @see ump_handle_create_from_secure_id - * - * @param secure_id The secure ID of the UMP memory to open, that can be retrieved using the @ref ump_secure_id_get "ump_secure_id_get " function. - * - * @return UMP_INVALID_MEMORY_HANDLE indicates failure, otherwise a valid handle is returned. 
- */ -UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id); - - - -/** - * Create an ump allocation handle based on externally managed memory. - * Used to wrap an existing allocation as an UMP memory handle. - * - * @param[in] blocks Array of @ref ump_dd_physical_block - * @param num_blocks Number of elements in the array pointed to by @a blocks - * - * @return Handle to the UMP allocation handle created, or @a UMP_DD_INVALID_MEMORY_HANDLE if no such handle could be created. - */ -UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks); - - -/** - * Retrieves the number of physical blocks used by the specified UMP memory. - * - * This function retrieves the number of @ref ump_dd_physical_block "ump_dd_physical_block" structs needed - * to describe the physical memory layout of the given UMP memory. This can later be used when calling - * the functions @ref ump_dd_phys_blocks_get "ump_dd_phys_blocks_get" and - * @ref ump_dd_phys_block_get "ump_dd_phys_block_get". - * - * @see ump_dd_phys_blocks_get - * @see ump_dd_phys_block_get - * - * @param mem Handle to UMP memory. - * - * @return The number of ump_dd_physical_block structs required to describe the physical memory layout of the specified UMP memory. - */ -UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle mem); - - -/** - * Retrieves all physical memory block information for specified UMP memory. - * - * This function can be used by other device drivers in order to create MMU tables. - * - * @note This function will fail if the num_blocks parameter is either to large or to small. - * - * @see ump_dd_phys_block_get - * - * @param mem Handle to UMP memory. - * @param blocks An array of @ref ump_dd_physical_block "ump_dd_physical_block" structs that will receive the physical description. - * @param num_blocks The number of blocks to return in the blocks array. 
Use the function - * @ref ump_dd_phys_block_count_get "ump_dd_phys_block_count_get" first to determine the number of blocks required. - * - * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure. - */ -UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle mem, ump_dd_physical_block * const blocks, unsigned long num_blocks); - - -/** - * Retrieves the physical memory block information for specified block for the specified UMP memory. - * - * This function can be used by other device drivers in order to create MMU tables. - * - * @note This function will return UMP_DD_INVALID if the specified index is out of range. - * - * @see ump_dd_phys_blocks_get - * - * @param mem Handle to UMP memory. - * @param index Which physical info block to retrieve. - * @param block Pointer to a @ref ump_dd_physical_block "ump_dd_physical_block" struct which will receive the requested information. - * - * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure. - */ -UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle mem, unsigned long index, ump_dd_physical_block * const block); - - -/** - * Retrieves the actual size of the specified UMP memory. - * - * The size is reported in bytes, and is typically page aligned. - * - * @note There is a user space equivalent function called @ref ump_size_get "ump_size_get" - * - * @see ump_size_get - * - * @param mem Handle to UMP memory. - * - * @return Returns the allocated size of the specified UMP memory, in bytes. - */ -UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle mem); - - -/** - * Adds an extra reference to the specified UMP memory. - * - * This function adds an extra reference to the specified UMP memory. This function should - * be used every time a UMP memory handle is duplicated, that is, assigned to another ump_dd_handle - * variable. 
The function @ref ump_dd_reference_release "ump_dd_reference_release" must then be used - * to release each copy of the UMP memory handle. - * - * @note You are not required to call @ref ump_dd_reference_add "ump_dd_reference_add" - * for UMP handles returned from - * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id", - * because these handles are already reference counted by this function. - * - * @note There is a user space equivalent function called @ref ump_reference_add "ump_reference_add" - * - * @see ump_reference_add - * - * @param mem Handle to UMP memory. - */ -UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle mem); - - -/** - * Releases a reference from the specified UMP memory. - * - * This function should be called once for every reference to the UMP memory handle. - * When the last reference is released, all resources associated with this UMP memory - * handle are freed. - * - * @note There is a user space equivalent function called @ref ump_reference_release "ump_reference_release" - * - * @see ump_reference_release - * - * @param mem Handle to UMP memory. - */ -UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle mem); - -/* @} */ - -#ifdef __cplusplus -} -#endif - - -/** @} */ /* end group ump_kernel_space_api */ - -/** @} */ /* end group ump_api */ - -#endif /* _UMP_KERNEL_INTERFACE_H_ */ diff --git a/drivers/gpu/arm/t83x/kernel/license.txt b/drivers/gpu/arm/t83x/kernel/license.txt deleted file mode 100755 index 77c14bdc45b8c..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/license.txt +++ /dev/null @@ -1,198 +0,0 @@ -GPLV2 LICENCE AGREEMENT FOR MALI GPUS LINUX KERNEL DEVICE DRIVERS SOURCE CODE - -THE USE OF THE SOFTWARE ACCOMPANYING THIS DOCUMENT IS EXPRESSLY SUBJECT TO THE TERMS OF THE GNU GENERAL PUBLIC LICENSE VERSION 2 AS PUBLISHED BY THE FREE SOFTWARE FOUNDATION AND SET OUT BELOW FOR REFERENCE (?GPL LICENCE?). 
ARM IS ONLY WILLING TO DISTRIBUTE THE SOFTWARE TO YOU ON CONDITION THAT YOU ACCEPT ALL OF THE TERMS IN THE GPL LICENCE PRIOR TO MODIFYING OR DISTRIBUTING THE SOFTWARE. - - - -Further for the period of three (3) years, ARM hereby offers to make available the source code of any part of the software program that is supplied as object code or in executable form. - - - -GPL Licence - - - -GNU GENERAL PUBLIC LICENSE - -Version 2, June 1991 - - - -Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - - - -Preamble - - - -The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. - - - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. - - - -To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. 
- - - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - - - -We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. - - - -Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. - - - -Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. - - - -The precise terms and conditions for copying, distribution and modification follow. - - - -GNU GENERAL PUBLIC LICENSE - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - - -0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) 
Each licensee is addressed as "you". - - - -Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). - -Whether that is true depends on what the Program does. - - - -1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; - -and give any other recipients of the Program a copy of this License along with the Program. - - - -You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. - - - -2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: - - - -a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. - - - -b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. 
- - - -c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) - - - -These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. - - - -Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. - - - -In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. - - - -3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: - - - -a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - - -b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - - -c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) - - - -The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
- - - -If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. - - - -4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. - - - -5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the - -Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. - - - -6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. - -You are not responsible for enforcing compliance by third parties to this License. - - - -7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. - - - -If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. - - - -It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. - - - -This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. - - - -8. 
If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. - - - -9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - - - -Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. - - - -10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. - - - -NO WARRANTY - - - -11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -/end - diff --git a/drivers/gpu/arm/t83x/kernel/sconscript b/drivers/gpu/arm/t83x/kernel/sconscript deleted file mode 100755 index e6be64c91a415..0000000000000 --- a/drivers/gpu/arm/t83x/kernel/sconscript +++ /dev/null @@ -1,23 +0,0 @@ -# -# (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the -# GNU General Public License version 2 as published by the Free Software -# Foundation, and any use by you of this program is subject to the terms -# of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained -# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301, USA. 
-# -# - - - -Import('env') - -if env['backend'] == 'kernel' and int(env['kernel_modules']) == 1 and env['gpu'] != 'none': - SConscript('drivers/sconscript') - - if Glob('tests/sconscript'): - SConscript('tests/sconscript') diff --git a/drivers/gpu/arm/ump/Kbuild b/drivers/gpu/arm/ump/Kbuild index 45c24081532ff..11b9ab56c99bb 100755 --- a/drivers/gpu/arm/ump/Kbuild +++ b/drivers/gpu/arm/ump/Kbuild @@ -10,9 +10,7 @@ # Set default configuration to use, if Makefile didn't provide one. # Change this to use a different config.h -TARGET_PLATFORM ?= aml-meson -CONFIG ?= aml-meson-m400-1 - +CONFIG ?= default # Validate selected config ifneq ($(shell [ -d $(src)/arch-$(CONFIG) ] && [ -f $(src)/arch-$(CONFIG)/config.h ] && echo "OK"), OK) @@ -37,6 +35,7 @@ DRIVER_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV) CHANGE_DATE := $(shell $(SVN_INFO) | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-) CHANGED_REVISION := $(shell $(SVN_INFO) | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-) REPO_URL := $(shell $(SVN_INFO) | grep '^URL: ' | cut -d: -f2- | cut -b2-) + else # SVN GIT_REV := $(shell cd $(src); git describe --always 2>/dev/null) ifneq ($(GIT_REV),) @@ -45,6 +44,7 @@ DRIVER_REV := $(MALI_RELEASE_NAME)-$(GIT_REV) CHANGE_DATE := $(shell cd $(src); git log -1 --format="%ci") CHANGED_REVISION := $(GIT_REV) REPO_URL := $(shell cd $(src); git describe --all --always 2>/dev/null) + else # Git # No Git or SVN detected DRIVER_REV := $(MALI_RELEASE_NAME) @@ -52,10 +52,12 @@ CHANGE_DATE := $(MALI_RELEASE_NAME) CHANGED_REVISION := $(MALI_RELEASE_NAME) endif endif + ccflags-y += -DSVN_REV=$(SVN_REV) ccflags-y += -DSVN_REV_STRING=\"$(DRIVER_REV)\" - -ccflags-y += -I$(src) -I$(src)/common -I$(src)/linux -I$(src)/../mali/common -I$(src)/../mali/linux -I$(src)/include/ump +ccflags-y += -Wno-error=date-time +ccflags-y += -I$(src) -I$(src)/common -I$(src)/linux -I$(src)/../mali/common -I$(src)/../mali/linux -I$(src)/include -I$(src)/../../ump/include/ump +ccflags-y += 
-I$(src)/include/ump ccflags-y += -DMALI_STATE_TRACKING=0 ccflags-y += -DMALI_ENABLE_CPU_CYCLES=0 ccflags-$(CONFIG_UMP_DEBUG) += -DDEBUG @@ -83,12 +85,15 @@ ump-y = common/ump_kernel_common.o \ linux/ump_osk_atomics.o \ linux/ump_osk_low_level_mem.o \ linux/ump_osk_misc.o \ - linux/ump_kernel_random_mapping.o \ - $(UDD_FILE_PREFIX)linux/mali_osk_atomics.o \ - $(UDD_FILE_PREFIX)linux/mali_osk_locks.o \ - $(UDD_FILE_PREFIX)linux/mali_osk_memory.o \ - $(UDD_FILE_PREFIX)linux/mali_osk_math.o \ - $(UDD_FILE_PREFIX)linux/mali_osk_misc.o + linux/ump_kernel_random_mapping.o + +ifneq ($(CONFIG_MALI400),y) +ump-y += $(UDD_FILE_PREFIX)linux/mali_osk_atomics.o \ + $(UDD_FILE_PREFIX)linux/mali_osk_locks.o \ + $(UDD_FILE_PREFIX)linux/mali_osk_memory.o \ + $(UDD_FILE_PREFIX)linux/mali_osk_math.o \ + $(UDD_FILE_PREFIX)linux/mali_osk_misc.o +endif obj-$(CONFIG_UMP) := ump.o diff --git a/drivers/gpu/arm/ump/Kconfig b/drivers/gpu/arm/ump/Kconfig index eb7a19b928aa6..01ce6707deb02 100755 --- a/drivers/gpu/arm/ump/Kconfig +++ b/drivers/gpu/arm/ump/Kconfig @@ -1,8 +1,5 @@ -menu "Mali 400 UMP device driver" config UMP tristate "UMP support" - depends on ARM || ARM64 - default n ---help--- This enables support for the UMP memory allocation and sharing API. @@ -12,8 +9,7 @@ config UMP config UMP_DEBUG bool "Enable extra debug in UMP" depends on UMP - default n + default y ---help--- This enabled extra debug checks and messages in UMP. -endmenu diff --git a/drivers/gpu/arm/ump/Makefile b/drivers/gpu/arm/ump/Makefile index 9793569292268..4d4f02b0fb6b7 100755 --- a/drivers/gpu/arm/ump/Makefile +++ b/drivers/gpu/arm/ump/Makefile @@ -1,5 +1,5 @@ # -# Copyright (C) 2010-2012, 2014-2015 ARM Limited. All rights reserved. +# Copyright (C) 2010-2012, 2014, 2016 ARM Limited. All rights reserved. 
# # This program is free software and is provided to you under the terms of the GNU General Public License version 2 # as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -8,12 +8,6 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # -# Skip this Makefile when included from modpost -ifeq ($(KBUILD_EXTMOD),) - -TARGET_PLATFORM ?= aml-meson -CONFIG ?= aml-meson-m400-1 - # For each arch check: CROSS_COMPILE , KDIR , CFLAGS += -DARCH export ARCH ?= arm @@ -42,7 +36,7 @@ endif KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build ifeq ($(ARCH), arm) - # when compiling for ARM we're cross compiling +# when compiling for ARM we're cross compiling export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-) endif @@ -71,5 +65,3 @@ kernelrelease: clean: $(MAKE) -C $(KDIR) M=$(CURDIR) clean $(MAKE) -C $(KDIR) M=$(CURDIR)/../mali clean - -endif diff --git a/drivers/gpu/arm/ump/Makefile.common b/drivers/gpu/arm/ump/Makefile.common index 6b3b8d1efde48..00153ded9f98c 100755 --- a/drivers/gpu/arm/ump/Makefile.common +++ b/drivers/gpu/arm/ump/Makefile.common @@ -1,5 +1,5 @@ # -# Copyright (C) 2010-2011, 2013, 2015 ARM Limited. All rights reserved. +# Copyright (C) 2010-2011, 2013, 2016 ARM Limited. All rights reserved. # # This program is free software and is provided to you under the terms of the GNU General Public License version 2 # as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/Documentation/networking/.gitignore b/drivers/gpu/arm/ump/Module.symvers similarity index 100% rename from Documentation/networking/.gitignore rename to drivers/gpu/arm/ump/Module.symvers diff --git a/drivers/gpu/arm/ump/arch b/drivers/gpu/arm/ump/arch index b2d568a2fb551..f297691734dbe 120000 --- a/drivers/gpu/arm/ump/arch +++ b/drivers/gpu/arm/ump/arch @@ -1 +1 @@ -arch-aml-meson-m400-1 \ No newline at end of file +arch-default \ No newline at end of file diff --git a/drivers/gpu/arm/ump/arch-default/config.h b/drivers/gpu/arm/ump/arch-default/config.h new file mode 100755 index 0000000000000..620a4d284c1d3 --- /dev/null +++ b/drivers/gpu/arm/ump/arch-default/config.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2010, 2012, 2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __ARCH_CONFIG_H__ +#define __ARCH_CONFIG_H__ + +/* Use OS memory. */ +#define ARCH_UMP_BACKEND_DEFAULT 1 + +/* OS memory won't need a base address. */ +#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0 + +/* 512 MB maximum limit for UMP allocations. */ +#define ARCH_UMP_MEMORY_SIZE_DEFAULT 512UL * 1024UL * 1024UL + + +#endif /* __ARCH_CONFIG_H__ */ diff --git a/drivers/gpu/arm/ump/arch-pb-virtex5/config.h b/drivers/gpu/arm/ump/arch-pb-virtex5/config.h index 99679cd13fbf5..9160ce1cee80f 100755 --- a/drivers/gpu/arm/ump/arch-pb-virtex5/config.h +++ b/drivers/gpu/arm/ump/arch-pb-virtex5/config.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2014 ARM Limited. All rights reserved. + * Copyright (C) 2010-2013, 2016 ARM Limited. All rights reserved. 
* * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/ump/common/ump_kernel_api.c b/drivers/gpu/arm/ump/common/ump_kernel_api.c index 21dde81ef419f..2fdaa1eaebd52 100755 --- a/drivers/gpu/arm/ump/common/ump_kernel_api.c +++ b/drivers/gpu/arm/ump/common/ump_kernel_api.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -284,7 +284,7 @@ void _ump_ukk_msync(_ump_uk_msync_s *args) (ump_secure_id)args->secure_id, args->op, args->address, args->mapping)); if (args->address) { - virtual = (void *)((u32)args->address); + virtual = (void *)((uintptr_t)args->address); offset = (u32)((args->address) - (args->mapping)); } else { /* Flush entire mapping when no address is specified. 
*/ @@ -331,12 +331,12 @@ void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s *args) } else if (op == _UMP_UK_CACHE_OP_FINISH) { DBG_MSG(4, ("Cache ops finish\n")); session_data->cache_operations_ongoing--; -#if 0 + if (session_data->has_pending_level1_cache_flush) { /* This function will set has_pending_level1_cache_flush=0 */ _ump_osk_msync(NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data); } -#endif + /* to be on the safe side: always flush l1 cache when cache operations are done */ _ump_osk_msync(NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data); diff --git a/drivers/gpu/arm/ump/common/ump_kernel_common.c b/drivers/gpu/arm/ump/common/ump_kernel_common.c index ca410a8916c04..1f5ba47127255 100755 --- a/drivers/gpu/arm/ump/common/ump_kernel_common.c +++ b/drivers/gpu/arm/ump/common/ump_kernel_common.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/ump/common/ump_kernel_common.h b/drivers/gpu/arm/ump/common/ump_kernel_common.h index 2efe1ed1e7898..f6d0490e25d2c 100755 --- a/drivers/gpu/arm/ump/common/ump_kernel_common.h +++ b/drivers/gpu/arm/ump/common/ump_kernel_common.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -42,7 +42,7 @@ extern int ump_debug_level; #define DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) MSG_ERR(("NULL pointer " #pointer)); } while(0) #define DEBUG_ASSERT(condition) do {if(!(condition)) MSG_ERR(("ASSERT failed: " #condition)); } while(0) -#else /* DEBUG */ +#else #define UMP_DEBUG_PRINT(args) do {} while(0) #define UMP_DEBUG_CODE(args) #define DBG_MSG(level,args) do {} while(0) @@ -50,7 +50,7 @@ extern int ump_debug_level; #define DBG_MSG_ELSE(level,args) do {} while(0) #define DEBUG_ASSERT(condition) do {} while(0) #define DEBUG_ASSERT_POINTER(pointer) do {} while(0) -#endif /* DEBUG */ +#endif #define MSG_ERR(args) do{ /* args should be in brackets */ \ _mali_osk_dbgmsg("UMP: ERR: %s\n" ,__FILE__); \ @@ -110,7 +110,7 @@ extern struct ump_dev device; _mali_osk_errcode_t ump_kernel_constructor(void); void ump_kernel_destructor(void); -int map_errcode(_mali_osk_errcode_t err); +int ump_map_errcode(_mali_osk_errcode_t err); /** * variables from user space cannot be dereferenced from kernel space; tagging them diff --git a/drivers/gpu/arm/ump/common/ump_kernel_descriptor_mapping.c b/drivers/gpu/arm/ump/common/ump_kernel_descriptor_mapping.c index 45e2bb4f7bd7b..d22c049f0aa0c 100755 --- a/drivers/gpu/arm/ump/common/ump_kernel_descriptor_mapping.c +++ b/drivers/gpu/arm/ump/common/ump_kernel_descriptor_mapping.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2011, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/ump/common/ump_kernel_descriptor_mapping.h b/drivers/gpu/arm/ump/common/ump_kernel_descriptor_mapping.h index 60d691739c08a..bad215ab87a47 100755 --- a/drivers/gpu/arm/ump/common/ump_kernel_descriptor_mapping.h +++ b/drivers/gpu/arm/ump/common/ump_kernel_descriptor_mapping.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2011, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/ump/common/ump_kernel_memory_backend.h b/drivers/gpu/arm/ump/common/ump_kernel_memory_backend.h index 056244ebe1629..1b4fbc50f6dfc 100755 --- a/drivers/gpu/arm/ump/common/ump_kernel_memory_backend.h +++ b/drivers/gpu/arm/ump/common/ump_kernel_memory_backend.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2011, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/ump/common/ump_kernel_ref_drv.c b/drivers/gpu/arm/ump/common/ump_kernel_ref_drv.c index 37a682ac9ba43..39f32afdead64 100755 --- a/drivers/gpu/arm/ump/common/ump_kernel_ref_drv.c +++ b/drivers/gpu/arm/ump/common/ump_kernel_ref_drv.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. 
* * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/ump/common/ump_kernel_types.h b/drivers/gpu/arm/ump/common/ump_kernel_types.h index 098ce14127380..a1c9bd3449f3f 100755 --- a/drivers/gpu/arm/ump/common/ump_kernel_types.h +++ b/drivers/gpu/arm/ump/common/ump_kernel_types.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -15,6 +15,9 @@ #include "mali_osk.h" #include +#ifdef CONFIG_DMA_SHARED_BUFFER +#include +#endif typedef enum { UMP_USED_BY_CPU = 0, @@ -44,6 +47,10 @@ typedef struct ump_dd_mem { int is_cached; ump_hw_usage hw_device; ump_lock_usage lock_usage; +#ifdef CONFIG_DMA_SHARED_BUFFER + struct dma_buf_attachment *import_attach; + struct sg_table *sgt; +#endif } ump_dd_mem; diff --git a/drivers/gpu/arm/ump/common/ump_osk.h b/drivers/gpu/arm/ump/common/ump_osk.h index 6fe73662c6aea..bd1aef5aab54a 100755 --- a/drivers/gpu/arm/ump/common/ump_osk.h +++ b/drivers/gpu/arm/ump/common/ump_osk.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/ump/common/ump_uk_types.h b/drivers/gpu/arm/ump/common/ump_uk_types.h old mode 100755 new mode 100644 index d91b0707d9360..0e42a89db0138 --- a/drivers/gpu/arm/ump/common/ump_uk_types.h +++ b/drivers/gpu/arm/ump/common/ump_uk_types.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -48,6 +48,7 @@ typedef enum _UMP_IOC_SWITCH_HW_USAGE, _UMP_IOC_LOCK, _UMP_IOC_UNLOCK, + _UMP_IOC_DMABUF_IMPORT, } _ump_uk_functions; typedef enum @@ -186,6 +187,14 @@ typedef struct _ump_uk_unlock_s u32 secure_id; /**< [in] secure_id that identifies the ump buffer */ } _ump_uk_unlock_s; +typedef struct _ump_uk_dmabuf_s +{ + void *ctx; /**< [in,out] user-kernel context (trashed on output) */ + int fd; /**< [in] dmabuf_fd that identifies the dmabuf buffer */ + size_t size; /**< [in] size of the buffer */ + u32 secure_id; /**< [out] secure_id that identifies the ump buffer */ +} _ump_uk_dmabuf_s; + #ifdef __cplusplus } #endif diff --git a/drivers/gpu/arm/ump/common/ump_ukk.h b/drivers/gpu/arm/ump/common/ump_ukk.h index ad097e8b209dc..b8b1624b015f0 100755 --- a/drivers/gpu/arm/ump/common/ump_ukk.h +++ b/drivers/gpu/arm/ump/common/ump_ukk.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/ump/include/ump/ump_kernel_interface.h b/drivers/gpu/arm/ump/include/ump/ump_kernel_interface.h old mode 100755 new mode 100644 diff --git a/drivers/gpu/arm/ump/include/ump/ump_kernel_interface_ref_drv.h b/drivers/gpu/arm/ump/include/ump/ump_kernel_interface_ref_drv.h old mode 100755 new mode 100644 diff --git a/drivers/gpu/arm/ump/include/ump/ump_kernel_platform.h b/drivers/gpu/arm/ump/include/ump/ump_kernel_platform.h old mode 100755 new mode 100644 diff --git a/drivers/gpu/arm/ump/linux/license/gpl/ump_kernel_license.h b/drivers/gpu/arm/ump/linux/license/gpl/ump_kernel_license.h old mode 100755 new mode 100644 index 567d803b312e4..c0a1b838c6000 --- a/drivers/gpu/arm/ump/linux/license/gpl/ump_kernel_license.h +++ b/drivers/gpu/arm/ump/linux/license/gpl/ump_kernel_license.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/ump/linux/license/proprietary/ump_kernel_license.h b/drivers/gpu/arm/ump/linux/license/proprietary/ump_kernel_license.h new file mode 100644 index 0000000000000..bcf4e915af5f7 --- /dev/null +++ b/drivers/gpu/arm/ump/linux/license/proprietary/ump_kernel_license.h @@ -0,0 +1,30 @@ +/* + * This confidential and proprietary software may be used only as + * authorised by a licensing agreement from ARM Limited + * (C) COPYRIGHT 2010-2011, 2013, 2016 ARM Limited + * ALL RIGHTS RESERVED + * The entire notice above must be reproduced on all authorised + * copies and copies may only be made to the extent permitted + * by a licensing agreement from ARM Limited. + */ + +/** + * @file ump_kernel_license.h + * Defines for the macro MODULE_LICENSE. 
+ */ + +#ifndef __UMP_KERNEL_LICENSE_H__ +#define __UMP_KERNEL_LICENSE_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#define UMP_KERNEL_LINUX_LICENSE "Proprietary" +#define UMP_LICENSE_IS_GPL 0 + +#ifdef __cplusplus +} +#endif + +#endif /* __UMP_KERNEL_LICENSE_H__ */ diff --git a/drivers/gpu/arm/ump/linux/ump_ioctl.h b/drivers/gpu/arm/ump/linux/ump_ioctl.h index c6c8f82c0b389..b3fe7d3661c47 100755 --- a/drivers/gpu/arm/ump/linux/ump_ioctl.h +++ b/drivers/gpu/arm/ump/linux/ump_ioctl.h @@ -1,11 +1,21 @@ /* - * Copyright (C) 2010-2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + * Class Path Exception + * Linking this library statically or dynamically with other modules is making a combined work based on this library. + * Thus, the terms and conditions of the GNU General Public License cover the whole combination. + * As a special exception, the copyright holders of this library give you permission to link this library with independent modules + * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting + * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions + * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify + * this library, you may extend this exception to your version of the library, but you are not obligated to do so. 
+ * If you do not wish to do so, delete this exception statement from your version. */ #ifndef __UMP_IOCTL_H__ @@ -45,6 +55,7 @@ extern "C" { #define UMP_IOC_LOCK _IOW(UMP_IOCTL_NR, _UMP_IOC_LOCK, _ump_uk_lock_s) #define UMP_IOC_UNLOCK _IOW(UMP_IOCTL_NR, _UMP_IOC_UNLOCK, _ump_uk_unlock_s) +#define UMP_IOC_DMABUF_IMPORT _IOW(UMP_IOCTL_NR, _UMP_IOC_DMABUF_IMPORT, _ump_uk_dmabuf_s) #ifdef __cplusplus } diff --git a/drivers/gpu/arm/ump/linux/ump_kernel_linux.c b/drivers/gpu/arm/ump/linux/ump_kernel_linux.c index dea05b35ebf6d..c9b5cac8fa382 100755 --- a/drivers/gpu/arm/ump/linux/ump_kernel_linux.c +++ b/drivers/gpu/arm/ump/linux/ump_kernel_linux.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -73,7 +73,7 @@ struct ump_device { /* The global variable containing the global device data */ static struct ump_device ump_device; - +struct device *ump_global_mdev = NULL; /* Forward declare static functions */ static int ump_file_open(struct inode *inode, struct file *filp); @@ -107,12 +107,12 @@ static int ump_initialize_module(void) { _mali_osk_errcode_t err; -// DBG_MSG(2, ("Inserting UMP device driver. 
Compiled: %s, time: %s\n", __DATE__, __TIME__)); + DBG_MSG(2, ("Inserting UMP device driver.\n")); err = ump_kernel_constructor(); if (_MALI_OSK_ERR_OK != err) { MSG_ERR(("UMP device driver init failed\n")); - return map_errcode(err); + return ump_map_errcode(err); } MSG(("UMP device driver %s loaded\n", SVN_REV_STRING)); @@ -192,13 +192,12 @@ int ump_kernel_device_initialize(void) if (IS_ERR(ump_device.ump_class)) { err = PTR_ERR(ump_device.ump_class); } else { - struct device *mdev; - mdev = device_create(ump_device.ump_class, NULL, dev, NULL, ump_dev_name); - if (!IS_ERR(mdev)) { + ump_global_mdev = device_create(ump_device.ump_class, NULL, dev, NULL, ump_dev_name); + if (!IS_ERR(ump_global_mdev)) { return 0; } - err = PTR_ERR(mdev); + err = PTR_ERR(ump_global_mdev); } cdev_del(&ump_device.cdev); #else @@ -256,7 +255,7 @@ static int ump_file_open(struct inode *inode, struct file *filp) err = _ump_ukk_open((void **) &session_data); if (_MALI_OSK_ERR_OK != err) { MSG_ERR(("Ump failed to open a new session\n")); - return map_errcode(err); + return ump_map_errcode(err); } filp->private_data = (void *)session_data; @@ -276,7 +275,7 @@ static int ump_file_release(struct inode *inode, struct file *filp) err = _ump_ukk_close((void **) &filp->private_data); if (_MALI_OSK_ERR_OK != err) { - return map_errcode(err); + return ump_map_errcode(err); } return 0; /* success */ @@ -347,6 +346,15 @@ static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int c err = ump_unlock_wrapper((u32 __user *)argument, session_data); break; + case UMP_IOC_DMABUF_IMPORT: + #ifdef CONFIG_DMA_SHARED_BUFFER + err = ump_dmabuf_import_wrapper((u32 __user *)argument, session_data); + #else + err = -EFAULT; + DBG_MSG(1, ("User space use dmabuf API, but kernel don't support DMA BUF\n")); + #endif + break; + default: DBG_MSG(1, ("No handler for IOCTL. 
cmd: 0x%08x, arg: 0x%08lx\n", cmd, arg)); err = -EFAULT; @@ -356,7 +364,7 @@ static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int c return err; } -int map_errcode(_mali_osk_errcode_t err) +int ump_map_errcode(_mali_osk_errcode_t err) { switch (err) { case _MALI_OSK_ERR_OK : @@ -418,7 +426,7 @@ static int ump_file_mmap(struct file *filp, struct vm_area_struct *vma) err = _ump_ukk_map_mem(&args); if (_MALI_OSK_ERR_OK != err) { MSG_ERR(("_ump_ukk_map_mem() failed in function ump_file_mmap()")); - return map_errcode(err); + return ump_map_errcode(err); } return 0; /* success */ diff --git a/drivers/gpu/arm/ump/linux/ump_kernel_linux.h b/drivers/gpu/arm/ump/linux/ump_kernel_linux.h index 41dae7988c49d..f7c0a96beddba 100755 --- a/drivers/gpu/arm/ump/linux/ump_kernel_linux.h +++ b/drivers/gpu/arm/ump/linux/ump_kernel_linux.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2013, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_dedicated.c b/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_dedicated.c index 9f329e9524a14..517b46bbc04d4 100755 --- a/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_dedicated.c +++ b/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_dedicated.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2011, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_dedicated.h b/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_dedicated.h index 283dde7938355..8c1ea1e749f64 100755 --- a/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_dedicated.h +++ b/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_dedicated.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2014-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_os.c b/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_os.c index 763b6109e486b..f07cbe6cc7166 100755 --- a/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_os.c +++ b/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_os.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2011, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -26,7 +26,7 @@ #include "ump_kernel_common.h" #include "ump_kernel_memory_backend.h" - +extern struct device *ump_global_mdev; typedef struct os_allocator { struct semaphore mutex; @@ -146,7 +146,7 @@ static int os_allocate(void *ctx, ump_dd_mem *descriptor) descriptor->block_array[pages_allocated].addr = page_to_phys(new_page); descriptor->block_array[pages_allocated].size = PAGE_SIZE; } else { - descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); + descriptor->block_array[pages_allocated].addr = dma_map_page(ump_global_mdev, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); descriptor->block_array[pages_allocated].size = PAGE_SIZE; } @@ -169,7 +169,7 @@ static int os_allocate(void *ctx, ump_dd_mem *descriptor) while (pages_allocated) { pages_allocated--; if (!is_cached) { - dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + dma_unmap_page(ump_global_mdev, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL); } __free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT)); } @@ -218,7 +218,7 @@ static void os_free(void *ctx, ump_dd_mem *descriptor) for (i = 0; i < descriptor->nr_blocks; i++) { DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr)); if (! 
descriptor->is_cached) { - dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + dma_unmap_page(ump_global_mdev, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL); } __free_page(pfn_to_page(descriptor->block_array[i].addr >> PAGE_SHIFT)); } diff --git a/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_os.h b/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_os.h index 22666573bec7d..2d62f4dd14487 100755 --- a/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_os.h +++ b/drivers/gpu/arm/ump/linux/ump_kernel_memory_backend_os.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2014-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/ump/linux/ump_kernel_random_mapping.c b/drivers/gpu/arm/ump/linux/ump_kernel_random_mapping.c index 8fed5c38508a6..cd1c457ca75d4 100755 --- a/drivers/gpu/arm/ump/linux/ump_kernel_random_mapping.c +++ b/drivers/gpu/arm/ump/linux/ump_kernel_random_mapping.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2011, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -186,6 +186,21 @@ void ump_random_mapping_put(ump_dd_mem *mem) if (0 == new_ref) { DBG_MSG(3, ("Final release of memory. 
ID: %u\n", mem->secure_id)); +#ifdef CONFIG_DMA_SHARED_BUFFER + if (mem->import_attach) { + struct dma_buf_attachment *attach = mem->import_attach; + struct dma_buf *dma_buf; + + if (mem->sgt) + dma_buf_unmap_attachment(attach, mem->sgt, + DMA_BIDIRECTIONAL); + + dma_buf = attach->dmabuf; + dma_buf_detach(attach->dmabuf, attach); + dma_buf_put(dma_buf); + + } +#endif ump_random_mapping_remove_internal(device.secure_id_map, mem->secure_id); mem->release_func(mem->ctx, mem); diff --git a/drivers/gpu/arm/ump/linux/ump_kernel_random_mapping.h b/drivers/gpu/arm/ump/linux/ump_kernel_random_mapping.h index d304157b1ffbc..67d4a864bc478 100755 --- a/drivers/gpu/arm/ump/linux/ump_kernel_random_mapping.h +++ b/drivers/gpu/arm/ump/linux/ump_kernel_random_mapping.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2011, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/ump/linux/ump_memory_backend.c b/drivers/gpu/arm/ump/linux/ump_memory_backend.c index 3baa2ac4aa235..87b141904cf1a 100755 --- a/drivers/gpu/arm/ump/linux/ump_memory_backend.c +++ b/drivers/gpu/arm/ump/linux/ump_memory_backend.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/ump/linux/ump_osk_atomics.c b/drivers/gpu/arm/ump/linux/ump_osk_atomics.c index 9fae2953308a3..f9fc1943b1403 100755 --- a/drivers/gpu/arm/ump/linux/ump_osk_atomics.c +++ b/drivers/gpu/arm/ump/linux/ump_osk_atomics.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2014-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/ump/linux/ump_osk_low_level_mem.c b/drivers/gpu/arm/ump/linux/ump_osk_low_level_mem.c index 0d81fbe326b06..0af88bb0070c3 100755 --- a/drivers/gpu/arm/ump/linux/ump_osk_low_level_mem.c +++ b/drivers/gpu/arm/ump/linux/ump_osk_low_level_mem.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -198,7 +198,7 @@ _mali_osk_errcode_t _ump_osk_mem_mapregion_map(ump_memory_allocation *descriptor if (NULL == vma) return _MALI_OSK_ERR_FAULT; - retval = remap_pfn_range(vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;; + retval = remap_pfn_range(vma, ((uintptr_t)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;; DBG_MSG(4, ("Mapping virtual to physical memory. 
ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n", ump_dd_secure_id_get(descriptor->handle), @@ -214,7 +214,7 @@ _mali_osk_errcode_t _ump_osk_mem_mapregion_map(ump_memory_allocation *descriptor static void level1_cache_flush_all(void) { DBG_MSG(4, ("UMP[xx] Flushing complete L1 cache\n")); - __cpuc_flush_kern_all(); + flush_cache_all(); } void _ump_osk_msync(ump_dd_mem *mem, void *virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data *session_data) @@ -224,7 +224,7 @@ void _ump_osk_msync(ump_dd_mem *mem, void *virt, u32 offset, u32 size, ump_uk_ms /* Flush L1 using virtual address, the entire range in one go. * Only flush if user space process has a valid write mapping on given address. */ if ((mem) && (virt != NULL) && (access_ok(VERIFY_WRITE, virt, size))) { - __cpuc_flush_dcache_area(virt, size); + __flush_dcache_area(virt, size); DBG_MSG(3, ("UMP[%02u] Flushing CPU L1 Cache. CPU address: %x, size: %x\n", mem->secure_id, virt, size)); } else { if (session_data) { @@ -292,13 +292,10 @@ void _ump_osk_msync(ump_dd_mem *mem, void *virt, u32 offset, u32 size, ump_uk_ms switch (op) { case _UMP_UK_MSYNC_CLEAN: - outer_clean_range(start_p, end_p); break; case _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE: - outer_flush_range(start_p, end_p); break; case _UMP_UK_MSYNC_INVALIDATE: - outer_inv_range(start_p, end_p); break; default: break; diff --git a/drivers/gpu/arm/ump/linux/ump_osk_misc.c b/drivers/gpu/arm/ump/linux/ump_osk_misc.c index bd985d3afb726..ce0ab06f8e41d 100755 --- a/drivers/gpu/arm/ump/linux/ump_osk_misc.c +++ b/drivers/gpu/arm/ump/linux/ump_osk_misc.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved. 
* * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. diff --git a/drivers/gpu/arm/ump/linux/ump_ukk_ref_wrappers.c b/drivers/gpu/arm/ump/linux/ump_ukk_ref_wrappers.c index 8350d3e0b75b6..4a58e93e60dff 100755 --- a/drivers/gpu/arm/ump/linux/ump_ukk_ref_wrappers.c +++ b/drivers/gpu/arm/ump/linux/ump_ukk_ref_wrappers.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -20,6 +20,11 @@ #include "ump_uk_types.h" #include "ump_ukk.h" #include "ump_kernel_common.h" +#include +#include "ump_kernel_interface_ref_drv.h" +#include "mali_osk_list.h" + +extern struct device *ump_global_mdev; /* * IOCTL operation; Allocate UMP memory @@ -46,7 +51,7 @@ int ump_allocate_wrapper(u32 __user *argument, struct ump_session_data *sessio err = _ump_ukk_allocate(&user_interaction); if (_MALI_OSK_ERR_OK != err) { DBG_MSG(1, ("_ump_ukk_allocate() failed in ump_ioctl_allocate()\n")); - return map_errcode(err); + return ump_map_errcode(err); } user_interaction.ctx = NULL; @@ -69,3 +74,157 @@ int ump_allocate_wrapper(u32 __user *argument, struct ump_session_data *sessio return 0; /* success */ } + +#ifdef CONFIG_DMA_SHARED_BUFFER +static ump_dd_handle get_ump_handle_from_dmabuf(struct ump_session_data *session_data, + struct dma_buf *dmabuf) +{ + ump_session_memory_list_element *session_mem, *tmp; + struct dma_buf_attachment *attach; + ump_dd_handle ump_handle; + + DEBUG_ASSERT_POINTER(session_data); + + _mali_osk_mutex_wait(session_data->lock); + + 
_MALI_OSK_LIST_FOREACHENTRY(session_mem, tmp, + &session_data->list_head_session_memory_list, + ump_session_memory_list_element, list) { + if (session_mem->mem->import_attach) { + attach = session_mem->mem->import_attach; + if (attach->dmabuf == dmabuf) { + _mali_osk_mutex_signal(session_data->lock); + ump_handle = (ump_dd_handle)session_mem->mem; + ump_random_mapping_get(device.secure_id_map, ump_dd_secure_id_get(ump_handle)); + return ump_handle; + } + } + } + + _mali_osk_mutex_signal(session_data->lock); + + return NULL; +} + +int ump_dmabuf_import_wrapper(u32 __user *argument, + struct ump_session_data *session_data) +{ + ump_session_memory_list_element *session = NULL; + _ump_uk_dmabuf_s ump_dmabuf; + ump_dd_handle ump_handle; + ump_dd_physical_block *blocks = NULL; + struct dma_buf_attachment *attach = NULL; + struct dma_buf *dma_buf; + struct sg_table *sgt = NULL; + struct scatterlist *sgl; + unsigned int i = 0; + int ret = 0; + + /* Sanity check input parameters */ + if (!argument || !session_data) { + MSG_ERR(("NULL parameter.\n")); + return -EINVAL; + } + + if (copy_from_user(&ump_dmabuf, argument, + sizeof(_ump_uk_dmabuf_s))) { + MSG_ERR(("copy_from_user() failed.\n")); + return -EFAULT; + } + + dma_buf = dma_buf_get(ump_dmabuf.fd); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); + + /* + * if already imported then increase a refcount to the ump descriptor + * and call dma_buf_put() and then go to found to return previous + * ump secure id. 
+ */ + ump_handle = get_ump_handle_from_dmabuf(session_data, dma_buf); + if (ump_handle) { + dma_buf_put(dma_buf); + goto found; + } + + attach = dma_buf_attach(dma_buf, ump_global_mdev); + if (IS_ERR(attach)) { + ret = PTR_ERR(attach); + goto err_dma_buf_put; + } + + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + goto err_dma_buf_detach; + } + + blocks = (ump_dd_physical_block *)_mali_osk_malloc(sizeof(ump_dd_physical_block) * sgt->nents); + if (!blocks) { + DBG_MSG(1, ("Failed to allocate blocks.\n")); + ret = -EFAULT; + goto err_dma_buf_unmap; + } + for_each_sg(sgt->sgl, sgl, sgt->nents, i) { + blocks[i].addr = sg_phys(sgl); + blocks[i].size = sg_dma_len(sgl); + } + + /* + * Initialize the session memory list element, and add it + * to the session object + */ + session = _mali_osk_calloc(1, sizeof(*session)); + if (!session) { + DBG_MSG(1, ("Failed to allocate session.\n")); + ret = -EFAULT; + goto err_free_block; + } + + ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, i); + if (UMP_DD_HANDLE_INVALID == ump_handle) { + DBG_MSG(1, ("Failed to create ump handle.\n")); + ret = -EFAULT; + goto err_free_session; + } + + session->mem = (ump_dd_mem *)ump_handle; + session->mem->import_attach = attach; + session->mem->sgt = sgt; + + _mali_osk_mutex_wait(session_data->lock); + _mali_osk_list_add(&(session->list), + &(session_data->list_head_session_memory_list)); + _mali_osk_mutex_signal(session_data->lock); + + _mali_osk_free(blocks); + +found: + ump_dmabuf.ctx = (void *)session_data; + ump_dmabuf.secure_id = ump_dd_secure_id_get(ump_handle); + ump_dmabuf.size = ump_dd_size_get(ump_handle); + + if (copy_to_user(argument, &ump_dmabuf, + sizeof(_ump_uk_dmabuf_s))) { + MSG_ERR(("copy_to_user() failed.\n")); + ret = -EFAULT; + goto err_release_ump_handle; + } + + return ret; + +err_release_ump_handle: + ump_dd_reference_release(ump_handle); +err_free_session: + _mali_osk_free(session); +err_free_block: + 
_mali_osk_free(blocks); +err_dma_buf_unmap: + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); +err_dma_buf_detach: + dma_buf_detach(dma_buf, attach); +err_dma_buf_put: + dma_buf_put(dma_buf); + return ret; +} +#endif diff --git a/drivers/gpu/arm/ump/linux/ump_ukk_ref_wrappers.h b/drivers/gpu/arm/ump/linux/ump_ukk_ref_wrappers.h index d0cdbb5281ff1..b8b2ccefe603a 100755 --- a/drivers/gpu/arm/ump/linux/ump_ukk_ref_wrappers.h +++ b/drivers/gpu/arm/ump/linux/ump_ukk_ref_wrappers.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -25,7 +25,9 @@ extern "C" { int ump_allocate_wrapper(u32 __user *argument, struct ump_session_data *session_data); - +#ifdef CONFIG_DMA_SHARED_BUFFER +int ump_dmabuf_import_wrapper(u32 __user *argument, struct ump_session_data *session_data); +#endif #ifdef __cplusplus } diff --git a/drivers/gpu/arm/ump/linux/ump_ukk_wrappers.c b/drivers/gpu/arm/ump/linux/ump_ukk_wrappers.c index 2616ebf2c81f9..2a88f50b545dc 100755 --- a/drivers/gpu/arm/ump/linux/ump_ukk_wrappers.c +++ b/drivers/gpu/arm/ump/linux/ump_ukk_wrappers.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
@@ -44,7 +44,7 @@ int ump_get_api_version_wrapper(u32 __user *argument, struct ump_session_data *s err = _ump_uku_get_api_version(&version_info); if (_MALI_OSK_ERR_OK != err) { MSG_ERR(("_ump_uku_get_api_version() failed in ump_ioctl_get_api_version()\n")); - return map_errcode(err); + return ump_map_errcode(err); } version_info.ctx = NULL; @@ -83,7 +83,7 @@ int ump_release_wrapper(u32 __user *argument, struct ump_session_data *session err = _ump_ukk_release(&release_args); if (_MALI_OSK_ERR_OK != err) { MSG_ERR(("_ump_ukk_release() failed in ump_ioctl_release()\n")); - return map_errcode(err); + return ump_map_errcode(err); } @@ -113,7 +113,7 @@ int ump_size_get_wrapper(u32 __user *argument, struct ump_session_data *sessio err = _ump_ukk_size_get(&user_interaction); if (_MALI_OSK_ERR_OK != err) { MSG_ERR(("_ump_ukk_size_get() failed in ump_ioctl_size_get()\n")); - return map_errcode(err); + return ump_map_errcode(err); } user_interaction.ctx = NULL; diff --git a/drivers/gpu/arm/ump/linux/ump_ukk_wrappers.h b/drivers/gpu/arm/ump/linux/ump_ukk_wrappers.h index 05000804af0a9..1adad5b798c9f 100755 --- a/drivers/gpu/arm/ump/linux/ump_ukk_wrappers.h +++ b/drivers/gpu/arm/ump/linux/ump_ukk_wrappers.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/gpu/arm/ump/readme.txt b/drivers/gpu/arm/ump/readme.txt old mode 100755 new mode 100644 diff --git a/drivers/gpu/arm/umplock/Makefile b/drivers/gpu/arm/umplock/Makefile old mode 100755 new mode 100644 index 8b5a6d9be8213..a622e9c17eac5 --- a/drivers/gpu/arm/umplock/Makefile +++ b/drivers/gpu/arm/umplock/Makefile @@ -1,69 +1 @@ -# -# Copyright (C) 2012, 2015 ARM Limited. All rights reserved. -# -# This program is free software and is provided to you under the terms of the GNU General Public License version 2 -# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. -# -# A copy of the licence is included with the program, and can also be obtained from Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# - -# default to building for the host -ARCH ?= $(shell uname -m) - -# linux build system integration - -ifneq ($(KERNELRELEASE),) -# Inside the kernel build system - -EXTRA_CFLAGS += -I$(KBUILD_EXTMOD) - -SRC = umplock_driver.c - -MODULE:=umplock.ko - -obj-m := $(MODULE:.ko=.o) -$(MODULE:.ko=-y) := $(SRC:.c=.o) - -$(MODULE:.ko=-objs) := $(SRC:.c=.o) - -else -# Outside the kernel build system -# -# - -# Get any user defined KDIR- or maybe even a hardcoded KDIR --include KDIR_CONFIGURATION - -# Define host system directory -KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build - -ifeq ($(ARCH), arm) - # when compiling for ARM we're cross compiling - export CROSS_COMPILE ?= arm-none-linux-gnueabi- - CONFIG ?= arm -else - # Compiling for the host - CONFIG ?= $(shell uname -m) -endif - -# default cpu to select -CPU ?= $(shell uname -m) - -# look up KDIR based om CPU selection -KDIR ?= $(KDIR-$(CPU)) - -ifeq ($(KDIR),) -$(error No KDIR found for platform $(CPU)) -endif - -all: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) - -kernelrelease: - $(MAKE) -C $(KDIR) kernelrelease - -clean: - $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) 
clean - -endif +obj-y += umplock_driver.o diff --git a/drivers/gpu/arm/umplock/umplock_driver.c b/drivers/gpu/arm/umplock/umplock_driver.c old mode 100755 new mode 100644 index f117177fcc3e0..2a26e31b66855 --- a/drivers/gpu/arm/umplock/umplock_driver.c +++ b/drivers/gpu/arm/umplock/umplock_driver.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2014 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. @@ -29,7 +29,6 @@ typedef struct lock_cmd_priv { typedef struct lock_ref { int ref_count; u32 pid; - u32 down_count; } _lock_ref; typedef struct umplock_item { @@ -142,7 +141,6 @@ static int do_umplock_create_locked(_lock_cmd_priv *lock_cmd) if (ref_index < MAX_PIDS) { device.items[i_index].references[ref_index].pid = lock_cmd->pid; device.items[i_index].references[ref_index].ref_count = 0; - device.items[i_index].references[ref_index].down_count = 0; } else { PERROR("whoops, item ran out of available reference slots\n"); return -EINVAL; @@ -157,7 +155,6 @@ static int do_umplock_create_locked(_lock_cmd_priv *lock_cmd) device.items[i_index].usage = lock_item->usage; device.items[i_index].references[0].pid = lock_cmd->pid; device.items[i_index].references[0].ref_count = 0; - device.items[i_index].references[0].down_count = 0; sema_init(&device.items[i_index].item_lock, 1); } else { PERROR("whoops, ran out of available slots\n"); @@ -210,14 +207,12 @@ static int do_umplock_process(_lock_cmd_priv *lock_cmd) return 0; } - device.items[i_index].references[ref_index].down_count++; mutex_unlock(&device.item_list_lock); if (down_interruptible(&device.items[i_index].item_lock)) { /*wait up without hold the umplock. 
restore previous state and return*/ mutex_lock(&device.item_list_lock); device.items[i_index].references[ref_index].ref_count--; device.items[i_index].id_ref_count--; - device.items[i_index].references[ref_index].down_count--; if (0 == device.items[i_index].references[ref_index].ref_count) { device.items[i_index].references[ref_index].pid = 0; if (0 == device.items[i_index].id_ref_count) { @@ -226,7 +221,7 @@ static int do_umplock_process(_lock_cmd_priv *lock_cmd) } } - PERROR("failed lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count); + PDEBUG(1, "failed lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count); mutex_unlock(&device.item_list_lock); return -ERESTARTSYS; @@ -242,7 +237,7 @@ static int do_umplock_process(_lock_cmd_priv *lock_cmd) static int do_umplock_release(_lock_cmd_priv *lock_cmd) { - int ret, i_index, ref_index, call_up; + int ret, i_index, ref_index; _lock_item_s *lock_item = (_lock_item_s *)&lock_cmd->msg; mutex_lock(&device.item_list_lock); @@ -286,11 +281,6 @@ static int do_umplock_release(_lock_cmd_priv *lock_cmd) device.items[i_index].id_ref_count--; PDEBUG(1, "unlock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count); - call_up = 0; - if (device.items[i_index].references[ref_index].down_count > 1) { - call_up = 1; - device.items[i_index].references[ref_index].down_count--; - } if (0 == device.items[i_index].references[ref_index].ref_count) { device.items[i_index].references[ref_index].pid = 0; if (0 == device.items[i_index].id_ref_count) { @@ -298,10 +288,6 @@ static int do_umplock_release(_lock_cmd_priv *lock_cmd) device.items[i_index].secure_id = 0; } device.items[i_index].owner = 0; - call_up = 1; - } - if (call_up) { - PDEBUG(1, "call up, pid: %d, secure_id: 0x%x\n", 
lock_cmd->pid, lock_item->secure_id); up(&device.items[i_index].item_lock); } mutex_unlock(&device.item_list_lock); @@ -530,6 +516,7 @@ static struct file_operations umplock_fops = { .open = umplock_driver_open, .release = umplock_driver_release, .unlocked_ioctl = umplock_driver_ioctl, + .compat_ioctl = umplock_driver_ioctl, }; int umplock_device_initialize(void) @@ -581,7 +568,7 @@ void umplock_device_terminate(void) static int __init umplock_initialize_module(void) { - PDEBUG(1, "Inserting UMP lock device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__); + PDEBUG(1, "Inserting UMP lock device driver."); mutex_init(&device.item_list_lock); if (umplock_device_initialize() != 0) { diff --git a/drivers/gpu/arm/umplock/umplock_ioctl.h b/drivers/gpu/arm/umplock/umplock_ioctl.h old mode 100755 new mode 100644 index e7ae81c28964b..9ac5b7f25a6eb --- a/drivers/gpu/arm/umplock/umplock_ioctl.h +++ b/drivers/gpu/arm/umplock/umplock_ioctl.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012-2013, 2015 ARM Limited. All rights reserved. + * Copyright (C) 2012-2014 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 8bb35272433ec..60bbc66466300 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -1200,8 +1200,8 @@ int sdio_reset_comm(struct mmc_card *card) printk("%s():\n", __func__); mmc_claim_host(host); - /* for realtek sdio wifi need send IO reset command firstly */ - if (588 == card->cis.vendor) + /* for realtek / s9082c sdio wifi need send IO reset command firstly */ + if ((588 == card->cis.vendor) || (743 == card->cis.vendor)) sdio_reset(host); mmc_go_idle(host); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index df2126e7144d9..d85de74da0614 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -59,11 +59,14 @@ #endif #include #include +#ifdef CONFIG_AMLOGIC_CPU_INFO +#include +#endif extern unsigned int g_mac_addr_setup; #if defined (CONFIG_EFUSE) -extern char *efuse_get_mac(char *addr); +extern int efuse_get_mac(char *addr); #endif #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) @@ -2426,6 +2429,12 @@ static int stmmac_open(struct net_device *dev) int ret = 0; +#ifdef CONFIG_AMLOGIC_CPU_INFO + static const u8 def_mac[] = {0x00, 0x15, 0x18, 0x01, 0x81, 0x31}; + unsigned int low0, low1, high0, high1; + u8 buf[6]; +#endif + if (g_mac_addr_setup == 0) { #if defined (CONFIG_AML_NAND_KEY) || defined (CONFIG_SECURITYKEY) ret = read_mac_from_nand(dev); @@ -2433,11 +2442,25 @@ static int stmmac_open(struct net_device *dev) #endif { #if defined CONFIG_EFUSE - efuse_get_mac(dev->dev_addr); + ret = efuse_get_mac(dev->dev_addr); #endif } } +#ifdef CONFIG_AMLOGIC_CPU_INFO + if (ether_addr_equal(priv->dev->dev_addr, def_mac) || + !is_valid_ether_addr(priv->dev->dev_addr) || + ret < 0) { + printk("%s: generate MAC from CPU serial number\n", __func__); + cpuinfo_get_chipid(&low0, &low1, &high0, &high1); + memcpy(buf, &low0, 6); + buf[0] &= 0xfe; /* clear multicast bit 
*/ + buf[0] |= 0x02; /* set local assignment bit (IEEE802) */ + printk("%s: generated MAC address: %pM\n", __func__, buf); + ether_addr_copy(dev->dev_addr, buf); + } +#endif + stmmac_check_ether_addr(priv); if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && diff --git a/drivers/net/wan/.gitignore b/drivers/net/wan/.gitignore deleted file mode 100644 index dae3ea6bb18cd..0000000000000 --- a/drivers/net/wan/.gitignore +++ /dev/null @@ -1 +0,0 @@ -wanxlfw.inc diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 33e739aedb9e5..eba19fe8eaf8e 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -282,7 +282,6 @@ source "drivers/net/wireless/orinoco/Kconfig" source "drivers/net/wireless/p54/Kconfig" source "drivers/net/wireless/rt2x00/Kconfig" source "drivers/net/wireless/rtlwifi/Kconfig" -source "drivers/net/wireless/bcmdhd/Kconfig" source "drivers/net/wireless/ti/Kconfig" source "drivers/net/wireless/zd1211rw/Kconfig" source "drivers/net/wireless/mwifiex/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 23cba29f11461..0fab227025be3 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -25,7 +25,6 @@ obj-$(CONFIG_ZD1211RW) += zd1211rw/ obj-$(CONFIG_RTL8180) += rtl818x/ obj-$(CONFIG_RTL8187) += rtl818x/ obj-$(CONFIG_RTLWIFI) += rtlwifi/ -obj-$(CONFIG_BCMDHD) += bcmdhd/ # 16-bit wireless PCMCIA client drivers obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o diff --git a/drivers/net/wireless/bcmdhd/circularbuf.c b/drivers/net/wireless/bcmdhd/circularbuf.c deleted file mode 100644 index bfb308c34f05d..0000000000000 --- a/drivers/net/wireless/bcmdhd/circularbuf.c +++ /dev/null @@ -1,324 +0,0 @@ -/** @file circularbuf.c - * - * PCIe host driver and dongle firmware need to communicate with each other. The mechanism consists - * of multiple circular buffers located in (DMA'able) host memory. 
A circular buffer is either used - * for host -> dongle (h2d) or dongle -> host communication. Both host driver and firmware make use - * of this source file. This source file contains functions to manage such a set of circular - * buffers, but does not contain the code to read or write the data itself into the buffers. It - * leaves that up to the software layer that uses this file, which can be implemented either using - * pio or DMA transfers. It also leaves the format of the data that is written and read to a higher - * layer. Typically the data is in the form of so-called 'message buffers'. - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: circularbuf.c 467150 2014-04-02 17:30:43Z $ - */ - -#include -#include -#include - -#define CIRCULARBUF_READ_SPACE_AT_END(x) \ - ((x->w_ptr >= x->rp_ptr) ? (x->w_ptr - x->rp_ptr) : (x->e_ptr - x->rp_ptr)) - -#define CIRCULARBUF_READ_SPACE_AVAIL(x) \ - (((CIRCULARBUF_READ_SPACE_AT_END(x) == 0) && (x->w_ptr < x->rp_ptr)) ? \ - x->w_ptr : CIRCULARBUF_READ_SPACE_AT_END(x)) - -int cbuf_msg_level = CBUF_ERROR_VAL | CBUF_TRACE_VAL | CBUF_INFORM_VAL; - -/* #define CBUF_DEBUG */ -#ifdef CBUF_DEBUG -#define CBUF_DEBUG_CHECK(x) x -#else -#define CBUF_DEBUG_CHECK(x) -#endif /* CBUF_DEBUG */ - -/** - * ----------------------------------------------------------------------------- - * Function : circularbuf_init - * Description: - * - * - * Input Args : buf_base_addr: address of DMA'able host memory provided by caller - * - * - * Return Values : - * - * ----------------------------------------------------------------------------- - */ -void -circularbuf_init(circularbuf_t *handle, void *buf_base_addr, uint16 total_buf_len) -{ - handle->buf_addr = buf_base_addr; - - handle->depth = handle->e_ptr = HTOL32(total_buf_len); - - /* Initialize Read and Write pointers */ - handle->w_ptr = handle->r_ptr = handle->wp_ptr = handle->rp_ptr = HTOL32(0); - handle->mb_ring_bell = NULL; - handle->mb_ctx = NULL; - - return; -} - -/** - * When an 
item is added to the circular buffer by the producing party, the consuming party has to - * be notified by means of a 'door bell' or 'ring'. This function allows the caller to register a - * 'ring' function that will be called when a 'write complete' occurs. - */ -void -circularbuf_register_cb(circularbuf_t *handle, mb_ring_t mb_ring_func, void *ctx) -{ - handle->mb_ring_bell = mb_ring_func; - handle->mb_ctx = ctx; -} - -#ifdef CBUF_DEBUG -static void -circularbuf_check_sanity(circularbuf_t *handle) -{ - if ((handle->e_ptr > handle->depth) || - (handle->r_ptr > handle->e_ptr) || - (handle->rp_ptr > handle->e_ptr) || - (handle->w_ptr > handle->e_ptr)) - { - printf("%s:%d: Pointers are corrupted.\n", __FUNCTION__, __LINE__); - circularbuf_debug_print(handle); - ASSERT(0); - } - return; -} -#endif /* CBUF_DEBUG */ - -/** - * ----------------------------------------------------------------------------- - * Function : circularbuf_reserve_for_write - * - * Description: - * This function reserves N bytes for write in the circular buffer. The circularbuf - * implementation will only reserve space in the circular buffer and return - * the pointer to the address where the new data can be written. - * The actual write implementation (bcopy/dma) is outside the scope of - * circularbuf implementation. - * - * Input Args : - * size - No. of bytes to reserve for write - * - * Return Values : - * void * : Pointer to the reserved location. 
This is the address - * that will be used for write (dma/bcopy) - * - * ----------------------------------------------------------------------------- - */ -void * BCMFASTPATH -circularbuf_reserve_for_write(circularbuf_t *handle, uint16 size) -{ - int16 avail_space; - void *ret_ptr = NULL; - - CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle)); - ASSERT(size < handle->depth); - - if (handle->wp_ptr >= handle->r_ptr) - avail_space = handle->depth - handle->wp_ptr; - else - avail_space = handle->r_ptr - handle->wp_ptr; - - ASSERT(avail_space <= handle->depth); - if (avail_space > size) - { - /* Great. We have enough space. */ - ret_ptr = CIRCULARBUF_START(handle) + handle->wp_ptr; - - /* - * We need to update the wp_ptr for the next guy to write. - * - * Please Note : We are not updating the write pointer here. This can be - * done only after write is complete (In case of DMA, we can only schedule - * the DMA. Actual completion will be known only on DMA complete interrupt). - */ - handle->wp_ptr += size; - return ret_ptr; - } - - /* - * If there is no available space, we should check if there is some space left - * in the beginning of the circular buffer. Wrap-around case, where there is - * not enough space in the end of the circular buffer. But, there might be - * room in the beginning of the buffer. - */ - if (handle->wp_ptr >= handle->r_ptr) - { - avail_space = handle->r_ptr; - if (avail_space > size) - { - /* OK. There is room in the beginning. Let's go ahead and use that. - * But, before that, we have left a hole at the end of the circular - * buffer as that was not sufficient to accomodate the requested - * size. Let's make sure this is updated in the circularbuf structure - * so that consumer does not use the hole. - */ - handle->e_ptr = handle->wp_ptr; - handle->wp_ptr = size; - - return CIRCULARBUF_START(handle); - } - } - - /* We have tried enough to accomodate the new packet. There is no room for now. 
*/ - return NULL; -} - -/** - * ----------------------------------------------------------------------------- - * Function : circularbuf_write_complete - * - * Description: - * This function has to be called by the producer end of circularbuf to indicate to - * the circularbuf layer that data has been written and the write pointer can be - * updated. In the process, if there was a doorbell callback registered, that - * function would also be invoked as to notify the consuming party. - * - * Input Args : - * dest_addr : Address where the data was written. This would be the - * same address that was reserved earlier. - * bytes_written : Length of data written - * - * ----------------------------------------------------------------------------- - */ -void BCMFASTPATH -circularbuf_write_complete(circularbuf_t *handle, uint16 bytes_written) -{ - CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle)); - - /* Update the write pointer */ - if ((handle->w_ptr + bytes_written) >= handle->depth) { - OSL_CACHE_FLUSH((void *) CIRCULARBUF_START(handle), bytes_written); - handle->w_ptr = bytes_written; - } else { - OSL_CACHE_FLUSH((void *) (CIRCULARBUF_START(handle) + handle->w_ptr), - bytes_written); - handle->w_ptr += bytes_written; - } - - /* And ring the door bell (mail box interrupt) to indicate to the peer that - * message is available for consumption. - */ - if (handle->mb_ring_bell) - handle->mb_ring_bell(handle->mb_ctx); -} - -/** - * ----------------------------------------------------------------------------- - * Function : circularbuf_get_read_ptr - * - * Description: - * This function will be called by the consumer of circularbuf for reading data from - * the circular buffer. This will typically be invoked when the consumer gets a - * doorbell interrupt. - * Please note that the function only returns the pointer (and length) from - * where the data can be read. Actual read implementation is up to the - * consumer. It could be a bcopy or dma. 
- * - * Input Args : - * void * : Address from where the data can be read. - * available_len : Length of data available for read. - * - * ----------------------------------------------------------------------------- - */ -void * BCMFASTPATH -circularbuf_get_read_ptr(circularbuf_t *handle, uint16 *available_len) -{ - uint8 *ret_addr; - - CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle)); - - /* First check if there is any data available in the circular buffer */ - *available_len = CIRCULARBUF_READ_SPACE_AVAIL(handle); - if (*available_len == 0) - return NULL; - - /* - * Although there might be data in the circular buffer for read, in - * cases of write wrap-around and read still in the end of the circular - * buffer, we might have to wrap around the read pending pointer also. - */ - if (CIRCULARBUF_READ_SPACE_AT_END(handle) == 0) - handle->rp_ptr = 0; - - ret_addr = CIRCULARBUF_START(handle) + handle->rp_ptr; - - /* - * Please note that we do not update the read pointer here. Only - * read pending pointer is updated, so that next reader knows where - * to read data from. - * read pointer can only be updated when the read is complete. - */ - handle->rp_ptr = (uint16)(ret_addr - CIRCULARBUF_START(handle) + *available_len); - - ASSERT(*available_len <= handle->depth); - - OSL_CACHE_INV((void *) ret_addr, *available_len); - - return ret_addr; -} - -/** - * ----------------------------------------------------------------------------- - * Function : circularbuf_read_complete - * Description: - * This function has to be called by the consumer end of circularbuf to indicate - * that data has been consumed and the read pointer can be updated, so the producing side - * can can use the freed space for new entries. - * - * - * Input Args : - * bytes_read : No. of bytes consumed by the consumer. 
This has to match - * the length returned by circularbuf_get_read_ptr - * - * Return Values : - * CIRCULARBUF_SUCCESS : Otherwise - * - * ----------------------------------------------------------------------------- - */ -circularbuf_ret_t BCMFASTPATH -circularbuf_read_complete(circularbuf_t *handle, uint16 bytes_read) -{ - CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle)); - ASSERT(bytes_read < handle->depth); - - /* Update the read pointer */ - if ((handle->w_ptr < handle->e_ptr) && (handle->r_ptr + bytes_read) > handle->e_ptr) - handle->r_ptr = bytes_read; - else - handle->r_ptr += bytes_read; - - return CIRCULARBUF_SUCCESS; -} - -/** - * ----------------------------------------------------------------------------- - * Function : circularbuf_revert_rp_ptr - * - * Description: - * The rp_ptr update during circularbuf_get_read_ptr() is done to reflect the amount of data - * that is sent out to be read by the consumer. But the consumer may not always read the - * entire data. In such a case, the rp_ptr needs to be reverted back by 'left' bytes, where - * 'left' is the no. of bytes left unread. - * - * Input args: - * bytes : The no. 
of bytes left unread by the consumer - * - * ----------------------------------------------------------------------------- - */ -circularbuf_ret_t -circularbuf_revert_rp_ptr(circularbuf_t *handle, uint16 bytes) -{ - CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle)); - ASSERT(bytes < handle->depth); - - handle->rp_ptr -= bytes; - - return CIRCULARBUF_SUCCESS; -} diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.h b/drivers/net/wireless/bcmdhd/dhd_bta.h deleted file mode 100644 index 4067fc39b2985..0000000000000 --- a/drivers/net/wireless/bcmdhd/dhd_bta.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * BT-AMP support routines - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: dhd_bta.h 291086 2011-10-21 01:17:24Z $ - */ -#ifndef __dhd_bta_h__ -#define __dhd_bta_h__ - -struct dhd_pub; - -extern int dhd_bta_docmd(struct dhd_pub *pub, void *cmd_buf, uint cmd_len); - -extern void dhd_bta_doevt(struct dhd_pub *pub, void *data_buf, uint data_len); - -extern int dhd_bta_tx_hcidata(struct dhd_pub *pub, void *data_buf, uint data_len); -extern void dhd_bta_tx_hcidata_complete(struct dhd_pub *dhdp, void *txp, bool success); - - -#endif /* __dhd_bta_h__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.h b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h deleted file mode 100644 index 7e1999d980946..0000000000000 --- a/drivers/net/wireless/bcmdhd/dhd_cfg80211.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Linux cfg80211 driver - Dongle Host Driver (DHD) related - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $ - */ - - -#ifndef __DHD_CFG80211__ -#define __DHD_CFG80211__ - -#include -#include - -#ifndef WL_ERR -#define WL_ERR CFG80211_ERR -#endif -#ifndef WL_TRACE -#define WL_TRACE CFG80211_TRACE -#endif - -s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg); -s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg); -s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg); -s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val); 
-s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg); -s32 dhd_config_dongle(struct bcm_cfg80211 *cfg); - -#endif /* __DHD_CFG80211__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_config.c b/drivers/net/wireless/bcmdhd/dhd_config.c deleted file mode 100644 index 376c041294ccf..0000000000000 --- a/drivers/net/wireless/bcmdhd/dhd_config.c +++ /dev/null @@ -1,2522 +0,0 @@ - -#include -#include - -#include -#include -#include -#if defined(HW_OOB) || defined(FORCE_WOWLAN) -#include -#include -#include -#include -#endif - -#include -#include - -/* message levels */ -#define CONFIG_ERROR_LEVEL 0x0001 -#define CONFIG_TRACE_LEVEL 0x0002 - -uint config_msg_level = CONFIG_ERROR_LEVEL; - -#define CONFIG_ERROR(x) \ - do { \ - if (config_msg_level & CONFIG_ERROR_LEVEL) { \ - printk(KERN_ERR "CONFIG-ERROR) "); \ - printk x; \ - } \ - } while (0) -#define CONFIG_TRACE(x) \ - do { \ - if (config_msg_level & CONFIG_TRACE_LEVEL) { \ - printk(KERN_ERR "CONFIG-TRACE) "); \ - printk x; \ - } \ - } while (0) - -#define MAXSZ_BUF 1000 -#define MAXSZ_CONFIG 4096 - -#define FW_TYPE_STA 0 -#define FW_TYPE_APSTA 1 -#define FW_TYPE_P2P 2 -#define FW_TYPE_MFG 3 -#define FW_TYPE_G 0 -#define FW_TYPE_AG 1 - -#ifdef CONFIG_PATH_AUTO_SELECT -#define BCM4330B2_CONF_NAME "config_40183b2.txt" -#define BCM43362A0_CONF_NAME "config_40181a0.txt" -#define BCM43362A2_CONF_NAME "config_40181a2.txt" -#define BCM43438A0_CONF_NAME "config_43438a0.txt" -#define BCM43438A1_CONF_NAME "config_43438a1.txt" -#define BCM4334B1_CONF_NAME "config_4334b1.txt" -#define BCM43341B0_CONF_NAME "config_43341b0.txt" -#define BCM43241B4_CONF_NAME "config_43241b4.txt" -#define BCM4339A0_CONF_NAME "config_4339a0.txt" -#define BCM43455C0_CONF_NAME "config_43455c0.txt" -#define BCM4354A1_CONF_NAME "config_4354a1.txt" -#define BCM4356A2_CONF_NAME "config_4356a2.txt" -#define BCM4359B1_CONF_NAME "config_4359b1.txt" -#endif - -#ifdef BCMSDIO -#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */ - -const static 
char *bcm4330b2_fw_name[] = { - "fw_bcm40183b2.bin", - "fw_bcm40183b2_apsta.bin", - "fw_bcm40183b2_p2p.bin", - "fw_bcm40183b2_mfg.bin" -}; - -const static char *bcm4330b2_ag_fw_name[] = { - "fw_bcm40183b2_ag.bin", - "fw_bcm40183b2_ag_apsta.bin", - "fw_bcm40183b2_ag_p2p.bin", - "fw_bcm40183b2_ag_mfg.bin" -}; - -const static char *bcm43362a0_fw_name[] = { - "fw_bcm40181a0.bin", - "fw_bcm40181a0_apsta.bin", - "fw_bcm40181a0_p2p.bin", - "fw_bcm40181a0_mfg.bin" -}; - -const static char *bcm43362a2_fw_name[] = { - "fw_bcm40181a2.bin", - "fw_bcm40181a2_apsta.bin", - "fw_bcm40181a2_p2p.bin", - "fw_bcm40181a2_mfg.bin" -}; - -const static char *bcm4334b1_ag_fw_name[] = { - "fw_bcm4334b1_ag.bin", - "fw_bcm4334b1_ag_apsta.bin", - "fw_bcm4334b1_ag_p2p.bin", - "fw_bcm4334b1_ag_mfg.bin" -}; - -const static char *bcm43438a0_fw_name[] = { - "fw_bcm43438a0.bin", - "fw_bcm43438a0_apsta.bin", - "fw_bcm43438a0_p2p.bin", - "fw_bcm43438a0_mfg.bin" -}; - -const static char *bcm43438a1_fw_name[] = { - "fw_bcm43438a1.bin", - "fw_bcm43438a1_apsta.bin", - "fw_bcm43438a1_p2p.bin", - "fw_bcm43438a1_mfg.bin" -}; - -const static char *bcm43341b0_ag_fw_name[] = { - "fw_bcm43341b0_ag.bin", - "fw_bcm43341b0_ag_apsta.bin", - "fw_bcm43341b0_ag_p2p.bin", - "fw_bcm43341b0_ag_mfg.bin" -}; - -const static char *bcm43241b4_ag_fw_name[] = { - "fw_bcm43241b4_ag.bin", - "fw_bcm43241b4_ag_apsta.bin", - "fw_bcm43241b4_ag_p2p.bin", - "fw_bcm43241b4_ag_mfg.bin" -}; - -const static char *bcm4339a0_ag_fw_name[] = { - "fw_bcm4339a0_ag.bin", - "fw_bcm4339a0_ag_apsta.bin", - "fw_bcm4339a0_ag_p2p.bin", - "fw_bcm4339a0_ag_mfg.bin" -}; - -const static char *bcm43455c0_ag_fw_name[] = { - "fw_bcm43455c0_ag.bin", - "fw_bcm43455c0_ag_apsta.bin", - "fw_bcm43455c0_ag_p2p.bin", - "fw_bcm43455c0_ag_mfg.bin" -}; - -const static char *bcm4354a1_ag_fw_name[] = { - "fw_bcm4354a1_ag.bin", - "fw_bcm4354a1_ag_apsta.bin", - "fw_bcm4354a1_ag_p2p.bin", - "fw_bcm4354a1_ag_mfg.bin" -}; - -const static char *bcm4356a2_ag_fw_name[] = { - 
"fw_bcm4356a2_ag.bin", - "fw_bcm4356a2_ag_apsta.bin", - "fw_bcm4356a2_ag_p2p.bin", - "fw_bcm4356a2_ag_mfg.bin" -}; - -const static char *bcm4359b1_ag_fw_name[] = { - "fw_bcm4359b1_ag.bin", - "fw_bcm4359b1_ag_apsta.bin", - "fw_bcm4359b1_ag_p2p.bin", - "fw_bcm4359b1_ag_mfg.bin" -}; -#endif -#ifdef BCMPCIE -const static char *bcm4356a2_pcie_ag_fw_name[] = { - "fw_bcm4356a2_pcie_ag.bin", - "fw_bcm4356a2_pcie_ag_apsta.bin", - "fw_bcm4356a2_pcie_ag_p2p.bin", - "fw_bcm4356a2_pcie_ag_mfg.bin" -}; -#endif - -#define htod32(i) i -#define htod16(i) i -#define dtoh32(i) i -#define dtoh16(i) i -#define htodchanspec(i) i -#define dtohchanspec(i) i - -#ifdef BCMSDIO -void -dhd_conf_free_mac_list(wl_mac_list_ctrl_t *mac_list) -{ - int i; - - CONFIG_TRACE(("%s called\n", __FUNCTION__)); - if (mac_list->m_mac_list_head) { - for (i = 0; i < mac_list->count; i++) { - if (mac_list->m_mac_list_head[i].mac) { - CONFIG_TRACE(("%s Free mac %p\n", __FUNCTION__, mac_list->m_mac_list_head[i].mac)); - kfree(mac_list->m_mac_list_head[i].mac); - } - } - CONFIG_TRACE(("%s Free m_mac_list_head %p\n", __FUNCTION__, mac_list->m_mac_list_head)); - kfree(mac_list->m_mac_list_head); - } - mac_list->count = 0; -} - -void -dhd_conf_free_chip_nv_path_list(wl_chip_nv_path_list_ctrl_t *chip_nv_list) -{ - CONFIG_TRACE(("%s called\n", __FUNCTION__)); - - if (chip_nv_list->m_chip_nv_path_head) { - CONFIG_TRACE(("%s Free %p\n", __FUNCTION__, chip_nv_list->m_chip_nv_path_head)); - kfree(chip_nv_list->m_chip_nv_path_head); - } - chip_nv_list->count = 0; -} - -#if defined(HW_OOB) || defined(FORCE_WOWLAN) -void -dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip) -{ - uint32 gpiocontrol, addr; - - if (CHIPID(chip) == BCM43362_CHIP_ID) { - printf("%s: Enable HW OOB for 43362\n", __FUNCTION__); - addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, gpiocontrol); - gpiocontrol = bcmsdh_reg_read(sdh, addr, 4); - gpiocontrol |= 0x2; - bcmsdh_reg_write(sdh, addr, 4, gpiocontrol); - bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10005, 
0xf, NULL); - bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10006, 0x0, NULL); - bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10007, 0x2, NULL); - } -} -#endif - -int -dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, uint8 *mac) -{ - int i, err = -1; - uint8 *ptr = 0; - unsigned char tpl_code, tpl_link='\0'; - uint8 header[3] = {0x80, 0x07, 0x19}; - uint8 *cis; - - if (!(cis = MALLOC(dhd->osh, SBSDIO_CIS_SIZE_LIMIT))) { - CONFIG_ERROR(("%s: cis malloc failed\n", __FUNCTION__)); - return err; - } - bzero(cis, SBSDIO_CIS_SIZE_LIMIT); - - if ((err = bcmsdh_cis_read(sdh, 0, cis, SBSDIO_CIS_SIZE_LIMIT))) { - CONFIG_ERROR(("%s: cis read err %d\n", __FUNCTION__, err)); - MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT); - return err; - } - err = -1; // reset err; - ptr = cis; - do { - /* 0xff means we're done */ - tpl_code = *ptr; - ptr++; - if (tpl_code == 0xff) - break; - - /* null entries have no link field or data */ - if (tpl_code == 0x00) - continue; - - tpl_link = *ptr; - ptr++; - /* a size of 0xff also means we're done */ - if (tpl_link == 0xff) - break; - if (config_msg_level & CONFIG_TRACE_LEVEL) { - printf("%s: tpl_code=0x%02x, tpl_link=0x%02x, tag=0x%02x\n", - __FUNCTION__, tpl_code, tpl_link, *ptr); - printk("%s: value:", __FUNCTION__); - for (i=0; iosh, cis, SBSDIO_CIS_SIZE_LIMIT); - - return err; -} - -void -dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *fw_path) -{ - int i, j; - uint8 mac[6]={0}; - int fw_num=0, mac_num=0; - uint32 oui, nic; - wl_mac_list_t *mac_list; - wl_mac_range_t *mac_range; - char *pfw_name; - int fw_type, fw_type_new; - - mac_list = dhd->conf->fw_by_mac.m_mac_list_head; - fw_num = dhd->conf->fw_by_mac.count; - if (!mac_list || !fw_num) - return; - - if (dhd_conf_get_mac(dhd, sdh, mac)) { - CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__)); - return; - } - oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]); - nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]); - - /* find out the last '/' */ - i = strlen(fw_path); - while 
(i > 0) { - if (fw_path[i] == '/') break; - i--; - } - pfw_name = &fw_path[i+1]; - fw_type = (strstr(pfw_name, "_mfg") ? - FW_TYPE_MFG : (strstr(pfw_name, "_apsta") ? - FW_TYPE_APSTA : (strstr(pfw_name, "_p2p") ? - FW_TYPE_P2P : FW_TYPE_STA))); - - for (i=0; i= mac_range[j].nic_start && nic <= mac_range[j].nic_end) { - strcpy(pfw_name, mac_list[i].name); - printf("%s: matched oui=0x%06X, nic=0x%06X\n", - __FUNCTION__, oui, nic); - printf("%s: fw_path=%s\n", __FUNCTION__, fw_path); - return; - } - } - } - } -} - -void -dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_path) -{ - int i, j; - uint8 mac[6]={0}; - int nv_num=0, mac_num=0; - uint32 oui, nic; - wl_mac_list_t *mac_list; - wl_mac_range_t *mac_range; - char *pnv_name; - - mac_list = dhd->conf->nv_by_mac.m_mac_list_head; - nv_num = dhd->conf->nv_by_mac.count; - if (!mac_list || !nv_num) - return; - - if (dhd_conf_get_mac(dhd, sdh, mac)) { - CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__)); - return; - } - oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]); - nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]); - - /* find out the last '/' */ - i = strlen(nv_path); - while (i > 0) { - if (nv_path[i] == '/') break; - i--; - } - pnv_name = &nv_path[i+1]; - - for (i=0; i= mac_range[j].nic_start && nic <= mac_range[j].nic_end) { - strcpy(pnv_name, mac_list[i].name); - printf("%s: matched oui=0x%06X, nic=0x%06X\n", - __FUNCTION__, oui, nic); - printf("%s: nv_path=%s\n", __FUNCTION__, nv_path); - return; - } - } - } - } -} -#endif - -void -dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path) -{ - int fw_type, ag_type; - uint chip, chiprev; - int i; - - chip = dhd->conf->chip; - chiprev = dhd->conf->chiprev; - - if (fw_path[0] == '\0') { -#ifdef CONFIG_BCMDHD_FW_PATH - bcm_strncpy_s(fw_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1); - if (fw_path[0] == '\0') -#endif - { - printf("firmware path is null\n"); - return; - } - } -#ifndef FW_PATH_AUTO_SELECT - 
return; -#endif - - /* find out the last '/' */ - i = strlen(fw_path); - while (i > 0) { - if (fw_path[i] == '/') break; - i--; - } -#ifdef BAND_AG - ag_type = FW_TYPE_AG; -#else - ag_type = strstr(&fw_path[i], "_ag") ? FW_TYPE_AG : FW_TYPE_G; -#endif - fw_type = (strstr(&fw_path[i], "_mfg") ? - FW_TYPE_MFG : (strstr(&fw_path[i], "_apsta") ? - FW_TYPE_APSTA : (strstr(&fw_path[i], "_p2p") ? - FW_TYPE_P2P : FW_TYPE_STA))); - - switch (chip) { -#ifdef BCMSDIO - case BCM4330_CHIP_ID: - if (ag_type == FW_TYPE_G) { - if (chiprev == BCM4330B2_CHIP_REV) - strcpy(&fw_path[i+1], bcm4330b2_fw_name[fw_type]); - break; - } else { - if (chiprev == BCM4330B2_CHIP_REV) - strcpy(&fw_path[i+1], bcm4330b2_ag_fw_name[fw_type]); - break; - } - case BCM43362_CHIP_ID: - if (chiprev == BCM43362A0_CHIP_REV) - strcpy(&fw_path[i+1], bcm43362a0_fw_name[fw_type]); - else - strcpy(&fw_path[i+1], bcm43362a2_fw_name[fw_type]); - break; - case BCM43430_CHIP_ID: - if (chiprev == BCM43430A0_CHIP_REV) - strcpy(&fw_path[i+1], bcm43438a0_fw_name[fw_type]); - else if (chiprev == BCM43430A1_CHIP_REV) - strcpy(&fw_path[i+1], bcm43438a1_fw_name[fw_type]); - break; - case BCM4334_CHIP_ID: - if (chiprev == BCM4334B1_CHIP_REV) - strcpy(&fw_path[i+1], bcm4334b1_ag_fw_name[fw_type]); - break; - case BCM43340_CHIP_ID: - case BCM43341_CHIP_ID: - if (chiprev == BCM43341B0_CHIP_REV) - strcpy(&fw_path[i+1], bcm43341b0_ag_fw_name[fw_type]); - break; - case BCM4324_CHIP_ID: - if (chiprev == BCM43241B4_CHIP_REV) - strcpy(&fw_path[i+1], bcm43241b4_ag_fw_name[fw_type]); - break; - case BCM4335_CHIP_ID: - if (chiprev == BCM4335A0_CHIP_REV) - strcpy(&fw_path[i+1], bcm4339a0_ag_fw_name[fw_type]); - break; - case BCM4345_CHIP_ID: - case BCM43454_CHIP_ID: - if (chiprev == BCM43455C0_CHIP_REV) - strcpy(&fw_path[i+1], bcm43455c0_ag_fw_name[fw_type]); - break; - case BCM4339_CHIP_ID: - if (chiprev == BCM4339A0_CHIP_REV) - strcpy(&fw_path[i+1], bcm4339a0_ag_fw_name[fw_type]); - break; - case BCM4354_CHIP_ID: - if (chiprev == 
BCM4354A1_CHIP_REV) - strcpy(&fw_path[i+1], bcm4354a1_ag_fw_name[fw_type]); - else if (chiprev == BCM4356A2_CHIP_REV) - strcpy(&fw_path[i+1], bcm4356a2_ag_fw_name[fw_type]); - break; - case BCM4356_CHIP_ID: - case BCM4371_CHIP_ID: - if (chiprev == BCM4356A2_CHIP_REV) - strcpy(&fw_path[i+1], bcm4356a2_ag_fw_name[fw_type]); - break; - case BCM4359_CHIP_ID: - if (chiprev == BCM4359B1_CHIP_REV) - strcpy(&fw_path[i+1], bcm4359b1_ag_fw_name[fw_type]); - break; -#endif -#ifdef BCMPCIE - case BCM4356_CHIP_ID: - if (chiprev == BCM4356A2_CHIP_REV) - strcpy(&fw_path[i+1], bcm4356a2_pcie_ag_fw_name[fw_type]); - break; -#endif - } - - printf("%s: firmware_path=%s\n", __FUNCTION__, fw_path); -} - -void -dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path) -{ - int matched=-1; - uint chip, chiprev; - int i; - - chip = dhd->conf->chip; - chiprev = dhd->conf->chiprev; - - for (i = 0;i < dhd->conf->nv_by_chip.count;i++) { - if (chip == dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chip && - chiprev==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chiprev) { - matched = i; - break; - } - } - if (matched < 0) - return; - - if (nv_path[0] == '\0') { -#ifdef CONFIG_BCMDHD_NVRAM_PATH - bcm_strncpy_s(nv_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1); - if (nv_path[0] == '\0') -#endif - { - printf("nvram path is null\n"); - return; - } - } - - /* find out the last '/' */ - i = strlen(nv_path); - while (i > 0) { - if (nv_path[i] == '/') break; - i--; - } - - strcpy(&nv_path[i+1], dhd->conf->nv_by_chip.m_chip_nv_path_head[matched].name); - - printf("%s: nvram_path=%s\n", __FUNCTION__, nv_path); -} - -void -dhd_conf_set_conf_path_by_nv_path(dhd_pub_t *dhd, char *conf_path, char *nv_path) -{ - int i; - - if (nv_path[0] == '\0') { -#ifdef CONFIG_BCMDHD_NVRAM_PATH - bcm_strncpy_s(conf_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1); - if (nv_path[0] == '\0') -#endif - { - printf("nvram path is null\n"); - return; - } - } else - 
strcpy(conf_path, nv_path); - - /* find out the last '/' */ - i = strlen(conf_path); - while (i > 0) { - if (conf_path[i] == '/') break; - i--; - } - strcpy(&conf_path[i+1], "config.txt"); - - printf("%s: config_path=%s\n", __FUNCTION__, conf_path); -} - -#ifdef CONFIG_PATH_AUTO_SELECT -void -dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path) -{ - uint chip, chiprev; - int i; - - chip = dhd->conf->chip; - chiprev = dhd->conf->chiprev; - - if (conf_path[0] == '\0') { - printf("config path is null\n"); - return; - } - - /* find out the last '/' */ - i = strlen(conf_path); - while (i > 0) { - if (conf_path[i] == '/') break; - i--; - } - - switch (chip) { -#ifdef BCMSDIO - case BCM4330_CHIP_ID: - if (chiprev == BCM4330B2_CHIP_REV) - strcpy(&conf_path[i+1], BCM4330B2_CONF_NAME); - break; - case BCM43362_CHIP_ID: - if (chiprev == BCM43362A0_CHIP_REV) - strcpy(&conf_path[i+1], BCM43362A0_CONF_NAME); - else - strcpy(&conf_path[i+1], BCM43362A2_CONF_NAME); - break; - case BCM43430_CHIP_ID: - if (chiprev == BCM43430A0_CHIP_REV) - strcpy(&conf_path[i+1], BCM43438A0_CONF_NAME); - else if (chiprev == BCM43430A1_CHIP_REV) - strcpy(&conf_path[i+1], BCM43438A1_CONF_NAME); - break; - case BCM4334_CHIP_ID: - if (chiprev == BCM4334B1_CHIP_REV) - strcpy(&conf_path[i+1], BCM4334B1_CONF_NAME); - break; - case BCM43340_CHIP_ID: - case BCM43341_CHIP_ID: - if (chiprev == BCM43341B0_CHIP_REV) - strcpy(&conf_path[i+1], BCM43341B0_CONF_NAME); - break; - case BCM4324_CHIP_ID: - if (chiprev == BCM43241B4_CHIP_REV) - strcpy(&conf_path[i+1], BCM43241B4_CONF_NAME); - break; - case BCM4335_CHIP_ID: - if (chiprev == BCM4335A0_CHIP_REV) - strcpy(&conf_path[i+1], BCM4339A0_CONF_NAME); - break; - case BCM4345_CHIP_ID: - case BCM43454_CHIP_ID: - if (chiprev == BCM43455C0_CHIP_REV) - strcpy(&conf_path[i+1], BCM43455C0_CONF_NAME); - break; - case BCM4339_CHIP_ID: - if (chiprev == BCM4339A0_CHIP_REV) - strcpy(&conf_path[i+1], BCM4339A0_CONF_NAME); - break; - case BCM4354_CHIP_ID: - if (chiprev 
== BCM4354A1_CHIP_REV) - strcpy(&conf_path[i+1], BCM4354A1_CONF_NAME); - else if (chiprev == BCM4356A2_CHIP_REV) - strcpy(&conf_path[i+1], BCM4356A2_CONF_NAME); - break; - case BCM4356_CHIP_ID: - case BCM4371_CHIP_ID: - if (chiprev == BCM4356A2_CHIP_REV) - strcpy(&conf_path[i+1], BCM4356A2_CONF_NAME); - break; - case BCM4359_CHIP_ID: - if (chiprev == BCM4359B1_CHIP_REV) - strcpy(&conf_path[i+1], BCM4359B1_CONF_NAME); - break; -#endif -#ifdef BCMPCIE - case BCM4356_CHIP_ID: - if (chiprev == BCM4356A2_CHIP_REV) - strcpy(&conf_path[i+1], BCM4356A2_CONF_NAME); - break; -#endif - } - - printf("%s: config_path=%s\n", __FUNCTION__, conf_path); -} -#endif - -int -dhd_conf_set_fw_int_cmd(dhd_pub_t *dhd, char *name, uint cmd, int val, - int def, bool down) -{ - int bcmerror = -1; - - if (val >= def) { - if (down) { - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0) - CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, bcmerror)); - } - printf("%s: set %s %d %d\n", __FUNCTION__, name, cmd, val); - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, cmd, &val, sizeof(val), TRUE, 0)) < 0) - CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, bcmerror)); - } - return bcmerror; -} - -int -dhd_conf_set_fw_int_struct_cmd(dhd_pub_t *dhd, char *name, uint cmd, - int *val, int len, bool down) -{ - int bcmerror = -1; - - if (down) { - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0) - CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, bcmerror)); - } - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, cmd, val, len, TRUE, 0)) < 0) - CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, bcmerror)); - - return bcmerror; -} - -int -dhd_conf_set_fw_string_cmd(dhd_pub_t *dhd, char *cmd, int val, int def, - bool down) -{ - int bcmerror = -1; - char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ - - if (val >= def) { - if (down) { - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 
0)) < 0) - CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, bcmerror)); - } - printf("%s: set %s %d\n", __FUNCTION__, cmd, val); - bcm_mkiovar(cmd, (char *)&val, 4, iovbuf, sizeof(iovbuf)); - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) - CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, cmd, bcmerror)); - } - return bcmerror; -} - -int -dhd_conf_set_fw_string_struct_cmd(dhd_pub_t *dhd, char *cmd, char *val, - int len, bool down) -{ - int bcmerror = -1; - char iovbuf[WLC_IOCTL_SMLEN]; - - if (down) { - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0) - CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, bcmerror)); - } - printf("%s: set %s\n", __FUNCTION__, cmd); - bcm_mkiovar(cmd, val, len, iovbuf, sizeof(iovbuf)); - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) - CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, cmd, bcmerror)); - - return bcmerror; -} - -uint -dhd_conf_get_band(dhd_pub_t *dhd) -{ - uint band = WLC_BAND_AUTO; - - if (dhd && dhd->conf) - band = dhd->conf->band; - else - CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__)); - - return band; -} - -int -dhd_conf_set_country(dhd_pub_t *dhd) -{ - int bcmerror = -1; - - memset(&dhd->dhd_cspec, 0, sizeof(wl_country_t)); - printf("%s: set country %s, revision %d\n", __FUNCTION__, - dhd->conf->cspec.ccode, dhd->conf->cspec.rev); - dhd_conf_set_fw_string_struct_cmd(dhd, "country", (char *)&dhd->conf->cspec, sizeof(wl_country_t), FALSE); - - return bcmerror; -} - -int -dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec) -{ - int bcmerror = -1; - - memset(cspec, 0, sizeof(wl_country_t)); - bcm_mkiovar("country", NULL, 0, (char*)cspec, sizeof(wl_country_t)); - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t), FALSE, 0)) < 0) - CONFIG_ERROR(("%s: country code getting failed %d\n", __FUNCTION__, bcmerror)); - else - 
printf("Country code: %s (%s/%d)\n", cspec->country_abbrev, cspec->ccode, cspec->rev); - - return bcmerror; -} - -int -dhd_conf_get_country_from_config(dhd_pub_t *dhd, wl_country_t *cspec) -{ - int bcmerror = -1, i; - struct dhd_conf *conf = dhd->conf; - - for (i = 0; i < conf->country_list.count; i++) { - if (strcmp(cspec->country_abbrev, conf->country_list.cspec[i].country_abbrev) == 0) { - memcpy(cspec->ccode, - conf->country_list.cspec[i].ccode, WLC_CNTRY_BUF_SZ); - cspec->rev = conf->country_list.cspec[i].rev; - printf("%s: %s/%d\n", __FUNCTION__, cspec->ccode, cspec->rev); - return 0; - } - } - - return bcmerror; -} - -int -dhd_conf_fix_country(dhd_pub_t *dhd) -{ - int bcmerror = -1; - uint band; - wl_uint32_list_t *list; - u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)]; - - if (!(dhd && dhd->conf)) { - return bcmerror; - } - - memset(valid_chan_list, 0, sizeof(valid_chan_list)); - list = (wl_uint32_list_t *)(void *) valid_chan_list; - list->count = htod32(WL_NUMCHANNELS); - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), FALSE, 0)) < 0) { - CONFIG_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, bcmerror)); - } - - band = dhd_conf_get_band(dhd); - - if (bcmerror || ((band == WLC_BAND_AUTO || band == WLC_BAND_2G) && - dtoh32(list->count)<11)) { - CONFIG_ERROR(("%s: bcmerror=%d, # of channels %d\n", - __FUNCTION__, bcmerror, dtoh32(list->count))); - if ((bcmerror = dhd_conf_set_country(dhd)) < 0) { - strcpy(dhd->conf->cspec.country_abbrev, "US"); - dhd->conf->cspec.rev = 0; - strcpy(dhd->conf->cspec.ccode, "US"); - dhd_conf_set_country(dhd); - } - } - - return bcmerror; -} - -bool -dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel) -{ - int i; - bool match = false; - - if (dhd && dhd->conf) { - if (dhd->conf->channels.count == 0) - return true; - for (i=0; iconf->channels.count; i++) { - if (channel == dhd->conf->channels.channel[i]) - match = true; - } - } else { - match = true; - 
CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__)); - } - - return match; -} - -int -dhd_conf_set_roam(dhd_pub_t *dhd) -{ - int bcmerror = -1; - struct dhd_conf *conf = dhd->conf; - - dhd_roam_disable = conf->roam_off; - dhd_conf_set_fw_string_cmd(dhd, "roam_off", dhd->conf->roam_off, 0, FALSE); - - if (!conf->roam_off || !conf->roam_off_suspend) { - printf("%s: set roam_trigger %d\n", __FUNCTION__, conf->roam_trigger[0]); - dhd_conf_set_fw_int_struct_cmd(dhd, "WLC_SET_ROAM_TRIGGER", WLC_SET_ROAM_TRIGGER, - conf->roam_trigger, sizeof(conf->roam_trigger), FALSE); - - printf("%s: set roam_scan_period %d\n", __FUNCTION__, conf->roam_scan_period[0]); - dhd_conf_set_fw_int_struct_cmd(dhd, "WLC_SET_ROAM_SCAN_PERIOD", WLC_SET_ROAM_SCAN_PERIOD, - conf->roam_scan_period, sizeof(conf->roam_scan_period), FALSE); - - printf("%s: set roam_delta %d\n", __FUNCTION__, conf->roam_delta[0]); - dhd_conf_set_fw_int_struct_cmd(dhd, "WLC_SET_ROAM_DELTA", WLC_SET_ROAM_DELTA, - conf->roam_delta, sizeof(conf->roam_delta), FALSE); - - dhd_conf_set_fw_string_cmd(dhd, "fullroamperiod", dhd->conf->fullroamperiod, 1, FALSE); - } - - return bcmerror; -} - -void -dhd_conf_get_wme(dhd_pub_t *dhd, edcf_acparam_t *acp) -{ - int bcmerror = -1; - char iovbuf[WLC_IOCTL_SMLEN]; - edcf_acparam_t *acparam; - - bzero(iovbuf, sizeof(iovbuf)); - - /* - * Get current acparams, using buf as an input buffer. - * Return data is array of 4 ACs of wme params. 
- */ - bcm_mkiovar("wme_ac_sta", NULL, 0, iovbuf, sizeof(iovbuf)); - if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { - CONFIG_ERROR(("%s: wme_ac_sta getting failed %d\n", __FUNCTION__, bcmerror)); - return; - } - memcpy((char*)acp, iovbuf, sizeof(edcf_acparam_t)*AC_COUNT); - - acparam = &acp[AC_BK]; - CONFIG_TRACE(("%s: BK: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__, - acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, - acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, - (int)sizeof(acp))); - acparam = &acp[AC_BE]; - CONFIG_TRACE(("%s: BE: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__, - acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, - acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, - (int)sizeof(acp))); - acparam = &acp[AC_VI]; - CONFIG_TRACE(("%s: VI: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__, - acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, - acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, - (int)sizeof(acp))); - acparam = &acp[AC_VO]; - CONFIG_TRACE(("%s: VO: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__, - acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, - acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, - (int)sizeof(acp))); - - return; -} - -void -dhd_conf_update_wme(dhd_pub_t *dhd, edcf_acparam_t *acparam_cur, int aci) -{ - int aifsn, ecwmin, ecwmax; - edcf_acparam_t *acp; - struct dhd_conf *conf = dhd->conf; - - /* Default value */ - aifsn = acparam_cur->ACI&EDCF_AIFSN_MASK; - ecwmin = acparam_cur->ECW&EDCF_ECWMIN_MASK; - ecwmax = (acparam_cur->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT; - - /* Modified value */ - if (conf->wme.aifsn[aci] > 0) - aifsn = conf->wme.aifsn[aci]; - if (conf->wme.cwmin[aci] > 0) - ecwmin = conf->wme.cwmin[aci]; - if (conf->wme.cwmax[aci] > 0) - ecwmax = conf->wme.cwmax[aci]; - - /* Update */ - acp = 
acparam_cur; - acp->ACI = (acp->ACI & ~EDCF_AIFSN_MASK) | (aifsn & EDCF_AIFSN_MASK); - acp->ECW = ((ecwmax << EDCF_ECWMAX_SHIFT) & EDCF_ECWMAX_MASK) | (acp->ECW & EDCF_ECWMIN_MASK); - acp->ECW = ((acp->ECW & EDCF_ECWMAX_MASK) | (ecwmin & EDCF_ECWMIN_MASK)); - - CONFIG_TRACE(("%s: mod aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__, - acp->ACI, acp->ACI&EDCF_AIFSN_MASK, - acp->ECW&EDCF_ECWMIN_MASK, (acp->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, - (int)sizeof(edcf_acparam_t))); - - /* - * Now use buf as an output buffer. - * Put WME acparams after "wme_ac\0" in buf. - * NOTE: only one of the four ACs can be set at a time. - */ - dhd_conf_set_fw_string_struct_cmd(dhd, "wme_ac_sta", (char *)acp, sizeof(edcf_acparam_t), FALSE); - -} - -void -dhd_conf_set_wme(dhd_pub_t *dhd) -{ - edcf_acparam_t acparam_cur[AC_COUNT]; - - if (dhd && dhd->conf) { - if (!dhd->conf->force_wme_ac) { - CONFIG_TRACE(("%s: force_wme_ac is not enabled %d\n", - __FUNCTION__, dhd->conf->force_wme_ac)); - return; - } - - CONFIG_TRACE(("%s: Before change:\n", __FUNCTION__)); - dhd_conf_get_wme(dhd, acparam_cur); - - dhd_conf_update_wme(dhd, &acparam_cur[AC_BK], AC_BK); - dhd_conf_update_wme(dhd, &acparam_cur[AC_BE], AC_BE); - dhd_conf_update_wme(dhd, &acparam_cur[AC_VI], AC_VI); - dhd_conf_update_wme(dhd, &acparam_cur[AC_VO], AC_VO); - - CONFIG_TRACE(("%s: After change:\n", __FUNCTION__)); - dhd_conf_get_wme(dhd, acparam_cur); - } else { - CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__)); - } - - return; -} - -#ifdef PKT_FILTER_SUPPORT -void -dhd_conf_add_pkt_filter(dhd_pub_t *dhd) -{ - int i; - char str[12]; -#define MACS "%02x%02x%02x%02x%02x%02x" - - /* - * All pkt: pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000 - * Netbios pkt: 120 0 0 12 0xFFFF000000000000000000FF000000000000000000000000FFFF 0x0800000000000000000000110000000000000000000000000089 - */ - for(i=0; iconf->pkt_filter_add.count; i++) { - dhd->pktfilter[i+dhd->pktfilter_count] = 
dhd->conf->pkt_filter_add.filter[i]; - printf("%s: %s\n", __FUNCTION__, dhd->pktfilter[i+dhd->pktfilter_count]); - } - dhd->pktfilter_count += i; - - if (dhd->conf->pkt_filter_magic) { - strcpy(&dhd->conf->pkt_filter_add.filter[dhd->conf->pkt_filter_add.count][0], "256 0 1 0 0x"); - for (i=0; i<16; i++) - strcat(&dhd->conf->pkt_filter_add.filter[dhd->conf->pkt_filter_add.count][0], "FFFFFFFFFFFF"); - strcat(&dhd->conf->pkt_filter_add.filter[dhd->conf->pkt_filter_add.count][0], " 0x"); - sprintf(str, MACS, MAC2STRDBG(dhd->mac.octet)); - for (i=0; i<16; i++) - strcat(&dhd->conf->pkt_filter_add.filter[dhd->conf->pkt_filter_add.count][0], str); - dhd->pktfilter[dhd->pktfilter_count] = dhd->conf->pkt_filter_add.filter[dhd->conf->pkt_filter_add.count]; - dhd->pktfilter_count += 1; - } -} - -bool -dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id) -{ - int i; - - if (dhd && dhd->conf) { - for (i=0; i < dhd->conf->pkt_filter_del.count; i++) { - if (id == dhd->conf->pkt_filter_del.id[i]) { - printf("%s: %d\n", __FUNCTION__, dhd->conf->pkt_filter_del.id[i]); - return true; - } - } - return false; - } - return false; -} - -void -dhd_conf_discard_pkt_filter(dhd_pub_t *dhd) -{ - dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = NULL; - dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; - dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "102 0 0 0 0xFFFFFF 0x01005E"; - dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = "103 0 0 0 0xFFFF 0x3333"; - dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL; - /* Do not enable ARP to pkt filter if dhd_master_mode is false.*/ - dhd->pktfilter[DHD_ARP_FILTER_NUM] = NULL; - - /* IPv4 broadcast address XXX.XXX.XXX.255 */ - dhd->pktfilter[dhd->pktfilter_count] = "110 0 0 12 0xFFFF00000000000000000000000000000000000000FF 0x080000000000000000000000000000000000000000FF"; - dhd->pktfilter_count++; - /* discard IPv4 multicast address 224.0.0.0/4 */ - dhd->pktfilter[dhd->pktfilter_count] = "111 0 0 12 0xFFFF00000000000000000000000000000000F0 
0x080000000000000000000000000000000000E0"; - dhd->pktfilter_count++; - /* discard IPv6 multicast address FF00::/8 */ - dhd->pktfilter[dhd->pktfilter_count] = "112 0 0 12 0xFFFF000000000000000000000000000000000000000000000000FF 0x86DD000000000000000000000000000000000000000000000000FF"; - dhd->pktfilter_count++; - /* discard Netbios pkt */ - dhd->pktfilter[dhd->pktfilter_count] = "120 0 0 12 0xFFFF000000000000000000FF000000000000000000000000FFFF 0x0800000000000000000000110000000000000000000000000089"; - dhd->pktfilter_count++; - -} -#endif /* PKT_FILTER_SUPPORT */ - -void -dhd_conf_set_disable_proptx(dhd_pub_t *dhd) -{ - printf("%s: set disable_proptx %d\n", __FUNCTION__, dhd->conf->disable_proptx); - disable_proptx = dhd->conf->disable_proptx; -} - -int -dhd_conf_get_pm(dhd_pub_t *dhd) -{ - if (dhd && dhd->conf) - return dhd->conf->pm; - return -1; -} - -int -dhd_conf_get_tcpack_sup_mode(dhd_pub_t *dhd) -{ - if (dhd && dhd->conf) - return dhd->conf->tcpack_sup_mode; - return -1; -} - -unsigned int -process_config_vars(char *varbuf, unsigned int len, char *pickbuf, char *param) -{ - bool findNewline, changenewline=FALSE, pick=FALSE; - int column; - unsigned int n, pick_column=0; - - findNewline = FALSE; - column = 0; - - for (n = 0; n < len; n++) { - if (varbuf[n] == '\r') - continue; - if ((findNewline || changenewline) && varbuf[n] != '\n') - continue; - findNewline = FALSE; - if (varbuf[n] == '#') { - findNewline = TRUE; - continue; - } - if (varbuf[n] == '\\') { - changenewline = TRUE; - continue; - } - if (!changenewline && varbuf[n] == '\n') { - if (column == 0) - continue; - column = 0; - continue; - } - if (changenewline && varbuf[n] == '\n') { - changenewline = FALSE; - continue; - } - if (!memcmp(&varbuf[n], param, strlen(param)) && column == 0) { - pick = TRUE; - column = strlen(param); - n += column; - pick_column = 0; - } else { - if (pick && column == 0) - pick = FALSE; - else - column++; - } - if (pick) { - if (varbuf[n] == 0x9) - continue; - if 
(pick_column>0 && pickbuf[pick_column-1] == ' ' && varbuf[n] == ' ') - continue; - pickbuf[pick_column] = varbuf[n]; - pick_column++; - } - } - - return pick_column; -} - -void -dhd_conf_read_log_level(dhd_pub_t *dhd, char *bufp, uint len) -{ - uint len_val; - char *pick; - - pick = MALLOC(dhd->osh, MAXSZ_BUF); - if (!pick) { - CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", - __FUNCTION__, MAXSZ_BUF)); - return; - } - - /* Process dhd_msglevel */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "msglevel="); - if (len_val) { - dhd_msg_level = (int)simple_strtol(pick, NULL, 0); - printf("%s: dhd_msg_level = 0x%X\n", __FUNCTION__, dhd_msg_level); - } -#ifdef BCMSDIO - /* Process sd_msglevel */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "sd_msglevel="); - if (len_val) { - sd_msglevel = (int)simple_strtol(pick, NULL, 0); - printf("%s: sd_msglevel = 0x%X\n", __FUNCTION__, sd_msglevel); - } -#endif - /* Process android_msg_level */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "android_msg_level="); - if (len_val) { - android_msg_level = (int)simple_strtol(pick, NULL, 0); - printf("%s: android_msg_level = 0x%X\n", __FUNCTION__, android_msg_level); - } - /* Process config_msg_level */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "config_msg_level="); - if (len_val) { - config_msg_level = (int)simple_strtol(pick, NULL, 0); - printf("%s: config_msg_level = 0x%X\n", __FUNCTION__, config_msg_level); - } -#ifdef WL_CFG80211 - /* Process wl_dbg_level */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "wl_dbg_level="); - if (len_val) { - wl_dbg_level = (int)simple_strtol(pick, NULL, 0); - printf("%s: wl_dbg_level = 0x%X\n", __FUNCTION__, wl_dbg_level); - } -#endif -#if defined(WL_WIRELESS_EXT) - /* Process iw_msg_level */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, 
"iw_msg_level="); - if (len_val) { - iw_msg_level = (int)simple_strtol(pick, NULL, 0); - printf("%s: iw_msg_level = 0x%X\n", __FUNCTION__, iw_msg_level); - } -#endif - -#if defined(DHD_DEBUG) - /* Process dhd_console_ms */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "dhd_console_ms="); - if (len_val) { - dhd_console_ms = (int)simple_strtol(pick, NULL, 0); - printf("%s: dhd_console_ms = 0x%X\n", __FUNCTION__, dhd_console_ms); - } -#endif - - if (pick) - MFREE(dhd->osh, pick, MAXSZ_BUF); -} - -void -dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *bufp, uint len) -{ - uint len_val; - char *pick; - struct dhd_conf *conf = dhd->conf; - - pick = MALLOC(dhd->osh, MAXSZ_BUF); - if (!pick) { - CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", - __FUNCTION__, MAXSZ_BUF)); - return; - } - - /* Process WMM parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "force_wme_ac="); - if (len_val) { - conf->force_wme_ac = (int)simple_strtol(pick, NULL, 10); - printf("%s: force_wme_ac = %d\n", __FUNCTION__, conf->force_wme_ac); - } - - if (conf->force_wme_ac) { - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "bk_aifsn="); - if (len_val) { - conf->wme.aifsn[AC_BK] = (int)simple_strtol(pick, NULL, 10); - printf("%s: AC_BK aifsn = %d\n", __FUNCTION__, conf->wme.aifsn[AC_BK]); - } - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "bk_cwmin="); - if (len_val) { - conf->wme.cwmin[AC_BK] = (int)simple_strtol(pick, NULL, 10); - printf("%s: AC_BK cwmin = %d\n", __FUNCTION__, conf->wme.cwmin[AC_BK]); - } - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "bk_cwmax="); - if (len_val) { - conf->wme.cwmax[AC_BK] = (int)simple_strtol(pick, NULL, 10); - printf("%s: AC_BK cwmax = %d\n", __FUNCTION__, conf->wme.cwmax[AC_BK]); - } - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "be_aifsn="); - if 
(len_val) { - conf->wme.aifsn[AC_BE] = (int)simple_strtol(pick, NULL, 10); - printf("%s: AC_BE aifsn = %d\n", __FUNCTION__, conf->wme.aifsn[AC_BE]); - } - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "be_cwmin="); - if (len_val) { - conf->wme.cwmin[AC_BE] = (int)simple_strtol(pick, NULL, 10); - printf("%s: AC_BE cwmin = %d\n", __FUNCTION__, conf->wme.cwmin[AC_BE]); - } - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "be_cwmax="); - if (len_val) { - conf->wme.cwmax[AC_BE] = (int)simple_strtol(pick, NULL, 10); - printf("%s: AC_BE cwmax = %d\n", __FUNCTION__, conf->wme.cwmax[AC_BE]); - } - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "vi_aifsn="); - if (len_val) { - conf->wme.aifsn[AC_VI] = (int)simple_strtol(pick, NULL, 10); - printf("%s: AC_VI aifsn = %d\n", __FUNCTION__, conf->wme.aifsn[AC_VI]); - } - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "vi_cwmin="); - if (len_val) { - conf->wme.cwmin[AC_VI] = (int)simple_strtol(pick, NULL, 10); - printf("%s: AC_VI cwmin = %d\n", __FUNCTION__, conf->wme.cwmin[AC_VI]); - } - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "vi_cwmax="); - if (len_val) { - conf->wme.cwmax[AC_VI] = (int)simple_strtol(pick, NULL, 10); - printf("%s: AC_VI cwmax = %d\n", __FUNCTION__, conf->wme.cwmax[AC_VI]); - } - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "vo_aifsn="); - if (len_val) { - conf->wme.aifsn[AC_VO] = (int)simple_strtol(pick, NULL, 10); - printf("%s: AC_VO aifsn = %d\n", __FUNCTION__, conf->wme.aifsn[AC_VO]); - } - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "vo_cwmin="); - if (len_val) { - conf->wme.cwmin[AC_VO] = (int)simple_strtol(pick, NULL, 10); - printf("%s: AC_VO cwmin = %d\n", __FUNCTION__, conf->wme.cwmin[AC_VO]); - } - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, 
pick, "vo_cwmax="); - if (len_val) { - conf->wme.cwmax[AC_VO] = (int)simple_strtol(pick, NULL, 10); - printf("%s: AC_VO cwmax = %d\n", __FUNCTION__, conf->wme.cwmax[AC_VO]); - } - } - - if (pick) - MFREE(dhd->osh, pick, MAXSZ_BUF); - -} - -void -dhd_conf_read_fw_by_mac(dhd_pub_t *dhd, char *bufp, uint len) -{ - uint len_val; - int i, j; - char *pick; - char *pch, *pick_tmp; - wl_mac_list_t *mac_list; - wl_mac_range_t *mac_range; - struct dhd_conf *conf = dhd->conf; - - pick = MALLOC(dhd->osh, MAXSZ_BUF); - if (!pick) { - CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", - __FUNCTION__, MAXSZ_BUF)); - return; - } - - /* Process fw_by_mac: - * fw_by_mac=[fw_mac_num] \ - * [fw_name1] [mac_num1] [oui1-1] [nic_start1-1] [nic_end1-1] \ - * [oui1-1] [nic_start1-1] [nic_end1-1]... \ - * [oui1-n] [nic_start1-n] [nic_end1-n] \ - * [fw_name2] [mac_num2] [oui2-1] [nic_start2-1] [nic_end2-1] \ - * [oui2-1] [nic_start2-1] [nic_end2-1]... \ - * [oui2-n] [nic_start2-n] [nic_end2-n] \ - * Ex: fw_by_mac=2 \ - * fw_bcmdhd1.bin 2 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \ - * fw_bcmdhd2.bin 3 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \ - * 0x983B16 0x916157 0x916487 - */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "fw_by_mac="); - if (len_val) { - pick_tmp = pick; - pch = bcmstrtok(&pick_tmp, " ", 0); - conf->fw_by_mac.count = (uint32)simple_strtol(pch, NULL, 0); - if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->fw_by_mac.count, GFP_KERNEL))) { - conf->fw_by_mac.count = 0; - CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); - } - printf("%s: fw_count=%d\n", __FUNCTION__, conf->fw_by_mac.count); - conf->fw_by_mac.m_mac_list_head = mac_list; - for (i=0; ifw_by_mac.count; i++) { - pch = bcmstrtok(&pick_tmp, " ", 0); - strcpy(mac_list[i].name, pch); - pch = bcmstrtok(&pick_tmp, " ", 0); - mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0); - printf("%s: name=%s, mac_count=%d\n", __FUNCTION__, - 
mac_list[i].name, mac_list[i].count); - if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) { - mac_list[i].count = 0; - CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); - break; - } - mac_list[i].mac = mac_range; - for (j=0; josh, pick, MAXSZ_BUF); -} - -void -dhd_conf_read_nv_by_mac(dhd_pub_t *dhd, char *bufp, uint len) -{ - uint len_val; - int i, j; - char *pick; - char *pch, *pick_tmp; - wl_mac_list_t *mac_list; - wl_mac_range_t *mac_range; - struct dhd_conf *conf = dhd->conf; - - pick = MALLOC(dhd->osh, MAXSZ_BUF); - if (!pick) { - CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", - __FUNCTION__, MAXSZ_BUF)); - return; - } - - /* Process nv_by_mac: - * [nv_by_mac]: The same format as fw_by_mac - */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "nv_by_mac="); - if (len_val) { - pick_tmp = pick; - pch = bcmstrtok(&pick_tmp, " ", 0); - conf->nv_by_mac.count = (uint32)simple_strtol(pch, NULL, 0); - if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_mac.count, GFP_KERNEL))) { - conf->nv_by_mac.count = 0; - CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); - } - printf("%s: nv_count=%d\n", __FUNCTION__, conf->nv_by_mac.count); - conf->nv_by_mac.m_mac_list_head = mac_list; - for (i=0; inv_by_mac.count; i++) { - pch = bcmstrtok(&pick_tmp, " ", 0); - strcpy(mac_list[i].name, pch); - pch = bcmstrtok(&pick_tmp, " ", 0); - mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0); - printf("%s: name=%s, mac_count=%d\n", __FUNCTION__, - mac_list[i].name, mac_list[i].count); - if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) { - mac_list[i].count = 0; - CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); - break; - } - mac_list[i].mac = mac_range; - for (j=0; josh, pick, MAXSZ_BUF); -} - -void -dhd_conf_read_nv_by_chip(dhd_pub_t *dhd, char *bufp, uint len) -{ - uint len_val; - int i; - char *pick; - char *pch, *pick_tmp; - wl_chip_nv_path_t *chip_nv_path; 
- struct dhd_conf *conf = dhd->conf; - - pick = MALLOC(dhd->osh, MAXSZ_BUF); - if (!pick) { - CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", - __FUNCTION__, MAXSZ_BUF)); - return; - } - - /* Process nv_by_chip: - * nv_by_chip=[nv_chip_num] \ - * [chip1] [chiprev1] [nv_name1] [chip2] [chiprev2] [nv_name2] \ - * Ex: nv_by_chip=2 \ - * 43430 0 nvram_ap6212.txt 43430 1 nvram_ap6212a.txt \ - */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "nv_by_chip="); - if (len_val) { - pick_tmp = pick; - pch = bcmstrtok(&pick_tmp, " ", 0); - conf->nv_by_chip.count = (uint32)simple_strtol(pch, NULL, 0); - if (!(chip_nv_path = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_chip.count, GFP_KERNEL))) { - conf->nv_by_chip.count = 0; - CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); - } - printf("%s: nv_by_chip_count=%d\n", __FUNCTION__, conf->nv_by_chip.count); - conf->nv_by_chip.m_chip_nv_path_head = chip_nv_path; - for (i=0; inv_by_chip.count; i++) { - pch = bcmstrtok(&pick_tmp, " ", 0); - chip_nv_path[i].chip = (uint32)simple_strtol(pch, NULL, 0); - pch = bcmstrtok(&pick_tmp, " ", 0); - chip_nv_path[i].chiprev = (uint32)simple_strtol(pch, NULL, 0); - pch = bcmstrtok(&pick_tmp, " ", 0); - strcpy(chip_nv_path[i].name, pch); - printf("%s: chip=0x%x, chiprev=%d, name=%s\n", __FUNCTION__, - chip_nv_path[i].chip, chip_nv_path[i].chiprev, chip_nv_path[i].name); - } - } - - if (pick) - MFREE(dhd->osh, pick, MAXSZ_BUF); -} - -void -dhd_conf_read_roam_params(dhd_pub_t *dhd, char *bufp, uint len) -{ - uint len_val; - char *pick; - struct dhd_conf *conf = dhd->conf; - - pick = MALLOC(dhd->osh, MAXSZ_BUF); - if (!pick) { - CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", - __FUNCTION__, MAXSZ_BUF)); - return; - } - - /* Process roam */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "roam_off="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - conf->roam_off = 0; - else - conf->roam_off = 1; - printf("%s: 
roam_off = %d\n", __FUNCTION__, conf->roam_off); - } - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "roam_off_suspend="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - conf->roam_off_suspend = 0; - else - conf->roam_off_suspend = 1; - printf("%s: roam_off_suspend = %d\n", __FUNCTION__, - conf->roam_off_suspend); - } - - if (!conf->roam_off || !conf->roam_off_suspend) { - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "roam_trigger="); - if (len_val) - conf->roam_trigger[0] = (int)simple_strtol(pick, NULL, 10); - printf("%s: roam_trigger = %d\n", __FUNCTION__, - conf->roam_trigger[0]); - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "roam_scan_period="); - if (len_val) - conf->roam_scan_period[0] = (int)simple_strtol(pick, NULL, 10); - printf("%s: roam_scan_period = %d\n", __FUNCTION__, - conf->roam_scan_period[0]); - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "roam_delta="); - if (len_val) - conf->roam_delta[0] = (int)simple_strtol(pick, NULL, 10); - printf("%s: roam_delta = %d\n", __FUNCTION__, conf->roam_delta[0]); - - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "fullroamperiod="); - if (len_val) - conf->fullroamperiod = (int)simple_strtol(pick, NULL, 10); - printf("%s: fullroamperiod = %d\n", __FUNCTION__, - conf->fullroamperiod); - } - - if (pick) - MFREE(dhd->osh, pick, MAXSZ_BUF); - -} - -void -dhd_conf_read_country_list(dhd_pub_t *dhd, char *bufp, uint len) -{ - uint len_val; - int i; - char *pick, *pch, *pick_tmp; - struct dhd_conf *conf = dhd->conf; - - pick = MALLOC(dhd->osh, MAXSZ_BUF); - if (!pick) { - CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", - __FUNCTION__, MAXSZ_BUF)); - return; - } - - /* Process country_list: - * country_list=[country1]:[ccode1]/[regrev1], - * [country2]:[ccode2]/[regrev2] \ - * Ex: country_list=US:US/0, TW:TW/1 - */ - memset(pick, 0, 
MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "country_list="); - if (len_val) { - pick_tmp = pick; - for (i=0; icountry_list.cspec[i].country_abbrev, pch); - pch = bcmstrtok(&pick_tmp, "/", 0); - if (!pch) - break; - memcpy(conf->country_list.cspec[i].ccode, pch, 2); - pch = bcmstrtok(&pick_tmp, ", ", 0); - if (!pch) - break; - conf->country_list.cspec[i].rev = (int32)simple_strtol(pch, NULL, 10); - conf->country_list.count ++; - CONFIG_TRACE(("%s: country_list abbrev=%s, ccode=%s, regrev=%d\n", __FUNCTION__, - conf->country_list.cspec[i].country_abbrev, - conf->country_list.cspec[i].ccode, - conf->country_list.cspec[i].rev)); - } - printf("%s: %d country in list\n", __FUNCTION__, conf->country_list.count); - } - - if (pick) - MFREE(dhd->osh, pick, MAXSZ_BUF); -} - -int -dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path) -{ - int bcmerror = -1, i; - uint len, len_val; - void * image = NULL; - char * memblock = NULL; - char *bufp, *pick = NULL, *pch, *pick_tmp; - bool conf_file_exists; - struct dhd_conf *conf = dhd->conf; - - conf_file_exists = ((conf_path != NULL) && (conf_path[0] != '\0')); - if (!conf_file_exists) { - printf("%s: config path %s\n", __FUNCTION__, conf_path); - return (0); - } - - if (conf_file_exists) { - image = dhd_os_open_image(conf_path); - if (image == NULL) { - printf("%s: Ignore config file %s\n", __FUNCTION__, conf_path); - goto err; - } - } - - memblock = MALLOC(dhd->osh, MAXSZ_CONFIG); - if (memblock == NULL) { - CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", - __FUNCTION__, MAXSZ_CONFIG)); - goto err; - } - - pick = MALLOC(dhd->osh, MAXSZ_BUF); - if (!pick) { - CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", - __FUNCTION__, MAXSZ_BUF)); - goto err; - } - - /* Read variables */ - if (conf_file_exists) { - len = dhd_os_get_image_block(memblock, MAXSZ_CONFIG, image); - } - if (len > 0 && len < MAXSZ_CONFIG) { - bufp = (char *)memblock; - bufp[len] = 0; - - /* Process log_level */ - 
dhd_conf_read_log_level(dhd, bufp, len); - dhd_conf_read_roam_params(dhd, bufp, len); - dhd_conf_read_wme_ac_params(dhd, bufp, len); - dhd_conf_read_fw_by_mac(dhd, bufp, len); - dhd_conf_read_nv_by_mac(dhd, bufp, len); - dhd_conf_read_nv_by_chip(dhd, bufp, len); - dhd_conf_read_country_list(dhd, bufp, len); - - /* Process band: - * band=a for 5GHz only and band=b for 2.4GHz only - */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "band="); - if (len_val) { - if (!strncmp(pick, "b", len_val)) - conf->band = WLC_BAND_2G; - else if (!strncmp(pick, "a", len_val)) - conf->band = WLC_BAND_5G; - else - conf->band = WLC_BAND_AUTO; - printf("%s: band = %d\n", __FUNCTION__, conf->band); - } - - /* Process mimo_bw_cap */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "mimo_bw_cap="); - if (len_val) { - conf->mimo_bw_cap = (uint)simple_strtol(pick, NULL, 10); - printf("%s: mimo_bw_cap = %d\n", __FUNCTION__, conf->mimo_bw_cap); - } - - /* Process country code */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "ccode="); - if (len_val) { - memset(&conf->cspec, 0, sizeof(wl_country_t)); - memcpy(conf->cspec.country_abbrev, pick, len_val); - memcpy(conf->cspec.ccode, pick, len_val); - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "regrev="); - if (len_val) - conf->cspec.rev = (int32)simple_strtol(pick, NULL, 10); - } - - /* Process channels */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "channels="); - pick_tmp = pick; - if (len_val) { - pch = bcmstrtok(&pick_tmp, " ,.-", 0); - i=0; - while (pch != NULL && ichannels.channel[i] = (uint32)simple_strtol(pch, NULL, 10); - pch = bcmstrtok(&pick_tmp, " ,.-", 0); - i++; - } - conf->channels.count = i; - printf("%s: channels = ", __FUNCTION__); - for (i=0; ichannels.count; i++) - printf("%d ", conf->channels.channel[i]); - printf("\n"); - } - - /* Process keep alive period */ - 
memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "keep_alive_period="); - if (len_val) { - conf->keep_alive_period = (uint)simple_strtol(pick, NULL, 10); - printf("%s: keep_alive_period = %d\n", __FUNCTION__, - conf->keep_alive_period); - } - - /* Process STBC parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "stbc="); - if (len_val) { - conf->stbc = (int)simple_strtol(pick, NULL, 10); - printf("%s: stbc = %d\n", __FUNCTION__, conf->stbc); - } - - /* Process phy_oclscdenable parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "phy_oclscdenable="); - if (len_val) { - conf->phy_oclscdenable = (int)simple_strtol(pick, NULL, 10); - printf("%s: phy_oclscdenable = %d\n", __FUNCTION__, conf->phy_oclscdenable); - } - -#ifdef BCMSDIO - /* Process dhd_doflow parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "dhd_doflow="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - dhd_doflow = FALSE; - else - dhd_doflow = TRUE; - printf("%s: dhd_doflow = %d\n", __FUNCTION__, dhd_doflow); - } - - /* Process dhd_slpauto parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "dhd_slpauto="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - dhd_slpauto = FALSE; - else - dhd_slpauto = TRUE; - printf("%s: dhd_slpauto = %d\n", __FUNCTION__, dhd_slpauto); - } -#endif - - /* Process dhd_master_mode parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "dhd_master_mode="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - dhd_master_mode = FALSE; - else - dhd_master_mode = TRUE; - printf("%s: dhd_master_mode = %d\n", __FUNCTION__, dhd_master_mode); - } - -#ifdef PKT_FILTER_SUPPORT - /* Process pkt_filter_add: - * All pkt: pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000 - */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, 
len, pick, "pkt_filter_add="); - pick_tmp = pick; - if (len_val) { - pch = bcmstrtok(&pick_tmp, ",.-", 0); - i=0; - while (pch != NULL && ipkt_filter_add.filter[i][0], pch); - printf("%s: pkt_filter_add[%d][] = %s\n", __FUNCTION__, i, &conf->pkt_filter_add.filter[i][0]); - pch = bcmstrtok(&pick_tmp, ",.-", 0); - i++; - } - conf->pkt_filter_add.count = i; - } - - /* Process pkt_filter_del */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "pkt_filter_del="); - pick_tmp = pick; - if (len_val) { - pch = bcmstrtok(&pick_tmp, " ,.-", 0); - i=0; - while (pch != NULL && ipkt_filter_del.id[i] = (uint32)simple_strtol(pch, NULL, 10); - pch = bcmstrtok(&pick_tmp, " ,.-", 0); - i++; - } - conf->pkt_filter_del.count = i; - printf("%s: pkt_filter_del id = ", __FUNCTION__); - for (i=0; ipkt_filter_del.count; i++) - printf("%d ", conf->pkt_filter_del.id[i]); - printf("\n"); - } -#endif - - /* Process srl parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "srl="); - if (len_val) { - conf->srl = (int)simple_strtol(pick, NULL, 10); - printf("%s: srl = %d\n", __FUNCTION__, conf->srl); - } - - /* Process lrl parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "lrl="); - if (len_val) { - conf->lrl = (int)simple_strtol(pick, NULL, 10); - printf("%s: lrl = %d\n", __FUNCTION__, conf->lrl); - } - - /* Process beacon timeout parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "bcn_timeout="); - if (len_val) { - conf->bcn_timeout= (uint)simple_strtol(pick, NULL, 10); - printf("%s: bcn_timeout = %d\n", __FUNCTION__, conf->bcn_timeout); - } - - /* Process bus:txglom */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "bus:txglom="); - if (len_val) { - conf->bus_txglom = (int)simple_strtol(pick, NULL, 10); - printf("%s: bus:txglom = %d\n", __FUNCTION__, conf->bus_txglom); - } - - /* Process ampdu_ba_wsize 
parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "ampdu_ba_wsize="); - if (len_val) { - conf->ampdu_ba_wsize = (int)simple_strtol(pick, NULL, 10); - printf("%s: ampdu_ba_wsize = %d\n", __FUNCTION__, conf->ampdu_ba_wsize); - } - - /* Process kso_enable parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "kso_enable="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - conf->kso_enable = FALSE; - else - conf->kso_enable = TRUE; - printf("%s: kso_enable = %d\n", __FUNCTION__, conf->kso_enable); - } - - /* Process spect parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "spect="); - if (len_val) { - conf->spect = (int)simple_strtol(pick, NULL, 10); - printf("%s: spect = %d\n", __FUNCTION__, conf->spect); - } - - /* Process txbf parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "txbf="); - if (len_val) { - conf->txbf = (int)simple_strtol(pick, NULL, 10); - printf("%s: txbf = %d\n", __FUNCTION__, conf->txbf); - } - - /* Process frameburst parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "frameburst="); - if (len_val) { - conf->frameburst = (int)simple_strtol(pick, NULL, 10); - printf("%s: frameburst = %d\n", __FUNCTION__, conf->frameburst); - } - - /* Process lpc parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "lpc="); - if (len_val) { - conf->lpc = (int)simple_strtol(pick, NULL, 10); - printf("%s: lpc = %d\n", __FUNCTION__, conf->lpc); - } - - /* Process use_rxchain parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "use_rxchain="); - if (len_val) { - conf->use_rxchain = (int)simple_strtol(pick, NULL, 10); - printf("%s: use_rxchain = %d\n", __FUNCTION__, conf->use_rxchain); - } - -#if defined(BCMSDIOH_TXGLOM) - /* Process txglomsize parameters */ - memset(pick, 0, 
MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "txglomsize="); - if (len_val) { - conf->txglomsize = (uint)simple_strtol(pick, NULL, 10); - if (conf->txglomsize > SDPCM_MAXGLOM_SIZE) - conf->txglomsize = SDPCM_MAXGLOM_SIZE; - printf("%s: txglomsize = %d\n", __FUNCTION__, conf->txglomsize); - } - - /* Process swtxglom parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "swtxglom="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - conf->swtxglom = FALSE; - else - conf->swtxglom = TRUE; - printf("%s: swtxglom = %d\n", __FUNCTION__, conf->swtxglom); - } - - /* Process txglom_ext parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "txglom_ext="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - conf->txglom_ext = FALSE; - else - conf->txglom_ext = TRUE; - printf("%s: txglom_ext = %d\n", __FUNCTION__, conf->txglom_ext); - if (conf->txglom_ext) { - if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID)) - conf->txglom_bucket_size = 1680; - else if (conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || - conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) - conf->txglom_bucket_size = 1684; - } - printf("%s: txglom_bucket_size = %d\n", __FUNCTION__, conf->txglom_bucket_size); - } -#endif - - /* Process disable_proptx parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "disable_proptx="); - if (len_val) { - conf->disable_proptx = (int)simple_strtol(pick, NULL, 10); - printf("%s: disable_proptx = %d\n", __FUNCTION__, conf->disable_proptx); - } - - /* Process dpc_cpucore parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "dpc_cpucore="); - if (len_val) { - conf->dpc_cpucore = (int)simple_strtol(pick, NULL, 10); - printf("%s: dpc_cpucore = %d\n", __FUNCTION__, conf->dpc_cpucore); - } - - /* Process bus:rxglom parameters */ - memset(pick, 0, 
MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "bus:rxglom="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - conf->bus_rxglom = FALSE; - else - conf->bus_rxglom = TRUE; - printf("%s: bus:rxglom = %d\n", __FUNCTION__, conf->bus_rxglom); - } - - /* Process deepsleep parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "deepsleep="); - if (len_val) { - if (!strncmp(pick, "1", len_val)) - conf->deepsleep = TRUE; - else - conf->deepsleep = FALSE; - printf("%s: deepsleep = %d\n", __FUNCTION__, conf->deepsleep); - } - - /* Process PM parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "PM="); - if (len_val) { - conf->pm = (int)simple_strtol(pick, NULL, 10); - printf("%s: PM = %d\n", __FUNCTION__, conf->pm); - } - - /* Process tcpack_sup_mode parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "tcpack_sup_mode="); - if (len_val) { - conf->tcpack_sup_mode = (uint)simple_strtol(pick, NULL, 10); - printf("%s: tcpack_sup_mode = %d\n", __FUNCTION__, conf->tcpack_sup_mode); - } - - /* Process dhd_poll parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "dhd_poll="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - conf->dhd_poll = 0; - else - conf->dhd_poll = 1; - printf("%s: dhd_poll = %d\n", __FUNCTION__, conf->dhd_poll); - } - - /* Process deferred_tx_len parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "deferred_tx_len="); - if (len_val) { - conf->deferred_tx_len = (int)simple_strtol(pick, NULL, 10); - printf("%s: deferred_tx_len = %d\n", __FUNCTION__, conf->deferred_tx_len); - } - - /* Process pktprio8021x parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "pktprio8021x="); - if (len_val) { - conf->pktprio8021x = (int)simple_strtol(pick, NULL, 10); - printf("%s: pktprio8021x = %d\n", __FUNCTION__, 
conf->pktprio8021x); - } - - /* Process txctl_tmo_fix parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "txctl_tmo_fix="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - conf->txctl_tmo_fix = FALSE; - else - conf->txctl_tmo_fix = TRUE; - printf("%s: txctl_tmo_fix = %d\n", __FUNCTION__, conf->txctl_tmo_fix); - } - - /* Process tx_in_rx parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "tx_in_rx="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - conf->tx_in_rx = FALSE; - else - conf->tx_in_rx = TRUE; - printf("%s: tx_in_rx = %d\n", __FUNCTION__, conf->tx_in_rx); - } - - /* Process dhd_txbound parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "dhd_txbound="); - if (len_val) { - dhd_txbound = (uint)simple_strtol(pick, NULL, 10); - printf("%s: dhd_txbound = %d\n", __FUNCTION__, dhd_txbound); - } - - /* Process dhd_rxbound parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "dhd_rxbound="); - if (len_val) { - dhd_rxbound = (uint)simple_strtol(pick, NULL, 10); - printf("%s: dhd_rxbound = %d\n", __FUNCTION__, dhd_rxbound); - } - - /* Process tx_max_offset parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "tx_max_offset="); - if (len_val) { - conf->tx_max_offset = (int)simple_strtol(pick, NULL, 10); - printf("%s: tx_max_offset = %d\n", __FUNCTION__, conf->tx_max_offset); - } - - /* Process rsdb_mode parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "rsdb_mode="); - if (len_val) { - conf->rsdb_mode = (int)simple_strtol(pick, NULL, 10); - printf("%s: rsdb_mode = %d\n", __FUNCTION__, conf->rsdb_mode); - } - - /* Process txglom_mode parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "txglom_mode="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - 
conf->txglom_mode = FALSE; - else - conf->txglom_mode = TRUE; - printf("%s: txglom_mode = %d\n", __FUNCTION__, conf->txglom_mode); - } - - /* Process vhtmode parameters */ - memset(pick, 0, MAXSZ_BUF); - len_val = process_config_vars(bufp, len, pick, "vhtmode="); - if (len_val) { - if (!strncmp(pick, "0", len_val)) - conf->vhtmode = 0; - else - conf->vhtmode = 1; - printf("%s: vhtmode = %d\n", __FUNCTION__, conf->vhtmode); - } - - bcmerror = 0; - } else { - CONFIG_ERROR(("%s: error reading config file: %d\n", __FUNCTION__, len)); - bcmerror = BCME_SDIO_ERROR; - } - -err: - if (pick) - MFREE(dhd->osh, pick, MAXSZ_BUF); - - if (memblock) - MFREE(dhd->osh, memblock, MAXSZ_CONFIG); - - if (image) - dhd_os_close_image(image); - - return bcmerror; -} - -int -dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev) -{ - printf("%s: chip=0x%x, chiprev=%d\n", __FUNCTION__, chip, chiprev); - dhd->conf->chip = chip; - dhd->conf->chiprev = chiprev; - return 0; -} - -uint -dhd_conf_get_chip(void *context) -{ - dhd_pub_t *dhd = context; - - if (dhd && dhd->conf) - return dhd->conf->chip; - return 0; -} - -uint -dhd_conf_get_chiprev(void *context) -{ - dhd_pub_t *dhd = context; - - if (dhd && dhd->conf) - return dhd->conf->chiprev; - return 0; -} - -void -dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable) -{ - struct dhd_conf *conf = dhd->conf; - - if (enable) { -#if defined(SWTXGLOM) - if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || - conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || - conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { - // 43362/4330/4334/43340/43341/43241 must use 1.88.45.x swtxglom if txglom_ext is true, since 1.201.59 not support swtxglom - conf->swtxglom = TRUE; - conf->txglom_ext = TRUE; - } - if (conf->chip == BCM43362_CHIP_ID && conf->bus_txglom == 0) { - conf->bus_txglom = 1; // improve tcp tx tput. 
and cpu idle for 43362 only - } -#endif - // other parameters set in preinit or config.txt - } else { - // clear txglom parameters, but don't change swtxglom since it's possible enabled in config.txt - conf->txglom_ext = FALSE; - conf->txglom_bucket_size = 0; - conf->tx_in_rx = TRUE; - conf->tx_max_offset = 0; - conf->txglomsize = 0; - conf->deferred_tx_len = 0; - } - printf("%s: swtxglom=%d, txglom_ext=%d\n", __FUNCTION__, - conf->swtxglom, conf->txglom_ext); - printf("%s: txglom_bucket_size=%d\n", __FUNCTION__, conf->txglom_bucket_size); - printf("%s: txglomsize=%d, deferred_tx_len=%d, bus_txglom=%d\n", __FUNCTION__, - conf->txglomsize, conf->deferred_tx_len, conf->bus_txglom); - printf("%s: tx_in_rx=%d, tx_max_offset=%d\n", __FUNCTION__, - conf->tx_in_rx, conf->tx_max_offset); - -} - -int -dhd_conf_preinit(dhd_pub_t *dhd) -{ - struct dhd_conf *conf = dhd->conf; - - CONFIG_TRACE(("%s: Enter\n", __FUNCTION__)); - -#ifdef BCMSDIO - dhd_conf_free_mac_list(&conf->fw_by_mac); - dhd_conf_free_mac_list(&conf->nv_by_mac); - dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip); -#endif - memset(&conf->country_list, 0, sizeof(conf_country_list_t)); - conf->band = WLC_BAND_AUTO; - conf->mimo_bw_cap = -1; - if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) { - strcpy(conf->cspec.country_abbrev, "ALL"); - strcpy(conf->cspec.ccode, "ALL"); - conf->cspec.rev = 0; - } else if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID || - conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID || - conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID || - conf->chip == BCM4359_CHIP_ID) { - strcpy(conf->cspec.country_abbrev, "CN"); - strcpy(conf->cspec.ccode, "CN"); - conf->cspec.rev = 38; - } else { - strcpy(conf->cspec.country_abbrev, "CN"); - strcpy(conf->cspec.ccode, "CN"); - conf->cspec.rev = 0; - } - memset(&conf->channels, 0, sizeof(wl_channel_list_t)); - conf->roam_off = 1; - conf->roam_off_suspend = 1; -#ifdef 
CUSTOM_ROAM_TRIGGER_SETTING - conf->roam_trigger[0] = CUSTOM_ROAM_TRIGGER_SETTING; -#else - conf->roam_trigger[0] = -65; -#endif - conf->roam_trigger[1] = WLC_BAND_ALL; - conf->roam_scan_period[0] = 10; - conf->roam_scan_period[1] = WLC_BAND_ALL; -#ifdef CUSTOM_ROAM_DELTA_SETTING - conf->roam_delta[0] = CUSTOM_ROAM_DELTA_SETTING; -#else - conf->roam_delta[0] = 15; -#endif - conf->roam_delta[1] = WLC_BAND_ALL; -#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC - conf->fullroamperiod = 60; -#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ - conf->fullroamperiod = 120; -#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ -#ifdef CUSTOM_KEEP_ALIVE_SETTING - conf->keep_alive_period = CUSTOM_KEEP_ALIVE_SETTING; -#else - conf->keep_alive_period = 28000; -#endif - conf->force_wme_ac = 0; - conf->stbc = -1; - conf->phy_oclscdenable = -1; -#ifdef PKT_FILTER_SUPPORT - memset(&conf->pkt_filter_add, 0, sizeof(conf_pkt_filter_add_t)); - memset(&conf->pkt_filter_del, 0, sizeof(conf_pkt_filter_del_t)); - conf->pkt_filter_magic = FALSE; -#endif - conf->srl = -1; - conf->lrl = -1; - conf->bcn_timeout = 15; - conf->kso_enable = TRUE; - conf->spect = -1; - conf->txbf = -1; - conf->lpc = -1; - conf->disable_proptx = 0; - conf->bus_txglom = 0; - conf->use_rxchain = 0; - conf->bus_rxglom = TRUE; - conf->txglom_ext = FALSE; - conf->tx_max_offset = 0; - conf->deferred_tx_len = 0; - conf->txglomsize = SDPCM_DEFGLOM_SIZE; - conf->ampdu_ba_wsize = 0; - conf->dpc_cpucore = 0; - conf->frameburst = -1; - conf->deepsleep = FALSE; - conf->pm = -1; -#ifdef DHDTCPACK_SUPPRESS - conf->tcpack_sup_mode = TCPACK_SUP_OFF; -#endif - conf->dhd_poll = -1; - conf->pktprio8021x = -1; - conf->txctl_tmo_fix = FALSE; - conf->tx_in_rx = TRUE; - conf->rsdb_mode = -2; - conf->txglom_mode = SDPCM_TXGLOM_MDESC; - conf->vhtmode = -1; - if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID)) { - conf->disable_proptx = 1; - } - if (conf->chip == BCM43430_CHIP_ID) { - conf->bus_rxglom = FALSE; - } - if (conf->chip == 
BCM4339_CHIP_ID) { - conf->txbf = 1; - } - if (conf->chip == BCM4345_CHIP_ID) { - conf->txbf = 1; - } - if (conf->chip == BCM4354_CHIP_ID) { - conf->txbf = 1; - } - if (conf->chip == BCM4356_CHIP_ID) { - conf->txbf = 1; - } - if (conf->chip == BCM4371_CHIP_ID) { - conf->txbf = 1; - } - if (conf->chip == BCM4359_CHIP_ID) { - conf->txbf = 1; - conf->rsdb_mode = 0; - } - -#if defined(SWTXGLOM) - if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || - conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || - conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { - conf->swtxglom = FALSE; // disabled by default - conf->txglom_ext = TRUE; // enabled by default - conf->use_rxchain = 0; // use_rxchain have been disabled if swtxglom enabled - conf->txglomsize = 16; - } else { - conf->swtxglom = FALSE; // use 1.201.59.x txglom by default - conf->txglom_ext = FALSE; - } - - if (conf->chip == BCM43362_CHIP_ID) { - conf->txglom_bucket_size = 1680; // fixed value, don't change - conf->tx_in_rx = FALSE; - conf->tx_max_offset = 1; - } - if (conf->chip == BCM4330_CHIP_ID) { - conf->txglom_bucket_size = 1680; // fixed value, don't change - conf->tx_in_rx = FALSE; - conf->tx_max_offset = 0; - } - if (conf->chip == BCM4334_CHIP_ID) { - conf->txglom_bucket_size = 1684; // fixed value, don't change - conf->tx_in_rx = TRUE; // improve tcp tx tput. and cpu idle - conf->tx_max_offset = 0; // reduce udp tx: dhdsdio_readframes: got unlikely tx max 109 with tx_seq 110 - } - if (conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID) { - conf->txglom_bucket_size = 1684; // fixed value, don't change - conf->tx_in_rx = TRUE; // improve tcp tx tput. and cpu idle - conf->tx_max_offset = 1; - } - if (conf->chip == BCM4324_CHIP_ID) { - conf->txglom_bucket_size = 1684; // fixed value, don't change - conf->tx_in_rx = TRUE; // improve tcp tx tput. 
and cpu idle - conf->tx_max_offset = 0; - } -#endif -#if defined(BCMSDIOH_TXGLOM_EXT) - conf->txglom_mode = SDPCM_TXGLOM_CPY; - if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || - conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || - conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { - conf->txglom_ext = TRUE; - conf->use_rxchain = 0; - conf->tx_in_rx = TRUE; - conf->tx_max_offset = 1; - } else { - conf->txglom_ext = FALSE; - } - if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) { - conf->txglom_bucket_size = 1680; // fixed value, don't change - conf->txglomsize = 6; - } - if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID || - conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { - conf->txglom_bucket_size = 1684; // fixed value, don't change - conf->txglomsize = 16; - } -#endif - if (conf->txglomsize > SDPCM_MAXGLOM_SIZE) - conf->txglomsize = SDPCM_MAXGLOM_SIZE; - conf->deferred_tx_len = conf->txglomsize; - - return 0; -} - -int -dhd_conf_reset(dhd_pub_t *dhd) -{ -#ifdef BCMSDIO - dhd_conf_free_mac_list(&dhd->conf->fw_by_mac); - dhd_conf_free_mac_list(&dhd->conf->nv_by_mac); - dhd_conf_free_chip_nv_path_list(&dhd->conf->nv_by_chip); -#endif - memset(dhd->conf, 0, sizeof(dhd_conf_t)); - return 0; -} - -int -dhd_conf_attach(dhd_pub_t *dhd) -{ - dhd_conf_t *conf; - - CONFIG_TRACE(("%s: Enter\n", __FUNCTION__)); - - if (dhd->conf != NULL) { - printf("%s: config is attached before!\n", __FUNCTION__); - return 0; - } - /* Allocate private bus interface state */ - if (!(conf = MALLOC(dhd->osh, sizeof(dhd_conf_t)))) { - CONFIG_ERROR(("%s: MALLOC failed\n", __FUNCTION__)); - goto fail; - } - memset(conf, 0, sizeof(dhd_conf_t)); - - dhd->conf = conf; - - return 0; - -fail: - if (conf != NULL) - MFREE(dhd->osh, conf, sizeof(dhd_conf_t)); - return BCME_NOMEM; -} - -void -dhd_conf_detach(dhd_pub_t *dhd) -{ - CONFIG_TRACE(("%s: Enter\n", __FUNCTION__)); - - if (dhd->conf) { 
-#ifdef BCMSDIO - dhd_conf_free_mac_list(&dhd->conf->fw_by_mac); - dhd_conf_free_mac_list(&dhd->conf->nv_by_mac); - dhd_conf_free_chip_nv_path_list(&dhd->conf->nv_by_chip); -#endif - MFREE(dhd->osh, dhd->conf, sizeof(dhd_conf_t)); - } - dhd->conf = NULL; -} diff --git a/drivers/net/wireless/bcmdhd/dhd_flowring.h b/drivers/net/wireless/bcmdhd/dhd_flowring.h deleted file mode 100644 index bfab6fe68f397..0000000000000 --- a/drivers/net/wireless/bcmdhd/dhd_flowring.h +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Header file describing the flow rings DHD interfaces. - * - * Provides type definitions and function prototypes used to create, delete and manage - * - * flow rings at high level - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: dhd_flowrings.h jaganlv $ - */ - -/**************** - * Common types * - */ - -#ifndef _dhd_flowrings_h_ -#define _dhd_flowrings_h_ - -/* Max pkts held in a flow ring's backup queue */ -#define FLOW_RING_QUEUE_THRESHOLD (2048) - -/* Number of H2D common rings : PCIE Spec Rev? 
*/ -#define FLOW_RING_COMMON 2 - -#define FLOWID_INVALID (ID16_INVALID) -#define FLOWID_RESERVED (FLOW_RING_COMMON) - -#define FLOW_RING_STATUS_OPEN 0 -#define FLOW_RING_STATUS_PENDING 1 -#define FLOW_RING_STATUS_CLOSED 2 -#define FLOW_RING_STATUS_DELETE_PENDING 3 -#define FLOW_RING_STATUS_FLUSH_PENDING 4 - -#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 2048 - -#define DHD_FLOW_PRIO_AC_MAP 0 -#define DHD_FLOW_PRIO_TID_MAP 1 - - -/* Pkttag not compatible with PROP_TXSTATUS or WLFC */ -typedef struct dhd_pkttag_fr { - uint16 flowid; - int dataoff; -} dhd_pkttag_fr_t; - -#define DHD_PKTTAG_SET_FLOWID(tag, flow) ((tag)->flowid = (uint16)(flow)) -#define DHD_PKTTAG_SET_DATAOFF(tag, offset) ((tag)->dataoff = (int)(offset)) - -#define DHD_PKTTAG_FLOWID(tag) ((tag)->flowid) -#define DHD_PKTTAG_DATAOFF(tag) ((tag)->dataoff) - -/* Hashing a MacAddress for lkup into a per interface flow hash table */ -#define DHD_FLOWRING_HASH_SIZE 256 -#define DHD_FLOWRING_HASHINDEX(ea, prio) \ - ((((uint8 *)(ea))[3] ^ ((uint8 *)(ea))[4] ^ ((uint8 *)(ea))[5] ^ ((uint8)(prio))) \ - % DHD_FLOWRING_HASH_SIZE) - -#define DHD_IF_ROLE(pub, idx) (((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role) -#define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP) -#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO) -#define DHD_FLOW_RING(dhdp, flowid) \ - (flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid]) - -struct flow_queue; - -/* Flow Ring Queue Enqueue overflow callback */ -typedef int (*flow_queue_cb_t)(struct flow_queue * queue, void * pkt); - -typedef struct flow_queue { - dll_t list; /* manage a flowring queue in a dll */ - void * head; /* first packet in the queue */ - void * tail; /* last packet in the queue */ - uint16 len; /* number of packets in the queue */ - uint16 max; /* maximum number of packets, queue may hold */ - uint32 failures; /* enqueue failures due to queue overflow */ - flow_queue_cb_t cb; /* callback invoked 
on threshold crossing */ -} flow_queue_t; - -#define flow_queue_len(queue) ((int)(queue)->len) -#define flow_queue_max(queue) ((int)(queue)->max) -#define flow_queue_avail(queue) ((int)((queue)->max - (queue)->len)) -#define flow_queue_full(queue) ((queue)->len >= (queue)->max) -#define flow_queue_empty(queue) ((queue)->len == 0) - -typedef struct flow_info { - uint8 tid; - uint8 ifindex; - char sa[ETHER_ADDR_LEN]; - char da[ETHER_ADDR_LEN]; -} flow_info_t; - -typedef struct flow_ring_node { - dll_t list; /* manage a constructed flowring in a dll, must be at first place */ - flow_queue_t queue; - bool active; - uint8 status; - uint16 flowid; - flow_info_t flow_info; - void *prot_info; - void *lock; /* lock for flowring access protection */ -} flow_ring_node_t; -typedef flow_ring_node_t flow_ring_table_t; - -typedef struct flow_hash_info { - uint16 flowid; - flow_info_t flow_info; - struct flow_hash_info *next; -} flow_hash_info_t; - -typedef struct if_flow_lkup { - bool status; - uint8 role; /* Interface role: STA/AP */ - flow_hash_info_t *fl_hash[DHD_FLOWRING_HASH_SIZE]; /* Lkup Hash table */ -} if_flow_lkup_t; - -static INLINE flow_ring_node_t * -dhd_constlist_to_flowring(dll_t *item) -{ - return ((flow_ring_node_t *)item); -} - -/* Exported API */ - -/* Flow ring's queue management functions */ -extern void dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max); -extern void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb); -extern int dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt); -extern void * dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue); -extern void dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt); - -extern int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings); - -extern void dhd_flow_rings_deinit(dhd_pub_t *dhdp); - -extern uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da); - -extern int dhd_flowid_update(dhd_pub_t 
*dhdp, uint8 ifindex, uint8 prio, - void *pktbuf); - -extern void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid); - -extern void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex); - -extern void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, - char *addr); - -/* Handle Interface ADD, DEL operations */ -extern void dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex, - uint8 op, uint8 role); - -/* Handle a STA interface link status update */ -extern int dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, - uint8 status); -extern int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set); -extern int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map); - -extern uint8 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex); -#endif /* _dhd_flowrings_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_gpio.c b/drivers/net/wireless/bcmdhd/dhd_gpio.c deleted file mode 100644 index cceb9211f33ac..0000000000000 --- a/drivers/net/wireless/bcmdhd/dhd_gpio.c +++ /dev/null @@ -1,195 +0,0 @@ - -#include -#include - -#ifdef CONFIG_MACH_ODROID_4210 -#include -#include -#include -#include -#include -#define sdmmc_channel s3c_device_hsmmc0 -#endif -#ifdef CUSTOMER_HW_AMLOGIC -extern void sdio_reinit(void); -extern void extern_wifi_set_enable(int is_on); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) -extern int wifi_irq_num(void); -#endif -#endif - -struct wifi_platform_data dhd_wlan_control = {0}; - -#ifdef CUSTOMER_OOB -uint bcm_wlan_get_oob_irq(void) -{ - uint host_oob_irq = 0; - -#ifdef CONFIG_MACH_ODROID_4210 - printf("GPIO(WL_HOST_WAKE) = EXYNOS4_GPX0(7) = %d\n", EXYNOS4_GPX0(7)); - host_oob_irq = gpio_to_irq(EXYNOS4_GPX0(7)); - gpio_direction_input(EXYNOS4_GPX0(7)); -#endif -#ifdef CUSTOMER_HW_AMLOGIC -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) - host_oob_irq = INT_GPIO_4; -#else - host_oob_irq = wifi_irq_num(); -#endif -#endif - printf("host_oob_irq: %d\n", host_oob_irq); - - return 
host_oob_irq; -} - -uint bcm_wlan_get_oob_irq_flags(void) -{ - uint host_oob_irq_flags = 0; - -#ifdef HW_OOB - host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; -#else - host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_SHAREABLE; -#endif - - printf("host_oob_irq_flags=0x%X\n", host_oob_irq_flags); - - return host_oob_irq_flags; -} -#endif - -int bcm_wlan_set_power(bool on) -{ - int err = 0; - - if (on) { - printf("======== PULL WL_REG_ON HIGH! ========\n"); -#ifdef CONFIG_MACH_ODROID_4210 - err = gpio_set_value(EXYNOS4_GPK1(0), 1); -#endif -#ifdef CUSTOMER_HW_AMLOGIC - extern_wifi_set_enable(0); - mdelay(200); - extern_wifi_set_enable(1); - mdelay(200); -// sdio_reinit(); -#endif - /* Lets customer power to get stable */ - mdelay(100); - } else { - printf("======== PULL WL_REG_ON LOW! ========\n"); -#ifdef CONFIG_MACH_ODROID_4210 - err = gpio_set_value(EXYNOS4_GPK1(0), 0); -#endif -#ifdef CUSTOMER_HW_AMLOGIC -// extern_wifi_set_enable(0); -// mdelay(200); -#endif - } - - return err; -} - -int bcm_wlan_set_carddetect(bool present) -{ - int err = 0; - - if (present) { - printf("======== Card detection to detect SDIO card! ========\n"); -#ifdef CONFIG_MACH_ODROID_4210 - err = sdhci_s3c_force_presence_change(&sdmmc_channel, 1); -#endif -#ifdef CUSTOMER_HW_AMLOGIC - sdio_reinit(); -#endif - } else { - printf("======== Card detection to remove SDIO card! 
========\n"); -#ifdef CONFIG_MACH_ODROID_4210 - err = sdhci_s3c_force_presence_change(&sdmmc_channel, 0); -#endif -#ifdef CUSTOMER_HW_AMLOGIC - extern_wifi_set_enable(0); - mdelay(200); -#endif - } - - return err; -} - -int bcm_wlan_get_mac_address(unsigned char *buf) -{ - int err = 0; - - printf("======== %s ========\n", __FUNCTION__); -#ifdef EXAMPLE_GET_MAC - /* EXAMPLE code */ - { - struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}}; - bcopy((char *)&ea_example, buf, sizeof(struct ether_addr)); - } -#endif /* EXAMPLE_GET_MAC */ - - return err; -} - -#ifdef CONFIG_DHD_USE_STATIC_BUF -extern void *bcmdhd_mem_prealloc(int section, unsigned long size); -void* bcm_wlan_prealloc(int section, unsigned long size) -{ - void *alloc_ptr = NULL; - alloc_ptr = bcmdhd_mem_prealloc(section, size); - if (alloc_ptr) { - printf("success alloc section %d, size %ld\n", section, size); - if (size != 0L) - bzero(alloc_ptr, size); - return alloc_ptr; - } - printf("can't alloc section %d\n", section); - return NULL; -} -#endif - -#if !defined(WL_WIRELESS_EXT) -struct cntry_locales_custom { - char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */ - char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */ - int32 custom_locale_rev; /* Custom local revisin default -1 */ -}; -#endif - -static struct cntry_locales_custom brcm_wlan_translate_custom_table[] = { - /* Table should be filled out based on custom platform regulatory requirement */ - {"", "XT", 49}, /* Universal if Country code is unknown or empty */ - {"US", "US", 0}, -}; - -static void *bcm_wlan_get_country_code(char *ccode) -{ - struct cntry_locales_custom *locales; - int size; - int i; - - if (!ccode) - return NULL; - - locales = brcm_wlan_translate_custom_table; - size = ARRAY_SIZE(brcm_wlan_translate_custom_table); - - for (i = 0; i < size; i++) - if (strcmp(ccode, locales[i].iso_abbrev) == 0) - return &locales[i]; - return NULL; -} - -int bcm_wlan_set_plat_data(void) { - 
printf("======== %s ========\n", __FUNCTION__); - dhd_wlan_control.set_power = bcm_wlan_set_power; - dhd_wlan_control.set_carddetect = bcm_wlan_set_carddetect; - dhd_wlan_control.get_mac_addr = bcm_wlan_get_mac_address; -#ifdef CONFIG_DHD_USE_STATIC_BUF - dhd_wlan_control.mem_prealloc = bcm_wlan_prealloc; -#endif - dhd_wlan_control.get_country_code = bcm_wlan_get_country_code; - return 0; -} - diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c deleted file mode 100644 index ed635b889586d..0000000000000 --- a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Expose some of the kernel scheduler routines - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: dhd_linux_sched.c 457570 2014-02-23 13:54:46Z $ - */ -#include -#include -#include -#include -#include - -int setScheduler(struct task_struct *p, int policy, struct sched_param *param) -{ - int rc = 0; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) - rc = sched_setscheduler(p, policy, param); -#endif /* LinuxVer */ - return rc; -} - -int get_scheduler_policy(struct task_struct *p) -{ - int rc = SCHED_NORMAL; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) - rc = p->policy; -#endif /* LinuxVer */ - return rc; -} diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_wq.h b/drivers/net/wireless/bcmdhd/dhd_linux_wq.h deleted file mode 100644 index 35982ef5ca888..0000000000000 --- a/drivers/net/wireless/bcmdhd/dhd_linux_wq.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Broadcom Dongle Host Driver (DHD), Generic work queue framework - * Generic interface to handle dhd deferred work events - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: dhd_linux_wq.h 449578 2014-01-17 13:53:20Z $ - */ -#ifndef _dhd_linux_wq_h_ -#define _dhd_linux_wq_h_ -/* - * Work event definitions - */ -enum _wq_event { - DHD_WQ_WORK_IF_ADD = 1, - DHD_WQ_WORK_IF_DEL, - DHD_WQ_WORK_SET_MAC, - DHD_WQ_WORK_SET_MCAST_LIST, - DHD_WQ_WORK_IPV6_NDO, - 
DHD_WQ_WORK_HANG_MSG, - - DHD_MAX_WQ_EVENTS -}; - -/* - * Work event priority - */ -#define DHD_WORK_PRIORITY_LOW 0 -#define DHD_WORK_PRIORITY_HIGH 1 - -/* - * Error definitions - */ -#define DHD_WQ_STS_OK 0 -#define DHD_WQ_STS_FAILED -1 /* General failure */ -#define DHD_WQ_STS_UNINITIALIZED -2 -#define DHD_WQ_STS_SCHED_FAILED -3 -#define DHD_WQ_STS_UNKNOWN_EVENT -4 - -typedef void (*event_handler_t)(void *handle, void *event_data, u8 event); - -void *dhd_deferred_work_init(void *dhd); -void dhd_deferred_work_deinit(void *workq); -int dhd_deferred_schedule_work(void *workq, void *event_data, u8 event, - event_handler_t evt_handler, u8 priority); -#endif /* _dhd_linux_wq_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_log.c b/drivers/net/wireless/bcmdhd/dhd_log.c deleted file mode 100644 index a498197d65aae..0000000000000 --- a/drivers/net/wireless/bcmdhd/dhd_log.c +++ /dev/null @@ -1,58 +0,0 @@ -/* - * DHD logging module for internal debug - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: dhd_sdio.c 281456 2011-09-02 01:49:45Z $ - */ - -#include -#include - -#include -#include -#include - -#include - -void dhd_blog(char *cp, int size) -{ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)) - static struct socket * _udpSocket = NULL; - struct sockaddr_in _saAddr; - struct iovec iov; - struct msghdr msg; - if (sock_create(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &_udpSocket) >= 0) - { - - { - memset(&_saAddr, 0, sizeof(_saAddr)); - _saAddr.sin_family = AF_INET; - _saAddr.sin_port = htons(7651); - _saAddr.sin_addr.s_addr = in_aton("10.19.74.43"); - - iov.iov_base = cp; - iov.iov_len = size; - - msg.msg_name = &_saAddr; - msg.msg_namelen = sizeof(struct sockaddr_in); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - msg.msg_control = NULL; - msg.msg_controllen = 0; - msg.msg_flags = 0; - - { - mm_segment_t fs = get_fs(); - set_fs(get_ds()); - - sock_sendmsg(_udpSocket, &msg, size); - - set_fs(fs); - } - } - - sock_release(_udpSocket); - } -#endif /* #if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ -} diff --git a/drivers/net/wireless/bcmdhd/dhd_msgbuf.c b/drivers/net/wireless/bcmdhd/dhd_msgbuf.c deleted file mode 100644 index 2ba222e8087a4..0000000000000 --- a/drivers/net/wireless/bcmdhd/dhd_msgbuf.c +++ /dev/null @@ -1,4189 +0,0 @@ -/* - * Header file describing the internal (inter-module) DHD interfaces. - * - * Provides type definitions and function prototypes used to link the - * DHD OS, bus, and protocol modules. - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: dhd_msgbuf.c 504484 2014-09-24 10:11:20Z $ - */ -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include - -#include - - -#include - -#ifdef PROP_TXSTATUS -#include -#include -#endif - -#include -#include -#include -#include - -/* - * PCIE D2H DMA Complete Sync Modes - * - * Firmware may interrupt the host, prior to the D2H Mem2Mem DMA completes into - * Host system memory. A WAR using one of 3 approaches is needed: - * 1. Dongle places ia modulo-253 seqnum in last word of each D2H message - * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum - * writes in the last word of each work item. Each work item has a seqnum - * number = sequence num % 253. - * 3. Read Barrier: Dongle does a host memory read access prior to posting an - * interrupt. - * Host does not participate with option #3, other than reserving a host system - * memory location for the dongle to read. - */ -#define PCIE_D2H_SYNC -#define PCIE_D2H_SYNC_WAIT_TRIES 1024 -#define PCIE_D2H_SYNC_BZERO /* bzero a message before updating the RD offset */ - -#define RETRIES 2 /* # of retries to retrieve matching ioctl response */ -#define IOCTL_HDR_LEN 12 - -#define DEFAULT_RX_BUFFERS_TO_POST 256 -#define RXBUFPOST_THRESHOLD 32 -#define RX_BUF_BURST 16 - -#define DHD_STOP_QUEUE_THRESHOLD 200 -#define DHD_START_QUEUE_THRESHOLD 100 - -#define MODX(x, n) ((x) & ((n) -1)) -#define align(x, n) (MODX(x, n) ? 
((x) - MODX(x, n) + (n)) : ((x) - MODX(x, n))) -#define RX_DMA_OFFSET 8 -#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN) - -#define DMA_D2H_SCRATCH_BUF_LEN 8 -#define DMA_ALIGN_LEN 4 -#define DMA_XFER_LEN_LIMIT 0x400000 - -#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192 - -#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1 -#define DHD_FLOWRING_MAX_EVENTBUF_POST 8 -#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8 - -#define DHD_PROT_FUNCS 22 - -typedef struct dhd_mem_map { - void *va; - dmaaddr_t pa; - void *dmah; -} dhd_mem_map_t; - -typedef struct dhd_dmaxfer { - dhd_mem_map_t srcmem; - dhd_mem_map_t destmem; - uint32 len; - uint32 srcdelay; - uint32 destdelay; -} dhd_dmaxfer_t; - -#define TXP_FLUSH_NITEMS -#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48 - -typedef struct msgbuf_ring { - bool inited; - uint16 idx; - uchar name[24]; - dhd_mem_map_t ring_base; -#ifdef TXP_FLUSH_NITEMS - void* start_addr; - uint16 pend_items_count; -#endif /* TXP_FLUSH_NITEMS */ - ring_mem_t *ringmem; - ring_state_t *ringstate; -#if defined(PCIE_D2H_SYNC) - uint32 seqnum; -#endif /* PCIE_D2H_SYNC */ - void *secdma; -} msgbuf_ring_t; - -#if defined(PCIE_D2H_SYNC) -/* Custom callback attached based upon D2H DMA Sync mode used in dongle. 
*/ -typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, msgbuf_ring_t *ring, - volatile cmn_msg_hdr_t *msg, int msglen); -#endif /* PCIE_D2H_SYNC */ - -typedef struct dhd_prot { - osl_t *osh; /* OSL handle */ - uint32 reqid; - uint32 lastcmd; - uint32 pending; - uint16 rxbufpost; - uint16 max_rxbufpost; - uint16 max_eventbufpost; - uint16 max_ioctlrespbufpost; - uint16 cur_event_bufs_posted; - uint16 cur_ioctlresp_bufs_posted; - uint16 active_tx_count; - uint16 max_tx_count; - uint16 txp_threshold; - /* Ring info */ - msgbuf_ring_t *h2dring_txp_subn; - msgbuf_ring_t *h2dring_rxp_subn; - msgbuf_ring_t *h2dring_ctrl_subn; /* Cbuf handle for H2D ctrl ring */ - msgbuf_ring_t *d2hring_tx_cpln; - msgbuf_ring_t *d2hring_rx_cpln; - msgbuf_ring_t *d2hring_ctrl_cpln; /* Cbuf handle for D2H ctrl ring */ - uint32 rx_dataoffset; - dhd_mem_map_t retbuf; - dhd_mem_map_t ioctbuf; /* For holding ioct request buf */ - dhd_mb_ring_t mb_ring_fn; - - uint32 d2h_dma_scratch_buf_len; /* For holding ioct request buf */ - dhd_mem_map_t d2h_dma_scratch_buf; /* For holding ioct request buf */ - - uint32 h2d_dma_writeindx_buf_len; /* For holding dma ringupd buf - submission write */ - dhd_mem_map_t h2d_dma_writeindx_buf; /* For holding dma ringupd buf - submission write */ - - uint32 h2d_dma_readindx_buf_len; /* For holding dma ringupd buf - submission read */ - dhd_mem_map_t h2d_dma_readindx_buf; /* For holding dma ringupd buf - submission read */ - - uint32 d2h_dma_writeindx_buf_len; /* For holding dma ringupd buf - completion write */ - dhd_mem_map_t d2h_dma_writeindx_buf; /* For holding dma ringupd buf - completion write */ - - uint32 d2h_dma_readindx_buf_len; /* For holding dma ringupd buf - completion read */ - dhd_mem_map_t d2h_dma_readindx_buf; /* For holding dma ringupd buf - completion read */ - -#if defined(PCIE_D2H_SYNC) - d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */ - ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */ - ulong 
d2h_sync_wait_tot; /* total wait loops */ -#endif /* PCIE_D2H_SYNC */ - dhd_dmaxfer_t dmaxfer; - bool dmaxfer_in_progress; - - uint16 ioctl_seq_no; - uint16 data_seq_no; - uint16 ioctl_trans_id; - void *pktid_map_handle; - uint16 rx_metadata_offset; - uint16 tx_metadata_offset; - uint16 rx_cpln_early_upd_idx; -} dhd_prot_t; - -static int dhdmsgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, - void *buf, uint len, uint8 action); -static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, - void *buf, uint len, uint8 action); -static int dhdmsgbuf_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len, void* buf, void* retbuf); - -static int dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd); -static int dhd_prot_rxbufpost(dhd_pub_t *dhd, uint16 count); -static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint16 rxcnt); -static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void* buf, uint16 msglen); -static void dhd_prot_event_process(dhd_pub_t *dhd, void* buf, uint16 len); -static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len); -static int dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len); - -static void dhd_prot_noop(dhd_pub_t *dhd, void * buf, uint16 msglen); -static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen); -static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen); -static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void * buf, uint16 msglen); -static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen); -static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen); -static void* dhd_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, - uint16 msglen, uint16 *alloced); -static int dhd_fillup_ioct_reqst_ptrbased(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, - int ifidx); -static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, uint32 pktid); -static INLINE void *dhd_prot_packet_get(dhd_pub_t 
*dhd, uint32 pktid); -static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma); -static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay, - uint destdelay, dhd_dmaxfer_t *dma); -static void dhdmsgbuf_dmaxfer_compare(dhd_pub_t *dhd, void *buf, uint16 msglen); -static void dhd_prot_process_flow_ring_create_response(dhd_pub_t *dhd, void* buf, uint16 msglen); -static void dhd_prot_process_flow_ring_delete_response(dhd_pub_t *dhd, void* buf, uint16 msglen); -static void dhd_prot_process_flow_ring_flush_response(dhd_pub_t *dhd, void* buf, uint16 msglen); - - - - -#ifdef DHD_RX_CHAINING -#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \ - (!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \ - !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \ - !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \ - !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \ - ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \ - ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \ - (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6)))) - -static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain); -static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx); -static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd); - -#define DHD_PKT_CTF_MAX_CHAIN_LEN 64 -#endif /* DHD_RX_CHAINING */ - -static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post); -static int dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub); -static int dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub); - -static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t * ring); -static void dhd_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring); -static msgbuf_ring_t* prot_ring_attach(dhd_prot_t * prot, char* name, uint16 max_item, - uint16 len_item, uint16 ringid); -static void* 
prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced); -static void dhd_set_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid, uint16 new_index); -static uint16 dhd_get_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid); -static void prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, uint16 len); -static void prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring); -static uint8* prot_get_src_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 *available_len); -static void prot_store_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring); -static void prot_early_upd_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring); - -typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void * buf, uint16 msglen); -static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = { - dhd_prot_noop, /* 0 is invalid message type */ - dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */ - dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */ - NULL, - dhd_prot_process_flow_ring_create_response, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */ - NULL, - dhd_prot_process_flow_ring_delete_response, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */ - NULL, - dhd_prot_process_flow_ring_flush_response, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */ - NULL, - dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */ - NULL, - dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */ - NULL, - dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */ - NULL, - dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */ - NULL, - dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */ - NULL, - dhdmsgbuf_dmaxfer_compare, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */ - NULL, -}; - - -#if defined(PCIE_D2H_SYNC) - -/* - * D2H DMA to completion callback handlers. Based on the mode advertised by the - * dongle through the PCIE shared region, the appropriate callback will be - * registered in the proto layer to be invoked prior to precessing any message - * from a D2H DMA ring. 
If the dongle uses a read barrier or another mode that - * does not require host participation, then a noop callback handler will be - * bound that simply returns the msgtype. - */ -static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 seqnum, - uint32 tries, uchar *msg, int msglen); -static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, - volatile cmn_msg_hdr_t *msg, int msglen); -static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, - volatile cmn_msg_hdr_t *msg, int msglen); -static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, - volatile cmn_msg_hdr_t *msg, int msglen); -static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd, dhd_prot_t * prot); - -/* Debug print a livelock avert by dropping a D2H message */ -static void -dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 seqnum, uint32 tries, - uchar *msg, int msglen) -{ - DHD_ERROR(("LIVELOCK DHD<%p> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu>\n", - dhd, seqnum, seqnum% D2H_EPOCH_MODULO, tries, - dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot)); - prhex("D2H MsgBuf Failure", (uchar *)msg, msglen); -} - -/* Sync on a D2H DMA to complete using SEQNUM mode */ -static uint8 BCMFASTPATH -dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, - volatile cmn_msg_hdr_t *msg, int msglen) -{ - uint32 tries; - uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; - int num_words = msglen / sizeof(uint32); /* num of 32bit words */ - volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */ - dhd_prot_t *prot = dhd->prot; - - ASSERT(msglen == RING_LEN_ITEMS(ring)); - - for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) { - uint32 msg_seqnum = *marker; - if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */ - ring->seqnum++; /* next expected sequence number */ - goto dma_completed; - } - - if (tries > prot->d2h_sync_wait_max) - prot->d2h_sync_wait_max = tries; - - OSL_CACHE_INV(msg, msglen); /* 
invalidate and try again */ - - } /* for PCIE_D2H_SYNC_WAIT_TRIES */ - - dhd_prot_d2h_sync_livelock(dhd, ring->seqnum, tries, (uchar *)msg, msglen); - - ring->seqnum++; /* skip this message ... leak of a pktid */ - return 0; /* invalid msgtype 0 -> noop callback */ - -dma_completed: - - prot->d2h_sync_wait_tot += tries; - return msg->msg_type; -} - -/* Sync on a D2H DMA to complete using XORCSUM mode */ -static uint8 BCMFASTPATH -dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, - volatile cmn_msg_hdr_t *msg, int msglen) -{ - uint32 tries; - uint32 prot_checksum = 0; /* computed checksum */ - int num_words = msglen / sizeof(uint32); /* num of 32bit words */ - uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; - dhd_prot_t *prot = dhd->prot; - - ASSERT(msglen == RING_LEN_ITEMS(ring)); - - for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) { - prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words); - if (prot_checksum == 0U) { /* checksum is OK */ - if (msg->epoch == ring_seqnum) { - ring->seqnum++; /* next expected sequence number */ - goto dma_completed; - } - } - - if (tries > prot->d2h_sync_wait_max) - prot->d2h_sync_wait_max = tries; - - OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ - - } /* for PCIE_D2H_SYNC_WAIT_TRIES */ - - dhd_prot_d2h_sync_livelock(dhd, ring->seqnum, tries, (uchar *)msg, msglen); - - ring->seqnum++; /* skip this message ... 
leak of a pktid */ - return 0; /* invalid msgtype 0 -> noop callback */ - -dma_completed: - - prot->d2h_sync_wait_tot += tries; - return msg->msg_type; -} - -/* Do not sync on a D2H DMA */ -static uint8 BCMFASTPATH -dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, - volatile cmn_msg_hdr_t *msg, int msglen) -{ - return msg->msg_type; -} - -/* Initialize the D2H DMA Sync mode, per D2H ring seqnum and dhd stats */ -static void -dhd_prot_d2h_sync_init(dhd_pub_t *dhd, dhd_prot_t * prot) -{ - prot->d2h_sync_wait_max = 0UL; - prot->d2h_sync_wait_tot = 0UL; - - prot->d2hring_tx_cpln->seqnum = D2H_EPOCH_INIT_VAL; - prot->d2hring_rx_cpln->seqnum = D2H_EPOCH_INIT_VAL; - prot->d2hring_ctrl_cpln->seqnum = D2H_EPOCH_INIT_VAL; - - if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) - prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum; - else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) - prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum; - else - prot->d2h_sync_cb = dhd_prot_d2h_sync_none; -} - -#endif /* PCIE_D2H_SYNC */ - -/* - * +---------------------------------------------------------------------------+ - * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping. - * The packet id map, also includes storage for some packet parameters that - * may be saved. A native packet pointer along with the parameters may be saved - * and a unique 32bit pkt id will be returned. Later, the saved packet pointer - * and the metadata may be retrieved using the previously allocated packet id. 
- * +---------------------------------------------------------------------------+ - */ -#define MAX_PKTID_ITEMS (8192) /* Maximum number of pktids supported */ - -typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */ - -/* Construct a packet id mapping table, returing an opaque map handle */ -static dhd_pktid_map_handle_t *dhd_pktid_map_init(void *osh, uint32 num_items); - -/* Destroy a packet id mapping table, freeing all packets active in the table */ -static void dhd_pktid_map_fini(dhd_pktid_map_handle_t *map); - -/* Determine number of pktids that are available */ -static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *map); - -/* Allocate a unique pktid against which a pkt and some metadata is saved */ -static INLINE uint32 dhd_pktid_map_reserve(dhd_pktid_map_handle_t *handle, - void *pkt); -static INLINE void dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt, - uint32 nkey, dmaaddr_t physaddr, uint32 len, uint8 dma, void *secdma); -static uint32 dhd_pktid_map_alloc(dhd_pktid_map_handle_t *map, void *pkt, - dmaaddr_t physaddr, uint32 len, uint8 dma, void *secdma); - -/* Return an allocated pktid, retrieving previously saved pkt and metadata */ -static void *dhd_pktid_map_free(dhd_pktid_map_handle_t *map, uint32 id, - dmaaddr_t *physaddr, uint32 *len, void **secdma); - -/* Packet metadata saved in packet id mapper */ -typedef struct dhd_pktid_item { - bool inuse; /* tag an item to be in use */ - uint8 dma; /* map direction: flush or invalidate */ - uint16 len; /* length of mapped packet's buffer */ - void *pkt; /* opaque native pointer to a packet */ - dmaaddr_t physaddr; /* physical address of mapped packet's buffer */ - void *secdma; -} dhd_pktid_item_t; - -typedef struct dhd_pktid_map { - void *osh; - int items; /* total items in map */ - int avail; /* total available items */ - int failures; /* lockers unavailable count */ - uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */ - dhd_pktid_item_t 
lockers[0]; /* metadata storage */ -} dhd_pktid_map_t; - -/* - * PktId (Locker) #0 is never allocated and is considered invalid. - * - * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a - * depleted pktid pool and must not be used by the caller. - * - * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID. - */ -#define DHD_PKTID_INVALID (0U) - -#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t)) -#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \ - (DHD_PKTID_ITEM_SZ * ((items) + 1))) - -#define NATIVE_TO_PKTID_INIT(osh, items) dhd_pktid_map_init((osh), (items)) -#define NATIVE_TO_PKTID_FINI(map) dhd_pktid_map_fini(map) -#define NATIVE_TO_PKTID_CLEAR(map) dhd_pktid_map_clear(map) - -#define NATIVE_TO_PKTID_RSV(map, pkt) dhd_pktid_map_reserve((map), (pkt)) -#define NATIVE_TO_PKTID_SAVE(map, pkt, nkey, pa, len, dma, secdma) \ - dhd_pktid_map_save((map), (void *)(pkt), (nkey), (pa), (uint32)(len), (uint8)dma, \ - (void *)(secdma)) -#define NATIVE_TO_PKTID(map, pkt, pa, len, dma, secdma) \ - dhd_pktid_map_alloc((map), (void *)(pkt), (pa), (uint32)(len), (uint8)dma, (void *)(secdma)) - -#define PKTID_TO_NATIVE(map, pktid, pa, len, secdma) \ - dhd_pktid_map_free((map), (uint32)(pktid), \ - (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **) &secdma) - -#define PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map) - -#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING) -#define FLOWRING_NAME "h2dflr" -#define RING_IS_FLOWRING(ring) \ - ((strncmp(ring->name, FLOWRING_NAME, sizeof(FLOWRING_NAME))) == (0)) -#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */ - -/* - * +---------------------------------------------------------------------------+ - * Packet to Packet Id mapper using a paradigm. - * - * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS]. - * - * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique - * packet id is returned. 
This unique packet id may be used to retrieve the - * previously saved packet metadata, using dhd_pktid_map_free(). On invocation - * of dhd_pktid_map_free(), the unique packet id is essentially freed. A - * subsequent call to dhd_pktid_map_alloc() may reuse this packet id. - * - * Implementation Note: - * Convert this into a abstraction and place into bcmutils ! - * Locker abstraction should treat contents as opaque storage, and a - * callback should be registered to handle inuse lockers on destructor. - * - * +---------------------------------------------------------------------------+ - */ - -/* Allocate and initialize a mapper of num_items */ -static dhd_pktid_map_handle_t * -dhd_pktid_map_init(void *osh, uint32 num_items) -{ - uint32 nkey; - dhd_pktid_map_t *map; - uint32 dhd_pktid_map_sz; - - ASSERT((num_items >= 1) && num_items <= MAX_PKTID_ITEMS); - dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items); - - if ((map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz)) == NULL) { - DHD_ERROR(("%s:%d: MALLOC failed for size %d\n", - __FUNCTION__, __LINE__, dhd_pktid_map_sz)); - return NULL; - } - bzero(map, dhd_pktid_map_sz); - - map->osh = osh; - map->items = num_items; - map->avail = num_items; - - map->lockers[DHD_PKTID_INVALID].inuse = TRUE; /* tag locker #0 as inuse */ - - for (nkey = 1; nkey <= num_items; nkey++) { /* locker #0 is reserved */ - map->keys[nkey] = nkey; /* populate with unique keys */ - map->lockers[nkey].inuse = FALSE; - } - - return (dhd_pktid_map_handle_t *)map; /* opaque handle */ -} - -/* - * Retrieve all allocated keys and free all . - * Freeing implies: unmapping the buffers and freeing the native packet - * This could have been a callback registered with the pktid mapper. 
- */ -static void -dhd_pktid_map_fini(dhd_pktid_map_handle_t *handle) -{ - void *osh; - int nkey; - dhd_pktid_map_t *map; - uint32 dhd_pktid_map_sz; - dhd_pktid_item_t *locker; - - if (handle == NULL) - return; - - map = (dhd_pktid_map_t *)handle; - osh = map->osh; - dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); - - nkey = 1; /* skip reserved KEY #0, and start from 1 */ - locker = &map->lockers[nkey]; - - for (; nkey <= map->items; nkey++, locker++) { - if (locker->inuse == TRUE) { /* numbered key still in use */ - locker->inuse = FALSE; /* force open the locker */ - - { /* This could be a callback registered with dhd_pktid_map */ - DMA_UNMAP(osh, locker->physaddr, locker->len, - locker->dma, 0, 0); - PKTFREE(osh, (ulong*)locker->pkt, FALSE); - } - } - } - - MFREE(osh, handle, dhd_pktid_map_sz); -} - -static void -dhd_pktid_map_clear(dhd_pktid_map_handle_t *handle) -{ - void *osh; - int nkey; - dhd_pktid_map_t *map; - dhd_pktid_item_t *locker; - - DHD_TRACE(("%s\n", __FUNCTION__)); - - if (handle == NULL) - return; - - map = (dhd_pktid_map_t *)handle; - osh = map->osh; - map->failures = 0; - - nkey = 1; /* skip reserved KEY #0, and start from 1 */ - locker = &map->lockers[nkey]; - - for (; nkey <= map->items; nkey++, locker++) { - map->keys[nkey] = nkey; /* populate with unique keys */ - if (locker->inuse == TRUE) { /* numbered key still in use */ - locker->inuse = FALSE; /* force open the locker */ - DHD_TRACE(("%s free id%d\n", __FUNCTION__, nkey)); - DMA_UNMAP(osh, (uint32)locker->physaddr, locker->len, - locker->dma, 0, 0); - PKTFREE(osh, (ulong*)locker->pkt, FALSE); - } - } - map->avail = map->items; -} - -/* Get the pktid free count */ -static INLINE uint32 BCMFASTPATH -dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle) -{ - dhd_pktid_map_t *map; - - ASSERT(handle != NULL); - map = (dhd_pktid_map_t *)handle; - - return map->avail; -} - -/* - * Allocate locker, save pkt contents, and return the locker's numbered key. 
- * dhd_pktid_map_alloc() is not reentrant, and is the caller's responsibility. - * Caller must treat a returned value DHD_PKTID_INVALID as a failure case, - * implying a depleted pool of pktids. - */ -static INLINE uint32 -dhd_pktid_map_reserve(dhd_pktid_map_handle_t *handle, void *pkt) -{ - uint32 nkey; - dhd_pktid_map_t *map; - dhd_pktid_item_t *locker; - - ASSERT(handle != NULL); - map = (dhd_pktid_map_t *)handle; - - if (map->avail <= 0) { /* no more pktids to allocate */ - map->failures++; - DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__)); - return DHD_PKTID_INVALID; /* failed alloc request */ - } - ASSERT(map->avail <= map->items); - - nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */ - map->avail--; - - locker = &map->lockers[nkey]; /* save packet metadata in locker */ - locker->inuse = TRUE; /* reserve this locker */ - locker->pkt = pkt; - - ASSERT(nkey != DHD_PKTID_INVALID); - return nkey; /* return locker's numbered key */ -} - -static INLINE void -dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt, uint32 nkey, - dmaaddr_t physaddr, uint32 len, uint8 dma, void *secdma) -{ - dhd_pktid_map_t *map; - dhd_pktid_item_t *locker; - - ASSERT(handle != NULL); - map = (dhd_pktid_map_t *)handle; - - ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= (uint32)map->items)); - - locker = &map->lockers[nkey]; - ASSERT(locker->pkt == pkt); - - locker->dma = dma; /* store contents in locker */ - locker->physaddr = physaddr; - locker->len = (uint16)len; /* 16bit len */ - locker->secdma = secdma; -} - -static uint32 BCMFASTPATH -dhd_pktid_map_alloc(dhd_pktid_map_handle_t *handle, void *pkt, - dmaaddr_t physaddr, uint32 len, uint8 dma, void *secdma) -{ - uint32 nkey = dhd_pktid_map_reserve(handle, pkt); - if (nkey != DHD_PKTID_INVALID) { - dhd_pktid_map_save(handle, pkt, nkey, physaddr, len, dma, secdma); - } - return nkey; -} - -/* - * Given a numbered key, return the locker contents. 
- * dhd_pktid_map_free() is not reentrant, and is the caller's responsibility. - * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid - * value. Only a previously allocated pktid may be freed. - */ -static void * BCMFASTPATH -dhd_pktid_map_free(dhd_pktid_map_handle_t *handle, uint32 nkey, - dmaaddr_t *physaddr, uint32 *len, void **secdma) -{ - dhd_pktid_map_t *map; - dhd_pktid_item_t *locker; - - ASSERT(handle != NULL); - - map = (dhd_pktid_map_t *)handle; - ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= (uint32)map->items)); - - locker = &map->lockers[nkey]; - - if (locker->inuse == FALSE) { /* Debug check for cloned numbered key */ - DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n", - __FUNCTION__, __LINE__, nkey)); - ASSERT(locker->inuse != FALSE); - return NULL; - } - - map->avail++; - map->keys[map->avail] = nkey; /* make this numbered key available */ - locker->inuse = FALSE; /* open and free Locker */ - - *physaddr = locker->physaddr; /* return contents of locker */ - *len = (uint32)locker->len; - *secdma = locker->secdma; - - return locker->pkt; -} - -/* Linkage, sets prot link and updates hdrlen in pub */ -int dhd_prot_attach(dhd_pub_t *dhd) -{ - uint alloced = 0; - - dhd_prot_t *prot; - - /* Allocate prot structure */ - if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, - sizeof(dhd_prot_t)))) { - DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); - goto fail; - } - memset(prot, 0, sizeof(*prot)); - - prot->osh = dhd->osh; - dhd->prot = prot; - - /* DMAing ring completes supported? 
FALSE by default */ - dhd->dma_d2h_ring_upd_support = FALSE; - dhd->dma_h2d_ring_upd_support = FALSE; - - /* Ring Allocations */ - /* 1.0 H2D TXPOST ring */ - if (!(prot->h2dring_txp_subn = prot_ring_attach(prot, "h2dtxp", - H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE, - BCMPCIE_H2D_TXFLOWRINGID))) { - DHD_ERROR(("%s: kmalloc for H2D TXPOST ring failed\n", __FUNCTION__)); - goto fail; - } - - /* 2.0 H2D RXPOST ring */ - if (!(prot->h2dring_rxp_subn = prot_ring_attach(prot, "h2drxp", - H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE, - BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT))) { - DHD_ERROR(("%s: kmalloc for H2D RXPOST ring failed\n", __FUNCTION__)); - goto fail; - - } - - /* 3.0 H2D CTRL_SUBMISSION ring */ - if (!(prot->h2dring_ctrl_subn = prot_ring_attach(prot, "h2dctrl", - H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE, - BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT))) { - DHD_ERROR(("%s: kmalloc for H2D CTRL_SUBMISSION ring failed\n", - __FUNCTION__)); - goto fail; - - } - - /* 4.0 D2H TX_COMPLETION ring */ - if (!(prot->d2hring_tx_cpln = prot_ring_attach(prot, "d2htxcpl", - D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE, - BCMPCIE_D2H_MSGRING_TX_COMPLETE))) { - DHD_ERROR(("%s: kmalloc for D2H TX_COMPLETION ring failed\n", - __FUNCTION__)); - goto fail; - - } - - /* 5.0 D2H RX_COMPLETION ring */ - if (!(prot->d2hring_rx_cpln = prot_ring_attach(prot, "d2hrxcpl", - D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE, - BCMPCIE_D2H_MSGRING_RX_COMPLETE))) { - DHD_ERROR(("%s: kmalloc for D2H RX_COMPLETION ring failed\n", - __FUNCTION__)); - goto fail; - - } - - /* 6.0 D2H CTRL_COMPLETION ring */ - if (!(prot->d2hring_ctrl_cpln = prot_ring_attach(prot, "d2hctrl", - D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE, - BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE))) { - DHD_ERROR(("%s: kmalloc for D2H CTRL_COMPLETION ring failed\n", - __FUNCTION__)); - goto fail; - } - - /* Return buffer for ioctl */ - prot->retbuf.va = DMA_ALLOC_CONSISTENT(dhd->osh, IOCT_RETBUF_SIZE, 
DMA_ALIGN_LEN, - &alloced, &prot->retbuf.pa, &prot->retbuf.dmah); - if (prot->retbuf.va == NULL) { - ASSERT(0); - return BCME_NOMEM; - } - - ASSERT(MODX((unsigned long)prot->retbuf.va, DMA_ALIGN_LEN) == 0); - bzero(prot->retbuf.va, IOCT_RETBUF_SIZE); - OSL_CACHE_FLUSH((void *) prot->retbuf.va, IOCT_RETBUF_SIZE); - - /* IOCTL request buffer */ - prot->ioctbuf.va = DMA_ALLOC_CONSISTENT(dhd->osh, IOCT_RETBUF_SIZE, DMA_ALIGN_LEN, - &alloced, &prot->ioctbuf.pa, &prot->ioctbuf.dmah); - - if (prot->ioctbuf.va == NULL) { - ASSERT(0); - return BCME_NOMEM; - } - - ASSERT(MODX((unsigned long)prot->ioctbuf.va, DMA_ALIGN_LEN) == 0); - bzero(prot->ioctbuf.va, IOCT_RETBUF_SIZE); - OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE); - - /* Scratch buffer for dma rx offset */ - prot->d2h_dma_scratch_buf_len = DMA_D2H_SCRATCH_BUF_LEN; - prot->d2h_dma_scratch_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, DMA_D2H_SCRATCH_BUF_LEN, - DMA_ALIGN_LEN, &alloced, &prot->d2h_dma_scratch_buf.pa, - &prot->d2h_dma_scratch_buf.dmah); - - if (prot->d2h_dma_scratch_buf.va == NULL) { - ASSERT(0); - return BCME_NOMEM; - } - ASSERT(MODX((unsigned long)prot->d2h_dma_scratch_buf.va, DMA_ALIGN_LEN) == 0); - bzero(prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN); - OSL_CACHE_FLUSH((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN); - - - /* PKTID handle INIT */ - prot->pktid_map_handle = NATIVE_TO_PKTID_INIT(dhd->osh, MAX_PKTID_ITEMS); - if (prot->pktid_map_handle == NULL) { - ASSERT(0); - return BCME_NOMEM; - } - -#if defined(PCIE_D2H_SYNC) - dhd_prot_d2h_sync_init(dhd, prot); -#endif /* PCIE_D2H_SYNC */ - - prot->dmaxfer.srcmem.va = NULL; - prot->dmaxfer.destmem.va = NULL; - prot->dmaxfer_in_progress = FALSE; - - prot->rx_metadata_offset = 0; - prot->tx_metadata_offset = 0; - -#ifdef DHD_RX_CHAINING - dhd_rxchain_reset(&prot->rxchain); -#endif - - return 0; - -fail: -#ifndef CONFIG_DHD_USE_STATIC_BUF - if (prot != NULL) - dhd_prot_detach(dhd); -#endif /* CONFIG_DHD_USE_STATIC_BUF 
*/ - return BCME_NOMEM; -} - -/* Init memory block on host DMA'ing indices */ -int -dhd_prot_init_index_dma_block(dhd_pub_t *dhd, uint8 type, uint32 length) -{ - uint alloced = 0; - - dhd_prot_t *prot = dhd->prot; - uint32 dma_block_size = 4 * length; - - if (prot == NULL) { - DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__)); - return BCME_ERROR; - } - - switch (type) { - case HOST_TO_DNGL_DMA_WRITEINDX_BUFFER: - /* ring update dma buffer for submission write */ - prot->h2d_dma_writeindx_buf_len = dma_block_size; - prot->h2d_dma_writeindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, - dma_block_size, DMA_ALIGN_LEN, &alloced, - &prot->h2d_dma_writeindx_buf.pa, - &prot->h2d_dma_writeindx_buf.dmah); - - if (prot->h2d_dma_writeindx_buf.va == NULL) { - return BCME_NOMEM; - } - - ASSERT(ISALIGNED(prot->h2d_dma_writeindx_buf.va, 4)); - bzero(prot->h2d_dma_writeindx_buf.va, dma_block_size); - OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va, dma_block_size); - DHD_ERROR(("%s: H2D_WRITEINDX_ARRAY_HOST: %d-bytes " - "inited for dma'ing h2d-w indices\n", __FUNCTION__, - prot->h2d_dma_writeindx_buf_len)); - break; - - case HOST_TO_DNGL_DMA_READINDX_BUFFER: - /* ring update dma buffer for submission read */ - prot->h2d_dma_readindx_buf_len = dma_block_size; - prot->h2d_dma_readindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, - dma_block_size, DMA_ALIGN_LEN, &alloced, - &prot->h2d_dma_readindx_buf.pa, - &prot->h2d_dma_readindx_buf.dmah); - if (prot->h2d_dma_readindx_buf.va == NULL) { - return BCME_NOMEM; - } - - ASSERT(ISALIGNED(prot->h2d_dma_readindx_buf.va, 4)); - bzero(prot->h2d_dma_readindx_buf.va, dma_block_size); - OSL_CACHE_FLUSH((void *)prot->h2d_dma_readindx_buf.va, dma_block_size); - DHD_ERROR(("%s: H2D_READINDX_ARRAY_HOST %d-bytes " - "inited for dma'ing h2d-r indices\n", __FUNCTION__, - prot->h2d_dma_readindx_buf_len)); - break; - - case DNGL_TO_HOST_DMA_WRITEINDX_BUFFER: - /* ring update dma buffer for completion write */ - prot->d2h_dma_writeindx_buf_len = 
dma_block_size; - prot->d2h_dma_writeindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, - dma_block_size, DMA_ALIGN_LEN, &alloced, - &prot->d2h_dma_writeindx_buf.pa, - &prot->d2h_dma_writeindx_buf.dmah); - - if (prot->d2h_dma_writeindx_buf.va == NULL) { - return BCME_NOMEM; - } - - ASSERT(ISALIGNED(prot->d2h_dma_writeindx_buf.va, 4)); - bzero(prot->d2h_dma_writeindx_buf.va, dma_block_size); - OSL_CACHE_FLUSH((void *)prot->d2h_dma_writeindx_buf.va, dma_block_size); - DHD_ERROR(("%s: D2H_WRITEINDX_ARRAY_HOST %d-bytes " - "inited for dma'ing d2h-w indices\n", __FUNCTION__, - prot->d2h_dma_writeindx_buf_len)); - break; - - case DNGL_TO_HOST_DMA_READINDX_BUFFER: - /* ring update dma buffer for completion read */ - prot->d2h_dma_readindx_buf_len = dma_block_size; - prot->d2h_dma_readindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, - dma_block_size, DMA_ALIGN_LEN, &alloced, - &prot->d2h_dma_readindx_buf.pa, - &prot->d2h_dma_readindx_buf.dmah); - - if (prot->d2h_dma_readindx_buf.va == NULL) { - return BCME_NOMEM; - } - - ASSERT(ISALIGNED(prot->d2h_dma_readindx_buf.va, 4)); - bzero(prot->d2h_dma_readindx_buf.va, dma_block_size); - OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va, dma_block_size); - DHD_ERROR(("%s: D2H_READINDX_ARRAY_HOST %d-bytes " - "inited for dma'ing d2h-r indices\n", __FUNCTION__, - prot->d2h_dma_readindx_buf_len)); - break; - - default: - DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__)); - return BCME_BADOPTION; - } - - return BCME_OK; - -} - -/* Unlink, frees allocated protocol memory (including dhd_prot) */ -void dhd_prot_detach(dhd_pub_t *dhd) -{ - dhd_prot_t *prot = dhd->prot; - /* Stop the protocol module */ - if (dhd->prot) { - - /* free up scratch buffer */ - if (prot->d2h_dma_scratch_buf.va) { - DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_scratch_buf.va, - DMA_D2H_SCRATCH_BUF_LEN, prot->d2h_dma_scratch_buf.pa, - prot->d2h_dma_scratch_buf.dmah); - prot->d2h_dma_scratch_buf.va = NULL; - } - /* free up ring upd buffer for submission writes */ - if 
(prot->h2d_dma_writeindx_buf.va) { - DMA_FREE_CONSISTENT(dhd->osh, prot->h2d_dma_writeindx_buf.va, - prot->h2d_dma_writeindx_buf_len, prot->h2d_dma_writeindx_buf.pa, - prot->h2d_dma_writeindx_buf.dmah); - prot->h2d_dma_writeindx_buf.va = NULL; - } - - /* free up ring upd buffer for submission reads */ - if (prot->h2d_dma_readindx_buf.va) { - DMA_FREE_CONSISTENT(dhd->osh, prot->h2d_dma_readindx_buf.va, - prot->h2d_dma_readindx_buf_len, prot->h2d_dma_readindx_buf.pa, - prot->h2d_dma_readindx_buf.dmah); - prot->h2d_dma_readindx_buf.va = NULL; - } - - /* free up ring upd buffer for completion writes */ - if (prot->d2h_dma_writeindx_buf.va) { - DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_writeindx_buf.va, - prot->d2h_dma_writeindx_buf_len, prot->d2h_dma_writeindx_buf.pa, - prot->d2h_dma_writeindx_buf.dmah); - prot->d2h_dma_writeindx_buf.va = NULL; - } - - /* free up ring upd buffer for completion writes */ - if (prot->d2h_dma_readindx_buf.va) { - DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_readindx_buf.va, - prot->d2h_dma_readindx_buf_len, prot->d2h_dma_readindx_buf.pa, - prot->d2h_dma_readindx_buf.dmah); - prot->d2h_dma_readindx_buf.va = NULL; - } - - /* ioctl return buffer */ - if (prot->retbuf.va) { - DMA_FREE_CONSISTENT(dhd->osh, dhd->prot->retbuf.va, - IOCT_RETBUF_SIZE, dhd->prot->retbuf.pa, dhd->prot->retbuf.dmah); - dhd->prot->retbuf.va = NULL; - } - - /* ioctl request buffer */ - if (prot->ioctbuf.va) { - DMA_FREE_CONSISTENT(dhd->osh, dhd->prot->ioctbuf.va, - IOCT_RETBUF_SIZE, dhd->prot->ioctbuf.pa, dhd->prot->ioctbuf.dmah); - - dhd->prot->ioctbuf.va = NULL; - } - - - /* 1.0 H2D TXPOST ring */ - dhd_prot_ring_detach(dhd, prot->h2dring_txp_subn); - /* 2.0 H2D RXPOST ring */ - dhd_prot_ring_detach(dhd, prot->h2dring_rxp_subn); - /* 3.0 H2D CTRL_SUBMISSION ring */ - dhd_prot_ring_detach(dhd, prot->h2dring_ctrl_subn); - /* 4.0 D2H TX_COMPLETION ring */ - dhd_prot_ring_detach(dhd, prot->d2hring_tx_cpln); - /* 5.0 D2H RX_COMPLETION ring */ - dhd_prot_ring_detach(dhd, 
prot->d2hring_rx_cpln); - /* 6.0 D2H CTRL_COMPLETION ring */ - dhd_prot_ring_detach(dhd, prot->d2hring_ctrl_cpln); - - NATIVE_TO_PKTID_FINI(dhd->prot->pktid_map_handle); - -#ifndef CONFIG_DHD_USE_STATIC_BUF - MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t)); -#endif /* CONFIG_DHD_USE_STATIC_BUF */ - - dhd->prot = NULL; - } -} - -void -dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset) -{ - dhd_prot_t *prot = dhd->prot; - prot->rx_dataoffset = rx_offset; -} - - -/* Initialize protocol: sync w/dongle state. - * Sets dongle media info (iswl, drv_version, mac address). - */ -int dhd_sync_with_dongle(dhd_pub_t *dhd) -{ - int ret = 0; - wlc_rev_info_t revinfo; - - DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - - /* Post event buffer after shim layer is attached */ - ret = dhd_msgbuf_rxbuf_post_event_bufs(dhd); - if (ret <= 0) { - DHD_ERROR(("%s : Post event buffer fail. ret = %d\n", __FUNCTION__, ret)); - return ret; - } - - - /* Get the device rev info */ - memset(&revinfo, 0, sizeof(revinfo)); - ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0); - if (ret < 0) - goto done; - - dhd_process_cid_mac(dhd, TRUE); - - ret = dhd_preinit_ioctls(dhd); - - if (!ret) - dhd_process_cid_mac(dhd, FALSE); - - /* Always assumes wl for now */ - dhd->iswl = TRUE; -done: - return ret; -} - -/* This function does all necessary initialization needed -* for IOCTL/IOVAR path -*/ -int dhd_prot_init(dhd_pub_t *dhd) -{ - int ret = 0; - dhd_prot_t *prot = dhd->prot; - - /* Max pkts in ring */ - prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM; - - DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count)); - - /* Read max rx packets supported by dongle */ - dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0); - if (prot->max_rxbufpost == 0) { - /* This would happen if the dongle firmware is not */ - /* using the latest shared structure template */ - prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST; - } - 
DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost)); - - prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST; - prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST; - - prot->active_tx_count = 0; - prot->data_seq_no = 0; - prot->ioctl_seq_no = 0; - prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT; - - prot->ioctl_trans_id = 1; - - /* Register the interrupt function upfront */ - /* remove corerev checks in data path */ - prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus); - - /* Initialise rings */ - /* 1.0 H2D TXPOST ring */ - if (dhd_bus_is_txmode_push(dhd->bus)) { - dhd_ring_init(dhd, prot->h2dring_txp_subn); - } - - /* 2.0 H2D RXPOST ring */ - dhd_ring_init(dhd, prot->h2dring_rxp_subn); - /* 3.0 H2D CTRL_SUBMISSION ring */ - dhd_ring_init(dhd, prot->h2dring_ctrl_subn); - /* 4.0 D2H TX_COMPLETION ring */ - dhd_ring_init(dhd, prot->d2hring_tx_cpln); - /* 5.0 D2H RX_COMPLETION ring */ - dhd_ring_init(dhd, prot->d2hring_rx_cpln); - /* 6.0 D2H CTRL_COMPLETION ring */ - dhd_ring_init(dhd, prot->d2hring_ctrl_cpln); - - /* init the scratch buffer */ - dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.pa, - sizeof(prot->d2h_dma_scratch_buf.pa), DNGL_TO_HOST_DMA_SCRATCH_BUFFER, 0); - dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf_len, - sizeof(prot->d2h_dma_scratch_buf_len), DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN, 0); - - /* If supported by the host, indicate the memory block - * for comletion writes / submission reads to shared space - */ - if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { - dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_writeindx_buf.pa, - sizeof(prot->d2h_dma_writeindx_buf.pa), - DNGL_TO_HOST_DMA_WRITEINDX_BUFFER, 0); - dhd_bus_cmn_writeshared(dhd->bus, &prot->h2d_dma_readindx_buf.pa, - sizeof(prot->h2d_dma_readindx_buf.pa), - HOST_TO_DNGL_DMA_READINDX_BUFFER, 0); - } - - if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { - dhd_bus_cmn_writeshared(dhd->bus, 
&prot->h2d_dma_writeindx_buf.pa, - sizeof(prot->h2d_dma_writeindx_buf.pa), - HOST_TO_DNGL_DMA_WRITEINDX_BUFFER, 0); - dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_readindx_buf.pa, - sizeof(prot->d2h_dma_readindx_buf.pa), - DNGL_TO_HOST_DMA_READINDX_BUFFER, 0); - - } - - ret = dhd_msgbuf_rxbuf_post(dhd); - ret = dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); - - return ret; -} - -#define DHD_DBG_SHOW_METADATA 0 -#if DHD_DBG_SHOW_METADATA -static void BCMFASTPATH -dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len) -{ - uint8 tlv_t; - uint8 tlv_l; - uint8 *tlv_v = (uint8 *)ptr; - - if (len <= BCMPCIE_D2H_METADATA_HDRLEN) - return; - - len -= BCMPCIE_D2H_METADATA_HDRLEN; - tlv_v += BCMPCIE_D2H_METADATA_HDRLEN; - - while (len > TLV_HDR_LEN) { - tlv_t = tlv_v[TLV_TAG_OFF]; - tlv_l = tlv_v[TLV_LEN_OFF]; - - len -= TLV_HDR_LEN; - tlv_v += TLV_HDR_LEN; - if (len < tlv_l) - break; - if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER)) - break; - - switch (tlv_t) { - case WLFC_CTL_TYPE_TXSTATUS: - bcm_print_bytes("METADATA TX_STATUS", tlv_v, tlv_l); - break; - - case WLFC_CTL_TYPE_RSSI: - bcm_print_bytes("METADATA RX_RSSI", tlv_v, tlv_l); - break; - - case WLFC_CTL_TYPE_FIFO_CREDITBACK: - bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l); - break; - - case WLFC_CTL_TYPE_TX_ENTRY_STAMP: - bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l); - break; - - case WLFC_CTL_TYPE_RX_STAMP: - bcm_print_bytes("METADATA RX_TIMESTAMP", tlv_v, tlv_l); - break; - - case WLFC_CTL_TYPE_TRANS_ID: - bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l); - break; - - case WLFC_CTL_TYPE_COMP_TXSTATUS: - bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l); - break; - - default: - bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l); - break; - } - - len -= tlv_l; - tlv_v += tlv_l; - } -} -#endif /* DHD_DBG_SHOW_METADATA */ - -static INLINE void BCMFASTPATH -dhd_prot_packet_free(dhd_pub_t *dhd, uint32 pktid) -{ - void *PKTBUF; - dmaaddr_t pa; - uint32 pa_len; - void *secdma; - PKTBUF 
= PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len, secdma); - - if (PKTBUF) { - { - if (SECURE_DMA_ENAB(dhd->osh)) { - SECURE_DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_TX, 0, 0, secdma, 0); - } else - DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_TX, 0, 0); - } - PKTFREE(dhd->osh, PKTBUF, FALSE); - } - return; -} - -static INLINE void * BCMFASTPATH -dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid) -{ - void *PKTBUF; - dmaaddr_t pa; - uint32 pa_len; - void *secdma; - PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len, secdma); - if (PKTBUF) { - if (SECURE_DMA_ENAB(dhd->osh)) - SECURE_DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_RX, 0, 0, secdma, 0); - else - DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_RX, 0, 0); - } - - return PKTBUF; -} - -static int BCMFASTPATH -dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd) -{ - dhd_prot_t *prot = dhd->prot; - int16 fillbufs; - uint16 cnt = 64; - int retcount = 0; - - fillbufs = prot->max_rxbufpost - prot->rxbufpost; - while (fillbufs > 0) { - cnt--; - if (cnt == 0) { - /* find a better way to reschedule rx buf post if space not available */ - DHD_ERROR(("%s: h2d rx post ring not available to post host buffers\n", __FUNCTION__)); - DHD_ERROR(("%s: Current posted host buf count %d \n", __FUNCTION__, prot->rxbufpost)); - break; - } - - /* Post in a burst of 8 buffers ata time */ - fillbufs = MIN(fillbufs, RX_BUF_BURST); - - /* Post buffers */ - retcount = dhd_prot_rxbufpost(dhd, fillbufs); - - if (retcount > 0) { - prot->rxbufpost += (uint16)retcount; - - /* how many more to post */ - fillbufs = prot->max_rxbufpost - prot->rxbufpost; - } else { - /* Make sure we don't run loop any further */ - fillbufs = 0; - } - } - - return 0; -} - -/* Post count no of rx buffers down to dongle */ -static int BCMFASTPATH -dhd_prot_rxbufpost(dhd_pub_t *dhd, uint16 count) -{ - void *p; - uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; - uint8 *rxbuf_post_tmp; - host_rxbuf_post_t *rxbuf_post; - void* msg_start; - dmaaddr_t 
physaddr; - uint32 pktlen; - dhd_prot_t *prot = dhd->prot; - msgbuf_ring_t * ring = prot->h2dring_rxp_subn; - uint8 i = 0; - uint16 alloced = 0; - unsigned long flags; - - DHD_GENERAL_LOCK(dhd, flags); - /* Claim space for 'count' no of messages */ - msg_start = (void *)dhd_alloc_ring_space(dhd, ring, count, &alloced); - DHD_GENERAL_UNLOCK(dhd, flags); - - if (msg_start == NULL) { - DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__)); - return -1; - } - /* if msg_start != NULL, we should have alloced space for atleast 1 item */ - ASSERT(alloced > 0); - - rxbuf_post_tmp = (uint8*)msg_start; - - /* loop through each message */ - for (i = 0; i < alloced; i++) { - rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp; - /* Create a rx buffer */ - if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) { - DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__)); - break; - } - - pktlen = PKTLEN(dhd->osh, p); - if (SECURE_DMA_ENAB(dhd->osh)) { - DHD_GENERAL_LOCK(dhd, flags); - physaddr = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0, - ring->secdma, 0); - DHD_GENERAL_UNLOCK(dhd, flags); - } else - physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); - - if (PHYSADDRISZERO(physaddr)) { - if (SECURE_DMA_ENAB(dhd->osh)) { - DHD_GENERAL_LOCK(dhd, flags); - SECURE_DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0, - ring->secdma, 0); - DHD_GENERAL_UNLOCK(dhd, flags); - } else - DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0); - - PKTFREE(dhd->osh, p, FALSE); - DHD_ERROR(("%s: Invalid phyaddr 0\n", __FUNCTION__)); - ASSERT(0); - break; - } - - PKTPULL(dhd->osh, p, prot->rx_metadata_offset); - pktlen = PKTLEN(dhd->osh, p); - - /* CMN msg header */ - rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST; - rxbuf_post->cmn_hdr.if_id = 0; - - /* get the lock before calling NATIVE_TO_PKTID */ - DHD_GENERAL_LOCK(dhd, flags); - - rxbuf_post->cmn_hdr.request_id = - 
htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr, - pktlen, DMA_RX, ring->secdma)); - - /* free lock */ - DHD_GENERAL_UNLOCK(dhd, flags); - - if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) { - if (SECURE_DMA_ENAB(dhd->osh)) { - DHD_GENERAL_LOCK(dhd, flags); - SECURE_DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0, - ring->secdma, 0); - DHD_GENERAL_UNLOCK(dhd, flags); - } else - DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0); - - PKTFREE(dhd->osh, p, FALSE); - DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__)); - break; - } - - rxbuf_post->data_buf_len = htol16((uint16)pktlen); - rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr)); - rxbuf_post->data_buf_addr.low_addr = - htol32(PHYSADDRLO(physaddr) + prot->rx_metadata_offset); - - if (prot->rx_metadata_offset) { - rxbuf_post->metadata_buf_len = prot->rx_metadata_offset; - rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr)); - rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(physaddr)); - } else { - rxbuf_post->metadata_buf_len = 0; - rxbuf_post->metadata_buf_addr.high_addr = 0; - rxbuf_post->metadata_buf_addr.low_addr = 0; - } - - /* Move rxbuf_post_tmp to next item */ - rxbuf_post_tmp = rxbuf_post_tmp + RING_LEN_ITEMS(ring); - } - - if (i < alloced) { - if (RING_WRITE_PTR(ring) < (alloced - i)) - RING_WRITE_PTR(ring) = RING_MAX_ITEM(ring) - (alloced - i); - else - RING_WRITE_PTR(ring) -= (alloced - i); - - alloced = i; - } - - /* Update the write pointer in TCM & ring bell */ - if (alloced > 0) - prot_ring_write_complete(dhd, prot->h2dring_rxp_subn, msg_start, alloced); - - return alloced; -} - -static int -dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) -{ - void *p; - uint16 pktsz; - ioctl_resp_evt_buf_post_msg_t *rxbuf_post; - dmaaddr_t physaddr; - uint32 pktlen; - dhd_prot_t *prot = dhd->prot; - uint16 alloced = 0; - unsigned long flags; - - if (dhd->busstate == DHD_BUS_DOWN) { - DHD_ERROR(("%s: bus is already down.\n", 
__FUNCTION__)); - return -1; - } - - if (event_buf) { - /* Allocate packet for event buffer post */ - pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; - } else { - /* Allocate packet for ctrl/ioctl buffer post */ - pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ; - } - - if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) { - DHD_ERROR(("%s:%d: PKTGET for %s rxbuf failed\n", - __FUNCTION__, __LINE__, event_buf ? - "event" : "ioctl")); - return -1; - } - - pktlen = PKTLEN(dhd->osh, p); - if (SECURE_DMA_ENAB(dhd->osh)) { - DHD_GENERAL_LOCK(dhd, flags); - physaddr = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, - DMA_RX, p, 0, prot->h2dring_ctrl_subn->secdma, 0); - DHD_GENERAL_UNLOCK(dhd, flags); - } else - physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); - - if (PHYSADDRISZERO(physaddr)) { - - DHD_ERROR(("%s: Invalid phyaddr 0\n", __FUNCTION__)); - ASSERT(0); - goto free_pkt_return; - } - - DHD_GENERAL_LOCK(dhd, flags); - rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)dhd_alloc_ring_space(dhd, - prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced); - if (rxbuf_post == NULL) { - DHD_GENERAL_UNLOCK(dhd, flags); - DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer" - " for %s\n", __FUNCTION__, __LINE__, event_buf ? 
"event" : - "ioctl")); - if (SECURE_DMA_ENAB(dhd->osh)) { - DHD_GENERAL_LOCK(dhd, flags); - SECURE_DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0, - prot->h2dring_ctrl_subn->secdma, 0); - DHD_GENERAL_UNLOCK(dhd, flags); - } else - DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0); - - goto free_pkt_return; - } - - /* CMN msg header */ - if (event_buf) - rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST; - else - rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST; - rxbuf_post->cmn_hdr.if_id = 0; - - rxbuf_post->cmn_hdr.request_id = - htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr, pktlen, DMA_RX, - prot->h2dring_ctrl_subn->secdma)); - - if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) { - if (RING_WRITE_PTR(prot->h2dring_ctrl_subn) == 0) - RING_WRITE_PTR(prot->h2dring_ctrl_subn) = - RING_MAX_ITEM(prot->h2dring_ctrl_subn) - 1; - else - RING_WRITE_PTR(prot->h2dring_ctrl_subn)--; - DHD_GENERAL_UNLOCK(dhd, flags); - if (SECURE_DMA_ENAB(dhd->osh)) { - DHD_GENERAL_LOCK(dhd, flags); - SECURE_DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0, - prot->h2dring_ctrl_subn->secdma, 0); - DHD_GENERAL_UNLOCK(dhd, flags); - } else - DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0); - - goto free_pkt_return; - } - - rxbuf_post->cmn_hdr.flags = 0; - rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p)); - rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr)); - rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(physaddr)); - - /* Update the write pointer in TCM & ring bell */ - prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, rxbuf_post, - DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); - DHD_GENERAL_UNLOCK(dhd, flags); - - return 1; - -free_pkt_return: - PKTFREE(dhd->osh, p, FALSE); - - return -1; -} - -static uint16 -dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post) -{ - uint32 i = 0; - int32 ret_val; - - DHD_INFO(("%s: max to post %d, event %d\n", __FUNCTION__, max_to_post, 
event_buf)); - - if (dhd->busstate == DHD_BUS_DOWN) { - DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); - return 0; - } - - while (i < max_to_post) { - ret_val = dhd_prot_rxbufpost_ctrl(dhd, event_buf); - if (ret_val < 0) - break; - i++; - } - DHD_INFO(("%s: posted %d buffers to event_pool/ioctl_resp_pool %d\n", __FUNCTION__, i, event_buf)); - return (uint16)i; -} - -static int -dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd) -{ - dhd_prot_t *prot = dhd->prot; - uint16 retcnt = 0; - - DHD_INFO(("%s: ioctl resp buf post\n", __FUNCTION__)); - - if (dhd->busstate == DHD_BUS_DOWN) { - DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); - return 0; - } - - retcnt = dhd_msgbuf_rxbuf_post_ctrlpath(dhd, FALSE, - prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted); - prot->cur_ioctlresp_bufs_posted += retcnt; - return retcnt; -} - -static int -dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd) -{ - dhd_prot_t *prot = dhd->prot; - uint16 retcnt = 0; - - if (dhd->busstate == DHD_BUS_DOWN) { - DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); - return 0; - } - - retcnt = dhd_msgbuf_rxbuf_post_ctrlpath(dhd, TRUE, - prot->max_eventbufpost - prot->cur_event_bufs_posted); - - prot->cur_event_bufs_posted += retcnt; - return retcnt; -} - -bool BCMFASTPATH -dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) -{ - dhd_prot_t *prot = dhd->prot; - bool more = TRUE; - uint n = 0; - - /* Process all the messages - DTOH direction */ - while (TRUE) { - uint8 *src_addr; - uint16 src_len; - - /* Store current read pointer */ - /* Read pointer will be updated in prot_early_upd_rxcpln_read_idx */ - prot_store_rxcpln_read_idx(dhd, prot->d2hring_rx_cpln); - - /* Get the message from ring */ - src_addr = prot_get_src_addr(dhd, prot->d2hring_rx_cpln, &src_len); - if (src_addr == NULL) { - more = FALSE; - break; - } - - /* Prefetch data to populate the cache */ - OSL_PREFETCH(src_addr); - - if (dhd_prot_process_msgtype(dhd, prot->d2hring_rx_cpln, src_addr, - 
src_len) != BCME_OK) { - prot_upd_read_idx(dhd, prot->d2hring_rx_cpln); - DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n", - __FUNCTION__, src_len)); - } - - /* After batch processing, check RX bound */ - n += src_len/RING_LEN_ITEMS(prot->d2hring_rx_cpln); - if (n >= bound) { - break; - } - } - - return more; -} - -void -dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flow_id, void *msgring_info) -{ - uint16 r_index = 0; - msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring_info; - - /* Update read pointer */ - if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { - r_index = dhd_get_dmaed_index(dhd, H2D_DMA_READINDX, ring->idx); - ring->ringstate->r_offset = r_index; - } - - DHD_TRACE(("%s: flow %d, write %d read %d \n\n", __FUNCTION__, flow_id, RING_WRITE_PTR(ring), - RING_READ_PTR(ring))); - - /* Need more logic here, but for now use it directly */ - dhd_bus_schedule_queue(dhd->bus, flow_id, TRUE); -} - - -bool BCMFASTPATH -dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound) -{ - dhd_prot_t *prot = dhd->prot; - bool more = TRUE; - uint n = 0; - - /* Process all the messages - DTOH direction */ - while (TRUE) { - uint8 *src_addr; - uint16 src_len; - - src_addr = prot_get_src_addr(dhd, prot->d2hring_tx_cpln, &src_len); - if (src_addr == NULL) { - more = FALSE; - break; - } - - /* Prefetch data to populate the cache */ - OSL_PREFETCH(src_addr); - - if (dhd_prot_process_msgtype(dhd, prot->d2hring_tx_cpln, src_addr, - src_len) != BCME_OK) { - DHD_ERROR(("%s: Error at process txcmpl msgbuf of len %d\n", - __FUNCTION__, src_len)); - } - - /* Write to dngl rd ptr */ - prot_upd_read_idx(dhd, prot->d2hring_tx_cpln); - - /* After batch processing, check bound */ - n += src_len/RING_LEN_ITEMS(prot->d2hring_tx_cpln); - if (n >= bound) { - break; - } - } - - return more; -} - -int BCMFASTPATH -dhd_prot_process_ctrlbuf(dhd_pub_t * dhd) -{ - dhd_prot_t *prot = dhd->prot; - - /* Process all the messages - DTOH direction */ - while (TRUE) { - uint8 *src_addr; - uint16 
src_len; - src_addr = prot_get_src_addr(dhd, prot->d2hring_ctrl_cpln, &src_len); - - if (src_addr == NULL) { - break; - } - - /* Prefetch data to populate the cache */ - OSL_PREFETCH(src_addr); - if (dhd_prot_process_msgtype(dhd, prot->d2hring_ctrl_cpln, src_addr, - src_len) != BCME_OK) { - DHD_ERROR(("%s: Error at process ctrlmsgbuf of len %d\n", - __FUNCTION__, src_len)); - } - - /* Write to dngl rd ptr */ - prot_upd_read_idx(dhd, prot->d2hring_ctrl_cpln); - } - - return 0; -} - -static int BCMFASTPATH -dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len) -{ - dhd_prot_t *prot = dhd->prot; - uint32 cur_dma_len = 0; - int ret = BCME_OK; - - DHD_INFO(("%s: process msgbuf of len %d\n", __FUNCTION__, len)); - - while (len > 0) { - ASSERT(len > (sizeof(cmn_msg_hdr_t) + prot->rx_dataoffset)); - if (prot->rx_dataoffset) { - cur_dma_len = *(uint32 *) buf; - ASSERT(cur_dma_len <= len); - buf += prot->rx_dataoffset; - len -= (uint16)prot->rx_dataoffset; - } - else { - cur_dma_len = len; - } - if (dhd_process_msgtype(dhd, ring, buf, (uint16)cur_dma_len) != BCME_OK) { - DHD_ERROR(("%s: Error at process msg of dmalen %d\n", - __FUNCTION__, cur_dma_len)); - ret = BCME_ERROR; - } - - len -= (uint16)cur_dma_len; - buf += cur_dma_len; - } - return ret; -} - -static int BCMFASTPATH -dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len) -{ - uint16 pktlen = len; - uint16 msglen; - uint8 msgtype; - cmn_msg_hdr_t *msg = NULL; - int ret = BCME_OK; - -#if defined(PCIE_D2H_SYNC_BZERO) - uint8 *buf_head = buf; -#endif /* PCIE_D2H_SYNC_BZERO */ - - ASSERT(ring && ring->ringmem); - msglen = RING_LEN_ITEMS(ring); - if (msglen == 0) { - DHD_ERROR(("%s: ringidx %d, msglen is %d, pktlen is %d \n", - __FUNCTION__, ring->idx, msglen, pktlen)); - return BCME_ERROR; - } - - while (pktlen > 0) { - msg = (cmn_msg_hdr_t *)buf; - -#if defined(PCIE_D2H_SYNC) - /* Wait until DMA completes, then fetch msgtype */ - msgtype = 
dhd->prot->d2h_sync_cb(dhd, ring, msg, msglen); -#else - msgtype = msg->msg_type; -#endif /* !PCIE_D2H_SYNC */ - - DHD_INFO(("%s: msgtype %d, msglen is %d, pktlen is %d\n", __FUNCTION__, - msgtype, msglen, pktlen)); - if (msgtype == MSG_TYPE_LOOPBACK) { - bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, msglen); - DHD_ERROR(("%s: MSG_TYPE_LOOPBACK, len %d\n", __FUNCTION__, msglen)); - } - - - if (msgtype >= DHD_PROT_FUNCS) { - DHD_ERROR(("%s: msgtype %d, msglen is %d, pktlen is %d \n", - __FUNCTION__, msgtype, msglen, pktlen)); - ret = BCME_ERROR; - goto done; - } - - if (table_lookup[msgtype]) { - table_lookup[msgtype](dhd, buf, msglen); - } - - if (pktlen < msglen) { - ret = BCME_ERROR; - goto done; - } - pktlen = pktlen - msglen; - buf = buf + msglen; - - if (ring->idx == BCMPCIE_D2H_MSGRING_RX_COMPLETE) - prot_early_upd_rxcpln_read_idx(dhd, ring); - } -done: - -#if defined(PCIE_D2H_SYNC_BZERO) - OSL_CACHE_FLUSH(buf_head, len - pktlen); /* Flush the bzeroed msg */ -#endif /* PCIE_D2H_SYNC_BZERO */ - -#ifdef DHD_RX_CHAINING - dhd_rxchain_commit(dhd); -#endif - - return ret; -} - -static void -dhd_prot_noop(dhd_pub_t *dhd, void * buf, uint16 msglen) -{ - return; -} - -static void -dhd_prot_ringstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen) -{ - pcie_ring_status_t * ring_status = (pcie_ring_status_t *)buf; - DHD_ERROR(("%s: ring status: request_id %d, status 0x%04x, flow ring %d, w_offset %d \n", - __FUNCTION__, - ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status, - ring_status->compl_hdr.flow_ring_id, ring_status->write_idx)); - /* How do we track this to pair it with ??? 
*/ - return; -} - -static void -dhd_prot_genstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen) -{ - pcie_gen_status_t * gen_status = (pcie_gen_status_t *)buf; - DHD_ERROR(("%s: gen status: request_id %d, status 0x%04x, flow ring %d \n", - __FUNCTION__, - gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status, - gen_status->compl_hdr.flow_ring_id)); - - /* How do we track this to pair it with ??? */ - return; -} - -static void -dhd_prot_ioctack_process(dhd_pub_t *dhd, void * buf, uint16 msglen) -{ - ioctl_req_ack_msg_t * ioct_ack = (ioctl_req_ack_msg_t *)buf; - - DHD_CTL(("%s: ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n", - __FUNCTION__, - ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status, - ioct_ack->compl_hdr.flow_ring_id)); - if (ioct_ack->compl_hdr.status != 0) { - DHD_ERROR(("%s: got an error status for the ioctl request...need to handle that\n", - __FUNCTION__)); - } - -#if defined(PCIE_D2H_SYNC_BZERO) - memset(buf, 0, msglen); -#endif /* PCIE_D2H_SYNC_BZERO */ -} - -static void -dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen) -{ - uint16 status; - uint32 resp_len = 0; - uint32 pkt_id, xt_id; - ioctl_comp_resp_msg_t * ioct_resp = (ioctl_comp_resp_msg_t *)buf; - - resp_len = ltoh16(ioct_resp->resp_len); - xt_id = ltoh16(ioct_resp->trans_id); - pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id); - status = ioct_resp->compl_hdr.status; - -#if defined(PCIE_D2H_SYNC_BZERO) - memset(buf, 0, msglen); -#endif /* PCIE_D2H_SYNC_BZERO */ - - DHD_CTL(("%s: IOCTL_COMPLETE: pktid %x xtid %d status %x resplen %d\n", __FUNCTION__, - pkt_id, xt_id, status, resp_len)); - - dhd_bus_update_retlen(dhd->bus, sizeof(ioctl_comp_resp_msg_t), pkt_id, status, resp_len); - dhd_os_ioctl_resp_wake(dhd); -} - -static void BCMFASTPATH -dhd_prot_txstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen) -{ - dhd_prot_t *prot = dhd->prot; - host_txbuf_cmpl_t * txstatus; - unsigned long flags; - uint32 pktid; - void *pkt; - ulong pa; - 
uint32 pa_len; - void *secdma; - /* locks required to protect circular buffer accesses */ - DHD_GENERAL_LOCK(dhd, flags); - - txstatus = (host_txbuf_cmpl_t *)buf; - pktid = ltoh32(txstatus->cmn_hdr.request_id); - - DHD_INFO(("%s: txstatus for pktid 0x%04x\n", __FUNCTION__, pktid)); - if (prot->active_tx_count) - prot->active_tx_count--; - else - DHD_ERROR(("%s: Extra packets are freed\n", __FUNCTION__)); - - ASSERT(pktid != 0); - pkt = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len, secdma); - if (pkt) { - if (SECURE_DMA_ENAB(dhd->osh)) { - int offset = 0; - BCM_REFERENCE(offset); - - if (dhd->prot->tx_metadata_offset) - offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN; - SECURE_DMA_UNMAP(dhd->osh, (uint) pa, - (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, 0, - secdma, offset); - } else - DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_RX, 0, dmah); - -#if defined(BCMPCIE) - dhd_txcomplete(dhd, pkt, true); -#endif - -#if DHD_DBG_SHOW_METADATA - if (dhd->prot->tx_metadata_offset && txstatus->metadata_len) { - uchar *ptr; - /* The Ethernet header of TX frame was copied and removed. - * Here, move the data pointer forward by Ethernet header size. 
- */ - PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN); - ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset); - bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len); - dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len); - } -#endif /* DHD_DBG_SHOW_METADATA */ - PKTFREE(dhd->osh, pkt, TRUE); - } - -#if defined(PCIE_D2H_SYNC_BZERO) - memset(buf, 0, msglen); -#endif /* PCIE_D2H_SYNC_BZERO */ - - DHD_GENERAL_UNLOCK(dhd, flags); - - return; -} - -static void -dhd_prot_event_process(dhd_pub_t *dhd, void* buf, uint16 len) -{ - wlevent_req_msg_t *evnt; - uint32 bufid; - uint16 buflen; - int ifidx = 0; - void* pkt; - unsigned long flags; - dhd_prot_t *prot = dhd->prot; - int post_cnt = 0; - bool zero_posted = FALSE; - - /* Event complete header */ - evnt = (wlevent_req_msg_t *)buf; - bufid = ltoh32(evnt->cmn_hdr.request_id); - buflen = ltoh16(evnt->event_data_len); - - ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr); - - /* Post another rxbuf to the device */ - if (prot->cur_event_bufs_posted) - prot->cur_event_bufs_posted--; - else - zero_posted = TRUE; - - - post_cnt = dhd_msgbuf_rxbuf_post_event_bufs(dhd); - if (zero_posted && (post_cnt <= 0)) { - return; - } - -#if defined(PCIE_D2H_SYNC_BZERO) - memset(buf, 0, len); -#endif /* PCIE_D2H_SYNC_BZERO */ - - /* locks required to protect pktid_map */ - DHD_GENERAL_LOCK(dhd, flags); - pkt = dhd_prot_packet_get(dhd, ltoh32(bufid)); - DHD_GENERAL_UNLOCK(dhd, flags); - - if (!pkt) - return; - - /* DMA RX offset updated through shared area */ - if (dhd->prot->rx_dataoffset) - PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); - - PKTSETLEN(dhd->osh, pkt, buflen); - - dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); -} - -static void BCMFASTPATH -dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void* buf, uint16 msglen) -{ - host_rxbuf_cmpl_t *rxcmplt_h; - uint16 data_offset; /* offset at which data starts */ - void * pkt; - unsigned long flags; - static uint8 current_phase = 0; - uint ifidx; - - /* RXCMPLT HDR */ - rxcmplt_h = 
(host_rxbuf_cmpl_t *)buf; - - /* Post another set of rxbufs to the device */ - dhd_prot_return_rxbuf(dhd, 1); - - /* offset from which data starts is populated in rxstatus0 */ - data_offset = ltoh16(rxcmplt_h->data_offset); - - DHD_GENERAL_LOCK(dhd, flags); - pkt = dhd_prot_packet_get(dhd, ltoh32(rxcmplt_h->cmn_hdr.request_id)); - DHD_GENERAL_UNLOCK(dhd, flags); - - if (!pkt) { - return; - } - - DHD_INFO(("%s: id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n", - __FUNCTION__, - ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len), - rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt), - ltoh16(rxcmplt_h->metadata_len))); - -#if DHD_DBG_SHOW_METADATA - if (dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) { - uchar *ptr; - ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset); - /* header followed by data */ - bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len); - dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len); - } -#endif /* DHD_DBG_SHOW_METADATA */ - - if (current_phase != rxcmplt_h->cmn_hdr.flags) { - current_phase = rxcmplt_h->cmn_hdr.flags; - } - if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) - DHD_INFO(("%s: D11 frame rxed\n", __FUNCTION__)); - /* data_offset from buf start */ - if (data_offset) { - /* data offset given from dongle after split rx */ - PKTPULL(dhd->osh, pkt, data_offset); /* data offset */ - } else { - /* DMA RX offset updated through shared area */ - if (dhd->prot->rx_dataoffset) - PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); - } - /* Actual length of the packet */ - PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len)); - - ifidx = rxcmplt_h->cmn_hdr.if_id; - -#if defined(PCIE_D2H_SYNC_BZERO) - memset(buf, 0, msglen); -#endif /* PCIE_D2H_SYNC_BZERO */ - -#ifdef DHD_RX_CHAINING - /* Chain the packets */ - dhd_rxchain_frame(dhd, pkt, ifidx); -#else /* ! 
DHD_RX_CHAINING */ - /* offset from which data starts is populated in rxstatus0 */ - dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); -#endif /* ! DHD_RX_CHAINING */ - -} - -/* Stop protocol: sync w/dongle state. */ -void dhd_prot_stop(dhd_pub_t *dhd) -{ - /* nothing to do for pcie */ -} - -/* Add any protocol-specific data header. - * Caller must reserve prot_hdrlen prepend space. - */ -void BCMFASTPATH -dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF) -{ - return; -} - -uint -dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF) -{ - return 0; -} - - -#define PKTBUF pktbuf - -int BCMFASTPATH -dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) -{ - unsigned long flags; - dhd_prot_t *prot = dhd->prot; - host_txbuf_post_t *txdesc = NULL; - dmaaddr_t physaddr, meta_physaddr; - uint8 *pktdata; - uint32 pktlen; - uint32 pktid; - uint8 prio; - uint16 flowid = 0; - uint16 alloced = 0; - uint16 headroom; - - msgbuf_ring_t *msg_ring; - uint8 dhcp_pkt; - - if (!dhd->flow_ring_table) - return BCME_NORESOURCE; - - if (!dhd_bus_is_txmode_push(dhd->bus)) { - flow_ring_table_t *flow_ring_table; - flow_ring_node_t *flow_ring_node; - - flowid = (uint16)DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(PKTBUF)); - - flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; - flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; - - msg_ring = (msgbuf_ring_t *)flow_ring_node->prot_info; - } else { - msg_ring = prot->h2dring_txp_subn; - } - - - - DHD_GENERAL_LOCK(dhd, flags); - - /* Create a unique 32-bit packet id */ - pktid = NATIVE_TO_PKTID_RSV(dhd->prot->pktid_map_handle, PKTBUF); - if (pktid == DHD_PKTID_INVALID) { - DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__)); - /* - * If we return error here, the caller would queue the packet - * again. So we'll just free the skb allocated in DMA Zone. - * Since we have not freed the original SKB yet the caller would - * requeue the same. 
- */ - goto err_no_res_pktfree; - } - - /* Reserve space in the circular buffer */ - txdesc = (host_txbuf_post_t *)dhd_alloc_ring_space(dhd, - msg_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced); - if (txdesc == NULL) { - void *secdma; - DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n", - __FUNCTION__, __LINE__, prot->active_tx_count)); - /* Free up the PKTID */ - PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, physaddr, - pktlen, secdma); - goto err_no_res_pktfree; - } - - /* test if dhcp pkt */ - dhcp_pkt = pkt_is_dhcp(dhd->osh, PKTBUF); - txdesc->flag2 = (txdesc->flag2 & ~(BCMPCIE_PKT_FLAGS2_FORCELOWRATE_MASK << - BCMPCIE_PKT_FLAGS2_FORCELOWRATE_SHIFT)) | ((dhcp_pkt & - BCMPCIE_PKT_FLAGS2_FORCELOWRATE_MASK) << BCMPCIE_PKT_FLAGS2_FORCELOWRATE_SHIFT); - - /* Extract the data pointer and length information */ - pktdata = PKTDATA(dhd->osh, PKTBUF); - pktlen = PKTLEN(dhd->osh, PKTBUF); - - /* Ethernet header: Copy before we cache flush packet using DMA_MAP */ - bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN); - - /* Extract the ethernet header and adjust the data pointer and length */ - pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN); - pktlen -= ETHER_HDR_LEN; - - /* Map the data pointer to a DMA-able address */ - if (SECURE_DMA_ENAB(dhd->osh)) { - - int offset = 0; - BCM_REFERENCE(offset); - - if (prot->tx_metadata_offset) - offset = prot->tx_metadata_offset + ETHER_HDR_LEN; - - physaddr = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, - DMA_TX, PKTBUF, 0, msg_ring->secdma, offset); - } else - physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0); - - if ((PHYSADDRHI(physaddr) == 0) && (PHYSADDRLO(physaddr) == 0)) { - DHD_ERROR(("%s: Something really bad, unless 0 is a valid phyaddr\n", __FUNCTION__)); - ASSERT(0); - } - - /* No need to lock. 
Save the rest of the packet's metadata */ - NATIVE_TO_PKTID_SAVE(dhd->prot->pktid_map_handle, PKTBUF, pktid, - physaddr, pktlen, DMA_TX, msg_ring->secdma); - -#ifdef TXP_FLUSH_NITEMS - if (msg_ring->pend_items_count == 0) - msg_ring->start_addr = (void *)txdesc; - msg_ring->pend_items_count++; -#endif - - /* Form the Tx descriptor message buffer */ - - /* Common message hdr */ - txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST; - txdesc->cmn_hdr.request_id = htol32(pktid); - txdesc->cmn_hdr.if_id = ifidx; - txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3; - prio = (uint8)PKTPRIO(PKTBUF); - - - txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT; - txdesc->seg_cnt = 1; - - txdesc->data_len = htol16((uint16)pktlen); - txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr)); - txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(physaddr)); - - /* Move data pointer to keep ether header in local PKTBUF for later reference */ - PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN); - - /* Handle Tx metadata */ - headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF); - if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) - DHD_ERROR(("%s: No headroom for Metadata tx %d %d\n", __FUNCTION__, - prot->tx_metadata_offset, headroom)); - - if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) { - DHD_TRACE(("%s: Metadata in tx %d\n", __FUNCTION__, prot->tx_metadata_offset)); - - /* Adjust the data pointer to account for meta data in DMA_MAP */ - PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset); - if (SECURE_DMA_ENAB(dhd->osh)) { - meta_physaddr = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF), - prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF, - 0, msg_ring->secdma); - } else - meta_physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), - prot->tx_metadata_offset, DMA_RX, PKTBUF, 0); - - if (PHYSADDRISZERO(meta_physaddr)) { - DHD_ERROR(("%s: Something really bad, unless 0 is a valid phyaddr\n", __FUNCTION__)); - ASSERT(0); - } - 
- /* Adjust the data pointer back to original value */ - PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset); - - txdesc->metadata_buf_len = prot->tx_metadata_offset; - txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_physaddr)); - txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_physaddr)); - } - else { - txdesc->metadata_buf_len = htol16(0); - txdesc->metadata_buf_addr.high_addr = 0; - txdesc->metadata_buf_addr.low_addr = 0; - } - - - DHD_TRACE(("%s: txpost: data_len %d, pktid 0x%04x\n", __FUNCTION__, txdesc->data_len, - txdesc->cmn_hdr.request_id)); - - /* Update the write pointer in TCM & ring bell */ -#ifdef TXP_FLUSH_NITEMS - /* Flush if we have either hit the txp_threshold or if this msg is */ - /* occupying the last slot in the flow_ring - before wrap around. */ - if ((msg_ring->pend_items_count == prot->txp_threshold) || - ((uint8 *) txdesc == (uint8 *) HOST_RING_END(msg_ring))) { - dhd_prot_txdata_write_flush(dhd, flowid, TRUE); - } -#else - prot_ring_write_complete(dhd, msg_ring, txdesc, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); -#endif - - prot->active_tx_count++; - - DHD_GENERAL_UNLOCK(dhd, flags); - - return BCME_OK; - -err_no_res_pktfree: - - - - DHD_GENERAL_UNLOCK(dhd, flags); - return BCME_NORESOURCE; - -} - -/* called with a lock */ -void BCMFASTPATH -dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock) -{ -#ifdef TXP_FLUSH_NITEMS - unsigned long flags = 0; - flow_ring_table_t *flow_ring_table; - flow_ring_node_t *flow_ring_node; - msgbuf_ring_t *msg_ring; - - if (!dhd->flow_ring_table) - return; - - if (!in_lock) { - DHD_GENERAL_LOCK(dhd, flags); - } - - flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; - flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; - msg_ring = (msgbuf_ring_t *)flow_ring_node->prot_info; - - /* Update the write pointer in TCM & ring bell */ - if (msg_ring->pend_items_count) { - prot_ring_write_complete(dhd, msg_ring, msg_ring->start_addr, - 
msg_ring->pend_items_count); - msg_ring->pend_items_count = 0; - msg_ring->start_addr = NULL; - } - - if (!in_lock) { - DHD_GENERAL_UNLOCK(dhd, flags); - } -#endif /* TXP_FLUSH_NITEMS */ -} - -#undef PKTBUF /* Only defined in the above routine */ -int BCMFASTPATH -dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len) -{ - return 0; -} - -static void BCMFASTPATH -dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint16 rxcnt) -{ - dhd_prot_t *prot = dhd->prot; - - if (prot->rxbufpost >= rxcnt) { - prot->rxbufpost -= rxcnt; - } else { - /* ASSERT(0); */ - prot->rxbufpost = 0; - } - - if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) - dhd_msgbuf_rxbuf_post(dhd); - - return; -} - - - -/* Use protocol to issue ioctl to dongle */ -int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) -{ - dhd_prot_t *prot = dhd->prot; - int ret = -1; - uint8 action; - - if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { - DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); - goto done; - } - - if (dhd->busstate == DHD_BUS_SUSPEND) { - DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__)); - goto done; - } - - DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - - ASSERT(len <= WLC_IOCTL_MAXLEN); - - if (len > WLC_IOCTL_MAXLEN) - goto done; - - if (prot->pending == TRUE) { - DHD_ERROR(("%s: packet is pending!!!! 
cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n", - __FUNCTION__, - ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd, - (unsigned long)prot->lastcmd)); - if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) { - DHD_TRACE(("iovar cmd=%s\n", (char*)buf)); - } - goto done; - } - - prot->pending = TRUE; - prot->lastcmd = ioc->cmd; - action = ioc->set; - - - if (action & WL_IOCTL_ACTION_SET) { - ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); - } else { - ret = dhdmsgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); - if (ret > 0) - ioc->used = ret; - } - /* Too many programs assume ioctl() returns 0 on success */ - if (ret >= 0) - ret = 0; - else { - DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret)); - dhd->dongle_error = ret; - } - - /* Intercept the wme_dp ioctl here */ - if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) { - int slen, val = 0; - - slen = strlen("wme_dp") + 1; - if (len >= (int)(slen + sizeof(int))) - bcopy(((char *)buf + slen), &val, sizeof(int)); - dhd->wme_dp = (uint8) ltoh32(val); - } - - - prot->pending = FALSE; - -done: - return ret; - -} - -int -dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len) -{ - unsigned long flags; - dhd_prot_t *prot = dhd->prot; - uint16 alloced = 0; - - ioct_reqst_hdr_t *ioct_rqst; - - uint16 hdrlen = sizeof(ioct_reqst_hdr_t); - uint16 msglen = len + hdrlen; - - - if (msglen > MSGBUF_MAX_MSG_SIZE) - msglen = MSGBUF_MAX_MSG_SIZE; - - msglen = align(msglen, DMA_ALIGN_LEN); - - DHD_GENERAL_LOCK(dhd, flags); - ioct_rqst = (ioct_reqst_hdr_t *)dhd_alloc_ring_space(dhd, - prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced); - - if (ioct_rqst == NULL) { - DHD_GENERAL_UNLOCK(dhd, flags); - return 0; - } - - { - uint8 *ptr; - uint16 i; - - ptr = (uint8 *)ioct_rqst; - for (i = 0; i < msglen; i++) { - ptr[i] = i % 256; - } - } - - - /* Common msg buf hdr */ - ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK; - ioct_rqst->msg.if_id = 0; - - bcm_print_bytes("LPBK REQ: 
", (uint8 *)ioct_rqst, msglen); - - /* Update the write pointer in TCM & ring bell */ - prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, ioct_rqst, - DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); - DHD_GENERAL_UNLOCK(dhd, flags); - - return 0; -} - -void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma) -{ - if (dma == NULL) - return; - - if (dma->srcmem.va) { - DMA_FREE_CONSISTENT(dhd->osh, dma->srcmem.va, - dma->len, dma->srcmem.pa, dma->srcmem.dmah); - dma->srcmem.va = NULL; - } - if (dma->destmem.va) { - DMA_FREE_CONSISTENT(dhd->osh, dma->destmem.va, - dma->len + 8, dma->destmem.pa, dma->destmem.dmah); - dma->destmem.va = NULL; - } -} - -int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, - uint srcdelay, uint destdelay, dhd_dmaxfer_t *dma) -{ - uint i; - - if (!dma) - return BCME_ERROR; - - /* First free up exisiting buffers */ - dmaxfer_free_dmaaddr(dhd, dma); - - dma->srcmem.va = DMA_ALLOC_CONSISTENT(dhd->osh, len, DMA_ALIGN_LEN, - &i, &dma->srcmem.pa, &dma->srcmem.dmah); - if (dma->srcmem.va == NULL) { - return BCME_NOMEM; - } - - /* Populate source with a pattern */ - for (i = 0; i < len; i++) { - ((uint8*)dma->srcmem.va)[i] = i % 256; - } - OSL_CACHE_FLUSH(dma->srcmem.va, len); - - dma->destmem.va = DMA_ALLOC_CONSISTENT(dhd->osh, len + 8, DMA_ALIGN_LEN, - &i, &dma->destmem.pa, &dma->destmem.dmah); - if (dma->destmem.va == NULL) { - DMA_FREE_CONSISTENT(dhd->osh, dma->srcmem.va, - dma->len, dma->srcmem.pa, dma->srcmem.dmah); - dma->srcmem.va = NULL; - return BCME_NOMEM; - } - - - /* Clear the destination buffer */ - bzero(dma->destmem.va, len +8); - OSL_CACHE_FLUSH(dma->destmem.va, len+8); - - dma->len = len; - dma->srcdelay = srcdelay; - dma->destdelay = destdelay; - - return BCME_OK; -} - -static void -dhdmsgbuf_dmaxfer_compare(dhd_pub_t *dhd, void * buf, uint16 msglen) -{ - dhd_prot_t *prot = dhd->prot; - - OSL_CACHE_INV(prot->dmaxfer.destmem.va, prot->dmaxfer.len); - if (prot->dmaxfer.srcmem.va && prot->dmaxfer.destmem.va) { - if 
(memcmp(prot->dmaxfer.srcmem.va, - prot->dmaxfer.destmem.va, - prot->dmaxfer.len)) { - bcm_print_bytes("XFER SRC: ", - prot->dmaxfer.srcmem.va, prot->dmaxfer.len); - bcm_print_bytes("XFER DEST: ", - prot->dmaxfer.destmem.va, prot->dmaxfer.len); - } - else { - DHD_INFO(("%s: DMA successful\n", __FUNCTION__)); - } - } - dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer); - dhd->prot->dmaxfer_in_progress = FALSE; -} - -int -dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay) -{ - unsigned long flags; - int ret = BCME_OK; - dhd_prot_t *prot = dhd->prot; - pcie_dma_xfer_params_t *dmap; - uint32 xferlen = len > DMA_XFER_LEN_LIMIT ? DMA_XFER_LEN_LIMIT : len; - uint16 msglen = sizeof(pcie_dma_xfer_params_t); - uint16 alloced = 0; - - if (prot->dmaxfer_in_progress) { - DHD_ERROR(("%s: DMA is in progress...\n", __FUNCTION__)); - return ret; - } - prot->dmaxfer_in_progress = TRUE; - if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay, - &prot->dmaxfer)) != BCME_OK) { - prot->dmaxfer_in_progress = FALSE; - return ret; - } - - - if (msglen > MSGBUF_MAX_MSG_SIZE) - msglen = MSGBUF_MAX_MSG_SIZE; - - msglen = align(msglen, DMA_ALIGN_LEN); - - DHD_GENERAL_LOCK(dhd, flags); - dmap = (pcie_dma_xfer_params_t *)dhd_alloc_ring_space(dhd, - prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced); - - if (dmap == NULL) { - dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer); - prot->dmaxfer_in_progress = FALSE; - DHD_GENERAL_UNLOCK(dhd, flags); - return BCME_NOMEM; - } - - /* Common msg buf hdr */ - dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER; - dmap->cmn_hdr.request_id = 0x1234; - - dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa)); - dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa)); - dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.destmem.pa)); - dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.destmem.pa)); - dmap->xfer_len = htol32(prot->dmaxfer.len); - 
dmap->srcdelay = htol32(prot->dmaxfer.srcdelay); - dmap->destdelay = htol32(prot->dmaxfer.destdelay); - - /* Update the write pointer in TCM & ring bell */ - prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, dmap, - DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); - DHD_GENERAL_UNLOCK(dhd, flags); - - DHD_ERROR(("%s: DMA Started...\n", __FUNCTION__)); - - return BCME_OK; -} - -static int -dhdmsgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) -{ - dhd_prot_t *prot = dhd->prot; - - int ret = 0; - - DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - - /* Respond "bcmerror" and "bcmerrorstr" with local cache */ - if (cmd == WLC_GET_VAR && buf) - { - if (!strcmp((char *)buf, "bcmerrorstr")) - { - strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN); - goto done; - } - else if (!strcmp((char *)buf, "bcmerror")) - { - *(int *)buf = dhd->dongle_error; - goto done; - } - } - - ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx); - if (ret < 0) { - DHD_ERROR(("%s : dhd_fillup_ioct_reqst_ptrbased error : %d\n", __FUNCTION__, ret)); - return ret; - } - - DHD_INFO(("%s: ACTION %d ifdix %d cmd %d len %d \n", __FUNCTION__, - action, ifidx, cmd, len)); - - /* wait for interrupt and get first fragment */ - ret = dhdmsgbuf_cmplt(dhd, prot->reqid, len, buf, prot->retbuf.va); - -done: - return ret; -} -static int -dhdmsgbuf_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len, void* buf, void* retbuf) -{ - dhd_prot_t *prot = dhd->prot; - ioctl_comp_resp_msg_t ioct_resp; - void* pkt; - int retlen; - int msgbuf_len = 0; - int post_cnt = 0; - unsigned long flags; - bool zero_posted = FALSE; - - DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - if (dhd->busstate == DHD_BUS_DOWN) { - DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); - return -1; - } - - if (prot->cur_ioctlresp_bufs_posted) - prot->cur_ioctlresp_bufs_posted--; - else - zero_posted = TRUE; - - post_cnt = dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); - if (zero_posted && 
(post_cnt <= 0)) { - return -1; - } - - memset(&ioct_resp, 0, sizeof(ioctl_comp_resp_msg_t)); - - retlen = dhd_bus_rxctl(dhd->bus, (uchar*)&ioct_resp, msgbuf_len); - if (retlen <= 0) { - DHD_ERROR(("%s: IOCTL request failed with error code %d\n", __FUNCTION__, retlen)); - return retlen; - } - DHD_INFO(("%s: ioctl resp retlen %d status %d, resp_len %d, pktid %d\n", __FUNCTION__, - retlen, ioct_resp.compl_hdr.status, ioct_resp.resp_len, - ioct_resp.cmn_hdr.request_id)); - if (ioct_resp.resp_len != 0) { - DHD_GENERAL_LOCK(dhd, flags); - pkt = dhd_prot_packet_get(dhd, ioct_resp.cmn_hdr.request_id); - DHD_GENERAL_UNLOCK(dhd, flags); - - DHD_INFO(("%s: ioctl ret buf %p retlen %d status %x\n", __FUNCTION__, pkt, retlen, - ioct_resp.compl_hdr.status)); - /* get ret buf */ - if ((buf) && (pkt)) { - /* bcopy(PKTDATA(dhd->osh, pkt), buf, ioct_resp.resp_len); */ - /* ioct_resp.resp_len could have been changed to make it > 8 bytes */ - bcopy(PKTDATA(dhd->osh, pkt), buf, len); - } - if (pkt) { - PKTFREE(dhd->osh, pkt, FALSE); - } - } else { - DHD_GENERAL_LOCK(dhd, flags); - dhd_prot_packet_free(dhd, ioct_resp.cmn_hdr.request_id); - DHD_GENERAL_UNLOCK(dhd, flags); - } - - return (int)(ioct_resp.compl_hdr.status); -} -static int -dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) -{ - dhd_prot_t *prot = dhd->prot; - - int ret = 0; - - DHD_TRACE(("%s: Enter \n", __FUNCTION__)); - DHD_TRACE(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len)); - - if (dhd->busstate == DHD_BUS_DOWN) { - DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); - return -EIO; - } - - /* don't talk to the dongle if fw is about to be reloaded */ - if (dhd->hang_was_sent) { - DHD_ERROR(("%s: HANG was sent up earlier. 
Not talking to the chip\n", - __FUNCTION__)); - return -EIO; - } - - /* Fill up msgbuf for ioctl req */ - ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx); - if (ret < 0) { - DHD_ERROR(("%s : dhd_fillup_ioct_reqst_ptrbased error : %d\n", __FUNCTION__, ret)); - return ret; - } - - DHD_INFO(("%s: ACTIOn %d ifdix %d cmd %d len %d \n", __FUNCTION__, - action, ifidx, cmd, len)); - - ret = dhdmsgbuf_cmplt(dhd, prot->reqid, len, buf, prot->retbuf.va); - - return ret; -} -/* Handles a protocol control response asynchronously */ -int dhd_prot_ctl_complete(dhd_pub_t *dhd) -{ - return 0; -} - -/* Check for and handle local prot-specific iovar commands */ -int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name, - void *params, int plen, void *arg, int len, bool set) -{ - return BCME_UNSUPPORTED; -} - -/* Add prot dump output to a buffer */ -void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) -{ -#if defined(PCIE_D2H_SYNC) - if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) - bcm_bprintf(strbuf, "\nd2h_sync: SEQNUM:"); - else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) - bcm_bprintf(strbuf, "\nd2h_sync: XORCSUM:"); - else - bcm_bprintf(strbuf, "\nd2h_sync: NONE:"); - bcm_bprintf(strbuf, " d2h_sync_wait max<%lu> tot<%lu>\n", - dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot); -#endif /* PCIE_D2H_SYNC */ -} - -/* Update local copy of dongle statistics */ -void dhd_prot_dstats(dhd_pub_t *dhd) -{ - return; -} - -int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, - uint reorder_info_len, void **pkt, uint32 *free_buf_count) -{ - return 0; -} -/* post a dummy message to interrupt dongle */ -/* used to process cons commands */ -int -dhd_post_dummy_msg(dhd_pub_t *dhd) -{ - unsigned long flags; - hostevent_hdr_t *hevent = NULL; - uint16 alloced = 0; - - dhd_prot_t *prot = dhd->prot; - - DHD_GENERAL_LOCK(dhd, flags); - hevent = (hostevent_hdr_t *)dhd_alloc_ring_space(dhd, - prot->h2dring_ctrl_subn, 
DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced); - - if (hevent == NULL) { - DHD_GENERAL_UNLOCK(dhd, flags); - return -1; - } - - /* CMN msg header */ - hevent->msg.msg_type = MSG_TYPE_HOST_EVNT; - hevent->msg.if_id = 0; - - /* Event payload */ - hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD); - - /* Since, we are filling the data directly into the bufptr obtained - * from the msgbuf, we can directly call the write_complete - */ - prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, hevent, - DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); - DHD_GENERAL_UNLOCK(dhd, flags); - - return 0; -} - -static void * BCMFASTPATH -dhd_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced) -{ - void * ret_buf; - uint16 r_index = 0; - - /* Alloc space for nitems in the ring */ - ret_buf = prot_get_ring_space(ring, nitems, alloced); - - if (ret_buf == NULL) { - /* if alloc failed , invalidate cached read ptr */ - if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { - r_index = dhd_get_dmaed_index(dhd, H2D_DMA_READINDX, ring->idx); - ring->ringstate->r_offset = r_index; - } else - dhd_bus_cmn_readshared(dhd->bus, &(RING_READ_PTR(ring)), - RING_READ_PTR, ring->idx); - - /* Try allocating once more */ - ret_buf = prot_get_ring_space(ring, nitems, alloced); - - if (ret_buf == NULL) { - DHD_INFO(("%s: RING space not available on ring %s for %d items \n", __FUNCTION__, - ring->name, nitems)); - DHD_INFO(("%s: write %d read %d \n\n", __FUNCTION__, RING_WRITE_PTR(ring), - RING_READ_PTR(ring))); - return NULL; - } - } - - /* Return alloced space */ - return ret_buf; -} - -#define DHD_IOCTL_REQ_PKTID 0xFFFE - -/* Non inline ioct request */ -/* Form a ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer */ -/* Form a separate request buffer where a 4 byte cmn header is added in the front */ -/* buf contents from parent function is copied to remaining section of this buffer */ -static int -dhd_fillup_ioct_reqst_ptrbased(dhd_pub_t *dhd, 
uint16 len, uint cmd, void* buf, int ifidx) -{ - dhd_prot_t *prot = dhd->prot; - ioctl_req_msg_t *ioct_rqst; - void * ioct_buf; /* For ioctl payload */ - uint16 rqstlen, resplen; - unsigned long flags; - uint16 alloced = 0; - - rqstlen = len; - resplen = len; - - /* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */ - /* 8K allocation of dongle buffer fails */ - /* dhd doesnt give separate input & output buf lens */ - /* so making the assumption that input length can never be more than 1.5k */ - rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE); - - DHD_GENERAL_LOCK(dhd, flags); - /* Request for cbuf space */ - ioct_rqst = (ioctl_req_msg_t*)dhd_alloc_ring_space(dhd, prot->h2dring_ctrl_subn, - DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced); - if (ioct_rqst == NULL) { - DHD_ERROR(("%s: couldn't allocate space on msgring to send ioctl request\n", __FUNCTION__)); - DHD_GENERAL_UNLOCK(dhd, flags); - return -1; - } - - /* Common msg buf hdr */ - ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ; - ioct_rqst->cmn_hdr.if_id = (uint8)ifidx; - ioct_rqst->cmn_hdr.flags = 0; - ioct_rqst->cmn_hdr.request_id = DHD_IOCTL_REQ_PKTID; - - ioct_rqst->cmd = htol32(cmd); - ioct_rqst->output_buf_len = htol16(resplen); - ioct_rqst->trans_id = prot->ioctl_trans_id ++; - - /* populate ioctl buffer info */ - ioct_rqst->input_buf_len = htol16(rqstlen); - ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa)); - ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa)); - /* copy ioct payload */ - ioct_buf = (void *) prot->ioctbuf.va; - - if (buf) - memcpy(ioct_buf, buf, len); - - OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len); - - if ((ulong)ioct_buf % DMA_ALIGN_LEN) - DHD_ERROR(("%s: host ioct address unaligned !!!!! 
\n", __FUNCTION__)); - - DHD_CTL(("%s: submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n", - __FUNCTION__, - ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len, - ioct_rqst->trans_id)); - - /* upd wrt ptr and raise interrupt */ - prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, ioct_rqst, - DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); - DHD_GENERAL_UNLOCK(dhd, flags); - - return 0; -} - -/* Packet to PacketID mapper */ -typedef struct { - ulong native; - dmaaddr_t pa; - uint32 pa_len; - uchar dma; -} pktid_t; - -typedef struct { - void *osh; - void *mwbmap_hdl; - pktid_t *pktid_list; - uint32 count; -} pktid_map_t; - - -void *pktid_map_init(void *osh, uint32 count) -{ - pktid_map_t *handle; - - handle = (pktid_map_t *) MALLOC(osh, sizeof(pktid_map_t)); - if (handle == NULL) { - printf("%s:%d: MALLOC failed for size %d\n", - __FUNCTION__, __LINE__, (uint32) sizeof(pktid_map_t)); - return NULL; - } - handle->osh = osh; - handle->count = count; - handle->mwbmap_hdl = bcm_mwbmap_init(osh, count); - if (handle->mwbmap_hdl == NULL) { - printf("%s:%d: bcm_mwbmap_init failed for count %d\n", - __FUNCTION__, __LINE__, count); - MFREE(osh, handle, sizeof(pktid_map_t)); - return NULL; - } - - handle->pktid_list = (pktid_t *) MALLOC(osh, sizeof(pktid_t) * (count+1)); - if (handle->pktid_list == NULL) { - printf("%s:%d: MALLOC failed for count %d / total = %d\n", - __FUNCTION__, __LINE__, count, (uint32) sizeof(pktid_t) * count); - bcm_mwbmap_fini(osh, handle->mwbmap_hdl); - MFREE(osh, handle, sizeof(pktid_map_t)); - return NULL; - } - - return handle; -} - -void -pktid_map_uninit(void *pktid_map_handle) -{ - pktid_map_t *handle = (pktid_map_t *) pktid_map_handle; - uint32 ix; - - if (handle != NULL) { - void *osh = handle->osh; - for (ix = 0; ix < MAX_PKTID_ITEMS; ix++) - { - if (!bcm_mwbmap_isfree(handle->mwbmap_hdl, ix)) { - /* Mark the slot as free */ - bcm_mwbmap_free(handle->mwbmap_hdl, ix); - /* - Here we can do dma unmapping for 
32 bit also. - Since this in removal path, it will not affect performance - */ - DMA_UNMAP(osh, handle->pktid_list[ix+1].pa, - (uint) handle->pktid_list[ix+1].pa_len, - handle->pktid_list[ix+1].dma, 0, 0); - PKTFREE(osh, (unsigned long*)handle->pktid_list[ix+1].native, TRUE); - } - } - bcm_mwbmap_fini(osh, handle->mwbmap_hdl); - MFREE(osh, handle->pktid_list, sizeof(pktid_t) * (handle->count+1)); - MFREE(osh, handle, sizeof(pktid_map_t)); - } - return; -} - -uint32 BCMFASTPATH -pktid_map_unique(void *pktid_map_handle, void *pkt, dmaaddr_t physaddr, uint32 physlen, uint32 dma) -{ - uint32 id; - pktid_map_t *handle = (pktid_map_t *) pktid_map_handle; - - if (handle == NULL) { - printf("%s:%d: Error !!! pktid_map_unique called without initing pktid_map\n", - __FUNCTION__, __LINE__); - return 0; - } - id = bcm_mwbmap_alloc(handle->mwbmap_hdl); - if (id == BCM_MWBMAP_INVALID_IDX) { - printf("%s:%d: bcm_mwbmap_alloc failed. Free Count = %d\n", - __FUNCTION__, __LINE__, bcm_mwbmap_free_cnt(handle->mwbmap_hdl)); - return 0; - } - - /* id=0 is invalid as we use this for error checking in the dongle */ - id += 1; - handle->pktid_list[id].native = (ulong) pkt; - handle->pktid_list[id].pa = physaddr; - handle->pktid_list[id].pa_len = (uint32) physlen; - handle->pktid_list[id].dma = (uchar)dma; - - return id; -} - -void * BCMFASTPATH -pktid_get_packet(void *pktid_map_handle, uint32 id, dmaaddr_t *physaddr, uint32 *physlen) -{ - void *native = NULL; - pktid_map_t *handle = (pktid_map_t *) pktid_map_handle; - if (handle == NULL) { - printf("%s:%d: Error !!! pktid_get_packet called without initing pktid_map\n", - __FUNCTION__, __LINE__); - return NULL; - } - - /* Debug check */ - if (bcm_mwbmap_isfree(handle->mwbmap_hdl, (id-1))) { - printf("%s:%d: Error !!!. 
slot (%d/0x%04x) free but the app is using it.\n", - __FUNCTION__, __LINE__, (id-1), (id-1)); - return NULL; - } - - native = (void *) handle->pktid_list[id].native; - *physaddr = handle->pktid_list[id].pa; - *physlen = (uint32) handle->pktid_list[id].pa_len; - - /* Mark the slot as free */ - bcm_mwbmap_free(handle->mwbmap_hdl, (id-1)); - - return native; -} -static msgbuf_ring_t* -prot_ring_attach(dhd_prot_t * prot, char* name, uint16 max_item, uint16 len_item, uint16 ringid) -{ - uint alloced = 0; - msgbuf_ring_t *ring; - dmaaddr_t physaddr; - uint16 size; - - ASSERT(name); - BCM_REFERENCE(physaddr); - - /* allocate ring info */ - ring = MALLOC(prot->osh, sizeof(msgbuf_ring_t)); - if (ring == NULL) { - ASSERT(0); - return NULL; - } - bzero(ring, sizeof(*ring)); - - /* Init name */ - strncpy(ring->name, name, sizeof(ring->name) - 1); - - /* Ringid in the order given in bcmpcie.h */ - ring->idx = ringid; - - /* init ringmem */ - ring->ringmem = MALLOC(prot->osh, sizeof(ring_mem_t)); - if (ring->ringmem == NULL) - goto fail; - bzero(ring->ringmem, sizeof(*ring->ringmem)); - - ring->ringmem->max_item = max_item; - ring->ringmem->len_items = len_item; - size = max_item * len_item; - - /* Ring Memmory allocation */ -#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING) - if (RING_IS_FLOWRING(ring)) { - ring->ring_base.va = DMA_ALLOC_CONSISTENT_STATIC(prot->osh, - size, DMA_ALIGN_LEN, &alloced, &ring->ring_base.pa, - &ring->ring_base.dmah, ringid); - } else -#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */ - ring->ring_base.va = DMA_ALLOC_CONSISTENT(prot->osh, size, DMA_ALIGN_LEN, - &alloced, &ring->ring_base.pa, &ring->ring_base.dmah); - - if (ring->ring_base.va == NULL) - goto fail; - ring->ringmem->base_addr.high_addr = htol32(PHYSADDRHI(ring->ring_base.pa)); - ring->ringmem->base_addr.low_addr = htol32(PHYSADDRLO(ring->ring_base.pa)); - - ASSERT(MODX((unsigned long)ring->ring_base.va, DMA_ALIGN_LEN) == 0); - 
bzero(ring->ring_base.va, size); - - OSL_CACHE_FLUSH((void *) ring->ring_base.va, size); - - /* Ring state init */ - ring->ringstate = MALLOC(prot->osh, sizeof(ring_state_t)); - if (ring->ringstate == NULL) - goto fail; - bzero(ring->ringstate, sizeof(*ring->ringstate)); - -#ifdef BCM_SECURE_DMA - if (SECURE_DMA_ENAB(prot->osh)) { - ring->secdma = MALLOC(prot->osh, sizeof(sec_cma_info_t)); - bzero(ring->secdma, sizeof(sec_cma_info_t)); - if (ring->secdma == NULL) { - DHD_ERROR(("%s: MALLOC failure for secdma\n", __FUNCTION__)); - goto fail; - } - } -#endif - DHD_INFO(("%s: RING_ATTACH : %s Max item %d len item %d total size %d " - "ring start %p buf phys addr %x:%x \n", __FUNCTION__, - ring->name, ring->ringmem->max_item, ring->ringmem->len_items, - size, ring->ring_base.va, ring->ringmem->base_addr.high_addr, - ring->ringmem->base_addr.low_addr)); - return ring; -fail: - if (ring->ring_base.va && ring->ringmem) { - PHYSADDRHISET(physaddr, ring->ringmem->base_addr.high_addr); - PHYSADDRLOSET(physaddr, ring->ringmem->base_addr.low_addr); - size = ring->ringmem->max_item * ring->ringmem->len_items; - DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size, ring->ring_base.pa, NULL); - ring->ring_base.va = NULL; - } - if (ring->ringmem) - MFREE(prot->osh, ring->ringmem, sizeof(ring_mem_t)); - MFREE(prot->osh, ring, sizeof(msgbuf_ring_t)); - ASSERT(0); - return NULL; -} -static void -dhd_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring) -{ - /* update buffer address of ring */ - dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->base_addr, - sizeof(ring->ringmem->base_addr), RING_BUF_ADDR, ring->idx); - - /* Update max items possible in ring */ - dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->max_item, - sizeof(ring->ringmem->max_item), RING_MAX_ITEM, ring->idx); - - /* Update length of each item in the ring */ - dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->len_items, - sizeof(ring->ringmem->len_items), RING_LEN_ITEMS, ring->idx); - - /* ring inited */ - 
ring->inited = TRUE; -} -static void -dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t * ring) -{ - dmaaddr_t phyaddr; - uint16 size; - dhd_prot_t *prot = dhd->prot; - - BCM_REFERENCE(phyaddr); - - if (ring == NULL) - return; - - - if (ring->ringmem == NULL) { - DHD_ERROR(("%s: ring->ringmem is NULL\n", __FUNCTION__)); - return; - } - - ring->inited = FALSE; - - PHYSADDRHISET(phyaddr, ring->ringmem->base_addr.high_addr); - PHYSADDRLOSET(phyaddr, ring->ringmem->base_addr.low_addr); - size = ring->ringmem->max_item * ring->ringmem->len_items; - /* Free up ring */ - if (ring->ring_base.va) { -#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING) - if (RING_IS_FLOWRING(ring)) { - DMA_FREE_CONSISTENT_STATIC(prot->osh, ring->ring_base.va, size, - ring->ring_base.pa, ring->ring_base.dmah, ring->idx); - } else -#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */ - DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size, ring->ring_base.pa, - ring->ring_base.dmah); - ring->ring_base.va = NULL; - } - - /* Free up ring mem space */ - if (ring->ringmem) { - MFREE(prot->osh, ring->ringmem, sizeof(ring_mem_t)); - ring->ringmem = NULL; - } - - /* Free up ring state info */ - if (ring->ringstate) { - MFREE(prot->osh, ring->ringstate, sizeof(ring_state_t)); - ring->ringstate = NULL; - } -#ifdef BCM_SECURE_DMA - if (SECURE_DMA_ENAB(prot->osh)) { - DHD_ERROR(("%s:free secdma\n", __FUNCTION__)); - SECURE_DMA_UNMAP_ALL(prot->osh, ring->secdma); - MFREE(prot->osh, ring->secdma, sizeof(sec_cma_info_t)); - } -#endif - - /* free up ring info */ - MFREE(prot->osh, ring, sizeof(msgbuf_ring_t)); -} -/* Assumes only one index is updated ata time */ -static void *BCMFASTPATH -prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced) -{ - void *ret_ptr = NULL; - uint16 ring_avail_cnt; - - ASSERT(nitems <= RING_MAX_ITEM(ring)); - - ring_avail_cnt = CHECK_WRITE_SPACE(RING_READ_PTR(ring), RING_WRITE_PTR(ring), - RING_MAX_ITEM(ring)); - - if 
(ring_avail_cnt == 0) { - return NULL; - } - *alloced = MIN(nitems, ring_avail_cnt); - - /* Return next available space */ - ret_ptr = (char*)HOST_RING_BASE(ring) + (RING_WRITE_PTR(ring) * RING_LEN_ITEMS(ring)); - - /* Update write pointer */ - if ((RING_WRITE_PTR(ring) + *alloced) == RING_MAX_ITEM(ring)) - RING_WRITE_PTR(ring) = 0; - else if ((RING_WRITE_PTR(ring) + *alloced) < RING_MAX_ITEM(ring)) - RING_WRITE_PTR(ring) += *alloced; - else { - /* Should never hit this */ - ASSERT(0); - return NULL; - } - - return ret_ptr; -} - -static void BCMFASTPATH -prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, uint16 nitems) -{ - dhd_prot_t *prot = dhd->prot; - - /* cache flush */ - OSL_CACHE_FLUSH(p, RING_LEN_ITEMS(ring) * nitems); - - /* update write pointer */ - /* If dma'ing h2d indices are supported - * update the values in the host memory - * o/w update the values in TCM - */ - if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) - dhd_set_dmaed_index(dhd, H2D_DMA_WRITEINDX, - ring->idx, (uint16)RING_WRITE_PTR(ring)); - else - dhd_bus_cmn_writeshared(dhd->bus, &(RING_WRITE_PTR(ring)), - sizeof(uint16), RING_WRITE_PTR, ring->idx); - - /* raise h2d interrupt */ - prot->mb_ring_fn(dhd->bus, RING_WRITE_PTR(ring)); -} - -/* If dma'ing h2d indices are supported - * this function updates the indices in - * the host memory - */ -static void -dhd_set_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid, uint16 new_index) -{ - dhd_prot_t *prot = dhd->prot; - - uint32 *ptr = NULL; - uint16 offset = 0; - - switch (type) { - case H2D_DMA_WRITEINDX: - ptr = (uint32 *)(prot->h2d_dma_writeindx_buf.va); - - /* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS - * but in host memory their indices start - * after H2D Common Rings - */ - if (ringid >= BCMPCIE_COMMON_MSGRINGS) - offset = ringid - BCMPCIE_COMMON_MSGRINGS + - BCMPCIE_H2D_COMMON_MSGRINGS; - else - offset = ringid; - ptr += offset; - - *ptr = htol16(new_index); - - /* cache flush */ - OSL_CACHE_FLUSH((void 
*)prot->h2d_dma_writeindx_buf.va, - prot->h2d_dma_writeindx_buf_len); - - break; - - case D2H_DMA_READINDX: - ptr = (uint32 *)(prot->d2h_dma_readindx_buf.va); - - /* H2D Common Righs start at Id BCMPCIE_H2D_COMMON_MSGRINGS */ - offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS; - ptr += offset; - - *ptr = htol16(new_index); - /* cache flush */ - OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va, - prot->d2h_dma_readindx_buf_len); - - break; - - default: - DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", - __FUNCTION__)); - - break; - } - DHD_TRACE(("%s: Data 0x%p, ringId %d, new_index %d\n", - __FUNCTION__, ptr, ringid, new_index)); -} - - -static uint16 -dhd_get_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid) -{ - uint32 *ptr = NULL; - uint16 data = 0; - uint16 offset = 0; - - switch (type) { - case H2D_DMA_WRITEINDX: - OSL_CACHE_INV((void *)dhd->prot->h2d_dma_writeindx_buf.va, - dhd->prot->h2d_dma_writeindx_buf_len); - ptr = (uint32 *)(dhd->prot->h2d_dma_writeindx_buf.va); - - /* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS - * but in host memory their indices start - * after H2D Common Rings - */ - if (ringid >= BCMPCIE_COMMON_MSGRINGS) - offset = ringid - BCMPCIE_COMMON_MSGRINGS + - BCMPCIE_H2D_COMMON_MSGRINGS; - else - offset = ringid; - ptr += offset; - - data = LTOH16((uint16)*ptr); - break; - - case H2D_DMA_READINDX: - OSL_CACHE_INV((void *)dhd->prot->h2d_dma_readindx_buf.va, - dhd->prot->h2d_dma_readindx_buf_len); - ptr = (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va); - - /* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS - * but in host memory their indices start - * after H2D Common Rings - */ - if (ringid >= BCMPCIE_COMMON_MSGRINGS) - offset = ringid - BCMPCIE_COMMON_MSGRINGS + - BCMPCIE_H2D_COMMON_MSGRINGS; - else - offset = ringid; - ptr += offset; - - data = LTOH16((uint16)*ptr); - break; - - case D2H_DMA_WRITEINDX: - OSL_CACHE_INV((void *)dhd->prot->d2h_dma_writeindx_buf.va, - dhd->prot->d2h_dma_writeindx_buf_len); - ptr = 
(uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va); - - /* H2D Common Righs start at Id BCMPCIE_H2D_COMMON_MSGRINGS */ - offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS; - ptr += offset; - - data = LTOH16((uint16)*ptr); - break; - - case D2H_DMA_READINDX: - OSL_CACHE_INV((void *)dhd->prot->d2h_dma_readindx_buf.va, - dhd->prot->d2h_dma_readindx_buf_len); - ptr = (uint32 *)(dhd->prot->d2h_dma_readindx_buf.va); - - /* H2D Common Righs start at Id BCMPCIE_H2D_COMMON_MSGRINGS */ - offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS; - ptr += offset; - - data = LTOH16((uint16)*ptr); - break; - - default: - DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", - __FUNCTION__)); - - break; - } - DHD_TRACE(("%s: Data 0x%p, data %d\n", __FUNCTION__, ptr, data)); - return (data); -} - -/* D2H dircetion: get next space to read from */ -static uint8* -prot_get_src_addr(dhd_pub_t *dhd, msgbuf_ring_t * ring, uint16* available_len) -{ - uint16 w_ptr; - uint16 r_ptr; - uint16 depth; - void* ret_addr = NULL; - uint16 d2h_w_index = 0; - - DHD_TRACE(("%s: h2d_dma_readindx_buf %p, d2h_dma_writeindx_buf %p\n", - __FUNCTION__, (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va), - (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va))); - - /* update write pointer */ - if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { - /* DMAing write/read indices supported */ - d2h_w_index = dhd_get_dmaed_index(dhd, D2H_DMA_WRITEINDX, ring->idx); - ring->ringstate->w_offset = d2h_w_index; - } else - dhd_bus_cmn_readshared(dhd->bus, - &(RING_WRITE_PTR(ring)), RING_WRITE_PTR, ring->idx); - - w_ptr = ring->ringstate->w_offset; - r_ptr = ring->ringstate->r_offset; - depth = ring->ringmem->max_item; - - /* check for avail space */ - *available_len = READ_AVAIL_SPACE(w_ptr, r_ptr, depth); - if (*available_len == 0) - return NULL; - - if (*available_len > ring->ringmem->max_item) { - DHD_ERROR(("%s: *available_len %d, ring->ringmem->max_item %d\n", - __FUNCTION__, *available_len, ring->ringmem->max_item)); - return 
NULL; - } - - /* if space available, calculate address to be read */ - ret_addr = (char*)ring->ring_base.va + (r_ptr * ring->ringmem->len_items); - - /* update read pointer */ - if ((ring->ringstate->r_offset + *available_len) >= ring->ringmem->max_item) - ring->ringstate->r_offset = 0; - else - ring->ringstate->r_offset += *available_len; - - ASSERT(ring->ringstate->r_offset < ring->ringmem->max_item); - - /* convert index to bytes */ - *available_len = *available_len * ring->ringmem->len_items; - - /* Cache invalidate */ - OSL_CACHE_INV((void *) ret_addr, *available_len); - - /* return read address */ - return ret_addr; -} -static void -prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) -{ - /* update read index */ - /* If dma'ing h2d indices supported - * update the r -indices in the - * host memory o/w in TCM - */ - if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) - dhd_set_dmaed_index(dhd, D2H_DMA_READINDX, - ring->idx, (uint16)RING_READ_PTR(ring)); - else - dhd_bus_cmn_writeshared(dhd->bus, &(RING_READ_PTR(ring)), - sizeof(uint16), RING_READ_PTR, ring->idx); -} - -static void -prot_store_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) -{ - dhd_prot_t *prot; - - if (!dhd || !dhd->prot) - return; - - prot = dhd->prot; - prot->rx_cpln_early_upd_idx = RING_READ_PTR(ring); -} - -static void -prot_early_upd_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) -{ - dhd_prot_t *prot; - - if (!dhd || !dhd->prot) - return; - - prot = dhd->prot; - - if (prot->rx_cpln_early_upd_idx == RING_READ_PTR(ring)) - return; - - if (++prot->rx_cpln_early_upd_idx >= RING_MAX_ITEM(ring)) - prot->rx_cpln_early_upd_idx = 0; - - if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) - dhd_set_dmaed_index(dhd, D2H_DMA_READINDX, - ring->idx, (uint16)prot->rx_cpln_early_upd_idx); - else - dhd_bus_cmn_writeshared(dhd->bus, &(prot->rx_cpln_early_upd_idx), - sizeof(uint16), RING_READ_PTR, ring->idx); -} - -int -dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t 
*flow_ring_node) -{ - tx_flowring_create_request_t *flow_create_rqst; - msgbuf_ring_t *msgbuf_flow_info; - dhd_prot_t *prot = dhd->prot; - uint16 hdrlen = sizeof(tx_flowring_create_request_t); - uint16 msglen = hdrlen; - unsigned long flags; - char eabuf[ETHER_ADDR_STR_LEN]; - uint16 alloced = 0; - - if (!(msgbuf_flow_info = prot_ring_attach(prot, "h2dflr", - H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE, - BCMPCIE_H2D_TXFLOWRINGID + - (flow_ring_node->flowid - BCMPCIE_H2D_COMMON_MSGRINGS)))) { - DHD_ERROR(("%s: kmalloc for H2D TX Flow ring failed\n", __FUNCTION__)); - return BCME_NOMEM; - } - /* Clear write pointer of the ring */ - flow_ring_node->prot_info = (void *)msgbuf_flow_info; - - /* align it to 4 bytes, so that all start addr form cbuf is 4 byte aligned */ - msglen = align(msglen, DMA_ALIGN_LEN); - - - DHD_GENERAL_LOCK(dhd, flags); - /* Request for ring buffer space */ - flow_create_rqst = (tx_flowring_create_request_t *)dhd_alloc_ring_space(dhd, - prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced); - - if (flow_create_rqst == NULL) { - DHD_ERROR(("%s: No space in control ring for Flow create req\n", __FUNCTION__)); - DHD_GENERAL_UNLOCK(dhd, flags); - return BCME_NOMEM; - } - msgbuf_flow_info->inited = TRUE; - - /* Common msg buf hdr */ - flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE; - flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; - flow_create_rqst->msg.request_id = htol16(0); /* TBD */ - - /* Update flow create message */ - flow_create_rqst->tid = flow_ring_node->flow_info.tid; - flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); - memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa)); - memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da)); - flow_create_rqst->flow_ring_ptr.low_addr = msgbuf_flow_info->ringmem->base_addr.low_addr; - flow_create_rqst->flow_ring_ptr.high_addr = 
msgbuf_flow_info->ringmem->base_addr.high_addr; - flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM); - flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE); - bcm_ether_ntoa((struct ether_addr *)flow_ring_node->flow_info.da, eabuf); - DHD_ERROR(("%s Send Flow create Req msglen flow ID %d for peer %s prio %d ifindex %d\n", - __FUNCTION__, flow_ring_node->flowid, eabuf, flow_ring_node->flow_info.tid, - flow_ring_node->flow_info.ifindex)); - - /* upd wrt ptr and raise interrupt */ - prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_create_rqst, - DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); - - /* If dma'ing indices supported - * update the w-index in host memory o/w in TCM - */ - if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) - dhd_set_dmaed_index(dhd, H2D_DMA_WRITEINDX, - msgbuf_flow_info->idx, (uint16)RING_WRITE_PTR(msgbuf_flow_info)); - else - dhd_bus_cmn_writeshared(dhd->bus, &(RING_WRITE_PTR(msgbuf_flow_info)), - sizeof(uint16), RING_WRITE_PTR, msgbuf_flow_info->idx); - DHD_GENERAL_UNLOCK(dhd, flags); - - return BCME_OK; -} - -static void -dhd_prot_process_flow_ring_create_response(dhd_pub_t *dhd, void* buf, uint16 msglen) -{ - tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)buf; - - DHD_ERROR(("%s Flow create Response status = %d Flow %d\n", __FUNCTION__, - flow_create_resp->cmplt.status, flow_create_resp->cmplt.flow_ring_id)); - - dhd_bus_flow_ring_create_response(dhd->bus, flow_create_resp->cmplt.flow_ring_id, - flow_create_resp->cmplt.status); -} - -void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info) -{ - msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; - dhd_prot_ring_detach(dhd, flow_ring); - DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__)); -} - -void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, - struct bcmstrbuf *strbuf) -{ - msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; - uint16 rd, wrt; - dhd_bus_cmn_readshared(dhd->bus, 
&rd, RING_READ_PTR, flow_ring->idx); - dhd_bus_cmn_readshared(dhd->bus, &wrt, RING_WRITE_PTR, flow_ring->idx); - bcm_bprintf(strbuf, "RD %d WR %d\n", rd, wrt); -} - -void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) -{ - bcm_bprintf(strbuf, "CtrlPost: "); - dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_ctrl_subn, strbuf); - bcm_bprintf(strbuf, "CtrlCpl: "); - dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_ctrl_cpln, strbuf); - bcm_bprintf(strbuf, "RxPost: "); - bcm_bprintf(strbuf, "RBP %d ", dhd->prot->rxbufpost); - dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_rxp_subn, strbuf); - bcm_bprintf(strbuf, "RxCpl: "); - dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_rx_cpln, strbuf); - if (dhd_bus_is_txmode_push(dhd->bus)) { - bcm_bprintf(strbuf, "TxPost: "); - dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_txp_subn, strbuf); - } - bcm_bprintf(strbuf, "TxCpl: "); - dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_tx_cpln, strbuf); - bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail %d\n", - dhd->prot->active_tx_count, - dhd_pktid_map_avail_cnt(dhd->prot->pktid_map_handle)); -} - -int -dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) -{ - tx_flowring_delete_request_t *flow_delete_rqst; - dhd_prot_t *prot = dhd->prot; - uint16 msglen = sizeof(tx_flowring_delete_request_t); - unsigned long flags; - char eabuf[ETHER_ADDR_STR_LEN]; - uint16 alloced = 0; - - /* align it to 4 bytes, so that all start addr form cbuf is 4 byte aligned */ - msglen = align(msglen, DMA_ALIGN_LEN); - - /* Request for ring buffer space */ - DHD_GENERAL_LOCK(dhd, flags); - flow_delete_rqst = (tx_flowring_delete_request_t *)dhd_alloc_ring_space(dhd, - prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced); - - if (flow_delete_rqst == NULL) { - DHD_GENERAL_UNLOCK(dhd, flags); - DHD_ERROR(("%s Flow Delete req failure no ring mem %d \n", __FUNCTION__, msglen)); - return BCME_NOMEM; - } - - /* Common msg buf hdr */ - 
flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE; - flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; - flow_delete_rqst->msg.request_id = htol16(0); /* TBD */ - - /* Update Delete info */ - flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); - flow_delete_rqst->reason = htol16(BCME_OK); - - bcm_ether_ntoa((struct ether_addr *)flow_ring_node->flow_info.da, eabuf); - DHD_ERROR(("%s sending FLOW RING ID %d for peer %s prio %d ifindex %d" - " Delete req msglen %d\n", __FUNCTION__, - flow_ring_node->flowid, eabuf, flow_ring_node->flow_info.tid, - flow_ring_node->flow_info.ifindex, msglen)); - - /* upd wrt ptr and raise interrupt */ - prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_delete_rqst, - DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); - DHD_GENERAL_UNLOCK(dhd, flags); - - return BCME_OK; -} - -static void -dhd_prot_process_flow_ring_delete_response(dhd_pub_t *dhd, void* buf, uint16 msglen) -{ - tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)buf; - - DHD_INFO(("%s Flow Delete Response status = %d \n", __FUNCTION__, - flow_delete_resp->cmplt.status)); - -#ifdef PCIE_TX_DEFERRAL - if (flow_delete_resp->cmplt.status != BCME_OK) { - DHD_ERROR(("%s Flow Delete Response failure error status = %d \n", - __FUNCTION__, flow_delete_resp->cmplt.status)); - return; - } - set_bit(flow_delete_resp->cmplt.flow_ring_id, dhd->bus->delete_flow_map); - queue_work(dhd->bus->tx_wq, &dhd->bus->delete_flow_work); -#else - dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id, - flow_delete_resp->cmplt.status); -#endif /* PCIE_TX_DEFERRAL */ -} - -int -dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) -{ - tx_flowring_flush_request_t *flow_flush_rqst; - dhd_prot_t *prot = dhd->prot; - uint16 msglen = sizeof(tx_flowring_flush_request_t); - unsigned long flags; - uint16 alloced = 0; - - /* align it to 4 bytes, so that all start addr form cbuf 
is 4 byte aligned */ - msglen = align(msglen, DMA_ALIGN_LEN); - - /* Request for ring buffer space */ - DHD_GENERAL_LOCK(dhd, flags); - flow_flush_rqst = (tx_flowring_flush_request_t *)dhd_alloc_ring_space(dhd, - prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced); - if (flow_flush_rqst == NULL) { - DHD_GENERAL_UNLOCK(dhd, flags); - DHD_ERROR(("%s Flow Flush req failure no ring mem %d \n", __FUNCTION__, msglen)); - return BCME_NOMEM; - } - - /* Common msg buf hdr */ - flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH; - flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; - flow_flush_rqst->msg.request_id = htol16(0); /* TBD */ - - flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); - flow_flush_rqst->reason = htol16(BCME_OK); - - DHD_INFO(("%s sending FLOW RING Flush req msglen %d \n", __FUNCTION__, msglen)); - - /* upd wrt ptr and raise interrupt */ - prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_flush_rqst, - DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); - DHD_GENERAL_UNLOCK(dhd, flags); - - return BCME_OK; -} - -static void -dhd_prot_process_flow_ring_flush_response(dhd_pub_t *dhd, void* buf, uint16 msglen) -{ - tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)buf; - - DHD_INFO(("%s Flow Flush Response status = %d \n", __FUNCTION__, - flow_flush_resp->cmplt.status)); - - dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id, - flow_flush_resp->cmplt.status); -} - -int -dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) -{ - uint32 *ptr; - uint32 value; - uint32 i; - uint8 txpush = 0; - uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus, &txpush); - - OSL_CACHE_INV((void *)dhd->prot->d2h_dma_writeindx_buf.va, - dhd->prot->d2h_dma_writeindx_buf_len); - - ptr = (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va); - - bcm_bprintf(b, "\n max_tx_queues %d, txpush mode %d\n", max_h2d_queues, txpush); - - bcm_bprintf(b, "\nRPTR 
block H2D common rings, 0x%04x\n", ptr); - value = ltoh32(*ptr); - bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value); - ptr++; - value = ltoh32(*ptr); - bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value); - - if (txpush) { - ptr++; - value = ltoh32(*ptr); - bcm_bprintf(b, "\tH2D TXPOST value 0x%04x\n", value); - } - else { - ptr++; - bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr); - for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) { - value = ltoh32(*ptr); - bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value); - ptr++; - } - } - - OSL_CACHE_INV((void *)dhd->prot->h2d_dma_readindx_buf.va, - dhd->prot->h2d_dma_readindx_buf_len); - - ptr = (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va); - - bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr); - value = ltoh32(*ptr); - bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value); - ptr++; - value = ltoh32(*ptr); - bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value); - ptr++; - value = ltoh32(*ptr); - bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value); - - return 0; -} - -uint32 -dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx) -{ - dhd_prot_t *prot = dhd->prot; - if (rx) - prot->rx_metadata_offset = (uint16)val; - else - prot->tx_metadata_offset = (uint16)val; - return dhd_prot_metadatalen_get(dhd, rx); -} - -uint32 -dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx) -{ - dhd_prot_t *prot = dhd->prot; - if (rx) - return prot->rx_metadata_offset; - else - return prot->tx_metadata_offset; -} - -uint32 -dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val) -{ - dhd_prot_t *prot = dhd->prot; - if (set) - prot->txp_threshold = (uint16)val; - val = prot->txp_threshold; - return val; -} - -#ifdef DHD_RX_CHAINING -static INLINE void BCMFASTPATH -dhd_rxchain_reset(rxchain_info_t *rxchain) -{ - rxchain->pkt_count = 0; -} - -static void BCMFASTPATH -dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx) -{ - uint8 *eh; - uint8 prio; - dhd_prot_t *prot = 
dhd->prot; - rxchain_info_t *rxchain = &prot->rxchain; - - eh = PKTDATA(dhd->osh, pkt); - prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT; - - /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */ - /* so that the chain can be handed off to CTF bridge as is. */ - if (rxchain->pkt_count == 0) { - /* First packet in chain */ - rxchain->pkthead = rxchain->pkttail = pkt; - - /* Keep a copy of ptr to ether_da, ether_sa and prio */ - rxchain->h_da = ((struct ether_header *)eh)->ether_dhost; - rxchain->h_sa = ((struct ether_header *)eh)->ether_shost; - rxchain->h_prio = prio; - rxchain->ifidx = ifidx; - rxchain->pkt_count++; - } else { - if (PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa, - rxchain->h_da, rxchain->h_prio)) { - /* Same flow - keep chaining */ - PKTSETCLINK(rxchain->pkttail, pkt); - rxchain->pkttail = pkt; - rxchain->pkt_count++; - } else { - /* Different flow - First release the existing chain */ - dhd_rxchain_commit(dhd); - - /* Create a new chain */ - rxchain->pkthead = rxchain->pkttail = pkt; - - /* Keep a copy of ptr to ether_da, ether_sa and prio */ - rxchain->h_da = ((struct ether_header *)eh)->ether_dhost; - rxchain->h_sa = ((struct ether_header *)eh)->ether_shost; - rxchain->h_prio = prio; - rxchain->ifidx = ifidx; - rxchain->pkt_count++; - } - } - - if ((!ETHER_ISMULTI(rxchain->h_da)) && - ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) || - (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) { - PKTSETCHAINED(dhd->osh, pkt); - PKTCINCRCNT(rxchain->pkthead); - PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt)); - } else { - dhd_rxchain_commit(dhd); - return; - } - - /* If we have hit the max chain length, dispatch the chain and reset */ - if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) { - dhd_rxchain_commit(dhd); - } -} - -static void BCMFASTPATH -dhd_rxchain_commit(dhd_pub_t *dhd) -{ - dhd_prot_t *prot = dhd->prot; - rxchain_info_t *rxchain = &prot->rxchain; - - if 
(rxchain->pkt_count == 0) - return; - - /* Release the packets to dhd_linux */ - dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count); - - /* Reset the chain */ - dhd_rxchain_reset(rxchain); -} -#endif /* DHD_RX_CHAINING */ - -static void -dhd_prot_ring_clear(msgbuf_ring_t* ring) -{ - uint16 size; - - DHD_TRACE(("%s\n", __FUNCTION__)); - - size = ring->ringmem->max_item * ring->ringmem->len_items; - ASSERT(MODX((unsigned long)ring->ring_base.va, DMA_ALIGN_LEN) == 0); - OSL_CACHE_INV((void *) ring->ring_base.va, size); - bzero(ring->ring_base.va, size); - - OSL_CACHE_FLUSH((void *) ring->ring_base.va, size); - - bzero(ring->ringstate, sizeof(*ring->ringstate)); -} - -void -dhd_prot_clear(dhd_pub_t *dhd) -{ - struct dhd_prot *prot = dhd->prot; - - DHD_TRACE(("%s\n", __FUNCTION__)); - - if (prot == NULL) - return; - - if (prot->h2dring_txp_subn) - dhd_prot_ring_clear(prot->h2dring_txp_subn); - if (prot->h2dring_rxp_subn) - dhd_prot_ring_clear(prot->h2dring_rxp_subn); - if (prot->h2dring_ctrl_subn) - dhd_prot_ring_clear(prot->h2dring_ctrl_subn); - if (prot->d2hring_tx_cpln) - dhd_prot_ring_clear(prot->d2hring_tx_cpln); - if (prot->d2hring_rx_cpln) - dhd_prot_ring_clear(prot->d2hring_rx_cpln); - if (prot->d2hring_ctrl_cpln) - dhd_prot_ring_clear(prot->d2hring_ctrl_cpln); - - if (prot->retbuf.va) { - OSL_CACHE_INV((void *) prot->retbuf.va, IOCT_RETBUF_SIZE); - bzero(prot->retbuf.va, IOCT_RETBUF_SIZE); - OSL_CACHE_FLUSH((void *) prot->retbuf.va, IOCT_RETBUF_SIZE); - } - - if (prot->ioctbuf.va) { - OSL_CACHE_INV((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE); - bzero(prot->ioctbuf.va, IOCT_RETBUF_SIZE); - OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE); - } - - if (prot->d2h_dma_scratch_buf.va) { - OSL_CACHE_INV((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN); - bzero(prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN); - OSL_CACHE_FLUSH((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN); - } - - if 
(prot->h2d_dma_readindx_buf.va) { - OSL_CACHE_INV((void *)prot->h2d_dma_readindx_buf.va, - prot->h2d_dma_readindx_buf_len); - bzero(prot->h2d_dma_readindx_buf.va, - prot->h2d_dma_readindx_buf_len); - OSL_CACHE_FLUSH((void *)prot->h2d_dma_readindx_buf.va, - prot->h2d_dma_readindx_buf_len); - } - - if (prot->h2d_dma_writeindx_buf.va) { - OSL_CACHE_INV((void *)prot->h2d_dma_writeindx_buf.va, - prot->h2d_dma_writeindx_buf_len); - bzero(prot->h2d_dma_writeindx_buf.va, prot->h2d_dma_writeindx_buf_len); - OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va, - prot->h2d_dma_writeindx_buf_len); - } - - if (prot->d2h_dma_readindx_buf.va) { - OSL_CACHE_INV((void *)prot->d2h_dma_readindx_buf.va, - prot->d2h_dma_readindx_buf_len); - bzero(prot->d2h_dma_readindx_buf.va, prot->d2h_dma_readindx_buf_len); - OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va, - prot->d2h_dma_readindx_buf_len); - } - - if (prot->d2h_dma_writeindx_buf.va) { - OSL_CACHE_INV((void *)prot->d2h_dma_writeindx_buf.va, - prot->d2h_dma_writeindx_buf_len); - bzero(prot->d2h_dma_writeindx_buf.va, prot->d2h_dma_writeindx_buf_len); - OSL_CACHE_FLUSH((void *)prot->d2h_dma_writeindx_buf.va, - prot->d2h_dma_writeindx_buf_len); - } - - prot->rx_metadata_offset = 0; - prot->tx_metadata_offset = 0; - - prot->rxbufpost = 0; - prot->cur_event_bufs_posted = 0; - prot->cur_ioctlresp_bufs_posted = 0; - - prot->active_tx_count = 0; - prot->data_seq_no = 0; - prot->ioctl_seq_no = 0; - prot->pending = 0; - prot->lastcmd = 0; - - prot->ioctl_trans_id = 1; - - /* dhd_flow_rings_init is located at dhd_bus_start, - * so when stopping bus, flowrings shall be deleted - */ - dhd_flow_rings_deinit(dhd); - NATIVE_TO_PKTID_CLEAR(prot->pktid_map_handle); -} diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie.h b/drivers/net/wireless/bcmdhd/dhd_pcie.h deleted file mode 100644 index e55365518bc74..0000000000000 --- a/drivers/net/wireless/bcmdhd/dhd_pcie.h +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Linux DHD Bus Module for PCIE - * - * 
$Copyright Open Broadcom Corporation$ - * - * $Id: dhd_pcie.h 506084 2014-10-02 15:34:59Z $ - */ - - -#ifndef dhd_pcie_h -#define dhd_pcie_h - -#include -#include -#ifdef SUPPORT_LINKDOWN_RECOVERY -#ifdef CONFIG_ARCH_MSM -#ifdef CONFIG_ARCH_MSM8994 -#include -#else -#include -#endif -#endif /* CONFIG_ARCH_MSM */ -#endif /* SUPPORT_LINKDOWN_RECOVERY */ - -/* defines */ - -#define PCMSGBUF_HDRLEN 0 -#define DONGLE_REG_MAP_SIZE (32 * 1024) -#define DONGLE_TCM_MAP_SIZE (4096 * 1024) -#define DONGLE_MIN_MEMSIZE (128 *1024) -#ifdef DHD_DEBUG -#define DHD_PCIE_SUCCESS 0 -#define DHD_PCIE_FAILURE 1 -#endif /* DHD_DEBUG */ -#define REMAP_ENAB(bus) ((bus)->remap) -#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize))) - -#define MAX_DHD_TX_FLOWS 256 - -/* user defined data structures */ -#ifdef DHD_DEBUG -/* Device console log buffer state */ -#define CONSOLE_LINE_MAX 192 -#define CONSOLE_BUFFER_MAX 2024 - - -typedef struct dhd_console { - uint count; /* Poll interval msec counter */ - uint log_addr; /* Log struct address (fixed) */ - hnd_log_t log; /* Log struct (host copy) */ - uint bufsize; /* Size of log buffer */ - uint8 *buf; /* Log buffer (host copy) */ - uint last; /* Last buffer read index */ -} dhd_console_t; -#endif /* DHD_DEBUG */ -typedef struct ring_sh_info { - uint32 ring_mem_addr; - uint32 ring_state_w; - uint32 ring_state_r; -} ring_sh_info_t; - -typedef struct dhd_bus { - dhd_pub_t *dhd; - struct pci_dev *dev; /* pci device handle */ - dll_t const_flowring; /* constructed list of tx flowring queues */ - - si_t *sih; /* Handle for SI calls */ - char *vars; /* Variables (from CIS and/or other) */ - uint varsz; /* Size of variables buffer */ - uint32 sbaddr; /* Current SB window pointer (-1, invalid) */ - sbpcieregs_t *reg; /* Registers for PCIE core */ - - uint armrev; /* CPU core revision */ - uint ramrev; /* SOCRAM core revision */ - uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ - uint32 orig_ramsize; /* Size of RAM 
in SOCRAM (bytes) */ - uint32 srmemsize; /* Size of SRMEM */ - - uint32 bus; /* gSPI or SDIO bus */ - uint32 intstatus; /* Intstatus bits (events) pending */ - bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ - bool fcstate; /* State of dongle flow-control */ - - uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */ - char *fw_path; /* module_param: path to firmware image */ - char *nv_path; /* module_param: path to nvram vars file */ - char *nvram_params; /* user specified nvram params. */ - int nvram_params_len; - - struct pktq txq; /* Queue length used for flow-control */ - - uint rxlen; /* Length of valid data in buffer */ - - - bool intr; /* Use interrupts */ - bool ipend; /* Device interrupt is pending */ - bool intdis; /* Interrupts disabled by isr */ - uint intrcount; /* Count of device interrupt callbacks */ - uint lastintrs; /* Count as of last watchdog timer */ - -#ifdef DHD_DEBUG - dhd_console_t console; /* Console output polling support */ - uint console_addr; /* Console address from shared struct */ -#endif /* DHD_DEBUG */ - - bool alp_only; /* Don't use HT clock (ALP only) */ - - bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram - * Available with socram rev 16 - * Remap region not DMA-able - */ - uint32 resetinstr; - uint32 dongle_ram_base; - - ulong shared_addr; - pciedev_shared_t *pcie_sh; - bool bus_flowctrl; - ioctl_comp_resp_msg_t ioct_resp; - uint32 dma_rxoffset; - volatile char *regs; /* pci device memory va */ - volatile char *tcm; /* pci device memory va */ - uint32 tcm_size; -#ifdef CONFIG_ARCH_MSM8994 - uint32 bar1_win_base; - uint32 bar1_win_mask; -#endif - osl_t *osh; - uint32 nvram_csm; /* Nvram checksum */ - uint16 pollrate; - uint16 polltick; - - uint32 *pcie_mb_intr_addr; - void *pcie_mb_intr_osh; - bool sleep_allowed; - - /* version 3 shared struct related info start */ - ring_sh_info_t ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS]; - uint8 h2d_ring_count; - uint8 d2h_ring_count; - uint32 
ringmem_ptr; - uint32 ring_state_ptr; - - uint32 d2h_dma_scratch_buffer_mem_addr; - - uint32 h2d_mb_data_ptr_addr; - uint32 d2h_mb_data_ptr_addr; - /* version 3 shared struct related info end */ - - uint32 def_intmask; - bool ltrsleep_on_unload; - uint wait_for_d3_ack; - uint8 txmode_push; - uint32 max_sub_queues; - bool db1_for_mb; - bool suspended; -#ifdef SUPPORT_LINKDOWN_RECOVERY -#ifdef CONFIG_ARCH_MSM - struct msm_pcie_register_event pcie_event; - bool islinkdown; -#endif /* CONFIG_ARCH_MSM */ -#endif /* SUPPORT_LINKDOWN_RECOVERY */ -#ifdef PCIE_TX_DEFERRAL - struct workqueue_struct *tx_wq; - struct work_struct create_flow_work; - struct work_struct delete_flow_work; - unsigned long *delete_flow_map; - struct sk_buff_head orphan_list; -#endif /* PCIE_TX_DEFERRAL */ - bool irq_registered; -} dhd_bus_t; - -/* function declarations */ - -extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size); -extern int dhdpcie_bus_register(void); -extern void dhdpcie_bus_unregister(void); -extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device); - -extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, - volatile char* tcm, uint32 tcm_size); -extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size); -extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data); -extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus); -extern void dhdpcie_bus_remove_prep(struct dhd_bus *bus); -extern void dhdpcie_bus_release(struct dhd_bus *bus); -extern int32 dhdpcie_bus_isr(struct dhd_bus *bus); -extern void dhdpcie_free_irq(dhd_bus_t *bus); -extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state); -extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state); -#ifndef BCMPCIE_OOB_HOST_WAKE -extern void dhdpcie_pme_active(osl_t *osh, bool enable); -#endif /* !BCMPCIE_OOB_HOST_WAKE */ -extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus); -extern int 
dhdpcie_stop_host_pcieclock(dhd_bus_t *bus); -extern int dhdpcie_disable_device(dhd_bus_t *bus); -extern int dhdpcie_enable_device(dhd_bus_t *bus); -extern int dhdpcie_alloc_resource(dhd_bus_t *bus); -extern void dhdpcie_free_resource(dhd_bus_t *bus); -extern int dhdpcie_bus_request_irq(struct dhd_bus *bus); -#ifdef BCMPCIE_OOB_HOST_WAKE -extern int dhdpcie_oob_intr_register(dhd_bus_t *bus); -extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus); -extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable); -#endif /* BCMPCIE_OOB_HOST_WAKE */ - -extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus); -#endif /* dhd_pcie_h */ diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.c b/drivers/net/wireless/bcmdhd/dhd_pno.c deleted file mode 100644 index d3f9ad7782ddf..0000000000000 --- a/drivers/net/wireless/bcmdhd/dhd_pno.c +++ /dev/null @@ -1,1874 +0,0 @@ -/* - * Broadcom Dongle Host Driver (DHD) - * Prefered Network Offload and Wi-Fi Location Service(WLS) code. - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: dhd_pno.c 423669 2013-09-18 13:01:55Z yangj$ - */ -#ifdef PNO_SUPPORT -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#ifdef __BIG_ENDIAN -#include -#define htod32(i) (bcmswap32(i)) -#define htod16(i) (bcmswap16(i)) -#define dtoh32(i) (bcmswap32(i)) -#define dtoh16(i) (bcmswap16(i)) -#define htodchanspec(i) htod16(i) -#define dtohchanspec(i) dtoh16(i) -#else -#define htod32(i) (i) -#define htod16(i) (i) -#define dtoh32(i) (i) -#define dtoh16(i) (i) -#define htodchanspec(i) (i) -#define dtohchanspec(i) (i) -#endif /* IL_BIGENDINA */ - -#define NULL_CHECK(p, s, err) \ - do { \ - if (!(p)) { \ - printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ - err = BCME_ERROR; \ - return err; \ - } \ - } while (0) -#define PNO_GET_PNOSTATE(dhd) ((dhd_pno_status_info_t *)dhd->pno_state) -#define PNO_BESTNET_LEN 1024 -#define PNO_ON 1 -#define PNO_OFF 
0 -#define CHANNEL_2G_MAX 14 -#define MAX_NODE_CNT 5 -#define WLS_SUPPORTED(pno_state) (pno_state->wls_supported == TRUE) -#define TIME_DIFF(timestamp1, timestamp2) (abs((uint32)(timestamp1/1000) \ - - (uint32)(timestamp2/1000))) - -#define ENTRY_OVERHEAD strlen("bssid=\nssid=\nfreq=\nlevel=\nage=\ndist=\ndistSd=\n====") -#define TIME_MIN_DIFF 5 -static inline bool -is_dfs(uint16 channel) -{ - if (channel >= 52 && channel <= 64) /* class 2 */ - return TRUE; - else if (channel >= 100 && channel <= 140) /* class 4 */ - return TRUE; - else - return FALSE; -} -int -dhd_pno_clean(dhd_pub_t *dhd) -{ - int pfn = 0; - int err; - dhd_pno_status_info_t *_pno_state; - NULL_CHECK(dhd, "dhd is NULL", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); - _pno_state = PNO_GET_PNOSTATE(dhd); - DHD_PNO(("%s enter\n", __FUNCTION__)); - /* Disable PNO */ - err = dhd_iovar(dhd, 0, "pfn", (char *)&pfn, sizeof(pfn), 1); - if (err < 0) { - DHD_ERROR(("%s : failed to execute pfn(error : %d)\n", - __FUNCTION__, err)); - goto exit; - } - _pno_state->pno_status = DHD_PNO_DISABLED; - err = dhd_iovar(dhd, 0, "pfnclear", NULL, 0, 1); - if (err < 0) { - DHD_ERROR(("%s : failed to execute pfnclear(error : %d)\n", - __FUNCTION__, err)); - } -exit: - return err; -} - -static int -_dhd_pno_suspend(dhd_pub_t *dhd) -{ - int err; - int suspend = 1; - dhd_pno_status_info_t *_pno_state; - NULL_CHECK(dhd, "dhd is NULL", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); - - DHD_PNO(("%s enter\n", __FUNCTION__)); - _pno_state = PNO_GET_PNOSTATE(dhd); - err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), 1); - if (err < 0) { - DHD_ERROR(("%s : failed to suspend pfn(error :%d)\n", __FUNCTION__, err)); - goto exit; - - } - _pno_state->pno_status = DHD_PNO_SUSPEND; -exit: - return err; -} -static int -_dhd_pno_enable(dhd_pub_t *dhd, int enable) -{ - int err = BCME_OK; - dhd_pno_status_info_t *_pno_state; - NULL_CHECK(dhd, "dhd is NULL", err); - 
NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); - _pno_state = PNO_GET_PNOSTATE(dhd); - DHD_PNO(("%s enter\n", __FUNCTION__)); - - if (enable & 0xfffe) { - DHD_ERROR(("%s invalid value\n", __FUNCTION__)); - err = BCME_BADARG; - goto exit; - } - if (!dhd_support_sta_mode(dhd)) { - DHD_ERROR(("PNO is not allowed for non-STA mode")); - err = BCME_BADOPTION; - goto exit; - } - if (enable) { - if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) && - dhd_is_associated(dhd, NULL, NULL)) { - DHD_ERROR(("%s Legacy PNO mode cannot be enabled " - "in assoc mode , ignore it\n", __FUNCTION__)); - err = BCME_BADOPTION; - goto exit; - } - } - /* Enable/Disable PNO */ - err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), 1); - if (err < 0) { - DHD_ERROR(("%s : failed to execute pfn_set\n", __FUNCTION__)); - goto exit; - } - _pno_state->pno_status = (enable)? - DHD_PNO_ENABLED : DHD_PNO_DISABLED; - if (!enable) - _pno_state->pno_mode = DHD_PNO_NONE_MODE; - - DHD_PNO(("%s set pno as %s\n", - __FUNCTION__, enable ? 
"Enable" : "Disable")); -exit: - return err; -} - -static int -_dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t mode) -{ - int err = BCME_OK; - wl_pfn_param_t pfn_param; - dhd_pno_params_t *_params; - dhd_pno_status_info_t *_pno_state; - bool combined_scan = FALSE; - DHD_PNO(("%s enter\n", __FUNCTION__)); - - NULL_CHECK(dhd, "dhd is NULL", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); - _pno_state = PNO_GET_PNOSTATE(dhd); - - memset(&pfn_param, 0, sizeof(pfn_param)); - - /* set pfn parameters */ - pfn_param.version = htod32(PFN_VERSION); - pfn_param.flags = ((PFN_LIST_ORDER << SORT_CRITERIA_BIT) | - (ENABLE << IMMEDIATE_SCAN_BIT) | (ENABLE << REPORT_SEPERATELY_BIT)); - if (mode == DHD_PNO_LEGACY_MODE) { - /* check and set extra pno params */ - if ((pno_params->params_legacy.pno_repeat != 0) || - (pno_params->params_legacy.pno_freq_expo_max != 0)) { - pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT); - pfn_param.repeat = (uchar) (pno_params->params_legacy.pno_repeat); - pfn_param.exp = (uchar) (pno_params->params_legacy.pno_freq_expo_max); - } - /* set up pno scan fr */ - if (pno_params->params_legacy.scan_fr != 0) - pfn_param.scan_freq = htod32(pno_params->params_legacy.scan_fr); - if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { - DHD_PNO(("will enable combined scan with BATCHIG SCAN MODE\n")); - mode |= DHD_PNO_BATCH_MODE; - combined_scan = TRUE; - } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { - DHD_PNO(("will enable combined scan with HOTLIST SCAN MODE\n")); - mode |= DHD_PNO_HOTLIST_MODE; - combined_scan = TRUE; - } - } - if (mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { - /* Scan frequency of 30 sec */ - pfn_param.scan_freq = htod32(30); - /* slow adapt scan is off by default */ - pfn_param.slow_freq = htod32(0); - /* RSSI margin of 30 dBm */ - pfn_param.rssi_margin = htod16(30); - /* Network timeout 60 sec */ - pfn_param.lost_network_timeout = htod32(60); - /* best n = 2 by default */ 
- pfn_param.bestn = DEFAULT_BESTN; - /* mscan m=0 by default, so not record best networks by default */ - pfn_param.mscan = DEFAULT_MSCAN; - /* default repeat = 10 */ - pfn_param.repeat = DEFAULT_REPEAT; - /* by default, maximum scan interval = 2^2 - * scan_freq when adaptive scan is turned on - */ - pfn_param.exp = DEFAULT_EXP; - if (mode == DHD_PNO_BATCH_MODE) { - /* In case of BATCH SCAN */ - if (pno_params->params_batch.bestn) - pfn_param.bestn = pno_params->params_batch.bestn; - if (pno_params->params_batch.scan_fr) - pfn_param.scan_freq = htod32(pno_params->params_batch.scan_fr); - if (pno_params->params_batch.mscan) - pfn_param.mscan = pno_params->params_batch.mscan; - /* enable broadcast scan */ - pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); - } else if (mode == DHD_PNO_HOTLIST_MODE) { - /* In case of HOTLIST SCAN */ - if (pno_params->params_hotlist.scan_fr) - pfn_param.scan_freq = htod32(pno_params->params_hotlist.scan_fr); - pfn_param.bestn = 0; - pfn_param.repeat = 0; - /* enable broadcast scan */ - pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); - } - if (combined_scan) { - /* Disable Adaptive Scan */ - pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT)); - pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); - pfn_param.repeat = 0; - pfn_param.exp = 0; - if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { - /* In case of Legacy PNO + BATCH SCAN */ - _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); - if (_params->params_batch.bestn) - pfn_param.bestn = _params->params_batch.bestn; - if (_params->params_batch.scan_fr) - pfn_param.scan_freq = htod32(_params->params_batch.scan_fr); - if (_params->params_batch.mscan) - pfn_param.mscan = _params->params_batch.mscan; - } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { - /* In case of Legacy PNO + HOTLIST SCAN */ - _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); - if (_params->params_hotlist.scan_fr) - pfn_param.scan_freq = 
htod32(_params->params_hotlist.scan_fr); - pfn_param.bestn = 0; - pfn_param.repeat = 0; - } - } - } - if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) || - pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) { - DHD_ERROR(("%s pno freq(%d sec) is not valid \n", - __FUNCTION__, PNO_SCAN_MIN_FW_SEC)); - err = BCME_BADARG; - goto exit; - } - if (mode == DHD_PNO_BATCH_MODE) { - int _tmp = pfn_param.bestn; - /* set bestn to calculate the max mscan which firmware supports */ - err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 1); - if (err < 0) { - DHD_ERROR(("%s : failed to set pfnmem\n", __FUNCTION__)); - goto exit; - } - /* get max mscan which the firmware supports */ - err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 0); - if (err < 0) { - DHD_ERROR(("%s : failed to get pfnmem\n", __FUNCTION__)); - goto exit; - } - DHD_PNO((" returned mscan : %d, set bestn : %d\n", _tmp, pfn_param.bestn)); - pfn_param.mscan = MIN(pfn_param.mscan, _tmp); - } - err = dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), 1); - if (err < 0) { - DHD_ERROR(("%s : failed to execute pfn_set\n", __FUNCTION__)); - goto exit; - } - /* need to return mscan if this is for batch scan instead of err */ - err = (mode == DHD_PNO_BATCH_MODE)? 
pfn_param.mscan : err; -exit: - return err; -} -static int -_dhd_pno_add_ssid(dhd_pub_t *dhd, wlc_ssid_t* ssids_list, int nssid) -{ - int err = BCME_OK; - int i = 0; - wl_pfn_t pfn_element; - NULL_CHECK(dhd, "dhd is NULL", err); - if (nssid) { - NULL_CHECK(ssids_list, "ssid list is NULL", err); - } - memset(&pfn_element, 0, sizeof(pfn_element)); - { - int j; - for (j = 0; j < nssid; j++) { - DHD_PNO(("%d: scan for %s size = %d\n", j, - ssids_list[j].SSID, ssids_list[j].SSID_len)); - } - } - /* Check for broadcast ssid */ - for (i = 0; i < nssid; i++) { - if (!ssids_list[i].SSID_len) { - DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", i)); - err = BCME_ERROR; - goto exit; - } - } - /* set all pfn ssid */ - for (i = 0; i < nssid; i++) { - pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE); - pfn_element.auth = (DOT11_OPEN_SYSTEM); - pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY); - pfn_element.wsec = htod32(0); - pfn_element.infra = htod32(1); - pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT); - memcpy((char *)pfn_element.ssid.SSID, ssids_list[i].SSID, - ssids_list[i].SSID_len); - pfn_element.ssid.SSID_len = ssids_list[i].SSID_len; - err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_element, - sizeof(pfn_element), 1); - if (err < 0) { - DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__)); - goto exit; - } - } -exit: - return err; -} -/* qsort compare function */ -static int -_dhd_pno_cmpfunc(const void *a, const void *b) -{ - return (*(uint16*)a - *(uint16*)b); -} -static int -_dhd_pno_chan_merge(uint16 *d_chan_list, int *nchan, - uint16 *chan_list1, int nchan1, uint16 *chan_list2, int nchan2) -{ - int err = BCME_OK; - int i = 0, j = 0, k = 0; - uint16 tmp; - NULL_CHECK(d_chan_list, "d_chan_list is NULL", err); - NULL_CHECK(nchan, "nchan is NULL", err); - NULL_CHECK(chan_list1, "chan_list1 is NULL", err); - NULL_CHECK(chan_list2, "chan_list2 is NULL", err); - /* chan_list1 and chan_list2 should be sorted at first */ - while (i < 
nchan1 && j < nchan2) { - tmp = chan_list1[i] < chan_list2[j]? - chan_list1[i++] : chan_list2[j++]; - for (; i < nchan1 && chan_list1[i] == tmp; i++); - for (; j < nchan2 && chan_list2[j] == tmp; j++); - d_chan_list[k++] = tmp; - } - - while (i < nchan1) { - tmp = chan_list1[i++]; - for (; i < nchan1 && chan_list1[i] == tmp; i++); - d_chan_list[k++] = tmp; - } - - while (j < nchan2) { - tmp = chan_list2[j++]; - for (; j < nchan2 && chan_list2[j] == tmp; j++); - d_chan_list[k++] = tmp; - - } - *nchan = k; - return err; -} -static int -_dhd_pno_get_channels(dhd_pub_t *dhd, uint16 *d_chan_list, - int *nchan, uint8 band, bool skip_dfs) -{ - int err = BCME_OK; - int i, j; - uint32 chan_buf[WL_NUMCHANNELS + 1]; - wl_uint32_list_t *list; - NULL_CHECK(dhd, "dhd is NULL", err); - if (*nchan) { - NULL_CHECK(d_chan_list, "d_chan_list is NULL", err); - } - list = (wl_uint32_list_t *) (void *)chan_buf; - list->count = htod32(WL_NUMCHANNELS); - err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, chan_buf, sizeof(chan_buf), FALSE, 0); - if (err < 0) { - DHD_ERROR(("failed to get channel list (err: %d)\n", err)); - goto exit; - } - for (i = 0, j = 0; i < dtoh32(list->count) && i < *nchan; i++) { - if (band == WLC_BAND_2G) { - if (dtoh32(list->element[i]) > CHANNEL_2G_MAX) - continue; - } else if (band == WLC_BAND_5G) { - if (dtoh32(list->element[i]) <= CHANNEL_2G_MAX) - continue; - if (skip_dfs && is_dfs(dtoh32(list->element[i]))) - continue; - - } else { /* All channels */ - if (skip_dfs && is_dfs(dtoh32(list->element[i]))) - continue; - } - d_chan_list[j++] = dtoh32(list->element[i]); - } - *nchan = j; -exit: - return err; -} -static int -_dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batch, - char *buf, int nbufsize) -{ - int err = BCME_OK; - int bytes_written = 0, nreadsize = 0; - int t_delta = 0; - int nleftsize = nbufsize; - uint8 cnt = 0; - char *bp = buf; - char eabuf[ETHER_ADDR_STR_LEN]; -#ifdef PNO_DEBUG - char *_base_bp; - char msg[150]; 
-#endif - dhd_pno_bestnet_entry_t *iter, *next; - dhd_pno_scan_results_t *siter, *snext; - dhd_pno_best_header_t *phead, *pprev; - NULL_CHECK(params_batch, "params_batch is NULL", err); - if (nbufsize > 0) - NULL_CHECK(buf, "buf is NULL", err); - /* initialize the buffer */ - memset(buf, 0, nbufsize); - DHD_PNO(("%s enter \n", __FUNCTION__)); - /* # of scans */ - if (!params_batch->get_batch.batch_started) { - bp += nreadsize = sprintf(bp, "scancount=%d\n", - params_batch->get_batch.expired_tot_scan_cnt); - nleftsize -= nreadsize; - params_batch->get_batch.batch_started = TRUE; - } - DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt)); - /* preestimate scan count until which scan result this report is going to end */ - list_for_each_entry_safe(siter, snext, - ¶ms_batch->get_batch.expired_scan_results_list, list) { - phead = siter->bestnetheader; - while (phead != NULL) { - /* if left_size is less than bestheader total size , stop this */ - if (nleftsize <= - (phead->tot_size + phead->tot_cnt * ENTRY_OVERHEAD)) - goto exit; - /* increase scan count */ - cnt++; - /* # best of each scan */ - DHD_PNO(("\n\n", cnt - 1, phead->tot_cnt)); - /* attribute of the scan */ - if (phead->reason & PNO_STATUS_ABORT_MASK) { - bp += nreadsize = sprintf(bp, "trunc\n"); - nleftsize -= nreadsize; - } - list_for_each_entry_safe(iter, next, - &phead->entry_list, list) { - t_delta = jiffies_to_msecs(jiffies - iter->recorded_time); -#ifdef PNO_DEBUG - _base_bp = bp; - memset(msg, 0, sizeof(msg)); -#endif - /* BSSID info */ - bp += nreadsize = sprintf(bp, "bssid=%s\n", - bcm_ether_ntoa((const struct ether_addr *)&iter->BSSID, eabuf)); - nleftsize -= nreadsize; - /* SSID */ - bp += nreadsize = sprintf(bp, "ssid=%s\n", iter->SSID); - nleftsize -= nreadsize; - /* channel */ - bp += nreadsize = sprintf(bp, "freq=%d\n", - wf_channel2mhz(iter->channel, - iter->channel <= CH_MAX_2G_CHANNEL? 
- WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); - nleftsize -= nreadsize; - /* RSSI */ - bp += nreadsize = sprintf(bp, "level=%d\n", iter->RSSI); - nleftsize -= nreadsize; - /* add the time consumed in Driver to the timestamp of firmware */ - iter->timestamp += t_delta; - bp += nreadsize = sprintf(bp, "age=%d\n", iter->timestamp); - nleftsize -= nreadsize; - /* RTT0 */ - bp += nreadsize = sprintf(bp, "dist=%d\n", - (iter->rtt0 == 0)? -1 : iter->rtt0); - nleftsize -= nreadsize; - /* RTT1 */ - bp += nreadsize = sprintf(bp, "distSd=%d\n", - (iter->rtt0 == 0)? -1 : iter->rtt1); - nleftsize -= nreadsize; - bp += nreadsize = sprintf(bp, "%s", AP_END_MARKER); - nleftsize -= nreadsize; - list_del(&iter->list); - MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE); -#ifdef PNO_DEBUG - memcpy(msg, _base_bp, bp - _base_bp); - DHD_PNO(("Entry : \n%s", msg)); -#endif - } - bp += nreadsize = sprintf(bp, "%s", SCAN_END_MARKER); - DHD_PNO(("%s", SCAN_END_MARKER)); - nleftsize -= nreadsize; - pprev = phead; - /* reset the header */ - siter->bestnetheader = phead = phead->next; - MFREE(dhd->osh, pprev, BEST_HEADER_SIZE); - - siter->cnt_header--; - } - if (phead == NULL) { - /* we store all entry in this scan , so it is ok to delete */ - list_del(&siter->list); - MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE); - } - } -exit: - if (cnt < params_batch->get_batch.expired_tot_scan_cnt) { - DHD_ERROR(("Buffer size is small to save all batch entry," - " cnt : %d (remained_scan_cnt): %d\n", - cnt, params_batch->get_batch.expired_tot_scan_cnt - cnt)); - } - params_batch->get_batch.expired_tot_scan_cnt -= cnt; - /* set FALSE only if the link list is empty after returning the data */ - if (list_empty(¶ms_batch->get_batch.expired_scan_results_list)) { - params_batch->get_batch.batch_started = FALSE; - bp += sprintf(bp, "%s", RESULTS_END_MARKER); - DHD_PNO(("%s", RESULTS_END_MARKER)); - DHD_PNO(("%s : Getting the batching data is complete\n", __FUNCTION__)); - } - /* return used memory in buffer */ - 
bytes_written = (int32)(bp - buf); - return bytes_written; -} -static int -_dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool only_last) -{ - int err = BCME_OK; - int removed_scan_cnt = 0; - dhd_pno_scan_results_t *siter, *snext; - dhd_pno_best_header_t *phead, *pprev; - dhd_pno_bestnet_entry_t *iter, *next; - NULL_CHECK(dhd, "dhd is NULL", err); - NULL_CHECK(head, "head is NULL", err); - NULL_CHECK(head->next, "head->next is NULL", err); - DHD_PNO(("%s enter\n", __FUNCTION__)); - list_for_each_entry_safe(siter, snext, - head, list) { - if (only_last) { - /* in case that we need to delete only last one */ - if (!list_is_last(&siter->list, head)) { - /* skip if the one is not last */ - continue; - } - } - /* delete all data belong if the one is last */ - phead = siter->bestnetheader; - while (phead != NULL) { - removed_scan_cnt++; - list_for_each_entry_safe(iter, next, - &phead->entry_list, list) { - list_del(&iter->list); - MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE); - } - pprev = phead; - phead = phead->next; - MFREE(dhd->osh, pprev, BEST_HEADER_SIZE); - } - if (phead == NULL) { - /* it is ok to delete top node */ - list_del(&siter->list); - MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE); - } - } - return removed_scan_cnt; -} - -static int -_dhd_pno_cfg(dhd_pub_t *dhd, uint16 *channel_list, int nchan) -{ - int err = BCME_OK; - int i = 0; - wl_pfn_cfg_t pfncfg_param; - NULL_CHECK(dhd, "dhd is NULL", err); - if (nchan) { - NULL_CHECK(channel_list, "nchan is NULL", err); - } - DHD_PNO(("%s enter : nchan : %d\n", __FUNCTION__, nchan)); - memset(&pfncfg_param, 0, sizeof(wl_pfn_cfg_t)); - /* Setup default values */ - pfncfg_param.reporttype = htod32(WL_PFN_REPORT_ALLNET); - pfncfg_param.channel_num = htod32(0); - - for (i = 0; i < nchan && nchan < WL_NUMCHANNELS; i++) - pfncfg_param.channel_list[i] = channel_list[i]; - - pfncfg_param.channel_num = htod32(nchan); - err = dhd_iovar(dhd, 0, "pfn_cfg", (char *)&pfncfg_param, sizeof(pfncfg_param), 1); - 
if (err < 0) { - DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__)); - goto exit; - } -exit: - return err; -} -static int -_dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mode_t mode) -{ - int err = BCME_OK; - dhd_pno_status_info_t *_pno_state; - NULL_CHECK(dhd, "dhd is NULL\n", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL\n", err); - DHD_PNO(("%s enter\n", __FUNCTION__)); - _pno_state = PNO_GET_PNOSTATE(dhd); - mutex_lock(&_pno_state->pno_mutex); - switch (mode) { - case DHD_PNO_LEGACY_MODE: { - struct dhd_pno_ssid *iter, *next; - if (params->params_legacy.nssid > 0) { - list_for_each_entry_safe(iter, next, - ¶ms->params_legacy.ssid_list, list) { - list_del(&iter->list); - kfree(iter); - } - } - params->params_legacy.nssid = 0; - params->params_legacy.scan_fr = 0; - params->params_legacy.pno_freq_expo_max = 0; - params->params_legacy.pno_repeat = 0; - params->params_legacy.nchan = 0; - memset(params->params_legacy.chan_list, 0, - sizeof(params->params_legacy.chan_list)); - break; - } - case DHD_PNO_BATCH_MODE: { - params->params_batch.scan_fr = 0; - params->params_batch.mscan = 0; - params->params_batch.nchan = 0; - params->params_batch.rtt = 0; - params->params_batch.bestn = 0; - params->params_batch.nchan = 0; - params->params_batch.band = WLC_BAND_AUTO; - memset(params->params_batch.chan_list, 0, - sizeof(params->params_batch.chan_list)); - params->params_batch.get_batch.batch_started = FALSE; - params->params_batch.get_batch.buf = NULL; - params->params_batch.get_batch.bufsize = 0; - params->params_batch.get_batch.reason = 0; - _dhd_pno_clear_all_batch_results(dhd, - ¶ms->params_batch.get_batch.scan_results_list, FALSE); - _dhd_pno_clear_all_batch_results(dhd, - ¶ms->params_batch.get_batch.expired_scan_results_list, FALSE); - params->params_batch.get_batch.tot_scan_cnt = 0; - params->params_batch.get_batch.expired_tot_scan_cnt = 0; - params->params_batch.get_batch.top_node_cnt = 0; - 
INIT_LIST_HEAD(¶ms->params_batch.get_batch.scan_results_list); - INIT_LIST_HEAD(¶ms->params_batch.get_batch.expired_scan_results_list); - break; - } - case DHD_PNO_HOTLIST_MODE: { - struct dhd_pno_bssid *iter, *next; - if (params->params_hotlist.nbssid > 0) { - list_for_each_entry_safe(iter, next, - ¶ms->params_hotlist.bssid_list, list) { - list_del(&iter->list); - kfree(iter); - } - } - params->params_hotlist.scan_fr = 0; - params->params_hotlist.nbssid = 0; - params->params_hotlist.nchan = 0; - params->params_batch.band = WLC_BAND_AUTO; - memset(params->params_hotlist.chan_list, 0, - sizeof(params->params_hotlist.chan_list)); - break; - } - default: - DHD_ERROR(("%s : unknown mode : %d\n", __FUNCTION__, mode)); - break; - } - mutex_unlock(&_pno_state->pno_mutex); - return err; -} -static int -_dhd_pno_add_bssid(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, int nbssid) -{ - int err = BCME_OK; - NULL_CHECK(dhd, "dhd is NULL", err); - if (nbssid) { - NULL_CHECK(p_pfn_bssid, "bssid list is NULL", err); - } - err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)&p_pfn_bssid, - sizeof(wl_pfn_bssid_t) * nbssid, 1); - if (err < 0) { - DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__)); - goto exit; - } -exit: - return err; -} -int -dhd_pno_stop_for_ssid(dhd_pub_t *dhd) -{ - int err = BCME_OK; - uint32 mode = 0; - dhd_pno_status_info_t *_pno_state; - dhd_pno_params_t *_params; - wl_pfn_bssid_t *p_pfn_bssid; - NULL_CHECK(dhd, "dev is NULL", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); - _pno_state = PNO_GET_PNOSTATE(dhd); - if (!(_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)) { - DHD_ERROR(("%s : LEGACY PNO MODE is not enabled\n", __FUNCTION__)); - goto exit; - } - DHD_PNO(("%s enter\n", __FUNCTION__)); - _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; - /* restart Batch mode if the batch mode is on */ - if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { - /* retrieve the batching data from firmware into host */ - 
dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); - /* save current pno_mode before calling dhd_pno_clean */ - mode = _pno_state->pno_mode; - dhd_pno_clean(dhd); - /* restore previous pno_mode */ - _pno_state->pno_mode = mode; - if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { - _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); - /* restart BATCH SCAN */ - err = dhd_pno_set_for_batch(dhd, &_params->params_batch); - if (err < 0) { - _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; - DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n", - __FUNCTION__, err)); - goto exit; - } - } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { - /* restart HOTLIST SCAN */ - struct dhd_pno_bssid *iter, *next; - _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); - p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) * - _params->params_hotlist.nbssid, GFP_KERNEL); - if (p_pfn_bssid == NULL) { - DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" - " (count: %d)", - __FUNCTION__, _params->params_hotlist.nbssid)); - err = BCME_ERROR; - _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; - goto exit; - } - /* convert dhd_pno_bssid to wl_pfn_bssid */ - list_for_each_entry_safe(iter, next, - &_params->params_hotlist.bssid_list, list) { - memcpy(&p_pfn_bssid->macaddr, - &iter->macaddr, ETHER_ADDR_LEN); - p_pfn_bssid->flags = iter->flags; - p_pfn_bssid++; - } - err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); - if (err < 0) { - _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; - DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n", - __FUNCTION__, err)); - goto exit; - } - } - } else { - err = dhd_pno_clean(dhd); - if (err < 0) { - DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", - __FUNCTION__, err)); - goto exit; - } - } -exit: - return err; -} - -int -dhd_pno_enable(dhd_pub_t *dhd, int enable) -{ - int err = BCME_OK; - NULL_CHECK(dhd, "dhd is NULL", err); - DHD_PNO(("%s enter\n", __FUNCTION__)); - return 
(_dhd_pno_enable(dhd, enable)); -} - -int -dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_t* ssid_list, int nssid, - uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan) -{ - struct dhd_pno_ssid *_pno_ssid; - dhd_pno_params_t *_params; - dhd_pno_params_t *_params2; - dhd_pno_status_info_t *_pno_state; - uint16 _chan_list[WL_NUMCHANNELS]; - int32 tot_nchan = 0; - int err = BCME_OK; - int i; - int mode = 0; - NULL_CHECK(dhd, "dhd is NULL", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); - _pno_state = PNO_GET_PNOSTATE(dhd); - - if (!dhd_support_sta_mode(dhd)) { - err = BCME_BADOPTION; - goto exit; - } - DHD_PNO(("%s enter : scan_fr :%d, pno_repeat :%d," - "pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__, - scan_fr, pno_repeat, pno_freq_expo_max, nchan)); - - _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); - if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { - DHD_ERROR(("%s : Legacy PNO mode was already started, " - "will disable previous one to start new one\n", __FUNCTION__)); - err = dhd_pno_stop_for_ssid(dhd); - if (err < 0) { - DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n", - __FUNCTION__, err)); - goto exit; - } - } - _pno_state->pno_mode |= DHD_PNO_LEGACY_MODE; - err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); - if (err < 0) { - DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n", - __FUNCTION__, err)); - goto exit; - } - memset(_chan_list, 0, sizeof(_chan_list)); - tot_nchan = nchan; - if (tot_nchan > 0 && channel_list) { - for (i = 0; i < nchan; i++) - _params->params_legacy.chan_list[i] = _chan_list[i] = channel_list[i]; - } - if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { - DHD_PNO(("BATCH SCAN is on progress in firmware\n")); - /* retrieve the batching data from firmware into host */ - dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); - /* store current pno_mode before disabling pno */ - mode = _pno_state->pno_mode; - 
err = _dhd_pno_enable(dhd, PNO_OFF); - if (err < 0) { - DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); - goto exit; - } - /* restore the previous mode */ - _pno_state->pno_mode = mode; - /* use superset of channel list between two mode */ - if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { - _params2 = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); - if (_params2->params_batch.nchan > 0 && nchan > 0) { - err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, - &_params2->params_batch.chan_list[0], - _params2->params_batch.nchan, - &channel_list[0], nchan); - if (err < 0) { - DHD_ERROR(("%s : failed to merge channel list" - " between legacy and batch\n", - __FUNCTION__)); - goto exit; - } - } else { - DHD_PNO(("superset channel will use" - " all channels in firmware\n")); - } - } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { - _params2 = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); - if (_params2->params_hotlist.nchan > 0 && nchan > 0) { - err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, - &_params2->params_hotlist.chan_list[0], - _params2->params_hotlist.nchan, - &channel_list[0], nchan); - if (err < 0) { - DHD_ERROR(("%s : failed to merge channel list" - " between legacy and hotlist\n", - __FUNCTION__)); - goto exit; - } - } - } - } - _params->params_legacy.scan_fr = scan_fr; - _params->params_legacy.pno_repeat = pno_repeat; - _params->params_legacy.pno_freq_expo_max = pno_freq_expo_max; - _params->params_legacy.nchan = nchan; - _params->params_legacy.nssid = nssid; - INIT_LIST_HEAD(&_params->params_legacy.ssid_list); - if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_LEGACY_MODE)) < 0) { - DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err)); - goto exit; - } - if ((err = _dhd_pno_add_ssid(dhd, ssid_list, nssid)) < 0) { - DHD_ERROR(("failed to add ssid list(err %d), %d in firmware\n", err, nssid)); - goto exit; - } - for (i = 0; i < nssid; i++) { - _pno_ssid = kzalloc(sizeof(struct dhd_pno_ssid), GFP_KERNEL); - if 
(_pno_ssid == NULL) { - DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n", - __FUNCTION__)); - goto exit; - } - _pno_ssid->SSID_len = ssid_list[i].SSID_len; - memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len); - list_add_tail(&_pno_ssid->list, &_params->params_legacy.ssid_list); - - } - if (tot_nchan > 0) { - if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { - DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", - __FUNCTION__, err)); - goto exit; - } - } - if (_pno_state->pno_status == DHD_PNO_DISABLED) { - if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) - DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); - } -exit: - /* clear mode in case of error */ - if (err < 0) - _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; - return err; -} -int -dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params) -{ - int err = BCME_OK; - uint16 _chan_list[WL_NUMCHANNELS]; - int rem_nchan = 0, tot_nchan = 0; - int mode = 0, mscan = 0; - int i = 0; - dhd_pno_params_t *_params; - dhd_pno_params_t *_params2; - dhd_pno_status_info_t *_pno_state; - wlc_ssid_t *p_ssid_list = NULL; - NULL_CHECK(dhd, "dhd is NULL", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); - NULL_CHECK(batch_params, "batch_params is NULL", err); - _pno_state = PNO_GET_PNOSTATE(dhd); - DHD_PNO(("%s enter\n", __FUNCTION__)); - if (!dhd_support_sta_mode(dhd)) { - err = BCME_BADOPTION; - goto exit; - } - if (!WLS_SUPPORTED(_pno_state)) { - DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); - err = BCME_UNSUPPORTED; - goto exit; - } - _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; - if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { - _pno_state->pno_mode |= DHD_PNO_BATCH_MODE; - err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); - if (err < 0) { - DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n", - __FUNCTION__)); - goto exit; - } - } else { - /* batch mode is 
already started */ - return -EBUSY; - } - _params->params_batch.scan_fr = batch_params->scan_fr; - _params->params_batch.bestn = batch_params->bestn; - _params->params_batch.mscan = (batch_params->mscan)? - batch_params->mscan : DEFAULT_BATCH_MSCAN; - _params->params_batch.nchan = batch_params->nchan; - memcpy(_params->params_batch.chan_list, batch_params->chan_list, - sizeof(_params->params_batch.chan_list)); - - memset(_chan_list, 0, sizeof(_chan_list)); - - rem_nchan = ARRAYSIZE(batch_params->chan_list) - batch_params->nchan; - if (batch_params->band == WLC_BAND_2G || batch_params->band == WLC_BAND_5G) { - /* get a valid channel list based on band B or A */ - err = _dhd_pno_get_channels(dhd, - &_params->params_batch.chan_list[batch_params->nchan], - &rem_nchan, batch_params->band, FALSE); - if (err < 0) { - DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", - __FUNCTION__, batch_params->band)); - goto exit; - } - /* now we need to update nchan because rem_chan has valid channel count */ - _params->params_batch.nchan += rem_nchan; - /* need to sort channel list */ - sort(_params->params_batch.chan_list, _params->params_batch.nchan, - sizeof(_params->params_batch.chan_list[0]), _dhd_pno_cmpfunc, NULL); - } -#ifdef PNO_DEBUG -{ - DHD_PNO(("Channel list : ")); - for (i = 0; i < _params->params_batch.nchan; i++) { - DHD_PNO(("%d ", _params->params_batch.chan_list[i])); - } - DHD_PNO(("\n")); -} -#endif - if (_params->params_batch.nchan) { - /* copy the channel list into local array */ - memcpy(_chan_list, _params->params_batch.chan_list, sizeof(_chan_list)); - tot_nchan = _params->params_batch.nchan; - } - if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { - struct dhd_pno_ssid *iter, *next; - DHD_PNO(("PNO SSID is on progress in firmware\n")); - /* store current pno_mode before disabling pno */ - mode = _pno_state->pno_mode; - err = _dhd_pno_enable(dhd, PNO_OFF); - if (err < 0) { - DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); - goto exit; - 
} - /* restore the previous mode */ - _pno_state->pno_mode = mode; - /* Use the superset for channelist between two mode */ - _params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); - if (_params2->params_legacy.nchan > 0 && _params->params_batch.nchan > 0) { - err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, - &_params2->params_legacy.chan_list[0], - _params2->params_legacy.nchan, - &_params->params_batch.chan_list[0], _params->params_batch.nchan); - if (err < 0) { - DHD_ERROR(("%s : failed to merge channel list" - " between legacy and batch\n", - __FUNCTION__)); - goto exit; - } - } else { - DHD_PNO(("superset channel will use all channels in firmware\n")); - } - p_ssid_list = kzalloc(sizeof(wlc_ssid_t) * - _params2->params_legacy.nssid, GFP_KERNEL); - if (p_ssid_list == NULL) { - DHD_ERROR(("%s : failed to allocate wlc_ssid_t array (count: %d)", - __FUNCTION__, _params2->params_legacy.nssid)); - err = BCME_ERROR; - _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; - goto exit; - } - i = 0; - /* convert dhd_pno_ssid to dhd_pno_ssid */ - list_for_each_entry_safe(iter, next, &_params2->params_legacy.ssid_list, list) { - p_ssid_list[i].SSID_len = iter->SSID_len; - memcpy(p_ssid_list->SSID, iter->SSID, p_ssid_list[i].SSID_len); - i++; - } - if ((err = _dhd_pno_add_ssid(dhd, p_ssid_list, - _params2->params_legacy.nssid)) < 0) { - DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err)); - goto exit; - } - } - if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_BATCH_MODE)) < 0) { - DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n", - __FUNCTION__, err)); - goto exit; - } else { - /* we need to return mscan */ - mscan = err; - } - if (tot_nchan > 0) { - if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { - DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", - __FUNCTION__, err)); - goto exit; - } - } - if (_pno_state->pno_status == DHD_PNO_DISABLED) { - if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) - DHD_ERROR(("%s 
: failed to enable PNO\n", __FUNCTION__)); - } -exit: - /* clear mode in case of error */ - if (err < 0) - _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; - else { - /* return #max scan firmware can do */ - err = mscan; - } - if (p_ssid_list) - kfree(p_ssid_list); - return err; -} - -static int -_dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) -{ - int err = BCME_OK; - int i, j; - uint32 timestamp = 0; - dhd_pno_params_t *_params = NULL; - dhd_pno_status_info_t *_pno_state = NULL; - wl_pfn_lscanresults_t *plbestnet = NULL; - wl_pfn_lnet_info_t *plnetinfo; - dhd_pno_bestnet_entry_t *pbestnet_entry; - dhd_pno_best_header_t *pbestnetheader = NULL; - dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext; - bool allocate_header = FALSE; - NULL_CHECK(dhd, "dhd is NULL", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); - if (!dhd_support_sta_mode(dhd)) { - err = BCME_BADOPTION; - goto exit; - } - DHD_PNO(("%s enter\n", __FUNCTION__)); - _pno_state = PNO_GET_PNOSTATE(dhd); - - if (!WLS_SUPPORTED(_pno_state)) { - DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); - err = BCME_UNSUPPORTED; - goto exit; - } - if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { - DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__)); - goto exit; - } - mutex_lock(&_pno_state->pno_mutex); - _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; - if (buf && bufsize) { - if (!list_empty(&_params->params_batch.get_batch.expired_scan_results_list)) { - /* need to check whether we have cashed data or not */ - DHD_PNO(("%s: have cashed batching data in Driver\n", - __FUNCTION__)); - /* convert to results format */ - goto convert_format; - } else { - /* this is a first try to get batching results */ - if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { - /* move the scan_results_list to expired_scan_results_lists */ - list_for_each_entry_safe(siter, snext, - 
&_params->params_batch.get_batch.scan_results_list, list) { - list_move_tail(&siter->list, - &_params->params_batch.get_batch.expired_scan_results_list); - } - _params->params_batch.get_batch.top_node_cnt = 0; - _params->params_batch.get_batch.expired_tot_scan_cnt = - _params->params_batch.get_batch.tot_scan_cnt; - _params->params_batch.get_batch.tot_scan_cnt = 0; - goto convert_format; - } - } - } - /* create dhd_pno_scan_results_t whenever we got event WLC_E_PFN_BEST_BATCHING */ - pscan_results = (dhd_pno_scan_results_t *)MALLOC(dhd->osh, SCAN_RESULTS_SIZE); - if (pscan_results == NULL) { - err = BCME_NOMEM; - DHD_ERROR(("failed to allocate dhd_pno_scan_results_t\n")); - goto exit; - } - pscan_results->bestnetheader = NULL; - pscan_results->cnt_header = 0; - /* add the element into list unless total node cnt is less than MAX_NODE_ CNT */ - if (_params->params_batch.get_batch.top_node_cnt < MAX_NODE_CNT) { - list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list); - _params->params_batch.get_batch.top_node_cnt++; - } else { - int _removed_scan_cnt; - /* remove oldest one and add new one */ - DHD_PNO(("%s : Remove oldest node and add new one\n", __FUNCTION__)); - _removed_scan_cnt = _dhd_pno_clear_all_batch_results(dhd, - &_params->params_batch.get_batch.scan_results_list, TRUE); - _params->params_batch.get_batch.tot_scan_cnt -= _removed_scan_cnt; - list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list); - - } - plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); - NULL_CHECK(plbestnet, "failed to allocate buffer for bestnet", err); - DHD_PNO(("%s enter\n", __FUNCTION__)); - memset(plbestnet, 0, PNO_BESTNET_LEN); - while (plbestnet->status != PFN_COMPLETE) { - memset(plbestnet, 0, PNO_BESTNET_LEN); - err = dhd_iovar(dhd, 0, "pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0); - if (err < 0) { - if (err == BCME_EPERM) { - DHD_ERROR(("we cannot get the batching data " - "during scanning in 
firmware, try again\n,")); - msleep(500); - continue; - } else { - DHD_ERROR(("%s : failed to execute pfnlbest (err :%d)\n", - __FUNCTION__, err)); - goto exit; - } - } - DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version, - plbestnet->status, plbestnet->count)); - if (plbestnet->version != PFN_SCANRESULT_VERSION) { - err = BCME_VERSION; - DHD_ERROR(("bestnet version(%d) is mismatch with Driver version(%d)\n", - plbestnet->version, PFN_SCANRESULT_VERSION)); - goto exit; - } - plnetinfo = plbestnet->netinfo; - for (i = 0; i < plbestnet->count; i++) { - pbestnet_entry = (dhd_pno_bestnet_entry_t *) - MALLOC(dhd->osh, BESTNET_ENTRY_SIZE); - if (pbestnet_entry == NULL) { - err = BCME_NOMEM; - DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n")); - goto exit; - } - memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE); - pbestnet_entry->recorded_time = jiffies; /* record the current time */ - /* create header for the first entry */ - allocate_header = (i == 0)? TRUE : FALSE; - /* check whether the new generation is started or not */ - if (timestamp && (TIME_DIFF(timestamp, plnetinfo->timestamp) - > TIME_MIN_DIFF)) - allocate_header = TRUE; - timestamp = plnetinfo->timestamp; - if (allocate_header) { - pbestnetheader = (dhd_pno_best_header_t *) - MALLOC(dhd->osh, BEST_HEADER_SIZE); - if (pbestnetheader == NULL) { - err = BCME_NOMEM; - if (pbestnet_entry) - MFREE(dhd->osh, pbestnet_entry, - BESTNET_ENTRY_SIZE); - DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n")); - goto exit; - } - /* increase total cnt of bestnet header */ - pscan_results->cnt_header++; - /* need to record the reason to call dhd_pno_get_for_bach */ - if (reason) - pbestnetheader->reason = (ENABLE << reason); - memset(pbestnetheader, 0, BEST_HEADER_SIZE); - /* initialize the head of linked list */ - INIT_LIST_HEAD(&(pbestnetheader->entry_list)); - /* link the pbestnet heaer into existed list */ - if (pscan_results->bestnetheader == NULL) - /* In case of header */ - 
pscan_results->bestnetheader = pbestnetheader; - else { - dhd_pno_best_header_t *head = pscan_results->bestnetheader; - pscan_results->bestnetheader = pbestnetheader; - pbestnetheader->next = head; - } - } - /* fills the best network info */ - pbestnet_entry->channel = plnetinfo->pfnsubnet.channel; - pbestnet_entry->RSSI = plnetinfo->RSSI; - if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) { - /* if RSSI is positive value, we assume that - * this scan is aborted by other scan - */ - DHD_PNO(("This scan is aborted\n")); - pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT); - } - pbestnet_entry->rtt0 = plnetinfo->rtt0; - pbestnet_entry->rtt1 = plnetinfo->rtt1; - pbestnet_entry->timestamp = plnetinfo->timestamp; - pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len; - memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.SSID, - pbestnet_entry->SSID_len); - memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN); - /* add the element into list */ - list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list); - /* increase best entry count */ - pbestnetheader->tot_cnt++; - pbestnetheader->tot_size += BESTNET_ENTRY_SIZE; - DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1)); - DHD_PNO(("\tSSID : ")); - for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++) - DHD_PNO(("%c", plnetinfo->pfnsubnet.SSID[j])); - DHD_PNO(("\n")); - DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n", - plnetinfo->pfnsubnet.BSSID.octet[0], - plnetinfo->pfnsubnet.BSSID.octet[1], - plnetinfo->pfnsubnet.BSSID.octet[2], - plnetinfo->pfnsubnet.BSSID.octet[3], - plnetinfo->pfnsubnet.BSSID.octet[4], - plnetinfo->pfnsubnet.BSSID.octet[5])); - DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", - plnetinfo->pfnsubnet.channel, - plnetinfo->RSSI, plnetinfo->timestamp)); - DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo->rtt0, plnetinfo->rtt1)); - plnetinfo++; - } - } - if (pscan_results->cnt_header == 0) { - /* In case that we didn't get any data from the firmware - * Remove 
the current scan_result list from get_bach.scan_results_list. - */ - DHD_PNO(("NO BATCH DATA from Firmware, Delete current SCAN RESULT LIST\n")); - list_del(&pscan_results->list); - MFREE(dhd->osh, pscan_results, SCAN_RESULTS_SIZE); - _params->params_batch.get_batch.top_node_cnt--; - } - /* increase total scan count using current scan count */ - _params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header; - - if (buf && bufsize) { - /* This is a first try to get batching results */ - if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { - /* move the scan_results_list to expired_scan_results_lists */ - list_for_each_entry_safe(siter, snext, - &_params->params_batch.get_batch.scan_results_list, list) { - list_move_tail(&siter->list, - &_params->params_batch.get_batch.expired_scan_results_list); - } - /* reset gloval values after moving to expired list */ - _params->params_batch.get_batch.top_node_cnt = 0; - _params->params_batch.get_batch.expired_tot_scan_cnt = - _params->params_batch.get_batch.tot_scan_cnt; - _params->params_batch.get_batch.tot_scan_cnt = 0; - } -convert_format: - err = _dhd_pno_convert_format(dhd, &_params->params_batch, buf, bufsize); - if (err < 0) { - DHD_ERROR(("failed to convert the data into upper layer format\n")); - goto exit; - } - } -exit: - if (plbestnet) - MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN); - if (_params) { - _params->params_batch.get_batch.buf = NULL; - _params->params_batch.get_batch.bufsize = 0; - _params->params_batch.get_batch.bytes_written = err; - } - mutex_unlock(&_pno_state->pno_mutex); - if (waitqueue_active(&_pno_state->get_batch_done.wait)) - complete(&_pno_state->get_batch_done); - return err; -} -static void -_dhd_pno_get_batch_handler(struct work_struct *work) -{ - dhd_pno_status_info_t *_pno_state; - dhd_pub_t *dhd; - struct dhd_pno_batch_params *params_batch; - DHD_PNO(("%s enter\n", __FUNCTION__)); - _pno_state = container_of(work, struct dhd_pno_status_info, work); - dhd = 
_pno_state->dhd; - if (dhd == NULL) { - DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); - return; - } - params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; - _dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf, - params_batch->get_batch.bufsize, params_batch->get_batch.reason); - -} - -int -dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) -{ - int err = BCME_OK; - char *pbuf = buf; - dhd_pno_status_info_t *_pno_state; - struct dhd_pno_batch_params *params_batch; - NULL_CHECK(dhd, "dhd is NULL", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); - if (!dhd_support_sta_mode(dhd)) { - err = BCME_BADOPTION; - goto exit; - } - DHD_PNO(("%s enter\n", __FUNCTION__)); - _pno_state = PNO_GET_PNOSTATE(dhd); - - if (!WLS_SUPPORTED(_pno_state)) { - DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); - err = BCME_UNSUPPORTED; - goto exit; - } - params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; - if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { - DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__)); - memset(pbuf, 0, bufsize); - pbuf += sprintf(pbuf, "scancount=%d\n", 0); - sprintf(pbuf, "%s", RESULTS_END_MARKER); - err = strlen(buf); - goto exit; - } - params_batch->get_batch.buf = buf; - params_batch->get_batch.bufsize = bufsize; - params_batch->get_batch.reason = reason; - params_batch->get_batch.bytes_written = 0; - schedule_work(&_pno_state->work); - wait_for_completion(&_pno_state->get_batch_done); - err = params_batch->get_batch.bytes_written; -exit: - return err; -} - -int -dhd_pno_stop_for_batch(dhd_pub_t *dhd) -{ - int err = BCME_OK; - int mode = 0; - int i = 0; - dhd_pno_status_info_t *_pno_state; - dhd_pno_params_t *_params; - wl_pfn_bssid_t *p_pfn_bssid; - wlc_ssid_t *p_ssid_list = NULL; - NULL_CHECK(dhd, "dhd is NULL", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); - _pno_state = PNO_GET_PNOSTATE(dhd); - 
DHD_PNO(("%s enter\n", __FUNCTION__)); - if (!dhd_support_sta_mode(dhd)) { - err = BCME_BADOPTION; - goto exit; - } - if (!WLS_SUPPORTED(_pno_state)) { - DHD_ERROR(("%s : wifi location service is not supported\n", - __FUNCTION__)); - err = BCME_UNSUPPORTED; - goto exit; - } - if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { - DHD_ERROR(("%s : PNO BATCH MODE is not enabled\n", __FUNCTION__)); - goto exit; - } - _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; - if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_HOTLIST_MODE)) { - mode = _pno_state->pno_mode; - dhd_pno_clean(dhd); - _pno_state->pno_mode = mode; - /* restart Legacy PNO if the Legacy PNO is on */ - if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { - struct dhd_pno_legacy_params *_params_legacy; - struct dhd_pno_ssid *iter, *next; - _params_legacy = - &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); - p_ssid_list = kzalloc(sizeof(wlc_ssid_t) * - _params_legacy->nssid, GFP_KERNEL); - if (p_ssid_list == NULL) { - DHD_ERROR(("%s : failed to allocate wlc_ssid_t array (count: %d)", - __FUNCTION__, _params_legacy->nssid)); - err = BCME_ERROR; - _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; - goto exit; - } - i = 0; - /* convert dhd_pno_ssid to dhd_pno_ssid */ - list_for_each_entry_safe(iter, next, &_params_legacy->ssid_list, list) { - p_ssid_list[i].SSID_len = iter->SSID_len; - memcpy(p_ssid_list[i].SSID, iter->SSID, p_ssid_list[i].SSID_len); - i++; - } - err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid, - _params_legacy->scan_fr, _params_legacy->pno_repeat, - _params_legacy->pno_freq_expo_max, _params_legacy->chan_list, - _params_legacy->nchan); - if (err < 0) { - _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; - DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", - __FUNCTION__, err)); - goto exit; - } - } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { - struct dhd_pno_bssid *iter, *next; - _params = 
&(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); - p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) * - _params->params_hotlist.nbssid, GFP_KERNEL); - if (p_pfn_bssid == NULL) { - DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" - " (count: %d)", - __FUNCTION__, _params->params_hotlist.nbssid)); - err = BCME_ERROR; - _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; - goto exit; - } - i = 0; - /* convert dhd_pno_bssid to wl_pfn_bssid */ - list_for_each_entry_safe(iter, next, - &_params->params_hotlist.bssid_list, list) { - memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN); - p_pfn_bssid[i].flags = iter->flags; - i++; - } - err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); - if (err < 0) { - _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; - DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n", - __FUNCTION__, err)); - goto exit; - } - } - } else { - err = dhd_pno_clean(dhd); - if (err < 0) { - DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", - __FUNCTION__, err)); - goto exit; - } - } -exit: - _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; - _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); - if (p_ssid_list) - kfree(p_ssid_list); - return err; -} - -int -dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, - struct dhd_pno_hotlist_params *hotlist_params) -{ - int err = BCME_OK; - int i; - uint16 _chan_list[WL_NUMCHANNELS]; - int rem_nchan = 0; - int tot_nchan = 0; - int mode = 0; - dhd_pno_params_t *_params; - dhd_pno_params_t *_params2; - struct dhd_pno_bssid *_pno_bssid; - dhd_pno_status_info_t *_pno_state; - NULL_CHECK(dhd, "dhd is NULL", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); - NULL_CHECK(hotlist_params, "hotlist_params is NULL", err); - NULL_CHECK(p_pfn_bssid, "p_pfn_bssid is NULL", err); - _pno_state = PNO_GET_PNOSTATE(dhd); - DHD_PNO(("%s enter\n", __FUNCTION__)); - - if (!dhd_support_sta_mode(dhd)) { - err = BCME_BADOPTION; - 
goto exit; - } - if (!WLS_SUPPORTED(_pno_state)) { - DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); - err = BCME_UNSUPPORTED; - goto exit; - } - _params = &_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]; - if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) { - _pno_state->pno_mode |= DHD_PNO_HOTLIST_MODE; - err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_HOTLIST_MODE); - if (err < 0) { - DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n", - __FUNCTION__)); - goto exit; - } - } - _params->params_batch.nchan = hotlist_params->nchan; - _params->params_batch.scan_fr = hotlist_params->scan_fr; - if (hotlist_params->nchan) - memcpy(_params->params_hotlist.chan_list, hotlist_params->chan_list, - sizeof(_params->params_hotlist.chan_list)); - memset(_chan_list, 0, sizeof(_chan_list)); - - rem_nchan = ARRAYSIZE(hotlist_params->chan_list) - hotlist_params->nchan; - if (hotlist_params->band == WLC_BAND_2G || hotlist_params->band == WLC_BAND_5G) { - /* get a valid channel list based on band B or A */ - err = _dhd_pno_get_channels(dhd, - &_params->params_hotlist.chan_list[hotlist_params->nchan], - &rem_nchan, hotlist_params->band, FALSE); - if (err < 0) { - DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", - __FUNCTION__, hotlist_params->band)); - goto exit; - } - /* now we need to update nchan because rem_chan has valid channel count */ - _params->params_hotlist.nchan += rem_nchan; - /* need to sort channel list */ - sort(_params->params_hotlist.chan_list, _params->params_hotlist.nchan, - sizeof(_params->params_hotlist.chan_list[0]), _dhd_pno_cmpfunc, NULL); - } -#ifdef PNO_DEBUG -{ - int i; - DHD_PNO(("Channel list : ")); - for (i = 0; i < _params->params_batch.nchan; i++) { - DHD_PNO(("%d ", _params->params_batch.chan_list[i])); - } - DHD_PNO(("\n")); -} -#endif - if (_params->params_hotlist.nchan) { - /* copy the channel list into local array */ - memcpy(_chan_list, _params->params_hotlist.chan_list, - 
sizeof(_chan_list)); - tot_nchan = _params->params_hotlist.nchan; - } - if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { - DHD_PNO(("PNO SSID is on progress in firmware\n")); - /* store current pno_mode before disabling pno */ - mode = _pno_state->pno_mode; - err = _dhd_pno_enable(dhd, PNO_OFF); - if (err < 0) { - DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); - goto exit; - } - /* restore the previous mode */ - _pno_state->pno_mode = mode; - /* Use the superset for channelist between two mode */ - _params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); - if (_params2->params_legacy.nchan > 0 && - _params->params_hotlist.nchan > 0) { - err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, - &_params2->params_legacy.chan_list[0], - _params2->params_legacy.nchan, - &_params->params_hotlist.chan_list[0], - _params->params_hotlist.nchan); - if (err < 0) { - DHD_ERROR(("%s : failed to merge channel list" - "between legacy and hotlist\n", - __FUNCTION__)); - goto exit; - } - } - - } - - INIT_LIST_HEAD(&(_params->params_hotlist.bssid_list)); - - err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, hotlist_params->nbssid); - if (err < 0) { - DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n", - __FUNCTION__, err)); - goto exit; - } - if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_HOTLIST_MODE)) < 0) { - DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n", - __FUNCTION__, err)); - goto exit; - } - if (tot_nchan > 0) { - if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { - DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", - __FUNCTION__, err)); - goto exit; - } - } - for (i = 0; i < hotlist_params->nbssid; i++) { - _pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL); - NULL_CHECK(_pno_bssid, "_pfn_bssid is NULL", err); - memcpy(&_pno_bssid->macaddr, &p_pfn_bssid[i].macaddr, ETHER_ADDR_LEN); - _pno_bssid->flags = p_pfn_bssid[i].flags; - list_add_tail(&_pno_bssid->list, 
&_params->params_hotlist.bssid_list); - } - _params->params_hotlist.nbssid = hotlist_params->nbssid; - if (_pno_state->pno_status == DHD_PNO_DISABLED) { - if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) - DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); - } -exit: - /* clear mode in case of error */ - if (err < 0) - _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; - return err; -} - -int -dhd_pno_stop_for_hotlist(dhd_pub_t *dhd) -{ - int err = BCME_OK; - uint32 mode = 0; - dhd_pno_status_info_t *_pno_state; - dhd_pno_params_t *_params; - wlc_ssid_t *p_ssid_list; - NULL_CHECK(dhd, "dhd is NULL", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); - _pno_state = PNO_GET_PNOSTATE(dhd); - - if (!WLS_SUPPORTED(_pno_state)) { - DHD_ERROR(("%s : wifi location service is not supported\n", - __FUNCTION__)); - err = BCME_UNSUPPORTED; - goto exit; - } - - if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) { - DHD_ERROR(("%s : Hotlist MODE is not enabled\n", - __FUNCTION__)); - goto exit; - } - _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; - - if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_BATCH_MODE)) { - /* retrieve the batching data from firmware into host */ - dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); - /* save current pno_mode before calling dhd_pno_clean */ - mode = _pno_state->pno_mode; - err = dhd_pno_clean(dhd); - if (err < 0) { - DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", - __FUNCTION__, err)); - goto exit; - } - /* restore previos pno mode */ - _pno_state->pno_mode = mode; - if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { - /* restart Legacy PNO Scan */ - struct dhd_pno_legacy_params *_params_legacy; - struct dhd_pno_ssid *iter, *next; - _params_legacy = - &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); - p_ssid_list = - kzalloc(sizeof(wlc_ssid_t) * _params_legacy->nssid, GFP_KERNEL); - if (p_ssid_list == NULL) { - DHD_ERROR(("%s : failed to allocate wlc_ssid_t array (count: %d)", - 
__FUNCTION__, _params_legacy->nssid)); - err = BCME_ERROR; - _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; - goto exit; - } - /* convert dhd_pno_ssid to dhd_pno_ssid */ - list_for_each_entry_safe(iter, next, &_params_legacy->ssid_list, list) { - p_ssid_list->SSID_len = iter->SSID_len; - memcpy(p_ssid_list->SSID, iter->SSID, p_ssid_list->SSID_len); - p_ssid_list++; - } - err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid, - _params_legacy->scan_fr, _params_legacy->pno_repeat, - _params_legacy->pno_freq_expo_max, _params_legacy->chan_list, - _params_legacy->nchan); - if (err < 0) { - _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; - DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", - __FUNCTION__, err)); - goto exit; - } - } else if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { - /* restart Batching Scan */ - _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); - /* restart BATCH SCAN */ - err = dhd_pno_set_for_batch(dhd, &_params->params_batch); - if (err < 0) { - _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; - DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n", - __FUNCTION__, err)); - goto exit; - } - } - } else { - err = dhd_pno_clean(dhd); - if (err < 0) { - DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", - __FUNCTION__, err)); - goto exit; - } - } -exit: - return err; -} - -int -dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) -{ - int err = BCME_OK; - uint status, event_type, flags, datalen; - dhd_pno_status_info_t *_pno_state; - NULL_CHECK(dhd, "dhd is NULL", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); - _pno_state = PNO_GET_PNOSTATE(dhd); - if (!WLS_SUPPORTED(_pno_state)) { - DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); - err = BCME_UNSUPPORTED; - goto exit; - } - event_type = ntoh32(event->event_type); - flags = ntoh16(event->flags); - status = ntoh32(event->status); - datalen = ntoh32(event->datalen); - DHD_PNO(("%s enter 
: event_type :%d\n", __FUNCTION__, event_type)); - switch (event_type) { - case WLC_E_PFN_BSSID_NET_FOUND: - case WLC_E_PFN_BSSID_NET_LOST: - /* TODO : need to implement event logic using generic netlink */ - break; - case WLC_E_PFN_BEST_BATCHING: - { - struct dhd_pno_batch_params *params_batch; - params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; - if (!waitqueue_active(&_pno_state->get_batch_done.wait)) { - DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING\n", __FUNCTION__)); - params_batch->get_batch.buf = NULL; - params_batch->get_batch.bufsize = 0; - params_batch->get_batch.reason = PNO_STATUS_EVENT; - schedule_work(&_pno_state->work); - } else - DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING" - "will skip this event\n", __FUNCTION__)); - break; - } - default: - DHD_ERROR(("unknown event : %d\n", event_type)); - } -exit: - return err; -} - -int dhd_pno_init(dhd_pub_t *dhd) -{ - int err = BCME_OK; - dhd_pno_status_info_t *_pno_state; - NULL_CHECK(dhd, "dhd is NULL", err); - DHD_PNO(("%s enter\n", __FUNCTION__)); - UNUSED_PARAMETER(_dhd_pno_suspend); - if (dhd->pno_state) - goto exit; - dhd->pno_state = MALLOC(dhd->osh, sizeof(dhd_pno_status_info_t)); - NULL_CHECK(dhd->pno_state, "failed to create dhd_pno_state", err); - memset(dhd->pno_state, 0, sizeof(dhd_pno_status_info_t)); - /* need to check whether current firmware support batching and hotlist scan */ - _pno_state = PNO_GET_PNOSTATE(dhd); - _pno_state->wls_supported = TRUE; - _pno_state->dhd = dhd; - mutex_init(&_pno_state->pno_mutex); - INIT_WORK(&_pno_state->work, _dhd_pno_get_batch_handler); - init_completion(&_pno_state->get_batch_done); - err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, 0); - if (err == BCME_UNSUPPORTED) { - _pno_state->wls_supported = FALSE; - DHD_INFO(("Current firmware doesn't support" - " Android Location Service\n")); - } -exit: - return err; -} -int dhd_pno_deinit(dhd_pub_t *dhd) -{ - int err = BCME_OK; - dhd_pno_status_info_t *_pno_state; - dhd_pno_params_t *_params; - 
NULL_CHECK(dhd, "dhd is NULL", err); - - DHD_PNO(("%s enter\n", __FUNCTION__)); - _pno_state = PNO_GET_PNOSTATE(dhd); - NULL_CHECK(_pno_state, "pno_state is NULL", err); - /* may need to free legacy ssid_list */ - if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { - _params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; - _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); - } - - if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { - _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; - /* clear resource if the BATCH MODE is on */ - _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); - } - cancel_work_sync(&_pno_state->work); - MFREE(dhd->osh, _pno_state, sizeof(dhd_pno_status_info_t)); - dhd->pno_state = NULL; - return err; -} -#endif /* PNO_SUPPORT */ diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.h b/drivers/net/wireless/bcmdhd/dhd_pno.h deleted file mode 100644 index d436271e42e51..0000000000000 --- a/drivers/net/wireless/bcmdhd/dhd_pno.h +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Header file of Broadcom Dongle Host Driver (DHD) - * Prefered Network Offload code and Wi-Fi Location Service(WLS) code. 
- * $Copyright Open Broadcom Corporation$ - * - * $Id: dhd_pno.h 423669 2013-09-18 13:01:55Z $ - */ - -#ifndef __DHD_PNO_H__ -#define __DHD_PNO_H__ - -#if defined(PNO_SUPPORT) -#define PNO_TLV_PREFIX 'S' -#define PNO_TLV_VERSION '1' -#define PNO_TLV_SUBTYPE_LEGACY_PNO '2' -#define PNO_TLV_RESERVED '0' - -#define PNO_BATCHING_SET "SET" -#define PNO_BATCHING_GET "GET" -#define PNO_BATCHING_STOP "STOP" - -#define PNO_PARAMS_DELIMETER " " -#define PNO_PARAM_CHANNEL_DELIMETER "," -#define PNO_PARAM_VALUE_DELLIMETER '=' -#define PNO_PARAM_SCANFREQ "SCANFREQ" -#define PNO_PARAM_BESTN "BESTN" -#define PNO_PARAM_MSCAN "MSCAN" -#define PNO_PARAM_CHANNEL "CHANNEL" -#define PNO_PARAM_RTT "RTT" - -#define PNO_TLV_TYPE_SSID_IE 'S' -#define PNO_TLV_TYPE_TIME 'T' -#define PNO_TLV_FREQ_REPEAT 'R' -#define PNO_TLV_FREQ_EXPO_MAX 'M' - -#define MAXNUM_SSID_PER_ADD 16 -#define MAXNUM_PNO_PARAMS 2 -#define PNO_TLV_COMMON_LENGTH 1 -#define DEFAULT_BATCH_MSCAN 16 - -#define RESULTS_END_MARKER "----\n" -#define SCAN_END_MARKER "####\n" -#define AP_END_MARKER "====\n" - -enum scan_status { - /* SCAN ABORT by other scan */ - PNO_STATUS_ABORT, - /* RTT is presence or not */ - PNO_STATUS_RTT_PRESENCE, - /* Disable PNO by Driver */ - PNO_STATUS_DISABLE, - /* NORMAL BATCHING GET */ - PNO_STATUS_NORMAL, - /* WLC_E_PFN_BEST_BATCHING */ - PNO_STATUS_EVENT, - PNO_STATUS_MAX -}; -#define PNO_STATUS_ABORT_MASK 0x0001 -#define PNO_STATUS_RTT_MASK 0x0002 -#define PNO_STATUS_DISABLE_MASK 0x0004 -#define PNO_STATUS_OOM_MASK 0x0010 - -enum index_mode { - INDEX_OF_LEGACY_PARAMS, - INDEX_OF_BATCH_PARAMS, - INDEX_OF_HOTLIST_PARAMS, - INDEX_MODE_MAX -}; -enum dhd_pno_status { - DHD_PNO_DISABLED, - DHD_PNO_ENABLED, - DHD_PNO_SUSPEND -}; -typedef struct cmd_tlv { - char prefix; - char version; - char subtype; - char reserved; -} cmd_tlv_t; -typedef enum dhd_pno_mode { - /* Wi-Fi Legacy PNO Mode */ - DHD_PNO_NONE_MODE = 0, - DHD_PNO_LEGACY_MODE = (1 << (0)), - /* Wi-Fi Android BATCH SCAN Mode */ - 
DHD_PNO_BATCH_MODE = (1 << (1)), - /* Wi-Fi Android Hotlist SCAN Mode */ - DHD_PNO_HOTLIST_MODE = (1 << (2)) -} dhd_pno_mode_t; -struct dhd_pno_ssid { - uint32 SSID_len; - uchar SSID[DOT11_MAX_SSID_LEN]; - struct list_head list; -}; -struct dhd_pno_bssid { - struct ether_addr macaddr; - /* Bit4: suppress_lost, Bit3: suppress_found */ - uint16 flags; - struct list_head list; -}; -typedef struct dhd_pno_bestnet_entry { - struct ether_addr BSSID; - uint8 SSID_len; - uint8 SSID[DOT11_MAX_SSID_LEN]; - int8 RSSI; - uint8 channel; - uint32 timestamp; - uint16 rtt0; /* distance_cm based on RTT */ - uint16 rtt1; /* distance_cm based on sample standard deviation */ - unsigned long recorded_time; - struct list_head list; -} dhd_pno_bestnet_entry_t; -#define BESTNET_ENTRY_SIZE (sizeof(dhd_pno_bestnet_entry_t)) - -typedef struct dhd_pno_bestnet_header { - struct dhd_pno_bestnet_header *next; - uint8 reason; - uint32 tot_cnt; - uint32 tot_size; - struct list_head entry_list; -} dhd_pno_best_header_t; -#define BEST_HEADER_SIZE (sizeof(dhd_pno_best_header_t)) - -typedef struct dhd_pno_scan_results { - dhd_pno_best_header_t *bestnetheader; - uint8 cnt_header; - struct list_head list; -} dhd_pno_scan_results_t; -#define SCAN_RESULTS_SIZE (sizeof(dhd_pno_scan_results_t)) - -struct dhd_pno_get_batch_info { - /* info related to get batch */ - char *buf; - bool batch_started; - uint32 tot_scan_cnt; - uint32 expired_tot_scan_cnt; - uint32 top_node_cnt; - uint32 bufsize; - uint32 bytes_written; - int reason; - struct list_head scan_results_list; - struct list_head expired_scan_results_list; -}; -struct dhd_pno_legacy_params { - uint16 scan_fr; - uint16 chan_list[WL_NUMCHANNELS]; - uint16 nchan; - int pno_repeat; - int pno_freq_expo_max; - int nssid; - struct list_head ssid_list; -}; -struct dhd_pno_batch_params { - int32 scan_fr; - uint8 bestn; - uint8 mscan; - uint8 band; - uint16 chan_list[WL_NUMCHANNELS]; - uint16 nchan; - uint16 rtt; - struct dhd_pno_get_batch_info get_batch; -}; 
-struct dhd_pno_hotlist_params { - uint8 band; - int32 scan_fr; - uint16 chan_list[WL_NUMCHANNELS]; - uint16 nchan; - uint16 nbssid; - struct list_head bssid_list; -}; -typedef union dhd_pno_params { - struct dhd_pno_legacy_params params_legacy; - struct dhd_pno_batch_params params_batch; - struct dhd_pno_hotlist_params params_hotlist; -} dhd_pno_params_t; -typedef struct dhd_pno_status_info { - dhd_pub_t *dhd; - struct work_struct work; - struct mutex pno_mutex; - struct completion get_batch_done; - bool wls_supported; /* wifi location service supported or not */ - enum dhd_pno_status pno_status; - enum dhd_pno_mode pno_mode; - dhd_pno_params_t pno_params_arr[INDEX_MODE_MAX]; - struct list_head head_list; -} dhd_pno_status_info_t; - -/* wrapper functions */ -extern int -dhd_dev_pno_enable(struct net_device *dev, int enable); - -extern int -dhd_dev_pno_stop_for_ssid(struct net_device *dev); - -extern int -dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid, - uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan); - -extern int -dhd_dev_pno_set_for_batch(struct net_device *dev, - struct dhd_pno_batch_params *batch_params); - -extern int -dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize); - -extern int -dhd_dev_pno_stop_for_batch(struct net_device *dev); - -extern int -dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid, - struct dhd_pno_hotlist_params *hotlist_params); - -/* dhd pno fuctions */ -extern int dhd_pno_stop_for_ssid(dhd_pub_t *dhd); -extern int dhd_pno_enable(dhd_pub_t *dhd, int enable); -extern int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_t* ssid_list, int nssid, - uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan); - -extern int dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params); - -extern int dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int 
reason); - - -extern int dhd_pno_stop_for_batch(dhd_pub_t *dhd); - -extern int dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, - struct dhd_pno_hotlist_params *hotlist_params); - -extern int dhd_pno_stop_for_hotlist(dhd_pub_t *dhd); - -extern int dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data); -extern int dhd_pno_init(dhd_pub_t *dhd); -extern int dhd_pno_deinit(dhd_pub_t *dhd); -#endif - -#if defined(NDISVER) -#if defined(PNO_SUPPORT) -#if (NDISVER >= 0x0630) -extern int dhd_pno_cfg(dhd_pub_t *dhd, wl_pfn_cfg_t *pcfg); -extern int dhd_pno_suspend(dhd_pub_t *dhd, int pfn_suspend); -extern int dhd_pno_set_add(dhd_pub_t *dhd, wl_pfn_t *netinfo, int nssid, ushort scan_fr, - ushort slowscan_fr, uint8 pno_repeat, uint8 pno_freq_expo_max, int16 flags); -extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled); -extern int dhd_pno_clean(dhd_pub_t *dhd); -#endif /* #if (NDISVER >= 0x0630) */ -#endif /* #if defined(PNO_SUPPORT) */ -#endif /* #if defined(NDISVER) */ -#endif /* __DHD_PNO_H__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_static_buf.c b/drivers/net/wireless/bcmdhd/dhd_static_buf.c deleted file mode 100644 index 1ebd3ee93ae70..0000000000000 --- a/drivers/net/wireless/bcmdhd/dhd_static_buf.c +++ /dev/null @@ -1,179 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -#define CONFIG_BROADCOM_WIFI_RESERVED_MEM - -#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM - -#define WLAN_STATIC_PKT_BUF 4 -#define WLAN_STATIC_SCAN_BUF0 5 -#define WLAN_STATIC_SCAN_BUF1 6 -#define WLAN_STATIC_DHD_INFO 7 -#define WLAN_STATIC_DHD_WLFC_INFO 8 -#define PREALLOC_WLAN_SEC_NUM 6 -#define PREALLOC_WLAN_BUF_NUM 160 -#define PREALLOC_WLAN_SECTION_HEADER 24 - -#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_BUF_NUM * 128) -#define WLAN_SECTION_SIZE_1 (PREALLOC_WLAN_BUF_NUM * 128) -#define WLAN_SECTION_SIZE_2 (PREALLOC_WLAN_BUF_NUM * 512) -#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_BUF_NUM * 1024) -#define 
WLAN_SECTION_SIZE_7 (PREALLOC_WLAN_BUF_NUM * 128) -#define WLAN_SECTION_SIZE_8 (PREALLOC_WLAN_BUF_NUM * 512) - -#define DHD_SKB_HDRSIZE 336 -#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE) -#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE) -#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE) - -#define WLAN_SKB_BUF_NUM 17 - -static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; - -struct wlan_mem_prealloc { - void *mem_ptr; - unsigned long size; -}; - -static struct wlan_mem_prealloc wlan_mem_array[PREALLOC_WLAN_SEC_NUM] = { - {NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER)}, - {NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER)}, - {NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER)}, - {NULL, (WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER)}, - {NULL, (WLAN_SECTION_SIZE_7 + PREALLOC_WLAN_SECTION_HEADER)}, - {NULL, (WLAN_SECTION_SIZE_8 + PREALLOC_WLAN_SECTION_HEADER)} -}; - -void *wlan_static_scan_buf0; -void *wlan_static_scan_buf1; -void *bcmdhd_mem_prealloc(int section, unsigned long size) -{ - if (section == WLAN_STATIC_PKT_BUF) { - printk("1 %s: section=%d, wlan_static_skb=%p\n", - __FUNCTION__, section, wlan_static_skb); - return wlan_static_skb; - } - if (section == WLAN_STATIC_SCAN_BUF0) { - printk("2 %s: section=%d, wlan_static_scan_buf0=%p\n", - __FUNCTION__, section, wlan_static_scan_buf0); - return wlan_static_scan_buf0; - } - if (section == WLAN_STATIC_SCAN_BUF1) { - printk("3 %s: section=%d, wlan_static_scan_buf1=%p\n", - __FUNCTION__, section, wlan_static_scan_buf1); - return wlan_static_scan_buf1; - } - if (section == WLAN_STATIC_DHD_INFO) { - printk("4 %s: section=%d, wlan_mem_array[4]=%p\n", - __FUNCTION__, section, wlan_mem_array[4].mem_ptr); - return wlan_mem_array[4].mem_ptr; - } - if (section == WLAN_STATIC_DHD_WLFC_INFO) { - printk("5 %s: section=%d, wlan_mem_array[5]=%p\n", - __FUNCTION__, section, wlan_mem_array[5].mem_ptr); - return wlan_mem_array[5].mem_ptr; - } - if 
((section < 0) || (section > PREALLOC_WLAN_SEC_NUM)) { - printk("6 %s: out of section %d\n", __FUNCTION__, section); - return NULL; - } - - if (wlan_mem_array[section].size < size) { - printk("7 %s: wlan_mem_array[section].size=%lu, size=%lu\n", - __FUNCTION__, wlan_mem_array[section].size, size); - return NULL; - } - printk("8 %s: wlan_mem_array[section].mem_ptr=%p, size=%lu\n", - __FUNCTION__, &wlan_mem_array[section], size); - - return wlan_mem_array[section].mem_ptr; -} - -EXPORT_SYMBOL(bcmdhd_mem_prealloc); - -int bcmdhd_init_wlan_mem(void) -{ - int i; - int j; - - for (i=0; i<8; i++) { - wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE); - if (!wlan_static_skb[i]) - goto err_skb_alloc; - printk("1 %s: wlan_static_skb[%d]=%p, size=%lu\n", - __FUNCTION__, i, wlan_static_skb[i], DHD_SKB_1PAGE_BUFSIZE); - } - - for (; i<16; i++) { - wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE); - if (!wlan_static_skb[i]) - goto err_skb_alloc; - printk("2 %s: wlan_static_skb[%d]=%p, size=%lu\n", - __FUNCTION__, i, wlan_static_skb[i], DHD_SKB_2PAGE_BUFSIZE); - } - - wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE); - if (!wlan_static_skb[i]) - goto err_skb_alloc; - printk("3 %s: wlan_static_skb[%d]=%p, size=%lu\n", - __FUNCTION__, i, wlan_static_skb[i], DHD_SKB_4PAGE_BUFSIZE); - - for (i=0; i - -#define ADDR_64(x) (x.addr) -#define HIGH_ADDR_32(x) ((uint32) (((sh_addr_t) x).high_addr)) -#define LOW_ADDR_32(x) ((uint32) (((sh_addr_t) x).low_addr)) - -typedef struct { - uint32 low_addr; - uint32 high_addr; -} sh_addr_t; - - - -#ifdef BCMPCIE_SUPPORT_TX_PUSH_RING -#define BCMPCIE_PUSH_TX_RING 1 -#else -#define BCMPCIE_PUSH_TX_RING 0 -#endif /* BCMPCIE_SUPPORT_TX_PUSH_RING */ - -/* May be overridden by 43xxxxx-roml.mk */ -#if !defined(BCMPCIE_MAX_TX_FLOWS) -#define BCMPCIE_MAX_TX_FLOWS 40 -#endif /* ! 
BCMPCIE_MAX_TX_FLOWS */ - -#define PCIE_SHARED_VERSION 0x00005 -#define PCIE_SHARED_VERSION_MASK 0x000FF -#define PCIE_SHARED_ASSERT_BUILT 0x00100 -#define PCIE_SHARED_ASSERT 0x00200 -#define PCIE_SHARED_TRAP 0x00400 -#define PCIE_SHARED_IN_BRPT 0x00800 -#define PCIE_SHARED_SET_BRPT 0x01000 -#define PCIE_SHARED_PENDING_BRPT 0x02000 -#define PCIE_SHARED_TXPUSH_SPRT 0x04000 -#define PCIE_SHARED_EVT_SEQNUM 0x08000 -#define PCIE_SHARED_DMA_INDEX 0x10000 - -/* D2H M2M DMA Complete Sync mechanism: Modulo-253-SeqNum or XORCSUM */ -#define PCIE_SHARED_D2H_SYNC_SEQNUM 0x20000 -#define PCIE_SHARED_D2H_SYNC_XORCSUM 0x40000 -#define PCIE_SHARED_D2H_SYNC_MODE_MASK \ - (PCIE_SHARED_D2H_SYNC_SEQNUM | PCIE_SHARED_D2H_SYNC_XORCSUM) - -#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT 0 -#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT 1 -#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE 2 -#define BCMPCIE_D2H_MSGRING_TX_COMPLETE 3 -#define BCMPCIE_D2H_MSGRING_RX_COMPLETE 4 -#define BCMPCIE_COMMON_MSGRING_MAX_ID 4 - -/* Added only for single tx ring */ -#define BCMPCIE_H2D_TXFLOWRINGID 5 - -#define BCMPCIE_H2D_COMMON_MSGRINGS 2 -#define BCMPCIE_D2H_COMMON_MSGRINGS 3 -#define BCMPCIE_COMMON_MSGRINGS 5 - -enum h2dring_idx { - BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX = 0, - BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX = 1, - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START = 2 -}; - -enum d2hring_idx { - BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX = 0, - BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX = 1, - BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX = 2 -}; - -typedef struct ring_mem { - uint16 idx; - uint8 type; - uint8 rsvd; - uint16 max_item; - uint16 len_items; - sh_addr_t base_addr; -} ring_mem_t; - -#define RINGSTATE_INITED 1 - -typedef struct ring_state { - uint8 idx; - uint8 state; - uint16 r_offset; - uint16 w_offset; - uint16 e_offset; -} ring_state_t; - - - -typedef struct ring_info { - /* locations in the TCM where the ringmem is and ringstate are defined */ - uint32 ringmem_ptr; /* ring mem location in TCM */ - uint32 h2d_w_idx_ptr; 
- - uint32 h2d_r_idx_ptr; - uint32 d2h_w_idx_ptr; - - uint32 d2h_r_idx_ptr; - /* host locations where the DMA of read/write indices are */ - sh_addr_t h2d_w_idx_hostaddr; - sh_addr_t h2d_r_idx_hostaddr; - sh_addr_t d2h_w_idx_hostaddr; - sh_addr_t d2h_r_idx_hostaddr; - uint16 max_sub_queues; - uint16 rsvd; -} ring_info_t; - -typedef struct { - /* shared area version captured at flags 7:0 */ - uint32 flags; - - uint32 trap_addr; - uint32 assert_exp_addr; - uint32 assert_file_addr; - uint32 assert_line; - uint32 console_addr; /* Address of hnd_cons_t */ - - uint32 msgtrace_addr; - - uint32 fwid; - - /* Used for debug/flow control */ - uint16 total_lfrag_pkt_cnt; - uint16 max_host_rxbufs; /* rsvd in spec */ - - uint32 dma_rxoffset; /* rsvd in spec */ - - /* these will be used for sleep request/ack, d3 req/ack */ - uint32 h2d_mb_data_ptr; - uint32 d2h_mb_data_ptr; - - /* information pertinent to host IPC/msgbuf channels */ - /* location in the TCM memory which has the ring_info */ - uint32 rings_info_ptr; - - /* block of host memory for the scratch buffer */ - uint32 host_dma_scratch_buffer_len; - sh_addr_t host_dma_scratch_buffer; - - /* block of host memory for the dongle to push the status into */ - uint32 device_rings_stsblk_len; - sh_addr_t device_rings_stsblk; -#ifdef BCM_BUZZZ - uint32 buzzz; /* BUZZZ state format strings and trace buffer */ -#endif -} pciedev_shared_t; - - -/* H2D mail box Data */ -#define H2D_HOST_D3_INFORM 0x00000001 -#define H2D_HOST_DS_ACK 0x00000002 -#define H2D_HOST_CONS_INT 0x80000000 /* h2d int for console cmds */ - -/* D2H mail box Data */ -#define D2H_DEV_D3_ACK 0x00000001 -#define D2H_DEV_DS_ENTER_REQ 0x00000002 -#define D2H_DEV_DS_EXIT_NOTE 0x00000004 -#define D2H_DEV_FWHALT 0x10000000 - - -extern pciedev_shared_t pciedev_shared; -#define NEXTTXP(i, d) ((((i)+1) >= (d)) ? 0 : ((i)+1)) -#define NTXPACTIVE(r, w, d) (((r) <= (w)) ? 
((w)-(r)) : ((d)-(r)+(w))) -#define NTXPAVAIL(r, w, d) (((d) - NTXPACTIVE((r), (w), (d))) > 1) - -/* Function can be used to notify host of FW halt */ -#define READ_AVAIL_SPACE(w, r, d) \ - ((w >= r) ? (w - r) : (d - r)) - -#define WRT_PEND(x) ((x)->wr_pending) -#define DNGL_RING_WPTR(msgbuf) (*((msgbuf)->tcm_rs_w_ptr)) -#define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a) (DNGL_RING_WPTR(msgbuf) = (a)) - -#define DNGL_RING_RPTR(msgbuf) (*((msgbuf)->tcm_rs_r_ptr)) -#define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a) (DNGL_RING_RPTR(msgbuf) = (a)) - -#define RING_READ_PTR(x) ((x)->ringstate->r_offset) -#define RING_WRITE_PTR(x) ((x)->ringstate->w_offset) -#define RING_START_PTR(x) ((x)->ringmem->base_addr.low_addr) -#define RING_MAX_ITEM(x) ((x)->ringmem->max_item) -#define RING_LEN_ITEMS(x) ((x)->ringmem->len_items) -#define HOST_RING_BASE(x) ((x)->ring_base.va) -#define HOST_RING_END(x) ((uint8 *)HOST_RING_BASE((x)) + \ - ((RING_MAX_ITEM((x))-1)*RING_LEN_ITEMS((x)))) - -#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) ((w >= r) ? (d - w) : (r - w)) -#define WRITE_SPACE_AVAIL(r, w, d) (d - (NTXPACTIVE(r, w, d)) - 1) -#define CHECK_WRITE_SPACE(r, w, d) \ - MIN(WRITE_SPACE_AVAIL(r, w, d), WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d)) - -#endif /* _bcmpcie_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmperf.h b/drivers/net/wireless/bcmdhd/include/bcmperf.h deleted file mode 100644 index 39cfc4516d4f7..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/bcmperf.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Performance counters software interface. 
- * - * $Copyright Open Broadcom Corporation$ - * - * $Id: bcmperf.h 241182 2011-02-17 21:50:03Z $ - */ -/* essai */ -#ifndef _BCMPERF_H_ -#define _BCMPERF_H_ -/* get cache hits and misses */ -#define BCMPERF_ENABLE_INSTRCOUNT() -#define BCMPERF_ENABLE_ICACHE_MISS() -#define BCMPERF_ENABLE_ICACHE_HIT() -#define BCMPERF_GETICACHE_MISS(x) ((x) = 0) -#define BCMPERF_GETICACHE_HIT(x) ((x) = 0) -#define BCMPERF_GETINSTRCOUNT(x) ((x) = 0) -#endif /* _BCMPERF_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmspi.h b/drivers/net/wireless/bcmdhd/include/bcmspi.h deleted file mode 100644 index bb0ee1503ee29..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/bcmspi.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Broadcom SPI Low-Level Hardware Driver API - * - * $ Copyright Open Broadcom Corporation $ - * - * $Id: bcmspi.h 241182 2011-02-17 21:50:03Z $ - */ -#ifndef _BCM_SPI_H -#define _BCM_SPI_H - -extern void spi_devintr_off(sdioh_info_t *sd); -extern void spi_devintr_on(sdioh_info_t *sd); -extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor); -extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode); -extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr); -extern bool spi_hw_attach(sdioh_info_t *sd); -extern bool spi_hw_detach(sdioh_info_t *sd); -extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen); -extern void spi_spinbits(sdioh_info_t *sd); -extern void spi_waitbits(sdioh_info_t *sd, bool yield); - -#endif /* _BCM_SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmwifi_rates.h b/drivers/net/wireless/bcmdhd/include/bcmwifi_rates.h deleted file mode 100644 index cf5f88d78cf70..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/bcmwifi_rates.h +++ /dev/null @@ -1,452 +0,0 @@ -/* - * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: bcmwifi_rates.h 5187 2012-06-29 06:17:50Z $ - */ - -#ifndef 
_bcmwifi_rates_h_ -#define _bcmwifi_rates_h_ - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - - -#define WL_RATESET_SZ_DSSS 4 -#define WL_RATESET_SZ_OFDM 8 -#define WL_RATESET_SZ_VHT_MCS 10 - -#if defined(WLPROPRIETARY_11N_RATES) -#define WL_RATESET_SZ_HT_MCS WL_RATESET_SZ_VHT_MCS -#else -#define WL_RATESET_SZ_HT_MCS 8 -#endif - -#define WL_RATESET_SZ_HT_IOCTL 8 /* MAC histogram, compatibility with wl utility */ - -#define WL_TX_CHAINS_MAX 3 - -#define WL_RATE_DISABLED (-128) /* Power value corresponding to unsupported rate */ - -/* Transmit channel bandwidths */ -typedef enum wl_tx_bw { - WL_TX_BW_20, - WL_TX_BW_40, - WL_TX_BW_80, - WL_TX_BW_20IN40, - WL_TX_BW_20IN80, - WL_TX_BW_40IN80, - WL_TX_BW_160, - WL_TX_BW_20IN160, - WL_TX_BW_40IN160, - WL_TX_BW_80IN160, - WL_TX_BW_ALL, - WL_TX_BW_8080, - WL_TX_BW_8080CHAN2, - WL_TX_BW_20IN8080, - WL_TX_BW_40IN8080, - WL_TX_BW_80IN8080 -} wl_tx_bw_t; - - -/* - * Transmit modes. - * Not all modes are listed here, only those required for disambiguation. e.g. 
SPEXP is not listed - */ -typedef enum wl_tx_mode { - WL_TX_MODE_NONE, - WL_TX_MODE_STBC, - WL_TX_MODE_CDD, - WL_TX_MODE_TXBF, - WL_NUM_TX_MODES -} wl_tx_mode_t; - - -/* Number of transmit chains */ -typedef enum wl_tx_chains { - WL_TX_CHAINS_1 = 1, - WL_TX_CHAINS_2, - WL_TX_CHAINS_3 -} wl_tx_chains_t; - - -/* Number of transmit streams */ -typedef enum wl_tx_nss { - WL_TX_NSS_1 = 1, - WL_TX_NSS_2, - WL_TX_NSS_3 -} wl_tx_nss_t; - - -typedef enum clm_rates { - /************ - * 1 chain * - ************ - */ - - /* 1 Stream */ - WL_RATE_1X1_DSSS_1 = 0, - WL_RATE_1X1_DSSS_2 = 1, - WL_RATE_1X1_DSSS_5_5 = 2, - WL_RATE_1X1_DSSS_11 = 3, - - WL_RATE_1X1_OFDM_6 = 4, - WL_RATE_1X1_OFDM_9 = 5, - WL_RATE_1X1_OFDM_12 = 6, - WL_RATE_1X1_OFDM_18 = 7, - WL_RATE_1X1_OFDM_24 = 8, - WL_RATE_1X1_OFDM_36 = 9, - WL_RATE_1X1_OFDM_48 = 10, - WL_RATE_1X1_OFDM_54 = 11, - - WL_RATE_1X1_MCS0 = 12, - WL_RATE_1X1_MCS1 = 13, - WL_RATE_1X1_MCS2 = 14, - WL_RATE_1X1_MCS3 = 15, - WL_RATE_1X1_MCS4 = 16, - WL_RATE_1X1_MCS5 = 17, - WL_RATE_1X1_MCS6 = 18, - WL_RATE_1X1_MCS7 = 19, - - WL_RATE_1X1_VHT0SS1 = 12, - WL_RATE_1X1_VHT1SS1 = 13, - WL_RATE_1X1_VHT2SS1 = 14, - WL_RATE_1X1_VHT3SS1 = 15, - WL_RATE_1X1_VHT4SS1 = 16, - WL_RATE_1X1_VHT5SS1 = 17, - WL_RATE_1X1_VHT6SS1 = 18, - WL_RATE_1X1_VHT7SS1 = 19, - WL_RATE_1X1_VHT8SS1 = 20, - WL_RATE_1X1_VHT9SS1 = 21, - - - /************ - * 2 chains * - ************ - */ - - /* 1 Stream expanded + 1 */ - WL_RATE_1X2_DSSS_1 = 22, - WL_RATE_1X2_DSSS_2 = 23, - WL_RATE_1X2_DSSS_5_5 = 24, - WL_RATE_1X2_DSSS_11 = 25, - - WL_RATE_1X2_CDD_OFDM_6 = 26, - WL_RATE_1X2_CDD_OFDM_9 = 27, - WL_RATE_1X2_CDD_OFDM_12 = 28, - WL_RATE_1X2_CDD_OFDM_18 = 29, - WL_RATE_1X2_CDD_OFDM_24 = 30, - WL_RATE_1X2_CDD_OFDM_36 = 31, - WL_RATE_1X2_CDD_OFDM_48 = 32, - WL_RATE_1X2_CDD_OFDM_54 = 33, - - WL_RATE_1X2_CDD_MCS0 = 34, - WL_RATE_1X2_CDD_MCS1 = 35, - WL_RATE_1X2_CDD_MCS2 = 36, - WL_RATE_1X2_CDD_MCS3 = 37, - WL_RATE_1X2_CDD_MCS4 = 38, - WL_RATE_1X2_CDD_MCS5 = 39, - WL_RATE_1X2_CDD_MCS6 = 40, 
- WL_RATE_1X2_CDD_MCS7 = 41, - - WL_RATE_1X2_VHT0SS1 = 34, - WL_RATE_1X2_VHT1SS1 = 35, - WL_RATE_1X2_VHT2SS1 = 36, - WL_RATE_1X2_VHT3SS1 = 37, - WL_RATE_1X2_VHT4SS1 = 38, - WL_RATE_1X2_VHT5SS1 = 39, - WL_RATE_1X2_VHT6SS1 = 40, - WL_RATE_1X2_VHT7SS1 = 41, - WL_RATE_1X2_VHT8SS1 = 42, - WL_RATE_1X2_VHT9SS1 = 43, - - /* 2 Streams */ - WL_RATE_2X2_STBC_MCS0 = 44, - WL_RATE_2X2_STBC_MCS1 = 45, - WL_RATE_2X2_STBC_MCS2 = 46, - WL_RATE_2X2_STBC_MCS3 = 47, - WL_RATE_2X2_STBC_MCS4 = 48, - WL_RATE_2X2_STBC_MCS5 = 49, - WL_RATE_2X2_STBC_MCS6 = 50, - WL_RATE_2X2_STBC_MCS7 = 51, - - WL_RATE_2X2_STBC_VHT0SS1 = 44, - WL_RATE_2X2_STBC_VHT1SS1 = 45, - WL_RATE_2X2_STBC_VHT2SS1 = 46, - WL_RATE_2X2_STBC_VHT3SS1 = 47, - WL_RATE_2X2_STBC_VHT4SS1 = 48, - WL_RATE_2X2_STBC_VHT5SS1 = 49, - WL_RATE_2X2_STBC_VHT6SS1 = 50, - WL_RATE_2X2_STBC_VHT7SS1 = 51, - WL_RATE_2X2_STBC_VHT8SS1 = 52, - WL_RATE_2X2_STBC_VHT9SS1 = 53, - - WL_RATE_2X2_SDM_MCS8 = 54, - WL_RATE_2X2_SDM_MCS9 = 55, - WL_RATE_2X2_SDM_MCS10 = 56, - WL_RATE_2X2_SDM_MCS11 = 57, - WL_RATE_2X2_SDM_MCS12 = 58, - WL_RATE_2X2_SDM_MCS13 = 59, - WL_RATE_2X2_SDM_MCS14 = 60, - WL_RATE_2X2_SDM_MCS15 = 61, - - WL_RATE_2X2_VHT0SS2 = 54, - WL_RATE_2X2_VHT1SS2 = 55, - WL_RATE_2X2_VHT2SS2 = 56, - WL_RATE_2X2_VHT3SS2 = 57, - WL_RATE_2X2_VHT4SS2 = 58, - WL_RATE_2X2_VHT5SS2 = 59, - WL_RATE_2X2_VHT6SS2 = 60, - WL_RATE_2X2_VHT7SS2 = 61, - WL_RATE_2X2_VHT8SS2 = 62, - WL_RATE_2X2_VHT9SS2 = 63, - - /************ - * 3 chains * - ************ - */ - - /* 1 Stream expanded + 2 */ - WL_RATE_1X3_DSSS_1 = 64, - WL_RATE_1X3_DSSS_2 = 65, - WL_RATE_1X3_DSSS_5_5 = 66, - WL_RATE_1X3_DSSS_11 = 67, - - WL_RATE_1X3_CDD_OFDM_6 = 68, - WL_RATE_1X3_CDD_OFDM_9 = 69, - WL_RATE_1X3_CDD_OFDM_12 = 70, - WL_RATE_1X3_CDD_OFDM_18 = 71, - WL_RATE_1X3_CDD_OFDM_24 = 72, - WL_RATE_1X3_CDD_OFDM_36 = 73, - WL_RATE_1X3_CDD_OFDM_48 = 74, - WL_RATE_1X3_CDD_OFDM_54 = 75, - - WL_RATE_1X3_CDD_MCS0 = 76, - WL_RATE_1X3_CDD_MCS1 = 77, - WL_RATE_1X3_CDD_MCS2 = 78, - WL_RATE_1X3_CDD_MCS3 = 79, - 
WL_RATE_1X3_CDD_MCS4 = 80, - WL_RATE_1X3_CDD_MCS5 = 81, - WL_RATE_1X3_CDD_MCS6 = 82, - WL_RATE_1X3_CDD_MCS7 = 83, - - WL_RATE_1X3_VHT0SS1 = 76, - WL_RATE_1X3_VHT1SS1 = 77, - WL_RATE_1X3_VHT2SS1 = 78, - WL_RATE_1X3_VHT3SS1 = 79, - WL_RATE_1X3_VHT4SS1 = 80, - WL_RATE_1X3_VHT5SS1 = 81, - WL_RATE_1X3_VHT6SS1 = 82, - WL_RATE_1X3_VHT7SS1 = 83, - WL_RATE_1X3_VHT8SS1 = 84, - WL_RATE_1X3_VHT9SS1 = 85, - - /* 2 Streams expanded + 1 */ - WL_RATE_2X3_STBC_MCS0 = 86, - WL_RATE_2X3_STBC_MCS1 = 87, - WL_RATE_2X3_STBC_MCS2 = 88, - WL_RATE_2X3_STBC_MCS3 = 89, - WL_RATE_2X3_STBC_MCS4 = 90, - WL_RATE_2X3_STBC_MCS5 = 91, - WL_RATE_2X3_STBC_MCS6 = 92, - WL_RATE_2X3_STBC_MCS7 = 93, - - WL_RATE_2X3_STBC_VHT0SS1 = 86, - WL_RATE_2X3_STBC_VHT1SS1 = 87, - WL_RATE_2X3_STBC_VHT2SS1 = 88, - WL_RATE_2X3_STBC_VHT3SS1 = 89, - WL_RATE_2X3_STBC_VHT4SS1 = 90, - WL_RATE_2X3_STBC_VHT5SS1 = 91, - WL_RATE_2X3_STBC_VHT6SS1 = 92, - WL_RATE_2X3_STBC_VHT7SS1 = 93, - WL_RATE_2X3_STBC_VHT8SS1 = 94, - WL_RATE_2X3_STBC_VHT9SS1 = 95, - - WL_RATE_2X3_SDM_MCS8 = 96, - WL_RATE_2X3_SDM_MCS9 = 97, - WL_RATE_2X3_SDM_MCS10 = 98, - WL_RATE_2X3_SDM_MCS11 = 99, - WL_RATE_2X3_SDM_MCS12 = 100, - WL_RATE_2X3_SDM_MCS13 = 101, - WL_RATE_2X3_SDM_MCS14 = 102, - WL_RATE_2X3_SDM_MCS15 = 103, - - WL_RATE_2X3_VHT0SS2 = 96, - WL_RATE_2X3_VHT1SS2 = 97, - WL_RATE_2X3_VHT2SS2 = 98, - WL_RATE_2X3_VHT3SS2 = 99, - WL_RATE_2X3_VHT4SS2 = 100, - WL_RATE_2X3_VHT5SS2 = 101, - WL_RATE_2X3_VHT6SS2 = 102, - WL_RATE_2X3_VHT7SS2 = 103, - WL_RATE_2X3_VHT8SS2 = 104, - WL_RATE_2X3_VHT9SS2 = 105, - - /* 3 Streams */ - WL_RATE_3X3_SDM_MCS16 = 106, - WL_RATE_3X3_SDM_MCS17 = 107, - WL_RATE_3X3_SDM_MCS18 = 108, - WL_RATE_3X3_SDM_MCS19 = 109, - WL_RATE_3X3_SDM_MCS20 = 110, - WL_RATE_3X3_SDM_MCS21 = 111, - WL_RATE_3X3_SDM_MCS22 = 112, - WL_RATE_3X3_SDM_MCS23 = 113, - - WL_RATE_3X3_VHT0SS3 = 106, - WL_RATE_3X3_VHT1SS3 = 107, - WL_RATE_3X3_VHT2SS3 = 108, - WL_RATE_3X3_VHT3SS3 = 109, - WL_RATE_3X3_VHT4SS3 = 110, - WL_RATE_3X3_VHT5SS3 = 111, - WL_RATE_3X3_VHT6SS3 
= 112, - WL_RATE_3X3_VHT7SS3 = 113, - WL_RATE_3X3_VHT8SS3 = 114, - WL_RATE_3X3_VHT9SS3 = 115, - - - /**************************** - * TX Beamforming, 2 chains * - **************************** - */ - - /* 1 Stream expanded + 1 */ - - WL_RATE_1X2_TXBF_OFDM_6 = 116, - WL_RATE_1X2_TXBF_OFDM_9 = 117, - WL_RATE_1X2_TXBF_OFDM_12 = 118, - WL_RATE_1X2_TXBF_OFDM_18 = 119, - WL_RATE_1X2_TXBF_OFDM_24 = 120, - WL_RATE_1X2_TXBF_OFDM_36 = 121, - WL_RATE_1X2_TXBF_OFDM_48 = 122, - WL_RATE_1X2_TXBF_OFDM_54 = 123, - - WL_RATE_1X2_TXBF_MCS0 = 124, - WL_RATE_1X2_TXBF_MCS1 = 125, - WL_RATE_1X2_TXBF_MCS2 = 126, - WL_RATE_1X2_TXBF_MCS3 = 127, - WL_RATE_1X2_TXBF_MCS4 = 128, - WL_RATE_1X2_TXBF_MCS5 = 129, - WL_RATE_1X2_TXBF_MCS6 = 130, - WL_RATE_1X2_TXBF_MCS7 = 131, - - WL_RATE_1X2_TXBF_VHT0SS1 = 124, - WL_RATE_1X2_TXBF_VHT1SS1 = 125, - WL_RATE_1X2_TXBF_VHT2SS1 = 126, - WL_RATE_1X2_TXBF_VHT3SS1 = 127, - WL_RATE_1X2_TXBF_VHT4SS1 = 128, - WL_RATE_1X2_TXBF_VHT5SS1 = 129, - WL_RATE_1X2_TXBF_VHT6SS1 = 130, - WL_RATE_1X2_TXBF_VHT7SS1 = 131, - WL_RATE_1X2_TXBF_VHT8SS1 = 132, - WL_RATE_1X2_TXBF_VHT9SS1 = 133, - - /* 2 Streams */ - - WL_RATE_2X2_TXBF_SDM_MCS8 = 134, - WL_RATE_2X2_TXBF_SDM_MCS9 = 135, - WL_RATE_2X2_TXBF_SDM_MCS10 = 136, - WL_RATE_2X2_TXBF_SDM_MCS11 = 137, - WL_RATE_2X2_TXBF_SDM_MCS12 = 138, - WL_RATE_2X2_TXBF_SDM_MCS13 = 139, - WL_RATE_2X2_TXBF_SDM_MCS14 = 140, - WL_RATE_2X2_TXBF_SDM_MCS15 = 141, - - WL_RATE_2X2_TXBF_VHT0SS2 = 134, - WL_RATE_2X2_TXBF_VHT1SS2 = 135, - WL_RATE_2X2_TXBF_VHT2SS2 = 136, - WL_RATE_2X2_TXBF_VHT3SS2 = 137, - WL_RATE_2X2_TXBF_VHT4SS2 = 138, - WL_RATE_2X2_TXBF_VHT5SS2 = 139, - WL_RATE_2X2_TXBF_VHT6SS2 = 140, - WL_RATE_2X2_TXBF_VHT7SS2 = 141, - - - /**************************** - * TX Beamforming, 3 chains * - **************************** - */ - - /* 1 Stream expanded + 2 */ - - WL_RATE_1X3_TXBF_OFDM_6 = 142, - WL_RATE_1X3_TXBF_OFDM_9 = 143, - WL_RATE_1X3_TXBF_OFDM_12 = 144, - WL_RATE_1X3_TXBF_OFDM_18 = 145, - WL_RATE_1X3_TXBF_OFDM_24 = 146, - 
WL_RATE_1X3_TXBF_OFDM_36 = 147, - WL_RATE_1X3_TXBF_OFDM_48 = 148, - WL_RATE_1X3_TXBF_OFDM_54 = 149, - - WL_RATE_1X3_TXBF_MCS0 = 150, - WL_RATE_1X3_TXBF_MCS1 = 151, - WL_RATE_1X3_TXBF_MCS2 = 152, - WL_RATE_1X3_TXBF_MCS3 = 153, - WL_RATE_1X3_TXBF_MCS4 = 154, - WL_RATE_1X3_TXBF_MCS5 = 155, - WL_RATE_1X3_TXBF_MCS6 = 156, - WL_RATE_1X3_TXBF_MCS7 = 157, - - WL_RATE_1X3_TXBF_VHT0SS1 = 150, - WL_RATE_1X3_TXBF_VHT1SS1 = 151, - WL_RATE_1X3_TXBF_VHT2SS1 = 152, - WL_RATE_1X3_TXBF_VHT3SS1 = 153, - WL_RATE_1X3_TXBF_VHT4SS1 = 154, - WL_RATE_1X3_TXBF_VHT5SS1 = 155, - WL_RATE_1X3_TXBF_VHT6SS1 = 156, - WL_RATE_1X3_TXBF_VHT7SS1 = 157, - WL_RATE_1X3_TXBF_VHT8SS1 = 158, - WL_RATE_1X3_TXBF_VHT9SS1 = 159, - - /* 2 Streams expanded + 1 */ - - WL_RATE_2X3_TXBF_SDM_MCS8 = 160, - WL_RATE_2X3_TXBF_SDM_MCS9 = 161, - WL_RATE_2X3_TXBF_SDM_MCS10 = 162, - WL_RATE_2X3_TXBF_SDM_MCS11 = 163, - WL_RATE_2X3_TXBF_SDM_MCS12 = 164, - WL_RATE_2X3_TXBF_SDM_MCS13 = 165, - WL_RATE_2X3_TXBF_SDM_MCS14 = 166, - WL_RATE_2X3_TXBF_SDM_MCS15 = 167, - - WL_RATE_2X3_TXBF_VHT0SS2 = 160, - WL_RATE_2X3_TXBF_VHT1SS2 = 161, - WL_RATE_2X3_TXBF_VHT2SS2 = 162, - WL_RATE_2X3_TXBF_VHT3SS2 = 163, - WL_RATE_2X3_TXBF_VHT4SS2 = 164, - WL_RATE_2X3_TXBF_VHT5SS2 = 165, - WL_RATE_2X3_TXBF_VHT6SS2 = 166, - WL_RATE_2X3_TXBF_VHT7SS2 = 167, - WL_RATE_2X3_TXBF_VHT8SS2 = 168, - WL_RATE_2X3_TXBF_VHT9SS2 = 169, - - /* 3 Streams */ - - WL_RATE_3X3_TXBF_SDM_MCS16 = 170, - WL_RATE_3X3_TXBF_SDM_MCS17 = 171, - WL_RATE_3X3_TXBF_SDM_MCS18 = 172, - WL_RATE_3X3_TXBF_SDM_MCS19 = 173, - WL_RATE_3X3_TXBF_SDM_MCS20 = 174, - WL_RATE_3X3_TXBF_SDM_MCS21 = 175, - WL_RATE_3X3_TXBF_SDM_MCS22 = 176, - WL_RATE_3X3_TXBF_SDM_MCS23 = 177, - - WL_RATE_3X3_TXBF_VHT0SS3 = 170, - WL_RATE_3X3_TXBF_VHT1SS3 = 171, - WL_RATE_3X3_TXBF_VHT2SS3 = 172, - WL_RATE_3X3_TXBF_VHT3SS3 = 173, - WL_RATE_3X3_TXBF_VHT4SS3 = 174, - WL_RATE_3X3_TXBF_VHT5SS3 = 175, - WL_RATE_3X3_TXBF_VHT6SS3 = 176, - WL_RATE_3X3_TXBF_VHT7SS3 = 177 -} clm_rates_t; - -/* Number of rate codes */ -#define 
WL_NUMRATES 178 - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif /* _bcmwifi_rates_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h b/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h deleted file mode 100644 index 5a63facfa2574..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Definitions for nl80211 vendor command/event access to host driver - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: brcm_nl80211.h 487126 2014-06-24 23:06:12Z $ - * - */ - -#ifndef _brcm_nl80211_h_ -#define _brcm_nl80211_h_ - -#define OUI_BRCM 0x001018 - -enum wl_vendor_subcmd { - BRCM_VENDOR_SCMD_UNSPEC, - BRCM_VENDOR_SCMD_PRIV_STR -}; - -struct bcm_nlmsg_hdr { - uint cmd; /* common ioctl definition */ - uint len; /* expected return buffer length */ - uint offset; /* user buffer offset */ - uint set; /* get or set request optional */ - uint magic; /* magic number for verification */ -}; - -enum bcmnl_attrs { - BCM_NLATTR_UNSPEC, - - BCM_NLATTR_LEN, - BCM_NLATTR_DATA, - - __BCM_NLATTR_AFTER_LAST, - BCM_NLATTR_MAX = __BCM_NLATTR_AFTER_LAST - 1 -}; - -struct nl_prv_data { - int err; /* return result */ - void *data; /* ioctl return buffer pointer */ - uint len; /* ioctl return buffer length */ - struct bcm_nlmsg_hdr *nlioc; /* bcm_nlmsg_hdr header pointer */ -}; - -#endif /* _brcm_nl80211_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/circularbuf.h b/drivers/net/wireless/bcmdhd/include/circularbuf.h deleted file mode 100644 index fa939ca3a6774..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/circularbuf.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Initialization and support routines for self-booting compressed image. 
- * - * $Copyright Open Broadcom Corporation$ - * - * $Id: circularbuf.h 452258 2014-01-29 19:17:57Z $ - */ - -#ifndef __CIRCULARBUF_H_INCLUDED__ -#define __CIRCULARBUF_H_INCLUDED__ - -#include -#include -#include - -/* Enumerations of return values provided by MsgBuf implementation */ -typedef enum { - CIRCULARBUF_FAILURE = -1, - CIRCULARBUF_SUCCESS -} circularbuf_ret_t; - -/* Core circularbuf circular buffer structure */ -typedef struct circularbuf_s -{ - uint16 depth; /* Depth of circular buffer */ - uint16 r_ptr; /* Read Ptr */ - uint16 w_ptr; /* Write Ptr */ - uint16 e_ptr; /* End Ptr */ - uint16 wp_ptr; /* wp_ptr/pending - scheduled for DMA. But, not yet complete. */ - uint16 rp_ptr; /* rp_ptr/pending - scheduled for DMA. But, not yet complete. */ - - uint8 *buf_addr; - void *mb_ctx; - void (*mb_ring_bell)(void *ctx); -} circularbuf_t; - -#define CBUF_ERROR_VAL 0x00000001 /* Error level tracing */ -#define CBUF_TRACE_VAL 0x00000002 /* Function level tracing */ -#define CBUF_INFORM_VAL 0x00000004 /* debug level tracing */ - -extern int cbuf_msg_level; - -#define CBUF_ERROR(args) do {if (cbuf_msg_level & CBUF_ERROR_VAL) printf args;} while (0) -#define CBUF_TRACE(args) do {if (cbuf_msg_level & CBUF_TRACE_VAL) printf args;} while (0) -#define CBUF_INFO(args) do {if (cbuf_msg_level & CBUF_INFORM_VAL) printf args;} while (0) - -#define CIRCULARBUF_START(x) ((x)->buf_addr) -#define CIRCULARBUF_WRITE_PTR(x) ((x)->w_ptr) -#define CIRCULARBUF_READ_PTR(x) ((x)->r_ptr) -#define CIRCULARBUF_END_PTR(x) ((x)->e_ptr) - -#define circularbuf_debug_print(handle) \ - CBUF_INFO(("%s:%d:\t%p rp=%4d r=%4d wp=%4d w=%4d e=%4d\n", \ - __FUNCTION__, __LINE__, \ - (void *) CIRCULARBUF_START(handle), \ - (int) (handle)->rp_ptr, (int) (handle)->r_ptr, \ - (int) (handle)->wp_ptr, (int) (handle)->w_ptr, \ - (int) (handle)->e_ptr)); - - -/* Callback registered by application/mail-box with the circularbuf implementation. 
- * This will be invoked by the circularbuf implementation when write is complete and - * ready for informing the peer - */ -typedef void (*mb_ring_t)(void *ctx); - - -/* Public Functions exposed by circularbuf */ -void -circularbuf_init(circularbuf_t *handle, void *buf_base_addr, uint16 total_buf_len); -void -circularbuf_register_cb(circularbuf_t *handle, mb_ring_t mb_ring_func, void *ctx); - -/* Write Functions */ -void * -circularbuf_reserve_for_write(circularbuf_t *handle, uint16 size); -void -circularbuf_write_complete(circularbuf_t *handle, uint16 bytes_written); - -/* Read Functions */ -void * -circularbuf_get_read_ptr(circularbuf_t *handle, uint16 *avail_len); -circularbuf_ret_t -circularbuf_read_complete(circularbuf_t *handle, uint16 bytes_read); - -/* - * circularbuf_get_read_ptr() updates rp_ptr by the amount that the consumer - * is supposed to read. The consumer may not read the entire amount. - * In such a case, circularbuf_revert_rp_ptr() call follows a corresponding - * circularbuf_get_read_ptr() call to revert the rp_ptr back to - * the point till which data has actually been processed. 
- * It is not valid if it is preceded by multiple get_read_ptr() calls - */ -circularbuf_ret_t -circularbuf_revert_rp_ptr(circularbuf_t *handle, uint16 bytes); - -#endif /* __CIRCULARBUF_H_INCLUDED__ */ diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h b/drivers/net/wireless/bcmdhd/include/epivers.h deleted file mode 100644 index 83e558e93f494..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/epivers.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * $Copyright Open Broadcom Corporation$ - * - * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $ - * -*/ - -#ifndef _epivers_h_ -#define _epivers_h_ - -#define EPI_MAJOR_VERSION 1 - -#define EPI_MINOR_VERSION 201 - -#define EPI_RC_NUMBER 59 - -#define EPI_INCREMENTAL_NUMBER 0 - -#define EPI_BUILD_NUMBER 0 - -#define EPI_VERSION 1, 201, 59, 0 - -#define EPI_VERSION_NUM 0x01c93b00 - -#define EPI_VERSION_DEV 1.201.59 - -/* Driver Version String, ASCII, 32 chars max */ -#define EPI_VERSION_STR "1.201.59.6 (r506368)" - -#endif /* _epivers_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h.in b/drivers/net/wireless/bcmdhd/include/epivers.h.in deleted file mode 100755 index 9897e987a87c7..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/epivers.h.in +++ /dev/null @@ -1,30 +0,0 @@ -/* - * $Copyright Open Broadcom Corporation$ - * - * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $ - * -*/ - -#ifndef _epivers_h_ -#define _epivers_h_ - -#define EPI_MAJOR_VERSION @EPI_MAJOR_VERSION@ - -#define EPI_MINOR_VERSION @EPI_MINOR_VERSION@ - -#define EPI_RC_NUMBER @EPI_RC_NUMBER@ - -#define EPI_INCREMENTAL_NUMBER @EPI_INCREMENTAL_NUMBER@ - -#define EPI_BUILD_NUMBER @EPI_BUILD_NUMBER@ - -#define EPI_VERSION @EPI_VERSION@ - -#define EPI_VERSION_NUM @EPI_VERSION_NUM@ - -#define EPI_VERSION_DEV @EPI_VERSION_DEV@ - -/* Driver Version String, ASCII, 32 chars max */ -#define EPI_VERSION_STR "@EPI_VERSION_STR@@EPI_VERSION_TYPE@ (@VC_VERSION_NUM@)" - -#endif /* _epivers_h_ */ diff --git 
a/drivers/net/wireless/bcmdhd/include/epivers.sh b/drivers/net/wireless/bcmdhd/include/epivers.sh deleted file mode 100755 index 25ff49b8c6d91..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/epivers.sh +++ /dev/null @@ -1,333 +0,0 @@ -#! /bin/bash -# -# Create the epivers.h file from epivers.h.in -# -# Epivers.h version support svn/sparse/gclient workspaces -# -# $Id: epivers.sh 389103 2013-03-05 17:24:49Z $ -# -# Version generation works off of svn property HeadURL, if -# not set it keys its versions from current svn workspace or -# via .gclient_info deps contents -# -# GetCompVer.py return value and action needed -# i. trunk => use current date as version string -# ii. local => use SVNURL expanded by HeadURL keyword -# iii. => use it as as is -# (some components can override and say give me native ver) -# iv. empty => -# a) If TAG is specified use it -# a) If no TAG is specified use date -# -# Contact: Prakash Dhavali -# Contact: hnd-software-scm-list -# - -# If the version header file already exists, increment its build number. -# Otherwise, create a new file. -if [ -f epivers.h ]; then - - # If REUSE_VERSION is set, epivers iteration is not incremented - # This can be used precommit and continuous integration projects - if [ -n "$REUSE_VERSION" ]; then - echo "Previous epivers.h exists. Skipping version increment" - exit 0 - fi - - build=$(grep EPI_BUILD_NUMBER epivers.h | sed -e "s,.*BUILD_NUMBER[ ]*,,") - build=$(expr ${build} + 1) - echo build=${build} - sed -e "s,.*_BUILD_NUMBER.*,#define EPI_BUILD_NUMBER ${build}," \ - < epivers.h > epivers.h.new - cp -p epivers.h epivers.h.prev - mv epivers.h.new epivers.h - exit 0 - -else # epivers.h doesn't exist - - SVNCMD=${SVNCMD:-"svn --non-interactive"} - SRCBASE=${SRCBASE:-..} - NULL=/dev/null - [ -z "$VERBOSE" ] || NULL=/dev/stderr - - # Check for the in file, if not there we're in the wrong directory - if [ ! 
-f epivers.h.in ]; then - echo "ERROR: No epivers.h.in found" - exit 1 - fi - - # Following SVNURL should be expanded on checkout - SVNURL='$HeadURL: http://svn.sj.broadcom.com/svn/wlansvn/proj/tags/DHD/DHD_REL_1_201_59/src/include/epivers.sh $' - - # .gclient_info is created by gclient checkout/sync steps - # and contains "DEPS=' ..." entry - GCLIENT_INFO=${GCLIENT_INFO:-${SRCBASE}/../.gclient_info} - - # In gclient, derive SVNURL from gclient_info file - if [ -s "${GCLIENT_INFO}" ]; then - source ${GCLIENT_INFO} - if [ -z "$DEPS" ]; then - echo "ERROR: DEPS entry missing in $GCLIENT_INFO" - exit 1 - else - for dep in $DEPS; do - SVNURL=${SVNURL:-$dep} - # Set SVNURL to first DEPS with /tags/ (if any) - if [[ $dep == */tags/* ]]; then - SVNURL=$dep - echo "INFO: Found gclient DEPS: $SVNURL" - break - fi - done - fi - elif [ -f "${GCLIENT_INFO}" ]; then - echo "ERROR: $GCLIENT_INFO exists, but it is empty" - exit 1 - fi - - # If SVNURL isn't expanded, extract it from svn info - if echo "$SVNURL" | egrep -vq 'HeadURL.*epivers.sh.*|http://.*/DEPS'; then - [ -n "$VERBOSE" ] && \ - echo "DBG: SVN URL ($SVNURL) wasn't expanded. 
Getting it from svn info" - SVNURL=$($SVNCMD info epivers.sh 2> $NULL | egrep "^URL:") - fi - - if echo "${TAG}" | grep -q "_BRANCH_\|_TWIG_"; then - branchtag=$TAG - else - branchtag="" - fi - - # If this is a tagged build, use the tag to supply the numbers - # Tag should be in the form - # _REL__ - # or - # _REL___RC - # or - # _REL___RC_ - - MERGERLOG=${SRCBASE}/../merger_sources.log - GETCOMPVER=getcompver.py - GETCOMPVER_NET=/projects/hnd_software/gallery/src/tools/build/$GETCOMPVER - GETCOMPVER_NET_WIN=Z:${GETCOMPVER_NET} - - # - # If there is a local copy GETCOMPVER use it ahead of network copy - # - if [ -s "$GETCOMPVER" ]; then - GETCOMPVER_PATH="$GETCOMPVER" - elif [ -s "${SRCBASE}/../src/tools/build/$GETCOMPVER" ]; then - GETCOMPVER_PATH="${SRCBASE}/../src/tools/build/$GETCOMPVER" - elif [ -s "$GETCOMPVER_NET" ]; then - GETCOMPVER_PATH="$GETCOMPVER_NET" - elif [ -s "$GETCOMPVER_NET_WIN" ]; then - GETCOMPVER_PATH="$GETCOMPVER_NET_WIN" - fi - - # - # If $GETCOMPVER isn't found, fetch it from SVN - # (this should be very rare) - # - if [ ! -s "$GETCOMPVER_PATH" ]; then - [ -n "$VERBOSE" ] && \ - echo "DBG: Fetching $GETCOMPVER from trunk" - - $SVNCMD export -q \ - ^/proj/trunk/src/tools/build/${GETCOMPVER} \ - ${GETCOMPVER} 2> $NULL - - GETCOMPVER_PATH=$GETCOMPVER - fi - - # Now get tag for src/include from automerger log - [ -n "$VERBOSE" ] && \ - echo "DBG: python $GETCOMPVER_PATH $MERGERLOG src/include" - - COMPTAG=$(python $GETCOMPVER_PATH $MERGERLOG src/include 2> $NULL | sed -e 's/[[:space:]]*//g') - - echo "DBG: Component Tag String Derived = $COMPTAG" - - # Process COMPTAG values - # Rule: - # If trunk is returned, use date as component tag - # If LOCAL_COMPONENT is returned, use SVN URL to get native tag - # If component is returned or empty, assign it to SVNTAG - # GetCompVer.py return value and action needed - # i. trunk => use current date as version string - # ii. local => use SVNURL expanded by HeadURL keyword - # iii. 
=> use it as as is - # iv. empty => - # a) If TAG is specified use it - # a) If no TAG is specified use SVNURL from HeadURL - - SVNURL_VER=false - - if [ "$COMPTAG" == "" ]; then - SVNURL_VER=true - elif [ "$COMPTAG" == "LOCAL_COMPONENT" ]; then - SVNURL_VER=true - elif [ "$COMPTAG" == "trunk" ]; then - SVNTAG=$(date '+TRUNKCOMP_REL_%Y_%m_%d') - else - SVNTAG=$COMPTAG - fi - - # Given SVNURL path conventions or naming conventions, derive SVNTAG - # TO-DO: SVNTAG derivation logic can move to a central common API - # TO-DO: ${SRCBASE}/tools/build/svnurl2tag.sh - if [ "$SVNURL_VER" == "true" ]; then - case "${SVNURL}" in - *_BRANCH_*) - SVNTAG=$(echo $SVNURL | tr '/' '\n' | awk '/_BRANCH_/{printf "%s",$1}') - ;; - *_TWIG_*) - SVNTAG=$(echo $SVNURL | tr '/' '\n' | awk '/_TWIG_/{printf "%s",$1}') - ;; - *_REL_*) - SVNTAG=$(echo $SVNURL | tr '/' '\n' | awk '/_REL_/{printf "%s",$1}') - ;; - */branches/*) - SVNTAG=${SVNURL#*/branches/} - SVNTAG=${SVNTAG%%/*} - ;; - */proj/tags/*|*/deps/tags/*) - SVNTAG=${SVNURL#*/tags/*/} - SVNTAG=${SVNTAG%%/*} - ;; - */trunk/*) - SVNTAG=$(date '+TRUNKURL_REL_%Y_%m_%d') - ;; - *) - SVNTAG=$(date '+OTHER_REL_%Y_%m_%d') - ;; - esac - echo "DBG: Native Tag String Derived from URL: $SVNTAG" - else - echo "DBG: Native Tag String Derived: $SVNTAG" - fi - - TAG=${SVNTAG} - - # Normalize the branch name portion to "D11" in case it has underscores in it - branch_name=$(expr match "$TAG" '\(.*\)_\(BRANCH\|TWIG\|REL\)_.*') - TAG=$(echo $TAG | sed -e "s%^$branch_name%D11%") - - # Split the tag into an array on underbar or whitespace boundaries. - IFS="_ " tag=(${TAG}) - unset IFS - - tagged=1 - if [ ${#tag[*]} -eq 0 ]; then - tag=($(date '+TOT REL %Y %m %d 0 %y')); - # reconstruct a TAG from the date - TAG=${tag[0]}_${tag[1]}_${tag[2]}_${tag[3]}_${tag[4]}_${tag[5]} - tagged=0 - fi - - # Allow environment variable to override values. 
- # Missing values default to 0 - # - maj=${EPI_MAJOR_VERSION:-${tag[2]:-0}} - min=${EPI_MINOR_VERSION:-${tag[3]:-0}} - rcnum=${EPI_RC_NUMBER:-${tag[4]:-0}} - - # If increment field is 0, set it to date suffix if on TOB - if [ -n "$branchtag" ]; then - [ "${tag[5]:-0}" -eq 0 ] && echo "Using date suffix for incr" - today=${EPI_DATE_STR:-$(date '+%Y%m%d')} - incremental=${EPI_INCREMENTAL_NUMBER:-${tag[5]:-${today:-0}}} - else - incremental=${EPI_INCREMENTAL_NUMBER:-${tag[5]:-0}} - fi - origincr=${EPI_INCREMENTAL_NUMBER:-${tag[5]:-0}} - build=${EPI_BUILD_NUMBER:-0} - - # Strip 'RC' from front of rcnum if present - rcnum=${rcnum/#RC/} - - # strip leading zero off the number (otherwise they look like octal) - maj=${maj/#0/} - min=${min/#0/} - rcnum=${rcnum/#0/} - incremental=${incremental/#0/} - origincr=${origincr/#0/} - build=${build/#0/} - - # some numbers may now be null. replace with with zero. - maj=${maj:-0} - min=${min:-0} - - rcnum=${rcnum:-0} - incremental=${incremental:-0} - origincr=${origincr:-0} - build=${build:-0} - - if [ -n "$EPI_VERSION_NUM" ]; then - vernum=$EPI_VERSION_NUM - elif [ ${tagged} -eq 1 ]; then - # vernum is 32chars max - vernum=$(printf "0x%02x%02x%02x%02x" ${maj} ${min} ${rcnum} ${origincr}) - else - vernum=$(printf "0x00%02x%02x%02x" ${tag[7]} ${min} ${rcnum}) - fi - - # make sure the size of vernum is under 32 bits. - # Otherwise, truncate. The string will keep full information. - vernum=${vernum:0:10} - - # build the string directly from the tag, irrespective of its length - # remove the name , the tag type, then replace all _ by . 
- tag_ver_str=${TAG/${tag[0]}_} - tag_ver_str=${tag_ver_str/${tag[1]}_} - tag_ver_str=${tag_ver_str//_/.} - - # record tag type - tagtype= - - if [ "${tag[1]}" = "BRANCH" -o "${tag[1]}" = "TWIG" ]; then - tagtype=" (TOB)" - echo "tag type: $tagtype" - fi - - echo "Effective version string: $tag_ver_str" - - if [ "$(uname -s)" == "Darwin" ]; then - # Mac does not like 2-digit numbers so convert the number to single - # digit. 5.100 becomes 5.1 - if [ $min -gt 99 ]; then - minmac=$(expr $min / 100) - else - minmac=$min - fi - epi_ver_dev="${maj}.${minmac}.0" - else - epi_ver_dev="${maj}.${min}.${rcnum}" - fi - - # Finally get version control revision number of (if any) - vc_version_num=$($SVNCMD info ${SRCBASE} 2> $NULL | awk -F': ' '/^Last Changed Rev: /{printf "%s", $2}') - - # OK, go do it - echo "maj=${maj}, min=${min}, rc=${rcnum}, inc=${incremental}, build=${build}" - - sed \ - -e "s;@EPI_MAJOR_VERSION@;${maj};" \ - -e "s;@EPI_MINOR_VERSION@;${min};" \ - -e "s;@EPI_RC_NUMBER@;${rcnum};" \ - -e "s;@EPI_INCREMENTAL_NUMBER@;${incremental};" \ - -e "s;@EPI_BUILD_NUMBER@;${build};" \ - -e "s;@EPI_VERSION@;${maj}, ${min}, ${rcnum}, ${incremental};" \ - -e "s;@EPI_VERSION_STR@;${tag_ver_str};" \ - -e "s;@EPI_VERSION_TYPE@;${tagtype};" \ - -e "s;@VERSION_TYPE@;${tagtype};" \ - -e "s;@EPI_VERSION_NUM@;${vernum};" \ - -e "s;@EPI_VERSION_DEV@;${epi_ver_dev};" \ - -e "s;@VC_VERSION_NUM@;r${vc_version_num};" \ - < epivers.h.in > epivers.h - - # In shared workspaces across different platforms, ensure that - # windows generated file is made platform neutral without CRLF - if uname -s | egrep -i -q "cygwin"; then - dos2unix epivers.h > $NULL 2>&1 - fi -fi # epivers.h diff --git a/drivers/net/wireless/bcmdhd/include/hndpmu.h b/drivers/net/wireless/bcmdhd/include/hndpmu.h deleted file mode 100644 index fc402b07228fa..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/hndpmu.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * HND SiliconBackplane PMU support. 
- * - * $Copyright Open Broadcom Corporation$ - * - * $Id: hndpmu.h 471127 2014-04-17 23:24:23Z $ - */ - -#ifndef _hndpmu_h_ -#define _hndpmu_h_ - -#include -#include -#include - - -extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask); -extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength); - -extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear); -extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh); - -#endif /* _hndpmu_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/osl_decl.h b/drivers/net/wireless/bcmdhd/include/osl_decl.h deleted file mode 100644 index 944945838e98c..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/osl_decl.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * osl forward declarations - * - * $Copyright Open Broadcom Corporation$ - * - * $Id$ - */ - -#ifndef _osl_decl_h_ -#define _osl_decl_h_ - -/* osl handle type forward declaration */ -typedef struct osl_info osl_t; -typedef struct osl_dmainfo osldma_t; - -#endif diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_end.h b/drivers/net/wireless/bcmdhd/include/packed_section_end.h deleted file mode 100644 index bde3959f350bd..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/packed_section_end.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Declare directives for structure packing. No padding will be provided - * between the members of packed structures, and therefore, there is no - * guarantee that structure members will be aligned. - * - * Declaring packed structures is compiler specific. 
In order to handle all - * cases, packed structures should be delared as: - * - * #include - * - * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { - * some_struct_members; - * } BWL_POST_PACKED_STRUCT foobar_t; - * - * #include - * - * - * $Copyright Open Broadcom Corporation$ - * $Id: packed_section_end.h 437241 2013-11-18 07:39:24Z $ - */ - - -/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h - * and undefined in packed_section_end.h. If it is NOT defined at this - * point, then there is a missing include of packed_section_start.h. - */ -#ifdef BWL_PACKED_SECTION - #undef BWL_PACKED_SECTION -#else - #error "BWL_PACKED_SECTION is NOT defined!" -#endif - - - - -/* Compiler-specific directives for structure packing are declared in - * packed_section_start.h. This marks the end of the structure packing section, - * so, undef them here. - */ -#undef BWL_PRE_PACKED_STRUCT -#undef BWL_POST_PACKED_STRUCT diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_start.h b/drivers/net/wireless/bcmdhd/include/packed_section_start.h deleted file mode 100644 index 3c22dcb276a91..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/packed_section_start.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Declare directives for structure packing. No padding will be provided - * between the members of packed structures, and therefore, there is no - * guarantee that structure members will be aligned. - * - * Declaring packed structures is compiler specific. In order to handle all - * cases, packed structures should be delared as: - * - * #include - * - * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { - * some_struct_members; - * } BWL_POST_PACKED_STRUCT foobar_t; - * - * #include - * - * - * $Copyright Open Broadcom Corporation$ - * $Id: packed_section_start.h 437241 2013-11-18 07:39:24Z $ - */ - - -/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h - * and undefined in packed_section_end.h. 
If it is already defined at this - * point, then there is a missing include of packed_section_end.h. - */ -#ifdef BWL_PACKED_SECTION - #error "BWL_PACKED_SECTION is already defined!" -#else - #define BWL_PACKED_SECTION -#endif - - - - -/* Declare compiler-specific directives for structure packing. */ -#if defined(__GNUC__) || defined(__lint) - #define BWL_PRE_PACKED_STRUCT - #define BWL_POST_PACKED_STRUCT __attribute__ ((packed)) -#elif defined(__CC_ARM) - #define BWL_PRE_PACKED_STRUCT __packed - #define BWL_POST_PACKED_STRUCT -#else - #error "Unknown compiler!" -#endif diff --git a/drivers/net/wireless/bcmdhd/include/pcicfg.h b/drivers/net/wireless/bcmdhd/include/pcicfg.h deleted file mode 100644 index 9ff1332e9f1d2..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/pcicfg.h +++ /dev/null @@ -1,600 +0,0 @@ -/* - * pcicfg.h: PCI configuration constants and structures. - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: pcicfg.h 506084 2014-10-02 15:34:59Z $ - */ - -#ifndef _h_pcicfg_ -#define _h_pcicfg_ - -#ifndef LINUX_POSTMOGRIFY_REMOVAL -/* The following inside ifndef's so we don't collide with NTDDK.H */ -#ifndef PCI_MAX_BUS -#define PCI_MAX_BUS 0x100 -#endif -#ifndef PCI_MAX_DEVICES -#define PCI_MAX_DEVICES 0x20 -#endif -#ifndef PCI_MAX_FUNCTION -#define PCI_MAX_FUNCTION 0x8 -#endif - -#ifndef PCI_INVALID_VENDORID -#define PCI_INVALID_VENDORID 0xffff -#endif -#ifndef PCI_INVALID_DEVICEID -#define PCI_INVALID_DEVICEID 0xffff -#endif - - -/* Convert between bus-slot-function-register and config addresses */ - -#define PCICFG_BUS_SHIFT 16 /* Bus shift */ -#define PCICFG_SLOT_SHIFT 11 /* Slot shift */ -#define PCICFG_FUN_SHIFT 8 /* Function shift */ -#define PCICFG_OFF_SHIFT 0 /* Register shift */ - -#define PCICFG_BUS_MASK 0xff /* Bus mask */ -#define PCICFG_SLOT_MASK 0x1f /* Slot mask */ -#define PCICFG_FUN_MASK 7 /* Function mask */ -#define PCICFG_OFF_MASK 0xff /* Bus mask */ - -#define PCI_CONFIG_ADDR(b, s, f, o) \ - ((((b) & PCICFG_BUS_MASK) << 
PCICFG_BUS_SHIFT) \ - | (((s) & PCICFG_SLOT_MASK) << PCICFG_SLOT_SHIFT) \ - | (((f) & PCICFG_FUN_MASK) << PCICFG_FUN_SHIFT) \ - | (((o) & PCICFG_OFF_MASK) << PCICFG_OFF_SHIFT)) - -#define PCI_CONFIG_BUS(a) (((a) >> PCICFG_BUS_SHIFT) & PCICFG_BUS_MASK) -#define PCI_CONFIG_SLOT(a) (((a) >> PCICFG_SLOT_SHIFT) & PCICFG_SLOT_MASK) -#define PCI_CONFIG_FUN(a) (((a) >> PCICFG_FUN_SHIFT) & PCICFG_FUN_MASK) -#define PCI_CONFIG_OFF(a) (((a) >> PCICFG_OFF_SHIFT) & PCICFG_OFF_MASK) - -/* PCIE Config space accessing MACROS */ - -#define PCIECFG_BUS_SHIFT 24 /* Bus shift */ -#define PCIECFG_SLOT_SHIFT 19 /* Slot/Device shift */ -#define PCIECFG_FUN_SHIFT 16 /* Function shift */ -#define PCIECFG_OFF_SHIFT 0 /* Register shift */ - -#define PCIECFG_BUS_MASK 0xff /* Bus mask */ -#define PCIECFG_SLOT_MASK 0x1f /* Slot/Device mask */ -#define PCIECFG_FUN_MASK 7 /* Function mask */ -#define PCIECFG_OFF_MASK 0xfff /* Register mask */ - -#define PCIE_CONFIG_ADDR(b, s, f, o) \ - ((((b) & PCIECFG_BUS_MASK) << PCIECFG_BUS_SHIFT) \ - | (((s) & PCIECFG_SLOT_MASK) << PCIECFG_SLOT_SHIFT) \ - | (((f) & PCIECFG_FUN_MASK) << PCIECFG_FUN_SHIFT) \ - | (((o) & PCIECFG_OFF_MASK) << PCIECFG_OFF_SHIFT)) - -#define PCIE_CONFIG_BUS(a) (((a) >> PCIECFG_BUS_SHIFT) & PCIECFG_BUS_MASK) -#define PCIE_CONFIG_SLOT(a) (((a) >> PCIECFG_SLOT_SHIFT) & PCIECFG_SLOT_MASK) -#define PCIE_CONFIG_FUN(a) (((a) >> PCIECFG_FUN_SHIFT) & PCIECFG_FUN_MASK) -#define PCIE_CONFIG_OFF(a) (((a) >> PCIECFG_OFF_SHIFT) & PCIECFG_OFF_MASK) - -/* The actual config space */ - -#define PCI_BAR_MAX 6 - -#define PCI_ROM_BAR 8 - -#define PCR_RSVDA_MAX 2 - -/* Bits in PCI bars' flags */ - -#define PCIBAR_FLAGS 0xf -#define PCIBAR_IO 0x1 -#define PCIBAR_MEM1M 0x2 -#define PCIBAR_MEM64 0x4 -#define PCIBAR_PREFETCH 0x8 -#define PCIBAR_MEM32_MASK 0xFFFFFF80 - -typedef struct _pci_config_regs { - uint16 vendor; - uint16 device; - uint16 command; - uint16 status; - uint8 rev_id; - uint8 prog_if; - uint8 sub_class; - uint8 base_class; - uint8 
cache_line_size; - uint8 latency_timer; - uint8 header_type; - uint8 bist; - uint32 base[PCI_BAR_MAX]; - uint32 cardbus_cis; - uint16 subsys_vendor; - uint16 subsys_id; - uint32 baserom; - uint32 rsvd_a[PCR_RSVDA_MAX]; - uint8 int_line; - uint8 int_pin; - uint8 min_gnt; - uint8 max_lat; - uint8 dev_dep[192]; -} pci_config_regs; - -#define SZPCR (sizeof (pci_config_regs)) -#define MINSZPCR 64 /* offsetof (dev_dep[0] */ - -#endif /* !LINUX_POSTMOGRIFY_REMOVAL */ - -/* pci config status reg has a bit to indicate that capability ptr is present */ - -#define PCI_CAPPTR_PRESENT 0x0010 - -/* A structure for the config registers is nice, but in most - * systems the config space is not memory mapped, so we need - * field offsetts. :-( - */ -#define PCI_CFG_VID 0 -#define PCI_CFG_DID 2 -#define PCI_CFG_CMD 4 -#define PCI_CFG_STAT 6 -#define PCI_CFG_REV 8 -#define PCI_CFG_PROGIF 9 -#define PCI_CFG_SUBCL 0xa -#define PCI_CFG_BASECL 0xb -#define PCI_CFG_CLSZ 0xc -#define PCI_CFG_LATTIM 0xd -#define PCI_CFG_HDR 0xe -#define PCI_CFG_BIST 0xf -#define PCI_CFG_BAR0 0x10 -#define PCI_CFG_BAR1 0x14 -#define PCI_CFG_BAR2 0x18 -#define PCI_CFG_BAR3 0x1c -#define PCI_CFG_BAR4 0x20 -#define PCI_CFG_BAR5 0x24 -#define PCI_CFG_CIS 0x28 -#define PCI_CFG_SVID 0x2c -#define PCI_CFG_SSID 0x2e -#define PCI_CFG_ROMBAR 0x30 -#define PCI_CFG_CAPPTR 0x34 -#define PCI_CFG_INT 0x3c -#define PCI_CFG_PIN 0x3d -#define PCI_CFG_MINGNT 0x3e -#define PCI_CFG_MAXLAT 0x3f -#define PCI_CFG_DEVCTRL 0xd8 -#ifndef LINUX_POSTMOGRIFY_REMOVAL - - - -/* Classes and subclasses */ - -typedef enum { - PCI_CLASS_OLD = 0, - PCI_CLASS_DASDI, - PCI_CLASS_NET, - PCI_CLASS_DISPLAY, - PCI_CLASS_MMEDIA, - PCI_CLASS_MEMORY, - PCI_CLASS_BRIDGE, - PCI_CLASS_COMM, - PCI_CLASS_BASE, - PCI_CLASS_INPUT, - PCI_CLASS_DOCK, - PCI_CLASS_CPU, - PCI_CLASS_SERIAL, - PCI_CLASS_INTELLIGENT = 0xe, - PCI_CLASS_SATELLITE, - PCI_CLASS_CRYPT, - PCI_CLASS_DSP, - PCI_CLASS_XOR = 0xfe -} pci_classes; - -typedef enum { - PCI_DASDI_SCSI, - 
PCI_DASDI_IDE, - PCI_DASDI_FLOPPY, - PCI_DASDI_IPI, - PCI_DASDI_RAID, - PCI_DASDI_OTHER = 0x80 -} pci_dasdi_subclasses; - -typedef enum { - PCI_NET_ETHER, - PCI_NET_TOKEN, - PCI_NET_FDDI, - PCI_NET_ATM, - PCI_NET_OTHER = 0x80 -} pci_net_subclasses; - -typedef enum { - PCI_DISPLAY_VGA, - PCI_DISPLAY_XGA, - PCI_DISPLAY_3D, - PCI_DISPLAY_OTHER = 0x80 -} pci_display_subclasses; - -typedef enum { - PCI_MMEDIA_VIDEO, - PCI_MMEDIA_AUDIO, - PCI_MMEDIA_PHONE, - PCI_MEDIA_OTHER = 0x80 -} pci_mmedia_subclasses; - -typedef enum { - PCI_MEMORY_RAM, - PCI_MEMORY_FLASH, - PCI_MEMORY_OTHER = 0x80 -} pci_memory_subclasses; - -typedef enum { - PCI_BRIDGE_HOST, - PCI_BRIDGE_ISA, - PCI_BRIDGE_EISA, - PCI_BRIDGE_MC, - PCI_BRIDGE_PCI, - PCI_BRIDGE_PCMCIA, - PCI_BRIDGE_NUBUS, - PCI_BRIDGE_CARDBUS, - PCI_BRIDGE_RACEWAY, - PCI_BRIDGE_OTHER = 0x80 -} pci_bridge_subclasses; - -typedef enum { - PCI_COMM_UART, - PCI_COMM_PARALLEL, - PCI_COMM_MULTIUART, - PCI_COMM_MODEM, - PCI_COMM_OTHER = 0x80 -} pci_comm_subclasses; - -typedef enum { - PCI_BASE_PIC, - PCI_BASE_DMA, - PCI_BASE_TIMER, - PCI_BASE_RTC, - PCI_BASE_PCI_HOTPLUG, - PCI_BASE_OTHER = 0x80 -} pci_base_subclasses; - -typedef enum { - PCI_INPUT_KBD, - PCI_INPUT_PEN, - PCI_INPUT_MOUSE, - PCI_INPUT_SCANNER, - PCI_INPUT_GAMEPORT, - PCI_INPUT_OTHER = 0x80 -} pci_input_subclasses; - -typedef enum { - PCI_DOCK_GENERIC, - PCI_DOCK_OTHER = 0x80 -} pci_dock_subclasses; - -typedef enum { - PCI_CPU_386, - PCI_CPU_486, - PCI_CPU_PENTIUM, - PCI_CPU_ALPHA = 0x10, - PCI_CPU_POWERPC = 0x20, - PCI_CPU_MIPS = 0x30, - PCI_CPU_COPROC = 0x40, - PCI_CPU_OTHER = 0x80 -} pci_cpu_subclasses; - -typedef enum { - PCI_SERIAL_IEEE1394, - PCI_SERIAL_ACCESS, - PCI_SERIAL_SSA, - PCI_SERIAL_USB, - PCI_SERIAL_FIBER, - PCI_SERIAL_SMBUS, - PCI_SERIAL_OTHER = 0x80 -} pci_serial_subclasses; - -typedef enum { - PCI_INTELLIGENT_I2O -} pci_intelligent_subclasses; - -typedef enum { - PCI_SATELLITE_TV, - PCI_SATELLITE_AUDIO, - PCI_SATELLITE_VOICE, - PCI_SATELLITE_DATA, - 
PCI_SATELLITE_OTHER = 0x80 -} pci_satellite_subclasses; - -typedef enum { - PCI_CRYPT_NETWORK, - PCI_CRYPT_ENTERTAINMENT, - PCI_CRYPT_OTHER = 0x80 -} pci_crypt_subclasses; - -typedef enum { - PCI_DSP_DPIO, - PCI_DSP_OTHER = 0x80 -} pci_dsp_subclasses; - -typedef enum { - PCI_XOR_QDMA, - PCI_XOR_OTHER = 0x80 -} pci_xor_subclasses; - -/* Overlay for a PCI-to-PCI bridge */ - -#define PPB_RSVDA_MAX 2 -#define PPB_RSVDD_MAX 8 - -typedef struct _ppb_config_regs { - uint16 vendor; - uint16 device; - uint16 command; - uint16 status; - uint8 rev_id; - uint8 prog_if; - uint8 sub_class; - uint8 base_class; - uint8 cache_line_size; - uint8 latency_timer; - uint8 header_type; - uint8 bist; - uint32 rsvd_a[PPB_RSVDA_MAX]; - uint8 prim_bus; - uint8 sec_bus; - uint8 sub_bus; - uint8 sec_lat; - uint8 io_base; - uint8 io_lim; - uint16 sec_status; - uint16 mem_base; - uint16 mem_lim; - uint16 pf_mem_base; - uint16 pf_mem_lim; - uint32 pf_mem_base_hi; - uint32 pf_mem_lim_hi; - uint16 io_base_hi; - uint16 io_lim_hi; - uint16 subsys_vendor; - uint16 subsys_id; - uint32 rsvd_b; - uint8 rsvd_c; - uint8 int_pin; - uint16 bridge_ctrl; - uint8 chip_ctrl; - uint8 diag_ctrl; - uint16 arb_ctrl; - uint32 rsvd_d[PPB_RSVDD_MAX]; - uint8 dev_dep[192]; -} ppb_config_regs; - -/* Everything below is BRCM HND proprietary */ - - -/* Brcm PCI configuration registers */ -#define cap_list rsvd_a[0] -#define bar0_window dev_dep[0x80 - 0x40] -#define bar1_window dev_dep[0x84 - 0x40] -#define sprom_control dev_dep[0x88 - 0x40] -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ - - -/* PCI CAPABILITY DEFINES */ -#define PCI_CAP_POWERMGMTCAP_ID 0x01 -#define PCI_CAP_MSICAP_ID 0x05 -#define PCI_CAP_VENDSPEC_ID 0x09 -#define PCI_CAP_PCIECAP_ID 0x10 - -/* Data structure to define the Message Signalled Interrupt facility - * Valid for PCI and PCIE configurations - */ -typedef struct _pciconfig_cap_msi { - uint8 capID; - uint8 nextptr; - uint16 msgctrl; - uint32 msgaddr; -} pciconfig_cap_msi; -#define MSI_ENABLE 0x1 /* bit 0 
of msgctrl */ - -/* Data structure to define the Power managment facility - * Valid for PCI and PCIE configurations - */ -typedef struct _pciconfig_cap_pwrmgmt { - uint8 capID; - uint8 nextptr; - uint16 pme_cap; - uint16 pme_sts_ctrl; - uint8 pme_bridge_ext; - uint8 data; -} pciconfig_cap_pwrmgmt; - -#define PME_CAP_PM_STATES (0x1f << 27) /* Bits 31:27 states that can generate PME */ -#define PME_CSR_OFFSET 0x4 /* 4-bytes offset */ -#define PME_CSR_PME_EN (1 << 8) /* Bit 8 Enable generating of PME */ -#define PME_CSR_PME_STAT (1 << 15) /* Bit 15 PME got asserted */ - -/* Data structure to define the PCIE capability */ -typedef struct _pciconfig_cap_pcie { - uint8 capID; - uint8 nextptr; - uint16 pcie_cap; - uint32 dev_cap; - uint16 dev_ctrl; - uint16 dev_status; - uint32 link_cap; - uint16 link_ctrl; - uint16 link_status; - uint32 slot_cap; - uint16 slot_ctrl; - uint16 slot_status; - uint16 root_ctrl; - uint16 root_cap; - uint32 root_status; -} pciconfig_cap_pcie; - -/* PCIE Enhanced CAPABILITY DEFINES */ -#define PCIE_EXTCFG_OFFSET 0x100 -#define PCIE_ADVERRREP_CAPID 0x0001 -#define PCIE_VC_CAPID 0x0002 -#define PCIE_DEVSNUM_CAPID 0x0003 -#define PCIE_PWRBUDGET_CAPID 0x0004 - -/* PCIE Extended configuration */ -#define PCIE_ADV_CORR_ERR_MASK 0x114 -#define CORR_ERR_RE (1 << 0) /* Receiver */ -#define CORR_ERR_BT (1 << 6) /* Bad TLP */ -#define CORR_ERR_BD (1 << 7) /* Bad DLLP */ -#define CORR_ERR_RR (1 << 8) /* REPLAY_NUM rollover */ -#define CORR_ERR_RT (1 << 12) /* Reply timer timeout */ -#define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \ - CORR_ERR_RR | CORR_ERR_RT) - -/* PCIE Root Control Register bits (Host mode only) */ -#define PCIE_RC_CORR_SERR_EN 0x0001 -#define PCIE_RC_NONFATAL_SERR_EN 0x0002 -#define PCIE_RC_FATAL_SERR_EN 0x0004 -#define PCIE_RC_PME_INT_EN 0x0008 -#define PCIE_RC_CRS_EN 0x0010 - -/* PCIE Root Capability Register bits (Host mode only) */ -#define PCIE_RC_CRS_VISIBILITY 0x0001 - -/* Header to define the PCIE specific 
capabilities in the extended config space */ -typedef struct _pcie_enhanced_caphdr { - uint16 capID; - uint16 cap_ver : 4; - uint16 next_ptr : 12; -} pcie_enhanced_caphdr; - - -#define PCI_BAR0_WIN 0x80 /* backplane addres space accessed by BAR0 */ -#define PCI_BAR1_WIN 0x84 /* backplane addres space accessed by BAR1 */ -#define PCI_SPROM_CONTROL 0x88 /* sprom property control */ -#define PCI_BAR1_CONTROL 0x8c /* BAR1 region burst control */ -#define PCI_INT_STATUS 0x90 /* PCI and other cores interrupts */ -#define PCI_INT_MASK 0x94 /* mask of PCI and other cores interrupts */ -#define PCI_TO_SB_MB 0x98 /* signal backplane interrupts */ -#define PCI_BACKPLANE_ADDR 0xa0 /* address an arbitrary location on the system backplane */ -#define PCI_BACKPLANE_DATA 0xa4 /* data at the location specified by above address */ -#define PCI_CLK_CTL_ST 0xa8 /* pci config space clock control/status (>=rev14) */ -#define PCI_BAR0_WIN2 0xac /* backplane addres space accessed by second 4KB of BAR0 */ -#define PCI_GPIO_IN 0xb0 /* pci config space gpio input (>=rev3) */ -#define PCI_GPIO_OUT 0xb4 /* pci config space gpio output (>=rev3) */ -#define PCI_GPIO_OUTEN 0xb8 /* pci config space gpio output enable (>=rev3) */ -#define PCI_LINK_CTRL 0xbc /* PCI link control register */ -#define PCI_DEV_STAT_CTRL2 0xd4 /* PCI device status control 2 register */ -#define PCIE_LTR_MAX_SNOOP 0x1b4 /* PCIE LTRMaxSnoopLatency */ -#define PCI_L1SS_CTRL 0x248 /* The L1 PM Substates Control register */ -#define PCI_L1SS_CTRL2 0x24c /* The L1 PM Substates Control 2 register */ - -/* Private Registers */ -#define PCI_STAT_CTRL 0xa80 -#define PCI_L0_EVENTCNT 0xa84 -#define PCI_L0_STATETMR 0xa88 -#define PCI_L1_EVENTCNT 0xa8c -#define PCI_L1_STATETMR 0xa90 -#define PCI_L1_1_EVENTCNT 0xa94 -#define PCI_L1_1_STATETMR 0xa98 -#define PCI_L1_2_EVENTCNT 0xa9c -#define PCI_L1_2_STATETMR 0xaa0 -#define PCI_L2_EVENTCNT 0xaa4 -#define PCI_L2_STATETMR 0xaa8 - -#define PCI_PMCR_REFUP 0x1814 /* Trefup time */ -#define 
PCI_PMCR_REFUP_EXT 0x1818 /* Trefup extend Max */ -#define PCI_TPOWER_SCALE_MASK 0x3 -#define PCI_TPOWER_SCALE_SHIFT 3 /* 0:1 is scale and 2 is rsvd */ - - -#define PCI_BAR0_SHADOW_OFFSET (2 * 1024) /* bar0 + 2K accesses sprom shadow (in pci core) */ -#define PCI_BAR0_SPROM_OFFSET (4 * 1024) /* bar0 + 4K accesses external sprom */ -#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024) /* bar0 + 6K accesses pci core registers */ -#define PCI_BAR0_PCISBR_OFFSET (4 * 1024) /* pci core SB registers are at the end of the - * 8KB window, so their address is the "regular" - * address plus 4K - */ -/* - * PCIE GEN2 changed some of the above locations for - * Bar0WrapperBase, SecondaryBAR0Window and SecondaryBAR0WrapperBase - * BAR0 maps 32K of register space -*/ -#define PCIE2_BAR0_WIN2 0x70 /* backplane addres space accessed by second 4KB of BAR0 */ -#define PCIE2_BAR0_CORE2_WIN 0x74 /* backplane addres space accessed by second 4KB of BAR0 */ -#define PCIE2_BAR0_CORE2_WIN2 0x78 /* backplane addres space accessed by second 4KB of BAR0 */ - -#define PCI_BAR0_WINSZ (16 * 1024) /* bar0 window size Match with corerev 13 */ -/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */ -#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024) /* bar0 + 8K accesses pci/pcie core registers */ -#define PCI_16KB0_CCREGS_OFFSET (12 * 1024) /* bar0 + 12K accesses chipc core registers */ -#define PCI_16KBB0_WINSZ (16 * 1024) /* bar0 window size */ - -#ifndef LINUX_POSTMOGRIFY_REMOVAL -/* On AI chips we have a second window to map DMP regs are mapped: */ -#define PCI_16KB0_WIN2_OFFSET (4 * 1024) /* bar0 + 4K is "Window 2" */ - -/* PCI_INT_STATUS */ -#define PCI_SBIM_STATUS_SERR 0x4 /* backplane SBErr interrupt status */ - -/* PCI_INT_MASK */ -#define PCI_SBIM_SHIFT 8 /* backplane core interrupt mask bits offset */ -#define PCI_SBIM_MASK 0xff00 /* backplane core interrupt mask */ -#define PCI_SBIM_MASK_SERR 0x4 /* backplane SBErr interrupt mask */ - -#ifndef LINUX_POSTMOGRIFY_REMOVAL -/* 
PCI_SPROM_CONTROL */ -#define SPROM_SZ_MSK 0x02 /* SPROM Size Mask */ -#define SPROM_LOCKED 0x08 /* SPROM Locked */ -#define SPROM_BLANK 0x04 /* indicating a blank SPROM */ -#define SPROM_WRITEEN 0x10 /* SPROM write enable */ -#define SPROM_BOOTROM_WE 0x20 /* external bootrom write enable */ -#define SPROM_BACKPLANE_EN 0x40 /* Enable indirect backplane access */ -#define SPROM_OTPIN_USE 0x80 /* device OTP In use */ -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ - -/* Bits in PCI command and status regs */ -#define PCI_CMD_IO 0x00000001 /* I/O enable */ -#define PCI_CMD_MEMORY 0x00000002 /* Memory enable */ -#define PCI_CMD_MASTER 0x00000004 /* Master enable */ -#define PCI_CMD_SPECIAL 0x00000008 /* Special cycles enable */ -#define PCI_CMD_INVALIDATE 0x00000010 /* Invalidate? */ -#define PCI_CMD_VGA_PAL 0x00000040 /* VGA Palate */ -#define PCI_STAT_TA 0x08000000 /* target abort status */ -#endif /* LINUX_POSTMOGRIFY_REMOVAL */ - -/* Header types */ -#define PCI_HEADER_MULTI 0x80 -#define PCI_HEADER_MASK 0x7f -typedef enum { - PCI_HEADER_NORMAL, - PCI_HEADER_BRIDGE, - PCI_HEADER_CARDBUS -} pci_header_types; - -#define PCI_CONFIG_SPACE_SIZE 256 - -#define DWORD_ALIGN(x) (x & ~(0x03)) -#define BYTE_POS(x) (x & 0x3) -#define WORD_POS(x) (x & 0x1) - -#define BYTE_SHIFT(x) (8 * BYTE_POS(x)) -#define WORD_SHIFT(x) (16 * WORD_POS(x)) - -#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF) -#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF) - -#define read_pci_cfg_byte(a) \ - (BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xff) - -#define read_pci_cfg_word(a) \ - (WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xffff) - -#define write_pci_cfg_byte(a, val) do { \ - uint32 tmpval; \ - tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFF << BYTE_POS(a)) | \ - val << BYTE_POS(a); \ - OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \ - } while (0) - -#define write_pci_cfg_word(a, val) do { \ - uint32 tmpval; \ - tmpval = 
(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFFFF << WORD_POS(a)) | \ - val << WORD_POS(a); \ - OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \ - } while (0) - -#endif /* _h_pcicfg_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h deleted file mode 100644 index 1a53542d90188..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * BT-AMP (BlueTooth Alternate Mac and Phy) 802.11 PAL (Protocol Adaptation Layer) - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: 802.11_bta.h 382882 2013-02-04 23:24:31Z $ -*/ - -#ifndef _802_11_BTA_H_ -#define _802_11_BTA_H_ - -#define BT_SIG_SNAP_MPROT "\xAA\xAA\x03\x00\x19\x58" - -/* BT-AMP 802.11 PAL Protocols */ -#define BTA_PROT_L2CAP 1 -#define BTA_PROT_ACTIVITY_REPORT 2 -#define BTA_PROT_SECURITY 3 -#define BTA_PROT_LINK_SUPERVISION_REQUEST 4 -#define BTA_PROT_LINK_SUPERVISION_REPLY 5 - -/* BT-AMP 802.11 PAL AMP_ASSOC Type IDs */ -#define BTA_TYPE_ID_MAC_ADDRESS 1 -#define BTA_TYPE_ID_PREFERRED_CHANNELS 2 -#define BTA_TYPE_ID_CONNECTED_CHANNELS 3 -#define BTA_TYPE_ID_CAPABILITIES 4 -#define BTA_TYPE_ID_VERSION 5 -#endif /* _802_11_bta_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.1d.h b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h deleted file mode 100644 index e7eceb093bb3f..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/proto/802.1d.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * $Copyright Open Broadcom Corporation$ - * - * Fundamental types and constants relating to 802.1D - * - * $Id: 802.1d.h 382882 2013-02-04 23:24:31Z $ - */ - -#ifndef _802_1_D_ -#define _802_1_D_ - -/* 802.1D priority defines */ -#define PRIO_8021D_NONE 2 /* None = - */ -#define PRIO_8021D_BK 1 /* BK - Background */ -#define PRIO_8021D_BE 0 /* BE - Best-effort */ -#define PRIO_8021D_EE 3 /* EE - Excellent-effort */ -#define PRIO_8021D_CL 4 /* CL - Controlled Load */ 
-#define PRIO_8021D_VI 5 /* Vi - Video */ -#define PRIO_8021D_VO 6 /* Vo - Voice */ -#define PRIO_8021D_NC 7 /* NC - Network Control */ -#define MAXPRIO 7 /* 0-7 */ -#define NUMPRIO (MAXPRIO + 1) - -#define ALLPRIO -1 /* All prioirty */ - -/* Converts prio to precedence since the numerical value of - * PRIO_8021D_BE and PRIO_8021D_NONE are swapped. - */ -#define PRIO2PREC(prio) \ - (((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio^2)) : (prio)) - -#endif /* _802_1_D__ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.3.h b/drivers/net/wireless/bcmdhd/include/proto/802.3.h deleted file mode 100644 index 9901a23be127e..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/proto/802.3.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * $Copyright Open Broadcom Corporation$ - * - * Fundamental constants relating to 802.3 - * - * $Id: 802.3.h 417943 2013-08-13 07:54:04Z $ - */ - -#ifndef _802_3_h_ -#define _802_3_h_ - -/* This marks the start of a packed structure section. */ -#include - -#define SNAP_HDR_LEN 6 /* 802.3 SNAP header length */ -#define DOT3_OUI_LEN 3 /* 802.3 oui length */ - -BWL_PRE_PACKED_STRUCT struct dot3_mac_llc_snap_header { - uint8 ether_dhost[ETHER_ADDR_LEN]; /* dest mac */ - uint8 ether_shost[ETHER_ADDR_LEN]; /* src mac */ - uint16 length; /* frame length incl header */ - uint8 dsap; /* always 0xAA */ - uint8 ssap; /* always 0xAA */ - uint8 ctl; /* always 0x03 */ - uint8 oui[DOT3_OUI_LEN]; /* RFC1042: 0x00 0x00 0x00 - * Bridge-Tunnel: 0x00 0x00 0xF8 - */ - uint16 type; /* ethertype */ -} BWL_POST_PACKED_STRUCT; - -/* This marks the end of a packed structure section. */ -#include - -#endif /* #ifndef _802_3_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/sbconfig.h b/drivers/net/wireless/bcmdhd/include/sbconfig.h deleted file mode 100644 index cc9223a1634b5..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/sbconfig.h +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Broadcom SiliconBackplane hardware register definitions. 
- * - * $Copyright Open Broadcom Corporation$ - * - * $Id: sbconfig.h 456346 2014-02-18 16:48:52Z $ - */ - -#ifndef _SBCONFIG_H -#define _SBCONFIG_H - -/* cpp contortions to concatenate w/arg prescan */ -#ifndef PAD -#define _PADLINE(line) pad ## line -#define _XSTR(line) _PADLINE(line) -#define PAD _XSTR(__LINE__) -#endif - -/* enumeration in SB is based on the premise that cores are contiguos in the - * enumeration space. - */ -#define SB_BUS_SIZE 0x10000 /* Each bus gets 64Kbytes for cores */ -#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE) -#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) /* Max cores per bus */ - -/* - * Sonics Configuration Space Registers. - */ -#define SBCONFIGOFF 0xf00 /* core sbconfig regs are top 256bytes of regs */ -#define SBCONFIGSIZE 256 /* sizeof (sbconfig_t) */ - -#define SBIPSFLAG 0x08 -#define SBTPSFLAG 0x18 -#define SBTMERRLOGA 0x48 /* sonics >= 2.3 */ -#define SBTMERRLOG 0x50 /* sonics >= 2.3 */ -#define SBADMATCH3 0x60 -#define SBADMATCH2 0x68 -#define SBADMATCH1 0x70 -#define SBIMSTATE 0x90 -#define SBINTVEC 0x94 -#define SBTMSTATELOW 0x98 -#define SBTMSTATEHIGH 0x9c -#define SBBWA0 0xa0 -#define SBIMCONFIGLOW 0xa8 -#define SBIMCONFIGHIGH 0xac -#define SBADMATCH0 0xb0 -#define SBTMCONFIGLOW 0xb8 -#define SBTMCONFIGHIGH 0xbc -#define SBBCONFIG 0xc0 -#define SBBSTATE 0xc8 -#define SBACTCNFG 0xd8 -#define SBFLAGST 0xe8 -#define SBIDLOW 0xf8 -#define SBIDHIGH 0xfc - -/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have - * a few registers *below* that line. 
I think it would be very confusing to try - * and change the value of SBCONFIGOFF, so I'm definig them as absolute offsets here, - */ - -#define SBIMERRLOGA 0xea8 -#define SBIMERRLOG 0xeb0 -#define SBTMPORTCONNID0 0xed8 -#define SBTMPORTLOCK0 0xef8 - -#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) - -typedef volatile struct _sbconfig { - uint32 PAD[2]; - uint32 sbipsflag; /* initiator port ocp slave flag */ - uint32 PAD[3]; - uint32 sbtpsflag; /* target port ocp slave flag */ - uint32 PAD[11]; - uint32 sbtmerrloga; /* (sonics >= 2.3) */ - uint32 PAD; - uint32 sbtmerrlog; /* (sonics >= 2.3) */ - uint32 PAD[3]; - uint32 sbadmatch3; /* address match3 */ - uint32 PAD; - uint32 sbadmatch2; /* address match2 */ - uint32 PAD; - uint32 sbadmatch1; /* address match1 */ - uint32 PAD[7]; - uint32 sbimstate; /* initiator agent state */ - uint32 sbintvec; /* interrupt mask */ - uint32 sbtmstatelow; /* target state */ - uint32 sbtmstatehigh; /* target state */ - uint32 sbbwa0; /* bandwidth allocation table0 */ - uint32 PAD; - uint32 sbimconfiglow; /* initiator configuration */ - uint32 sbimconfighigh; /* initiator configuration */ - uint32 sbadmatch0; /* address match0 */ - uint32 PAD; - uint32 sbtmconfiglow; /* target configuration */ - uint32 sbtmconfighigh; /* target configuration */ - uint32 sbbconfig; /* broadcast configuration */ - uint32 PAD; - uint32 sbbstate; /* broadcast state */ - uint32 PAD[3]; - uint32 sbactcnfg; /* activate configuration */ - uint32 PAD[3]; - uint32 sbflagst; /* current sbflags */ - uint32 PAD[3]; - uint32 sbidlow; /* identification */ - uint32 sbidhigh; /* identification */ -} sbconfig_t; - -#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ - -/* sbipsflag */ -#define SBIPS_INT1_MASK 0x3f /* which sbflags get routed to mips interrupt 1 */ -#define SBIPS_INT1_SHIFT 0 -#define SBIPS_INT2_MASK 0x3f00 /* which sbflags get routed to mips interrupt 2 */ -#define SBIPS_INT2_SHIFT 8 -#define SBIPS_INT3_MASK 0x3f0000 /* which sbflags get routed 
to mips interrupt 3 */ -#define SBIPS_INT3_SHIFT 16 -#define SBIPS_INT4_MASK 0x3f000000 /* which sbflags get routed to mips interrupt 4 */ -#define SBIPS_INT4_SHIFT 24 - -/* sbtpsflag */ -#define SBTPS_NUM0_MASK 0x3f /* interrupt sbFlag # generated by this core */ -#define SBTPS_F0EN0 0x40 /* interrupt is always sent on the backplane */ - -/* sbtmerrlog */ -#define SBTMEL_CM 0x00000007 /* command */ -#define SBTMEL_CI 0x0000ff00 /* connection id */ -#define SBTMEL_EC 0x0f000000 /* error code */ -#define SBTMEL_ME 0x80000000 /* multiple error */ - -/* sbimstate */ -#define SBIM_PC 0xf /* pipecount */ -#define SBIM_AP_MASK 0x30 /* arbitration policy */ -#define SBIM_AP_BOTH 0x00 /* use both timeslaces and token */ -#define SBIM_AP_TS 0x10 /* use timesliaces only */ -#define SBIM_AP_TK 0x20 /* use token only */ -#define SBIM_AP_RSV 0x30 /* reserved */ -#define SBIM_IBE 0x20000 /* inbanderror */ -#define SBIM_TO 0x40000 /* timeout */ -#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */ -#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */ - -/* sbtmstatelow */ -#define SBTML_RESET 0x0001 /* reset */ -#define SBTML_REJ_MASK 0x0006 /* reject field */ -#define SBTML_REJ 0x0002 /* reject */ -#define SBTML_TMPREJ 0x0004 /* temporary reject, for error recovery */ - -#define SBTML_SICF_SHIFT 16 /* Shift to locate the SI control flags in sbtml */ - -/* sbtmstatehigh */ -#define SBTMH_SERR 0x0001 /* serror */ -#define SBTMH_INT 0x0002 /* interrupt */ -#define SBTMH_BUSY 0x0004 /* busy */ -#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */ - -#define SBTMH_SISF_SHIFT 16 /* Shift to locate the SI status flags in sbtmh */ - -/* sbbwa0 */ -#define SBBWA_TAB0_MASK 0xffff /* lookup table 0 */ -#define SBBWA_TAB1_MASK 0xffff /* lookup table 1 */ -#define SBBWA_TAB1_SHIFT 16 - -/* sbimconfiglow */ -#define SBIMCL_STO_MASK 0x7 /* service timeout */ -#define SBIMCL_RTO_MASK 0x70 /* request timeout */ -#define SBIMCL_RTO_SHIFT 4 -#define SBIMCL_CID_MASK 0xff0000 /* connection id 
*/ -#define SBIMCL_CID_SHIFT 16 - -/* sbimconfighigh */ -#define SBIMCH_IEM_MASK 0xc /* inband error mode */ -#define SBIMCH_TEM_MASK 0x30 /* timeout error mode */ -#define SBIMCH_TEM_SHIFT 4 -#define SBIMCH_BEM_MASK 0xc0 /* bus error mode */ -#define SBIMCH_BEM_SHIFT 6 - -/* sbadmatch0 */ -#define SBAM_TYPE_MASK 0x3 /* address type */ -#define SBAM_AD64 0x4 /* reserved */ -#define SBAM_ADINT0_MASK 0xf8 /* type0 size */ -#define SBAM_ADINT0_SHIFT 3 -#define SBAM_ADINT1_MASK 0x1f8 /* type1 size */ -#define SBAM_ADINT1_SHIFT 3 -#define SBAM_ADINT2_MASK 0x1f8 /* type2 size */ -#define SBAM_ADINT2_SHIFT 3 -#define SBAM_ADEN 0x400 /* enable */ -#define SBAM_ADNEG 0x800 /* negative decode */ -#define SBAM_BASE0_MASK 0xffffff00 /* type0 base address */ -#define SBAM_BASE0_SHIFT 8 -#define SBAM_BASE1_MASK 0xfffff000 /* type1 base address for the core */ -#define SBAM_BASE1_SHIFT 12 -#define SBAM_BASE2_MASK 0xffff0000 /* type2 base address for the core */ -#define SBAM_BASE2_SHIFT 16 - -/* sbtmconfiglow */ -#define SBTMCL_CD_MASK 0xff /* clock divide */ -#define SBTMCL_CO_MASK 0xf800 /* clock offset */ -#define SBTMCL_CO_SHIFT 11 -#define SBTMCL_IF_MASK 0xfc0000 /* interrupt flags */ -#define SBTMCL_IF_SHIFT 18 -#define SBTMCL_IM_MASK 0x3000000 /* interrupt mode */ -#define SBTMCL_IM_SHIFT 24 - -/* sbtmconfighigh */ -#define SBTMCH_BM_MASK 0x3 /* busy mode */ -#define SBTMCH_RM_MASK 0x3 /* retry mode */ -#define SBTMCH_RM_SHIFT 2 -#define SBTMCH_SM_MASK 0x30 /* stop mode */ -#define SBTMCH_SM_SHIFT 4 -#define SBTMCH_EM_MASK 0x300 /* sb error mode */ -#define SBTMCH_EM_SHIFT 8 -#define SBTMCH_IM_MASK 0xc00 /* int mode */ -#define SBTMCH_IM_SHIFT 10 - -/* sbbconfig */ -#define SBBC_LAT_MASK 0x3 /* sb latency */ -#define SBBC_MAX0_MASK 0xf0000 /* maxccntr0 */ -#define SBBC_MAX0_SHIFT 16 -#define SBBC_MAX1_MASK 0xf00000 /* maxccntr1 */ -#define SBBC_MAX1_SHIFT 20 - -/* sbbstate */ -#define SBBS_SRD 0x1 /* st reg disable */ -#define SBBS_HRD 0x2 /* hold reg disable */ - -/* 
sbidlow */ -#define SBIDL_CS_MASK 0x3 /* config space */ -#define SBIDL_AR_MASK 0x38 /* # address ranges supported */ -#define SBIDL_AR_SHIFT 3 -#define SBIDL_SYNCH 0x40 /* sync */ -#define SBIDL_INIT 0x80 /* initiator */ -#define SBIDL_MINLAT_MASK 0xf00 /* minimum backplane latency */ -#define SBIDL_MINLAT_SHIFT 8 -#define SBIDL_MAXLAT 0xf000 /* maximum backplane latency */ -#define SBIDL_MAXLAT_SHIFT 12 -#define SBIDL_FIRST 0x10000 /* this initiator is first */ -#define SBIDL_CW_MASK 0xc0000 /* cycle counter width */ -#define SBIDL_CW_SHIFT 18 -#define SBIDL_TP_MASK 0xf00000 /* target ports */ -#define SBIDL_TP_SHIFT 20 -#define SBIDL_IP_MASK 0xf000000 /* initiator ports */ -#define SBIDL_IP_SHIFT 24 -#define SBIDL_RV_MASK 0xf0000000 /* sonics backplane revision code */ -#define SBIDL_RV_SHIFT 28 -#define SBIDL_RV_2_2 0x00000000 /* version 2.2 or earlier */ -#define SBIDL_RV_2_3 0x10000000 /* version 2.3 */ - -/* sbidhigh */ -#define SBIDH_RC_MASK 0x000f /* revision code */ -#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */ -#define SBIDH_RCE_SHIFT 8 -#define SBCOREREV(sbidh) \ - ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK)) -#define SBIDH_CC_MASK 0x8ff0 /* core code */ -#define SBIDH_CC_SHIFT 4 -#define SBIDH_VC_MASK 0xffff0000 /* vendor code */ -#define SBIDH_VC_SHIFT 16 - -#define SB_COMMIT 0xfd8 /* update buffered registers value */ - -/* vendor codes */ -#define SB_VEND_BCM 0x4243 /* Broadcom's SB vendor code */ - -#endif /* _SBCONFIG_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sbhnddma.h b/drivers/net/wireless/bcmdhd/include/sbhnddma.h deleted file mode 100644 index 3427a75d96654..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/sbhnddma.h +++ /dev/null @@ -1,399 +0,0 @@ -/* - * Generic Broadcom Home Networking Division (HND) DMA engine HW interface - * This supports the following chips: BCM42xx, 44xx, 47xx . 
- * - * $Copyright Open Broadcom Corporation$ - * - * $Id: sbhnddma.h 452424 2014-01-30 09:43:39Z $ - */ - -#ifndef _sbhnddma_h_ -#define _sbhnddma_h_ - -/* DMA structure: - * support two DMA engines: 32 bits address or 64 bit addressing - * basic DMA register set is per channel(transmit or receive) - * a pair of channels is defined for convenience - */ - - -/* 32 bits addressing */ - -/* dma registers per channel(xmt or rcv) */ -typedef volatile struct { - uint32 control; /* enable, et al */ - uint32 addr; /* descriptor ring base address (4K aligned) */ - uint32 ptr; /* last descriptor posted to chip */ - uint32 status; /* current active descriptor, et al */ -} dma32regs_t; - -typedef volatile struct { - dma32regs_t xmt; /* dma tx channel */ - dma32regs_t rcv; /* dma rx channel */ -} dma32regp_t; - -typedef volatile struct { /* diag access */ - uint32 fifoaddr; /* diag address */ - uint32 fifodatalow; /* low 32bits of data */ - uint32 fifodatahigh; /* high 32bits of data */ - uint32 pad; /* reserved */ -} dma32diag_t; - -/* - * DMA Descriptor - * Descriptors are only read by the hardware, never written back. - */ -typedef volatile struct { - uint32 ctrl; /* misc control bits & bufcount */ - uint32 addr; /* data buffer address */ -} dma32dd_t; - -/* - * Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page. 
- */ -#define D32RINGALIGN_BITS 12 -#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS) -#define D32RINGALIGN (1 << D32RINGALIGN_BITS) - -#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t)) - -/* transmit channel control */ -#define XC_XE ((uint32)1 << 0) /* transmit enable */ -#define XC_SE ((uint32)1 << 1) /* transmit suspend request */ -#define XC_LE ((uint32)1 << 2) /* loopback enable */ -#define XC_FL ((uint32)1 << 4) /* flush request */ -#define XC_MR_MASK 0x000001C0 /* Multiple outstanding reads */ -#define XC_MR_SHIFT 6 -#define XC_PD ((uint32)1 << 11) /* parity check disable */ -#define XC_AE ((uint32)3 << 16) /* address extension bits */ -#define XC_AE_SHIFT 16 -#define XC_BL_MASK 0x001C0000 /* BurstLen bits */ -#define XC_BL_SHIFT 18 -#define XC_PC_MASK 0x00E00000 /* Prefetch control */ -#define XC_PC_SHIFT 21 -#define XC_PT_MASK 0x03000000 /* Prefetch threshold */ -#define XC_PT_SHIFT 24 - -/* Multiple outstanding reads */ -#define DMA_MR_1 0 -#define DMA_MR_2 1 -#define DMA_MR_4 2 -#define DMA_MR_8 3 -#define DMA_MR_12 4 -#define DMA_MR_16 5 -#define DMA_MR_20 6 -#define DMA_MR_32 7 - -/* DMA Burst Length in bytes */ -#define DMA_BL_16 0 -#define DMA_BL_32 1 -#define DMA_BL_64 2 -#define DMA_BL_128 3 -#define DMA_BL_256 4 -#define DMA_BL_512 5 -#define DMA_BL_1024 6 - -/* Prefetch control */ -#define DMA_PC_0 0 -#define DMA_PC_4 1 -#define DMA_PC_8 2 -#define DMA_PC_16 3 -/* others: reserved */ - -/* Prefetch threshold */ -#define DMA_PT_1 0 -#define DMA_PT_2 1 -#define DMA_PT_4 2 -#define DMA_PT_8 3 - -/* transmit descriptor table pointer */ -#define XP_LD_MASK 0xfff /* last valid descriptor */ - -/* transmit channel status */ -#define XS_CD_MASK 0x0fff /* current descriptor pointer */ -#define XS_XS_MASK 0xf000 /* transmit state */ -#define XS_XS_SHIFT 12 -#define XS_XS_DISABLED 0x0000 /* disabled */ -#define XS_XS_ACTIVE 0x1000 /* active */ -#define XS_XS_IDLE 0x2000 /* idle wait */ -#define XS_XS_STOPPED 0x3000 /* stopped */ -#define XS_XS_SUSP 0x4000 
/* suspend pending */ -#define XS_XE_MASK 0xf0000 /* transmit errors */ -#define XS_XE_SHIFT 16 -#define XS_XE_NOERR 0x00000 /* no error */ -#define XS_XE_DPE 0x10000 /* descriptor protocol error */ -#define XS_XE_DFU 0x20000 /* data fifo underrun */ -#define XS_XE_BEBR 0x30000 /* bus error on buffer read */ -#define XS_XE_BEDA 0x40000 /* bus error on descriptor access */ -#define XS_AD_MASK 0xfff00000 /* active descriptor */ -#define XS_AD_SHIFT 20 - -/* receive channel control */ -#define RC_RE ((uint32)1 << 0) /* receive enable */ -#define RC_RO_MASK 0xfe /* receive frame offset */ -#define RC_RO_SHIFT 1 -#define RC_FM ((uint32)1 << 8) /* direct fifo receive (pio) mode */ -#define RC_SH ((uint32)1 << 9) /* separate rx header descriptor enable */ -#define RC_OC ((uint32)1 << 10) /* overflow continue */ -#define RC_PD ((uint32)1 << 11) /* parity check disable */ -#define RC_AE ((uint32)3 << 16) /* address extension bits */ -#define RC_AE_SHIFT 16 -#define RC_BL_MASK 0x001C0000 /* BurstLen bits */ -#define RC_BL_SHIFT 18 -#define RC_PC_MASK 0x00E00000 /* Prefetch control */ -#define RC_PC_SHIFT 21 -#define RC_PT_MASK 0x03000000 /* Prefetch threshold */ -#define RC_PT_SHIFT 24 - -/* receive descriptor table pointer */ -#define RP_LD_MASK 0xfff /* last valid descriptor */ - -/* receive channel status */ -#define RS_CD_MASK 0x0fff /* current descriptor pointer */ -#define RS_RS_MASK 0xf000 /* receive state */ -#define RS_RS_SHIFT 12 -#define RS_RS_DISABLED 0x0000 /* disabled */ -#define RS_RS_ACTIVE 0x1000 /* active */ -#define RS_RS_IDLE 0x2000 /* idle wait */ -#define RS_RS_STOPPED 0x3000 /* reserved */ -#define RS_RE_MASK 0xf0000 /* receive errors */ -#define RS_RE_SHIFT 16 -#define RS_RE_NOERR 0x00000 /* no error */ -#define RS_RE_DPE 0x10000 /* descriptor protocol error */ -#define RS_RE_DFO 0x20000 /* data fifo overflow */ -#define RS_RE_BEBW 0x30000 /* bus error on buffer write */ -#define RS_RE_BEDA 0x40000 /* bus error on descriptor access */ -#define 
RS_AD_MASK 0xfff00000 /* active descriptor */ -#define RS_AD_SHIFT 20 - -/* fifoaddr */ -#define FA_OFF_MASK 0xffff /* offset */ -#define FA_SEL_MASK 0xf0000 /* select */ -#define FA_SEL_SHIFT 16 -#define FA_SEL_XDD 0x00000 /* transmit dma data */ -#define FA_SEL_XDP 0x10000 /* transmit dma pointers */ -#define FA_SEL_RDD 0x40000 /* receive dma data */ -#define FA_SEL_RDP 0x50000 /* receive dma pointers */ -#define FA_SEL_XFD 0x80000 /* transmit fifo data */ -#define FA_SEL_XFP 0x90000 /* transmit fifo pointers */ -#define FA_SEL_RFD 0xc0000 /* receive fifo data */ -#define FA_SEL_RFP 0xd0000 /* receive fifo pointers */ -#define FA_SEL_RSD 0xe0000 /* receive frame status data */ -#define FA_SEL_RSP 0xf0000 /* receive frame status pointers */ - -/* descriptor control flags */ -#define CTRL_BC_MASK 0x00001fff /* buffer byte count, real data len must <= 4KB */ -#define CTRL_AE ((uint32)3 << 16) /* address extension bits */ -#define CTRL_AE_SHIFT 16 -#define CTRL_PARITY ((uint32)3 << 18) /* parity bit */ -#define CTRL_EOT ((uint32)1 << 28) /* end of descriptor table */ -#define CTRL_IOC ((uint32)1 << 29) /* interrupt on completion */ -#define CTRL_EOF ((uint32)1 << 30) /* end of frame */ -#define CTRL_SOF ((uint32)1 << 31) /* start of frame */ - -/* control flags in the range [27:20] are core-specific and not defined here */ -#define CTRL_CORE_MASK 0x0ff00000 - -/* 64 bits addressing */ - -/* dma registers per channel(xmt or rcv) */ -typedef volatile struct { - uint32 control; /* enable, et al */ - uint32 ptr; /* last descriptor posted to chip */ - uint32 addrlow; /* descriptor ring base address low 32-bits (8K aligned) */ - uint32 addrhigh; /* descriptor ring base address bits 63:32 (8K aligned) */ - uint32 status0; /* current descriptor, xmt state */ - uint32 status1; /* active descriptor, xmt error */ -} dma64regs_t; - -typedef volatile struct { - dma64regs_t tx; /* dma64 tx channel */ - dma64regs_t rx; /* dma64 rx channel */ -} dma64regp_t; - -typedef volatile 
struct { /* diag access */ - uint32 fifoaddr; /* diag address */ - uint32 fifodatalow; /* low 32bits of data */ - uint32 fifodatahigh; /* high 32bits of data */ - uint32 pad; /* reserved */ -} dma64diag_t; - -/* - * DMA Descriptor - * Descriptors are only read by the hardware, never written back. - */ -typedef volatile struct { - uint32 ctrl1; /* misc control bits */ - uint32 ctrl2; /* buffer count and address extension */ - uint32 addrlow; /* memory address of the date buffer, bits 31:0 */ - uint32 addrhigh; /* memory address of the date buffer, bits 63:32 */ -} dma64dd_t; - -/* - * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical addresss. - */ -#define D64RINGALIGN_BITS 13 -#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS) -#define D64RINGBOUNDARY (1 << D64RINGALIGN_BITS) - -#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t)) - -/* for cores with large descriptor ring support, descriptor ring size can be up to 4096 */ -#define D64MAXDD_LARGE ((1 << 16) / sizeof (dma64dd_t)) - -/* for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross - * 64K boundary - */ -#define D64RINGBOUNDARY_LARGE (1 << 16) - -/* - * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11. - * When this field contains the value N, the burst length is 2**(N + 4) bytes. 
- */ -#define D64_DEF_USBBURSTLEN 2 -#define D64_DEF_SDIOBURSTLEN 1 - - -#ifndef D64_USBBURSTLEN -#define D64_USBBURSTLEN DMA_BL_64 -#endif -#ifndef D64_SDIOBURSTLEN -#define D64_SDIOBURSTLEN DMA_BL_32 -#endif - -/* transmit channel control */ -#define D64_XC_XE 0x00000001 /* transmit enable */ -#define D64_XC_SE 0x00000002 /* transmit suspend request */ -#define D64_XC_LE 0x00000004 /* loopback enable */ -#define D64_XC_FL 0x00000010 /* flush request */ -#define D64_XC_MR_MASK 0x000001C0 /* Multiple outstanding reads */ -#define D64_XC_MR_SHIFT 6 -#define D64_XC_PD 0x00000800 /* parity check disable */ -#define D64_XC_AE 0x00030000 /* address extension bits */ -#define D64_XC_AE_SHIFT 16 -#define D64_XC_BL_MASK 0x001C0000 /* BurstLen bits */ -#define D64_XC_BL_SHIFT 18 -#define D64_XC_PC_MASK 0x00E00000 /* Prefetch control */ -#define D64_XC_PC_SHIFT 21 -#define D64_XC_PT_MASK 0x03000000 /* Prefetch threshold */ -#define D64_XC_PT_SHIFT 24 - -/* transmit descriptor table pointer */ -#define D64_XP_LD_MASK 0x00001fff /* last valid descriptor */ - -/* transmit channel status */ -#define D64_XS0_CD_MASK (di->d64_xs0_cd_mask) /* current descriptor pointer */ -#define D64_XS0_XS_MASK 0xf0000000 /* transmit state */ -#define D64_XS0_XS_SHIFT 28 -#define D64_XS0_XS_DISABLED 0x00000000 /* disabled */ -#define D64_XS0_XS_ACTIVE 0x10000000 /* active */ -#define D64_XS0_XS_IDLE 0x20000000 /* idle wait */ -#define D64_XS0_XS_STOPPED 0x30000000 /* stopped */ -#define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */ - -#define D64_XS1_AD_MASK (di->d64_xs1_ad_mask) /* active descriptor */ -#define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */ -#define D64_XS1_XE_SHIFT 28 -#define D64_XS1_XE_NOERR 0x00000000 /* no error */ -#define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */ -#define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */ -#define D64_XS1_XE_DTE 0x30000000 /* data transfer error */ -#define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */ 
-#define D64_XS1_XE_COREE 0x50000000 /* core error */ - -/* receive channel control */ -#define D64_RC_RE 0x00000001 /* receive enable */ -#define D64_RC_RO_MASK 0x000000fe /* receive frame offset */ -#define D64_RC_RO_SHIFT 1 -#define D64_RC_FM 0x00000100 /* direct fifo receive (pio) mode */ -#define D64_RC_SH 0x00000200 /* separate rx header descriptor enable */ -#define D64_RC_SHIFT 9 /* separate rx header descriptor enable */ -#define D64_RC_OC 0x00000400 /* overflow continue */ -#define D64_RC_PD 0x00000800 /* parity check disable */ -#define D64_RC_GE 0x00004000 /* Glom enable */ -#define D64_RC_AE 0x00030000 /* address extension bits */ -#define D64_RC_AE_SHIFT 16 -#define D64_RC_BL_MASK 0x001C0000 /* BurstLen bits */ -#define D64_RC_BL_SHIFT 18 -#define D64_RC_PC_MASK 0x00E00000 /* Prefetch control */ -#define D64_RC_PC_SHIFT 21 -#define D64_RC_PT_MASK 0x03000000 /* Prefetch threshold */ -#define D64_RC_PT_SHIFT 24 - -/* flags for dma controller */ -#define DMA_CTRL_PEN (1 << 0) /* partity enable */ -#define DMA_CTRL_ROC (1 << 1) /* rx overflow continue */ -#define DMA_CTRL_RXMULTI (1 << 2) /* allow rx scatter to multiple descriptors */ -#define DMA_CTRL_UNFRAMED (1 << 3) /* Unframed Rx/Tx data */ -#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4) -#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5) /* DMA avoidance WAR for 4331 */ -#define DMA_CTRL_RXSINGLE (1 << 6) /* always single buffer */ -#define DMA_CTRL_SDIO_RXGLOM (1 << 7) /* DMA Rx glome is enabled */ - -/* receive descriptor table pointer */ -#define D64_RP_LD_MASK 0x00001fff /* last valid descriptor */ - -/* receive channel status */ -#define D64_RS0_CD_MASK (di->d64_rs0_cd_mask) /* current descriptor pointer */ -#define D64_RS0_RS_MASK 0xf0000000 /* receive state */ -#define D64_RS0_RS_SHIFT 28 -#define D64_RS0_RS_DISABLED 0x00000000 /* disabled */ -#define D64_RS0_RS_ACTIVE 0x10000000 /* active */ -#define D64_RS0_RS_IDLE 0x20000000 /* idle wait */ -#define D64_RS0_RS_STOPPED 0x30000000 /* stopped */ 
-#define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */ - -#define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */ -#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */ -#define D64_RS1_RE_SHIFT 28 -#define D64_RS1_RE_NOERR 0x00000000 /* no error */ -#define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */ -#define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */ -#define D64_RS1_RE_DTE 0x30000000 /* data transfer error */ -#define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */ -#define D64_RS1_RE_COREE 0x50000000 /* core error */ - -/* fifoaddr */ -#define D64_FA_OFF_MASK 0xffff /* offset */ -#define D64_FA_SEL_MASK 0xf0000 /* select */ -#define D64_FA_SEL_SHIFT 16 -#define D64_FA_SEL_XDD 0x00000 /* transmit dma data */ -#define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */ -#define D64_FA_SEL_RDD 0x40000 /* receive dma data */ -#define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */ -#define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */ -#define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */ -#define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */ -#define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */ -#define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */ -#define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */ - -/* descriptor control flags 1 */ -#define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */ -#define D64_CTRL1_NOTPCIE ((uint32)1 << 18) /* buirst size control */ -#define D64_CTRL1_EOT ((uint32)1 << 28) /* end of descriptor table */ -#define D64_CTRL1_IOC ((uint32)1 << 29) /* interrupt on completion */ -#define D64_CTRL1_EOF ((uint32)1 << 30) /* end of frame */ -#define D64_CTRL1_SOF ((uint32)1 << 31) /* start of frame */ - -/* descriptor control flags 2 */ -#define D64_CTRL2_BC_MASK 0x00007fff /* buffer byte count. 
real data len must <= 16KB */ -#define D64_CTRL2_AE 0x00030000 /* address extension bits */ -#define D64_CTRL2_AE_SHIFT 16 -#define D64_CTRL2_PARITY 0x00040000 /* parity bit */ - -/* control flags in the range [27:20] are core-specific and not defined here */ -#define D64_CTRL_CORE_MASK 0x0ff00000 - -#define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */ -#define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */ -#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1, d11corerev >= 22 */ -#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */ - -/* receive frame status */ -typedef volatile struct { - uint16 len; - uint16 flags; -} dma_rxh_t; - -#endif /* _sbhnddma_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/sbpcmcia.h b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h deleted file mode 100644 index 91aa2fb1590a2..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/sbpcmcia.h +++ /dev/null @@ -1,352 +0,0 @@ -/* - * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions. - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: sbpcmcia.h 446298 2014-01-03 11:30:17Z $ - */ - -#ifndef _SBPCMCIA_H -#define _SBPCMCIA_H - -/* All the addresses that are offsets in attribute space are divided - * by two to account for the fact that odd bytes are invalid in - * attribute space and our read/write routines make the space appear - * as if they didn't exist. Still we want to show the original numbers - * as documented in the hnd_pcmcia core manual. 
- */ - -/* PCMCIA Function Configuration Registers */ -#define PCMCIA_FCR (0x700 / 2) - -#define FCR0_OFF 0 -#define FCR1_OFF (0x40 / 2) -#define FCR2_OFF (0x80 / 2) -#define FCR3_OFF (0xc0 / 2) - -#define PCMCIA_FCR0 (0x700 / 2) -#define PCMCIA_FCR1 (0x740 / 2) -#define PCMCIA_FCR2 (0x780 / 2) -#define PCMCIA_FCR3 (0x7c0 / 2) - -/* Standard PCMCIA FCR registers */ - -#define PCMCIA_COR 0 - -#define COR_RST 0x80 -#define COR_LEV 0x40 -#define COR_IRQEN 0x04 -#define COR_BLREN 0x01 -#define COR_FUNEN 0x01 - - -#define PCICIA_FCSR (2 / 2) -#define PCICIA_PRR (4 / 2) -#define PCICIA_SCR (6 / 2) -#define PCICIA_ESR (8 / 2) - - -#define PCM_MEMOFF 0x0000 -#define F0_MEMOFF 0x1000 -#define F1_MEMOFF 0x2000 -#define F2_MEMOFF 0x3000 -#define F3_MEMOFF 0x4000 - -/* Memory base in the function fcr's */ -#define MEM_ADDR0 (0x728 / 2) -#define MEM_ADDR1 (0x72a / 2) -#define MEM_ADDR2 (0x72c / 2) - -/* PCMCIA base plus Srom access in fcr0: */ -#define PCMCIA_ADDR0 (0x072e / 2) -#define PCMCIA_ADDR1 (0x0730 / 2) -#define PCMCIA_ADDR2 (0x0732 / 2) - -#define MEM_SEG (0x0734 / 2) -#define SROM_CS (0x0736 / 2) -#define SROM_DATAL (0x0738 / 2) -#define SROM_DATAH (0x073a / 2) -#define SROM_ADDRL (0x073c / 2) -#define SROM_ADDRH (0x073e / 2) -#define SROM_INFO2 (0x0772 / 2) /* Corerev >= 2 && <= 5 */ -#define SROM_INFO (0x07be / 2) /* Corerev >= 6 */ - -/* Values for srom_cs: */ -#define SROM_IDLE 0 -#define SROM_WRITE 1 -#define SROM_READ 2 -#define SROM_WEN 4 -#define SROM_WDS 7 -#define SROM_DONE 8 - -/* Fields in srom_info: */ -#define SRI_SZ_MASK 0x03 -#define SRI_BLANK 0x04 -#define SRI_OTP 0x80 - -#if !defined(LINUX_POSTMOGRIFY_REMOVAL) -/* CIS stuff */ - -/* The CIS stops where the FCRs start */ -#define CIS_SIZE PCMCIA_FCR -#define CIS_SIZE_12K 1154 /* Maximum h/w + s/w sub region size for 12k OTP */ - -/* CIS tuple length field max */ -#define CIS_TUPLE_LEN_MAX 0xff - -/* Standard tuples we know about */ - -#define CISTPL_NULL 0x00 -#define CISTPL_VERS_1 0x15 /* CIS ver, 
manf, dev & ver strings */ -#define CISTPL_MANFID 0x20 /* Manufacturer and device id */ -#define CISTPL_FUNCID 0x21 /* Function identification */ -#define CISTPL_FUNCE 0x22 /* Function extensions */ -#define CISTPL_CFTABLE 0x1b /* Config table entry */ -#define CISTPL_END 0xff /* End of the CIS tuple chain */ - -/* Function identifier provides context for the function extentions tuple */ -#define CISTPL_FID_SDIO 0x0c /* Extensions defined by SDIO spec */ - -/* Function extensions for LANs (assumed for extensions other than SDIO) */ -#define LAN_TECH 1 /* Technology type */ -#define LAN_SPEED 2 /* Raw bit rate */ -#define LAN_MEDIA 3 /* Transmission media */ -#define LAN_NID 4 /* Node identification (aka MAC addr) */ -#define LAN_CONN 5 /* Connector standard */ - - -/* CFTable */ -#define CFTABLE_REGWIN_2K 0x08 /* 2k reg windows size */ -#define CFTABLE_REGWIN_4K 0x10 /* 4k reg windows size */ -#define CFTABLE_REGWIN_8K 0x20 /* 8k reg windows size */ - -/* Vendor unique tuples are 0x80-0x8f. Within Broadcom we'll - * take one for HNBU, and use "extensions" (a la FUNCE) within it. 
- */ - -#define CISTPL_BRCM_HNBU 0x80 - -/* Subtypes of BRCM_HNBU: */ - -#define HNBU_SROMREV 0x00 /* A byte with sromrev, 1 if not present */ -#define HNBU_CHIPID 0x01 /* Two 16bit values: PCI vendor & device id */ -#define HNBU_BOARDREV 0x02 /* One byte board revision */ -#define HNBU_PAPARMS 0x03 /* PA parameters: 8 (sromrev == 1) - * or 9 (sromrev > 1) bytes - */ -#define HNBU_OEM 0x04 /* Eight bytes OEM data (sromrev == 1) */ -#define HNBU_CC 0x05 /* Default country code (sromrev == 1) */ -#define HNBU_AA 0x06 /* Antennas available */ -#define HNBU_AG 0x07 /* Antenna gain */ -#define HNBU_BOARDFLAGS 0x08 /* board flags (2 or 4 bytes) */ -#define HNBU_LEDS 0x09 /* LED set */ -#define HNBU_CCODE 0x0a /* Country code (2 bytes ascii + 1 byte cctl) - * in rev 2 - */ -#define HNBU_CCKPO 0x0b /* 2 byte cck power offsets in rev 3 */ -#define HNBU_OFDMPO 0x0c /* 4 byte 11g ofdm power offsets in rev 3 */ -#define HNBU_GPIOTIMER 0x0d /* 2 bytes with on/off values in rev 3 */ -#define HNBU_PAPARMS5G 0x0e /* 5G PA params */ -#define HNBU_ANT5G 0x0f /* 4328 5G antennas available/gain */ -#define HNBU_RDLID 0x10 /* 2 byte USB remote downloader (RDL) product Id */ -#define HNBU_RSSISMBXA2G 0x11 /* 4328 2G RSSI mid pt sel & board switch arch, - * 2 bytes, rev 3. - */ -#define HNBU_RSSISMBXA5G 0x12 /* 4328 5G RSSI mid pt sel & board switch arch, - * 2 bytes, rev 3. 
- */ -#define HNBU_XTALFREQ 0x13 /* 4 byte Crystal frequency in kilohertz */ -#define HNBU_TRI2G 0x14 /* 4328 2G TR isolation, 1 byte */ -#define HNBU_TRI5G 0x15 /* 4328 5G TR isolation, 3 bytes */ -#define HNBU_RXPO2G 0x16 /* 4328 2G RX power offset, 1 byte */ -#define HNBU_RXPO5G 0x17 /* 4328 5G RX power offset, 1 byte */ -#define HNBU_BOARDNUM 0x18 /* board serial number, independent of mac addr */ -#define HNBU_MACADDR 0x19 /* mac addr override for the standard CIS LAN_NID */ -#define HNBU_RDLSN 0x1a /* 2 bytes; serial # advertised in USB descriptor */ -#define HNBU_BOARDTYPE 0x1b /* 2 bytes; boardtype */ -#define HNBU_LEDDC 0x1c /* 2 bytes; LED duty cycle */ -#define HNBU_HNBUCIS 0x1d /* what follows is proprietary HNBU CIS format */ -#define HNBU_PAPARMS_SSLPNPHY 0x1e /* SSLPNPHY PA params */ -#define HNBU_RSSISMBXA2G_SSLPNPHY 0x1f /* SSLPNPHY RSSI mid pt sel & board switch arch */ -#define HNBU_RDLRNDIS 0x20 /* 1 byte; 1 = RDL advertises RNDIS config */ -#define HNBU_CHAINSWITCH 0x21 /* 2 byte; txchain, rxchain */ -#define HNBU_REGREV 0x22 /* 1 byte; */ -#define HNBU_FEM 0x23 /* 2 or 4 byte: 11n frontend specification */ -#define HNBU_PAPARMS_C0 0x24 /* 8 or 30 bytes: 11n pa paramater for chain 0 */ -#define HNBU_PAPARMS_C1 0x25 /* 8 or 30 bytes: 11n pa paramater for chain 1 */ -#define HNBU_PAPARMS_C2 0x26 /* 8 or 30 bytes: 11n pa paramater for chain 2 */ -#define HNBU_PAPARMS_C3 0x27 /* 8 or 30 bytes: 11n pa paramater for chain 3 */ -#define HNBU_PO_CCKOFDM 0x28 /* 6 or 18 bytes: cck2g/ofdm2g/ofdm5g power offset */ -#define HNBU_PO_MCS2G 0x29 /* 8 bytes: mcs2g power offset */ -#define HNBU_PO_MCS5GM 0x2a /* 8 bytes: mcs5g mid band power offset */ -#define HNBU_PO_MCS5GLH 0x2b /* 16 bytes: mcs5g low-high band power offset */ -#define HNBU_PO_CDD 0x2c /* 2 bytes: cdd2g/5g power offset */ -#define HNBU_PO_STBC 0x2d /* 2 bytes: stbc2g/5g power offset */ -#define HNBU_PO_40M 0x2e /* 2 bytes: 40Mhz channel 2g/5g power offset */ -#define HNBU_PO_40MDUP 0x2f /* 2 
bytes: 40Mhz channel dup 2g/5g power offset */ - -#define HNBU_RDLRWU 0x30 /* 1 byte; 1 = RDL advertises Remote Wake-up */ -#define HNBU_WPS 0x31 /* 1 byte; GPIO pin for WPS button */ -#define HNBU_USBFS 0x32 /* 1 byte; 1 = USB advertises FS mode only */ -#define HNBU_BRMIN 0x33 /* 4 byte bootloader min resource mask */ -#define HNBU_BRMAX 0x34 /* 4 byte bootloader max resource mask */ -#define HNBU_PATCH 0x35 /* bootloader patch addr(2b) & data(4b) pair */ -#define HNBU_CCKFILTTYPE 0x36 /* CCK digital filter selection options */ -#define HNBU_OFDMPO5G 0x37 /* 4 * 3 = 12 byte 11a ofdm power offsets in rev 3 */ -#define HNBU_ELNA2G 0x38 -#define HNBU_ELNA5G 0x39 -#define HNBU_TEMPTHRESH 0x3A /* 2 bytes - * byte1 tempthresh - * byte2 period(msb 4 bits) | hysterisis(lsb 4 bits) - */ -#define HNBU_UUID 0x3B /* 16 Bytes Hex */ - -#define HNBU_USBEPNUM 0x40 /* USB endpoint numbers */ - -/* POWER PER RATE for SROM V9 */ -#define HNBU_CCKBW202GPO 0x41 /* 2 bytes each - * CCK Power offsets for 20 MHz rates (11, 5.5, 2, 1Mbps) - * cckbw202gpo cckbw20ul2gpo - */ - -#define HNBU_LEGOFDMBW202GPO 0x42 /* 4 bytes each - * OFDM power offsets for 20 MHz Legacy rates - * (54, 48, 36, 24, 18, 12, 9, 6 Mbps) - * legofdmbw202gpo legofdmbw20ul2gpo - */ - -#define HNBU_LEGOFDMBW205GPO 0x43 /* 4 bytes each - * 5G band: OFDM power offsets for 20 MHz Legacy rates - * (54, 48, 36, 24, 18, 12, 9, 6 Mbps) - * low subband : legofdmbw205glpo legofdmbw20ul2glpo - * mid subband :legofdmbw205gmpo legofdmbw20ul2gmpo - * high subband :legofdmbw205ghpo legofdmbw20ul2ghpo - */ - -#define HNBU_MCS2GPO 0x44 /* 4 bytes each - * mcs 0-7 power-offset. LSB nibble: m0, MSB nibble: m7 - * mcsbw202gpo mcsbw20ul2gpo mcsbw402gpo - */ -#define HNBU_MCS5GLPO 0x45 /* 4 bytes each - * 5G low subband mcs 0-7 power-offset. - * LSB nibble: m0, MSB nibble: m7 - * mcsbw205glpo mcsbw20ul5glpo mcsbw405glpo - */ -#define HNBU_MCS5GMPO 0x46 /* 4 bytes each - * 5G mid subband mcs 0-7 power-offset. 
- * LSB nibble: m0, MSB nibble: m7 - * mcsbw205gmpo mcsbw20ul5gmpo mcsbw405gmpo - */ -#define HNBU_MCS5GHPO 0x47 /* 4 bytes each - * 5G high subband mcs 0-7 power-offset. - * LSB nibble: m0, MSB nibble: m7 - * mcsbw205ghpo mcsbw20ul5ghpo mcsbw405ghpo - */ -#define HNBU_MCS32PO 0x48 /* 2 bytes total - * mcs-32 power offset for each band/subband. - * LSB nibble: 2G band, MSB nibble: - * mcs322ghpo, mcs325gmpo, mcs325glpo, mcs322gpo - */ -#define HNBU_LEG40DUPPO 0x49 /* 2 bytes total - * Additional power offset for Legacy Dup40 transmissions. - * Applied in addition to legofdmbw20ulXpo, X=2g, 5gl, 5gm, or 5gh. - * LSB nibble: 2G band, MSB nibble: 5G band high subband. - * leg40dup5ghpo, leg40dup5gmpo, leg40dup5glpo, leg40dup2gpo - */ - -#define HNBU_PMUREGS 0x4a /* Variable length (5 bytes for each register) - * The setting of the ChipCtrl, PLL, RegulatorCtrl, Up/Down Timer and - * ResourceDependency Table registers. - */ - -#define HNBU_PATCH2 0x4b /* bootloader TCAM patch addr(4b) & data(4b) pair . - * This is required for socram rev 15 onwards. - */ - -#define HNBU_USBRDY 0x4c /* Variable length (upto 5 bytes) - * This is to indicate the USB/HSIC host controller - * that the device is ready for enumeration. - */ - -#define HNBU_USBREGS 0x4d /* Variable length - * The setting of the devcontrol, HSICPhyCtrl1 and HSICPhyCtrl2 - * registers during the USB initialization. - */ - -#define HNBU_BLDR_TIMEOUT 0x4e /* 2 bytes used for HSIC bootloader to reset chip - * on connect timeout. - * The Delay after USBConnect for timeout till dongle receives - * get_descriptor request. 
- */ -#define HNBU_USBFLAGS 0x4f -#define HNBU_PATCH_AUTOINC 0x50 -#define HNBU_MDIO_REGLIST 0x51 -#define HNBU_MDIOEX_REGLIST 0x52 -/* Unified OTP: tupple to embed USB manfid inside SDIO CIS */ -#define HNBU_UMANFID 0x53 -#define HNBU_PUBKEY 0x54 /* 128 byte; publick key to validate downloaded FW */ -#define HNBU_WOWLGPIO 0x55 /* 1 byte bit 7 initial polarity, bit 6..0 gpio pin */ -#define HNBU_MUXENAB 0x56 /* 1 byte to enable mux options */ -#define HNBU_GCI_CCR 0x57 /* GCI Chip control register */ - -#define HNBU_FEM_CFG 0x58 /* FEM config */ -#define HNBU_ACPA_C0 0x59 /* ACPHY PA parameters: chain 0 */ -#define HNBU_ACPA_C1 0x5a /* ACPHY PA parameters: chain 1 */ -#define HNBU_ACPA_C2 0x5b /* ACPHY PA parameters: chain 2 */ -#define HNBU_MEAS_PWR 0x5c -#define HNBU_PDOFF 0x5d -#define HNBU_ACPPR_2GPO 0x5e /* ACPHY Power-per-rate 2gpo */ -#define HNBU_ACPPR_5GPO 0x5f /* ACPHY Power-per-rate 5gpo */ -#define HNBU_ACPPR_SBPO 0x60 /* ACPHY Power-per-rate sbpo */ -#define HNBU_NOISELVL 0x61 -#define HNBU_RXGAIN_ERR 0x62 -#define HNBU_AGBGA 0x63 -#define HNBU_USBDESC_COMPOSITE 0x64 /* USB WLAN/BT composite descriptor */ -#define HNBU_PATCH_AUTOINC8 0x65 /* Auto increment patch entry for 8 byte patching */ -#define HNBU_PATCH8 0x66 /* Patch entry for 8 byte patching */ -#define HNBU_ACRXGAINS_C0 0x67 /* ACPHY rxgains: chain 0 */ -#define HNBU_ACRXGAINS_C1 0x68 /* ACPHY rxgains: chain 1 */ -#define HNBU_ACRXGAINS_C2 0x69 /* ACPHY rxgains: chain 2 */ -#define HNBU_TXDUTY 0x6a /* Tx duty cycle for ACPHY 5g 40/80 Mhz */ -#define HNBU_USBUTMI_CTL 0x6b /* 2 byte USB UTMI/LDO Control */ -#define HNBU_PDOFF_2G 0x6c -#define HNBU_USBSSPHY_UTMI_CTL0 0x6d /* 4 byte USB SSPHY UTMI Control */ -#define HNBU_USBSSPHY_UTMI_CTL1 0x6e /* 4 byte USB SSPHY UTMI Control */ -#define HNBU_USBSSPHY_UTMI_CTL2 0x6f /* 4 byte USB SSPHY UTMI Control */ -#define HNBU_USBSSPHY_SLEEP0 0x70 /* 2 byte USB SSPHY sleep */ -#define HNBU_USBSSPHY_SLEEP1 0x71 /* 2 byte USB SSPHY sleep */ -#define 
HNBU_USBSSPHY_SLEEP2 0x72 /* 2 byte USB SSPHY sleep */ -#define HNBU_USBSSPHY_SLEEP3 0x73 /* 2 byte USB SSPHY sleep */ -#define HNBU_USBSSPHY_MDIO 0x74 /* USB SSPHY INIT regs setting */ -#define HNBU_USB30PHY_NOSS 0x75 /* USB30 NO Super Speed */ -#define HNBU_USB30PHY_U1U2 0x76 /* USB30 PHY U1U2 Enable */ -#define HNBU_USB30PHY_REGS 0x77 /* USB30 PHY REGs update */ - -#define HNBU_SROM3SWRGN 0x80 /* 78 bytes; srom rev 3 s/w region without crc8 - * plus extra info appended. - */ -#define HNBU_RESERVED 0x81 /* Reserved for non-BRCM post-mfg additions */ -#define HNBU_CUSTOM1 0x82 /* 4 byte; For non-BRCM post-mfg additions */ -#define HNBU_CUSTOM2 0x83 /* Reserved; For non-BRCM post-mfg additions */ -#define HNBU_ACPAPARAM 0x84 /* ACPHY PAPARAM */ -#define HNBU_ACPA_CCK 0x86 /* ACPHY PA trimming parameters: CCK */ -#define HNBU_ACPA_40 0x87 /* ACPHY PA trimming parameters: 40 */ -#define HNBU_ACPA_80 0x88 /* ACPHY PA trimming parameters: 80 */ -#define HNBU_ACPA_4080 0x89 /* ACPHY PA trimming parameters: 40/80 */ -#define HNBU_SUBBAND5GVER 0x8a /* subband5gver */ -#define HNBU_PAPARAMBWVER 0x8b /* paparambwver */ - -#define HNBU_MCS5Gx1PO 0x8c -#define HNBU_ACPPR_SB8080_PO 0x8d - - -#endif /* !defined(LINUX_POSTMOGRIFY_REMOVAL) */ - -/* sbtmstatelow */ -#define SBTML_INT_ACK 0x40000 /* ack the sb interrupt */ -#define SBTML_INT_EN 0x20000 /* enable sb interrupt */ - -/* sbtmstatehigh */ -#define SBTMH_INT_STATUS 0x40000 /* sb interrupt status */ - -#endif /* _SBPCMCIA_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sdiovar.h b/drivers/net/wireless/bcmdhd/include/sdiovar.h deleted file mode 100644 index 9cec56e03ec81..0000000000000 --- a/drivers/net/wireless/bcmdhd/include/sdiovar.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Structure used by apps whose drivers access SDIO drivers. - * Pulled out separately so dhdu and wlu can both use it. 
- * - * $ Copyright Open Broadcom Corporation $ - * - * $Id: sdiovar.h 241182 2011-02-17 21:50:03Z $ - */ - -#ifndef _sdiovar_h_ -#define _sdiovar_h_ - -#include - -/* require default structure packing */ -#define BWL_DEFAULT_PACKING -#include - -typedef struct sdreg { - int func; - int offset; - int value; -} sdreg_t; - -/* Common msglevel constants */ -#define SDH_ERROR_VAL 0x0001 /* Error */ -#define SDH_TRACE_VAL 0x0002 /* Trace */ -#define SDH_INFO_VAL 0x0004 /* Info */ -#define SDH_DEBUG_VAL 0x0008 /* Debug */ -#define SDH_DATA_VAL 0x0010 /* Data */ -#define SDH_CTRL_VAL 0x0020 /* Control Regs */ -#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */ -#define SDH_DMA_VAL 0x0080 /* DMA */ -#define SDH_COST_VAL 0x8000 /* Control Regs */ - -#define NUM_PREV_TRANSACTIONS 16 - - -#include - -#endif /* _sdiovar_h_ */ diff --git a/drivers/net/wireless/bcmdhd/pcie_core.c b/drivers/net/wireless/bcmdhd/pcie_core.c deleted file mode 100644 index 508eccb2bcce3..0000000000000 --- a/drivers/net/wireless/bcmdhd/pcie_core.c +++ /dev/null @@ -1,70 +0,0 @@ -/** @file pcie_core.c - * - * Contains PCIe related functions that are shared between different driver models (e.g. firmware - * builds, DHD builds, BMAC builds), in order to avoid code duplication. 
- * - * $Copyright Open Broadcom Corporation$ - * - * $Id: pcie_core.c 444841 2013-12-21 04:32:29Z $ - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "pcie_core.h" - -/* local prototypes */ - -/* local variables */ - -/* function definitions */ - -#ifdef BCMDRIVER - -void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs) -{ - uint32 val, i, lsc; - uint16 cfg_offset[] = {PCIECFGREG_STATUS_CMD, PCIECFGREG_PM_CSR, - PCIECFGREG_MSI_CAP, PCIECFGREG_MSI_ADDR_L, - PCIECFGREG_MSI_ADDR_H, PCIECFGREG_MSI_DATA, - PCIECFGREG_LINK_STATUS_CTRL2, PCIECFGREG_RBAR_CTRL, - PCIECFGREG_PML1_SUB_CTRL1, PCIECFGREG_REG_BAR2_CONFIG, - PCIECFGREG_REG_BAR3_CONFIG}; - sbpcieregs_t *pcie = NULL; - uint32 origidx = si_coreidx(sih); - - /* Switch to PCIE2 core */ - pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0); - BCM_REFERENCE(pcie); - ASSERT(pcie != NULL); - - /* Disable/restore ASPM Control to protect the watchdog reset */ - W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL); - lsc = R_REG(osh, &sbpcieregs->configdata); - val = lsc & (~PCIE_ASPM_ENAB); - W_REG(osh, &sbpcieregs->configdata, val); - - si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 4); - OSL_DELAY(100000); - - W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL); - W_REG(osh, &sbpcieregs->configdata, lsc); - - /* Write configuration registers back to the shadow registers - * cause shadow registers are cleared out after watchdog reset. 
- */ - for (i = 0; i < ARRAYSIZE(cfg_offset); i++) { - W_REG(osh, &sbpcieregs->configaddr, cfg_offset[i]); - val = R_REG(osh, &sbpcieregs->configdata); - W_REG(osh, &sbpcieregs->configdata, val); - } - si_setcoreidx(sih, origidx); -} - -#endif /* BCMDRIVER */ diff --git a/drivers/net/wireless/bcmdhd/wl_cfgvendor.h b/drivers/net/wireless/bcmdhd/wl_cfgvendor.h deleted file mode 100644 index 05cb6d0f18353..0000000000000 --- a/drivers/net/wireless/bcmdhd/wl_cfgvendor.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Linux cfg80211 Vendor Extension Code - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: wl_cfgvendor.h 455257 2014-02-20 08:10:24Z $ - */ - - -#ifndef _wl_cfgvendor_h_ -#define _wl_cfgvendor_h_ - -#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) && !defined(VENDOR_EXT_SUPPORT) -#define VENDOR_EXT_SUPPORT -#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0) && !VENDOR_EXT_SUPPORT */ - -enum wl_vendor_event { - BRCM_VENDOR_EVENT_UNSPEC, - BRCM_VENDOR_EVENT_PRIV_STR -}; - -/* Capture the BRCM_VENDOR_SUBCMD_PRIV_STRINGS* here */ -#define BRCM_VENDOR_SCMD_CAPA "cap" - -#ifdef VENDOR_EXT_SUPPORT -extern int cfgvendor_attach(struct wiphy *wiphy); -extern int cfgvendor_detach(struct wiphy *wiphy); -#else -static INLINE int cfgvendor_attach(struct wiphy *wiphy) { return 0; } -static INLINE int cfgvendor_detach(struct wiphy *wiphy) { return 0; } -#endif /* VENDOR_EXT_SUPPORT */ - -#endif /* _wl_cfgvendor_h_ */ diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index d9bc2b0e66040..e8e3ef8dc1570 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -28,7 +28,7 @@ #endif /* CONFIG_PPC */ #include - +#include /** * of_fdt_is_compatible - Return true if given node from the given blob has * compat in its compatible list @@ -830,14 +830,17 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname, } else if (strcmp(type, "memory") != 0) return 0; - ddr_size = get_ddr_size(); - if (ddr_size == 2) - reg = of_get_flat_dt_prop(node, 
"linux,usable-memory-2g", &l); - else if (ddr_size == 3) - reg = of_get_flat_dt_prop(node, "linux,usable-memory-3g", &l); - else + if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXM) { + ddr_size = get_ddr_size(); + if (ddr_size == 2) + reg = of_get_flat_dt_prop(node, "linux,usable-memory-2g", &l); + else if (ddr_size == 3) + reg = of_get_flat_dt_prop(node, "linux,usable-memory-3g", &l); + else + reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l); + } else { reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l); - + } if (reg == NULL) reg = of_get_flat_dt_prop(node, "reg", &l); if (reg == NULL) diff --git a/drivers/scsi/.gitignore b/drivers/scsi/.gitignore deleted file mode 100644 index c89ae9a043991..0000000000000 --- a/drivers/scsi/.gitignore +++ /dev/null @@ -1 +0,0 @@ -53c700_d.h diff --git a/drivers/scsi/aic7xxx/.gitignore b/drivers/scsi/aic7xxx/.gitignore deleted file mode 100644 index b8ee24d5748a2..0000000000000 --- a/drivers/scsi/aic7xxx/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -aic79xx_reg.h -aic79xx_reg_print.c -aic79xx_seq.h -aic7xxx_reg.h -aic7xxx_reg_print.c -aic7xxx_seq.h diff --git a/drivers/staging/usbip/userspace/.gitignore b/drivers/staging/usbip/userspace/.gitignore deleted file mode 100644 index 9aad9e30a8ba8..0000000000000 --- a/drivers/staging/usbip/userspace/.gitignore +++ /dev/null @@ -1,28 +0,0 @@ -Makefile -Makefile.in -aclocal.m4 -autom4te.cache/ -config.guess -config.h -config.h.in -config.log -config.status -config.sub -configure -depcomp -install-sh -libsrc/Makefile -libsrc/Makefile.in -libtool -ltmain.sh -missing -src/Makefile -src/Makefile.in -stamp-h1 -libsrc/libusbip.la -libsrc/libusbip_la-names.lo -libsrc/libusbip_la-usbip_common.lo -libsrc/libusbip_la-usbip_host_driver.lo -libsrc/libusbip_la-vhci_driver.lo -src/usbip -src/usbipd diff --git a/drivers/tty/vt/.gitignore b/drivers/tty/vt/.gitignore deleted file mode 100644 index 83683a2d8e6a9..0000000000000 --- a/drivers/tty/vt/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ 
-consolemap_deftbl.c -defkeymap.c diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 6274f20cec05e..83fbd11ea39d4 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -772,6 +772,42 @@ static int dwc3_probe(struct platform_device *pdev) return ret; } +#ifdef CONFIG_AMLOGIC_USB +void dwc3_shutdown(struct platform_device *pdev) +{ + struct dwc3 *dwc = platform_get_drvdata(pdev); + + dwc3_debugfs_exit(dwc); + + switch (dwc->dr_mode) { + case USB_DR_MODE_PERIPHERAL: + dwc3_gadget_exit(dwc); + break; + case USB_DR_MODE_HOST: + dwc3_host_exit(dwc); + break; + case USB_DR_MODE_OTG: + dwc3_host_exit(dwc); + dwc3_gadget_exit(dwc); + break; + default: + /* do nothing */ + break; + } + + dwc3_event_buffers_cleanup(dwc); + dwc3_free_event_buffers(dwc); + + usb_phy_set_suspend(dwc->usb2_phy, 1); + usb_phy_set_suspend(dwc->usb3_phy, 1); + + dwc3_core_exit(dwc); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); +} +#endif + static int dwc3_remove(struct platform_device *pdev) { struct dwc3 *dwc = platform_get_drvdata(pdev); @@ -927,6 +963,9 @@ static const struct dev_pm_ops dwc3_dev_pm_ops = { static struct platform_driver dwc3_driver = { .probe = dwc3_probe, .remove = dwc3_remove, +#ifdef CONFIG_AMLOGIC_USB + .shutdown = dwc3_shutdown, +#endif .driver = { .name = "dwc3", .of_match_table = of_match_ptr(of_dwc3_match), diff --git a/drivers/video/logo/.gitignore b/drivers/video/logo/.gitignore deleted file mode 100644 index e48355f538fa8..0000000000000 --- a/drivers/video/logo/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -# -# Generated files -# -*_mono.c -*_vga16.c -*_clut224.c -*_gray256.c diff --git a/drivers/zorro/.gitignore b/drivers/zorro/.gitignore deleted file mode 100644 index 34f980bd8ff65..0000000000000 --- a/drivers/zorro/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -devlist.h -gen-devlist diff --git a/firmware/.gitignore b/firmware/.gitignore deleted file mode 100644 index d9c69017bc9af..0000000000000 --- 
a/firmware/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -*.gen.S -*.fw -*.bin -*.csp -*.dsp -ihex2fw diff --git a/firmware/cis/.gitignore b/firmware/cis/.gitignore deleted file mode 100644 index 1de39847f261d..0000000000000 --- a/firmware/cis/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.cis diff --git a/fs/Kconfig b/fs/Kconfig index 6e5e8aab3f717..3e47127a0be08 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -210,7 +210,7 @@ source "fs/ufs/Kconfig" source "fs/exofs/Kconfig" source "fs/f2fs/Kconfig" source "fs/efivarfs/Kconfig" -source "fs/aufs/Kconfig" +#source "fs/aufs/Kconfig" endif # MISC_FILESYSTEMS diff --git a/fs/Makefile b/fs/Makefile index 0352355603b02..df0ba5c2d7ba7 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -127,4 +127,4 @@ obj-y += exofs/ # Multiple modules obj-$(CONFIG_CEPH_FS) += ceph/ obj-$(CONFIG_PSTORE) += pstore/ obj-$(CONFIG_EFIVAR_FS) += efivarfs/ -obj-$(CONFIG_AUFS_FS) += aufs/ +#obj-$(CONFIG_AUFS_FS) += aufs/ diff --git a/fs/buffer.c b/fs/buffer.c index 061db785fa8e8..4d06a573d199b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2458,7 +2458,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, * Update file times before taking page lock. We may end up failing the * fault so this update may be superfluous but who really cares... 
*/ - vma_file_update_time(vma); + file_update_time(vma->vm_file); ret = __block_page_mkwrite(vma, vmf, get_block); sb_end_pagefault(sb); diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 7f1da9f010ff8..db80d47e055be 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -224,6 +224,140 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, return err; } +struct compat_dtv_property { + __u32 cmd; + __u32 reserved[3]; + union { + __u32 data; + struct { + __u8 data[32]; + __u32 len; + __u32 reserved1[3]; + compat_uptr_t reserved2; + } buffer; + } u; + int result; +}; + +struct compat_dtv_properties { + __u32 num; + compat_uptr_t props; +}; + +#define FE_SET_PROPERTY32 _IOW('o', 82, struct compat_dtv_properties) +#define FE_GET_PROPERTY32 _IOR('o', 83, struct compat_dtv_properties) + +static int do_fe_set_property(unsigned int fd, unsigned int cmd, + struct compat_dtv_properties __user *dtv32) +{ + struct dtv_properties __user *dtv; + struct dtv_property __user *properties; + struct compat_dtv_property __user *properties32; + compat_uptr_t data; + + int err; + int i; + __u32 num; + + err = get_user(num, &dtv32->num); + err |= get_user(data, &dtv32->props); + + if(err) + return -EFAULT; + + dtv = compat_alloc_user_space(sizeof(struct dtv_properties) + + sizeof(struct dtv_property) * num); + properties = (struct dtv_property*)((char*)dtv + + sizeof(struct dtv_properties)); + + err = put_user(properties, &dtv->props); + err |= put_user(num, &dtv->num); + + properties32 = compat_ptr(data); + + if(err) + return -EFAULT; + + for(i = 0; i < num; i++) { + compat_uptr_t reserved2; + + err |= copy_in_user(&properties[i], &properties32[i], + (8 * sizeof(__u32)) + (32 * sizeof(__u8))); + err |= get_user(reserved2, &properties32[i].u.buffer.reserved2); + err |= put_user(compat_ptr(reserved2), + &properties[i].u.buffer.reserved2); + } + + if(err) + return -EFAULT; + + err = sys_ioctl(fd, FE_SET_PROPERTY, (unsigned long) dtv); + + for(i = 0; i < num; i++) 
{ + if(copy_in_user(&properties32[i].result, &properties[i].result, + sizeof(int))) + return -EFAULT; + } + + return err; +} + +static int do_fe_get_property(unsigned int fd, unsigned int cmd, + struct compat_dtv_properties __user *dtv32) +{ + struct dtv_properties __user *dtv; + struct dtv_property __user *properties; + struct compat_dtv_property __user *properties32; + compat_uptr_t data; + + int err; + int i; + __u32 num; + + err = get_user(num, &dtv32->num); + err |= get_user(data, &dtv32->props); + + if(err) + return -EFAULT; + + dtv = compat_alloc_user_space(sizeof(struct dtv_properties) + + sizeof(struct dtv_property) * num); + properties = (struct dtv_property*)((char*)dtv + + sizeof(struct dtv_properties)); + + err = put_user(properties, &dtv->props); + err |= put_user(num, &dtv->num); + + properties32 = compat_ptr(data); + + if(err) + return -EFAULT; + + for(i = 0; i < num; i++) { + compat_uptr_t reserved2; + + err |= copy_in_user(&properties[i], &properties32[i], + (8 * sizeof(__u32)) + (32 * sizeof(__u8))); + err |= get_user(reserved2, &properties32[i].u.buffer.reserved2); + err |= put_user(compat_ptr(reserved2), + &properties[i].u.buffer.reserved2); + } + + if(err) + return -EFAULT; + + err = sys_ioctl(fd, FE_GET_PROPERTY, (unsigned long) dtv); + + for(i = 0; i < num; i++) { + + if(copy_in_user(&properties32[i], &properties[i], + sizeof(properties32[i]))) + return -EFAULT; + } + + return err; +} + #ifdef CONFIG_BLOCK typedef struct sg_io_hdr32 { compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */ @@ -1492,6 +1626,10 @@ static long do_ioctl_trans(int fd, unsigned int cmd, return do_video_stillpicture(fd, cmd, argp); case VIDEO_SET_SPU_PALETTE: return do_video_set_spu_palette(fd, cmd, argp); + case FE_SET_PROPERTY32: + return do_fe_set_property(fd, cmd, argp); + case FE_GET_PROPERTY32: + return do_fe_get_property(fd, cmd, argp); } /* diff --git a/fs/dcache.c b/fs/dcache.c index 08f3095a967d0..484bb4a37a49d 100644 --- a/fs/dcache.c +++ 
b/fs/dcache.c @@ -1060,7 +1060,7 @@ enum d_walk_ret { * * The @enter() and @finish() callbacks are called with d_lock held. */ -void d_walk(struct dentry *parent, void *data, +static void d_walk(struct dentry *parent, void *data, enum d_walk_ret (*enter)(void *, struct dentry *), void (*finish)(void *)) { @@ -1164,7 +1164,6 @@ void d_walk(struct dentry *parent, void *data, seq = 1; goto again; } -EXPORT_SYMBOL(d_walk); /* * Search for at least 1 mount point in the dentry's subdirs. diff --git a/fs/file_table.c b/fs/file_table.c index 15ca9fc143f4b..79ecae62209a0 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -148,7 +148,6 @@ struct file *get_empty_filp(void) } return ERR_PTR(-ENFILE); } -EXPORT_SYMBOL(get_empty_filp); /** * alloc_file - allocate and initialize a 'struct file' @@ -345,7 +344,6 @@ void put_filp(struct file *file) file_free(file); } } -EXPORT_SYMBOL(put_filp); void __init files_init(unsigned long mempages) { diff --git a/fs/inode.c b/fs/inode.c index 3e6463ac107d6..e846a32e8d6e6 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -57,7 +57,6 @@ static struct hlist_head *inode_hashtable __read_mostly; static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock); __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock); -EXPORT_SYMBOL(inode_sb_list_lock); /* * Empty aops. Can be used for the cases where the user does not @@ -1498,7 +1497,7 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode, * This does the actual work of updating an inodes time or version. Must have * had called mnt_want_write() before calling this. 
*/ -int update_time(struct inode *inode, struct timespec *time, int flags) +static int update_time(struct inode *inode, struct timespec *time, int flags) { if (inode->i_op->update_time) return inode->i_op->update_time(inode, time, flags); @@ -1514,7 +1513,6 @@ int update_time(struct inode *inode, struct timespec *time, int flags) mark_inode_dirty_sync(inode); return 0; } -EXPORT_SYMBOL(update_time); /** * touch_atime - update the access time diff --git a/fs/namespace.c b/fs/namespace.c index 5ceb2ed18495e..7ee574249ff31 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -455,7 +455,6 @@ void __mnt_drop_write(struct vfsmount *mnt) mnt_dec_writers(real_mount(mnt)); preempt_enable(); } -EXPORT_SYMBOL_GPL(__mnt_drop_write); /** * mnt_drop_write - give up write access to a mount @@ -1628,7 +1627,6 @@ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, } return 0; } -EXPORT_SYMBOL(iterate_mounts); static void cleanup_group_ids(struct mount *mnt, struct mount *end) { diff --git a/fs/notify/group.c b/fs/notify/group.c index adf290d67d158..ad19959804565 100644 --- a/fs/notify/group.c +++ b/fs/notify/group.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include "fsnotify.h" @@ -73,7 +72,6 @@ void fsnotify_get_group(struct fsnotify_group *group) { atomic_inc(&group->refcnt); } -EXPORT_SYMBOL(fsnotify_get_group); /* * Drop a reference to a group. Free it if it's through. @@ -83,7 +81,6 @@ void fsnotify_put_group(struct fsnotify_group *group) if (atomic_dec_and_test(&group->refcnt)) fsnotify_final_destroy_group(group); } -EXPORT_SYMBOL(fsnotify_put_group); /* * Create a new fsnotify_group and hold a reference for the group returned. 
@@ -112,7 +109,6 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops) return group; } -EXPORT_SYMBOL(fsnotify_alloc_group); int fsnotify_fasync(int fd, struct file *file, int on) { diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 176b4357b9408..923fe4a5f503f 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -109,7 +109,6 @@ void fsnotify_put_mark(struct fsnotify_mark *mark) mark->free_mark(mark); } } -EXPORT_SYMBOL(fsnotify_put_mark); /* * Any time a mark is getting freed we end up here. @@ -192,7 +191,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark, fsnotify_destroy_mark_locked(mark, group); mutex_unlock(&group->mark_mutex); } -EXPORT_SYMBOL(fsnotify_destroy_mark); void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask) { @@ -277,7 +275,6 @@ int fsnotify_add_mark_locked(struct fsnotify_mark *mark, return ret; } -EXPORT_SYMBOL(fsnotify_add_mark); int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group, struct inode *inode, struct vfsmount *mnt, int allow_dups) @@ -339,7 +336,6 @@ void fsnotify_init_mark(struct fsnotify_mark *mark, atomic_set(&mark->refcnt, 1); mark->free_mark = free_mark; } -EXPORT_SYMBOL(fsnotify_init_mark); static int fsnotify_mark_destroy(void *ignored) { diff --git a/fs/open.c b/fs/open.c index 718c5ba3bee8e..c92c6ef8bf6dc 100644 --- a/fs/open.c +++ b/fs/open.c @@ -62,7 +62,6 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs, mutex_unlock(&dentry->d_inode->i_mutex); return ret; } -EXPORT_SYMBOL(do_truncate); long vfs_truncate(struct path *path, loff_t length) { @@ -280,7 +279,6 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len) sb_end_write(inode->i_sb); return ret; } -EXPORT_SYMBOL(do_fallocate); SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len) { @@ -664,7 +662,6 @@ int open_check_o_direct(struct file *f) } return 0; } -EXPORT_SYMBOL(open_check_o_direct); static int 
do_dentry_open(struct file *f, int (*open)(struct inode *, struct file *), diff --git a/fs/proc/base.c b/fs/proc/base.c index b7e9866284651..f5acec4f1c740 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1865,7 +1865,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path) down_read(&mm->mmap_sem); vma = find_exact_vma(mm, vm_start, vm_end); if (vma && vma->vm_file) { - *path = vma_pr_or_file(vma)->f_path; + *path = vma->vm_file->f_path; path_get(path); rc = 0; } diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c index 13971813209e8..d4a35746cab91 100644 --- a/fs/proc/nommu.c +++ b/fs/proc/nommu.c @@ -45,10 +45,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region) file = region->vm_file; if (file) { - struct inode *inode; - - file = vmr_pr_or_file(region); - inode = file_inode(file); + struct inode *inode = file_inode(region->vm_file); dev = inode->i_sb->s_dev; ino = inode->i_ino; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index c3cc7aa4fa696..0482e7cb1fb77 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -315,10 +315,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) const char *name = NULL; if (file) { - struct inode *inode; - - file = vma_pr_or_file(vma); - inode = file_inode(file); + struct inode *inode = file_inode(vma->vm_file); dev = inode->i_sb->s_dev; ino = inode->i_ino; pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; @@ -1476,7 +1473,6 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) seq_printf(m, "%08lx %s", vma->vm_start, buffer); if (file) { - file = vma_pr_or_file(vma); seq_printf(m, " file="); seq_path(m, &file->f_path, "\n\t= "); } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index ebd34ba73c08a..678455d2d6839 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -141,10 +141,7 @@ static int nommu_vma_show(struct seq_file *m, struct 
vm_area_struct *vma, file = vma->vm_file; if (file) { - struct inode *inode; - - file = vma_pr_or_file(vma); - inode = file_inode(file); + struct inode *inode = file_inode(vma->vm_file); dev = inode->i_sb->s_dev; ino = inode->i_ino; pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT; diff --git a/fs/splice.c b/fs/splice.c index 3ff6576aeae7c..ffb92b9d34ba4 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -1111,8 +1111,8 @@ EXPORT_SYMBOL(generic_splice_sendpage); /* * Attempt to initiate a splice from pipe to file. */ -long do_splice_from(struct pipe_inode_info *pipe, struct file *out, - loff_t *ppos, size_t len, unsigned int flags) +static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, + loff_t *ppos, size_t len, unsigned int flags) { ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); @@ -1124,14 +1124,13 @@ long do_splice_from(struct pipe_inode_info *pipe, struct file *out, return splice_write(pipe, out, ppos, len, flags); } -EXPORT_SYMBOL(do_splice_from); /* * Attempt to initiate a splice from a file to a pipe. 
*/ -long do_splice_to(struct file *in, loff_t *ppos, - struct pipe_inode_info *pipe, size_t len, - unsigned int flags) +static long do_splice_to(struct file *in, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags) { ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); @@ -1151,7 +1150,6 @@ long do_splice_to(struct file *in, loff_t *ppos, return splice_read(in, ppos, pipe, len, flags); } -EXPORT_SYMBOL(do_splice_to); /** * splice_direct_to_actor - splices data directly between two non-pipes diff --git a/fs/xattr.c b/fs/xattr.c index 1de03c80b68ee..3377dff184042 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -207,7 +207,6 @@ vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value, *xattr_value = value; return error; } -EXPORT_SYMBOL(vfs_getxattr_alloc); /* Compare an extended attribute value with the given value */ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name, diff --git a/include/linux/amlogic/pwm_meson.h b/include/linux/amlogic/pwm_meson.h index 13ccab4cd8f95..f929c348786ca 100644 --- a/include/linux/amlogic/pwm_meson.h +++ b/include/linux/amlogic/pwm_meson.h @@ -166,38 +166,38 @@ int pwm_set_blink_times(struct aml_pwm_chip *chip, #else static inline int pwm_constant_enable - (struct aml_pwm_chip *chip, int index) + (struct aml_pwm_chip *chip, int index); { return -EINVAL; } static inline int pwm_constant_disable - (struct aml_pwm_chip *chip , int index) + (struct aml_pwm_chip *chip , int index); { return -EINVAL; } static inline int pwm_blink_enable - (struct aml_pwm_chip *chip, int index) + (struct aml_pwm_chip *chip, int index); { return -EINVAL; } static inline int pwm_blink_disable - (struct aml_pwm_chip *chip , int index) + (struct aml_pwm_chip *chip , int index); { return -EINVAL; } static inline int pwm_set_times(struct aml_pwm_chip *chip, - int index, int value) + int index, int value); { return -EINVAL; } static inline int 
pwm_set_blink_times(struct aml_pwm_chip *chip, int index, - int value) + int value); { return -EINVAL; } diff --git a/include/linux/amlogic/vout/vout_notify.h b/include/linux/amlogic/vout/vout_notify.h index 58418a33ca806..a60226c6ed21d 100644 --- a/include/linux/amlogic/vout/vout_notify.h +++ b/include/linux/amlogic/vout/vout_notify.h @@ -39,6 +39,7 @@ struct vout_op_s { int (*get_vframe_rate_policy)(void); int (*vout_suspend)(void); int (*vout_resume)(void); + int (*vout_shutdown)(void); }; struct vout_server_s { @@ -83,6 +84,7 @@ extern void set_vout_mode_fr_auto(char *name); extern int vout_suspend(void); extern int vout_resume(void); +extern int vout_shutdown(void); extern int get_power_level(void); diff --git a/include/linux/file.h b/include/linux/file.h index 62cffc0ee84e1..4d69123377a2b 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -19,7 +19,6 @@ struct dentry; struct path; extern struct file *alloc_file(struct path *, fmode_t mode, const struct file_operations *fop); -extern struct file *get_empty_filp(void); static inline void fput_light(struct file *file, int fput_needed) { diff --git a/include/linux/fs.h b/include/linux/fs.h index 46498a335b193..e1cee8b12d1af 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2708,7 +2708,6 @@ extern int inode_change_ok(const struct inode *, struct iattr *); extern int inode_newsize_ok(const struct inode *, loff_t offset); extern void setattr_copy(struct inode *inode, const struct iattr *attr); -extern int update_time(struct inode *, struct timespec *, int); extern int file_update_time(struct file *file); extern int generic_show_options(struct seq_file *m, struct dentry *root); diff --git a/include/linux/mm.h b/include/linux/mm.h index 8fa7007562d27..b7bf847bf359d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1205,28 +1205,6 @@ static inline int fixup_user_fault(struct task_struct *tsk, } #endif -extern void vma_do_file_update_time(struct vm_area_struct *, const char[], int); 
-extern struct file *vma_do_pr_or_file(struct vm_area_struct *, const char[], - int); -extern void vma_do_get_file(struct vm_area_struct *, const char[], int); -extern void vma_do_fput(struct vm_area_struct *, const char[], int); - -#define vma_file_update_time(vma) vma_do_file_update_time(vma, __func__, \ - __LINE__) -#define vma_pr_or_file(vma) vma_do_pr_or_file(vma, __func__, \ - __LINE__) -#define vma_get_file(vma) vma_do_get_file(vma, __func__, __LINE__) -#define vma_fput(vma) vma_do_fput(vma, __func__, __LINE__) - -#ifndef CONFIG_MMU -extern struct file *vmr_do_pr_or_file(struct vm_region *, const char[], int); -extern void vmr_do_fput(struct vm_region *, const char[], int); - -#define vmr_pr_or_file(region) vmr_do_pr_or_file(region, __func__, \ - __LINE__) -#define vmr_fput(region) vmr_do_fput(region, __func__, __LINE__) -#endif /* !CONFIG_MMU */ - extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, int write); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6c9e8dea7ba1b..3dd59b3d68121 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -234,7 +234,6 @@ struct vm_region { unsigned long vm_top; /* region allocated to here */ unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ struct file *vm_file; /* the backing file or NULL */ - struct file *vm_prfile; /* the virtual backing file or NULL */ int vm_usage; /* region usage count (access under nommu_region_sem) */ bool vm_icache_flushed : 1; /* true if the icache has been flushed for @@ -308,7 +307,6 @@ struct vm_area_struct { unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE units, *not* PAGE_CACHE_SIZE */ struct file * vm_file; /* File we map to (can be NULL). 
*/ - struct file *vm_prfile; /* shadow of vm_file */ void * vm_private_data; /* was vm_pte (shared mem) */ #ifndef CONFIG_MMU diff --git a/include/linux/splice.h b/include/linux/splice.h index 304169ea44394..0e43906d2fda6 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -93,10 +93,4 @@ extern void splice_shrink_spd(struct splice_pipe_desc *); extern void spd_release_page(struct splice_pipe_desc *, unsigned int); extern const struct pipe_buf_operations page_cache_pipe_buf_ops; - -extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out, - loff_t *ppos, size_t len, unsigned int flags); -extern long do_splice_to(struct file *in, loff_t *ppos, - struct pipe_inode_info *pipe, size_t len, - unsigned int flags); #endif diff --git a/kernel/.gitignore b/kernel/.gitignore deleted file mode 100644 index 790d83c7d1607..0000000000000 --- a/kernel/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -# -# Generated files -# -config_data.h -config_data.gz -timeconst.h -hz.bc -x509_certificate_list diff --git a/kernel/debug/kdb/.gitignore b/kernel/debug/kdb/.gitignore deleted file mode 100644 index 396d12eda9e8e..0000000000000 --- a/kernel/debug/kdb/.gitignore +++ /dev/null @@ -1 +0,0 @@ -gen-kdb_cmds.c diff --git a/kernel/fork.c b/kernel/fork.c index 9c6180017262c..f1df4895cd40f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -510,7 +510,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) struct inode *inode = file_inode(file); struct address_space *mapping = file->f_mapping; - vma_get_file(tmp); + get_file(file); if (tmp->vm_flags & VM_DENYWRITE) atomic_dec(&inode->i_writecount); mutex_lock(&mapping->i_mmap_mutex); diff --git a/lib/.gitignore b/lib/.gitignore deleted file mode 100644 index 09aae85418ab9..0000000000000 --- a/lib/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# -# Generated files -# -gen_crc32table -crc32table.h -oid_registry_data.c diff --git a/lib/mpi/mpi-inline.h b/lib/mpi/mpi-inline.h index c245ea31f7853..e2b39852b30a5 
100644 --- a/lib/mpi/mpi-inline.h +++ b/lib/mpi/mpi-inline.h @@ -30,7 +30,7 @@ #define G10_MPI_INLINE_H #ifndef G10_MPI_INLINE_DECL -#define G10_MPI_INLINE_DECL static inline +#define G10_MPI_INLINE_DECL extern inline #endif G10_MPI_INLINE_DECL mpi_limb_t diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h index 54c1e9718fc82..60cf765628e90 100644 --- a/lib/mpi/mpi-internal.h +++ b/lib/mpi/mpi-internal.h @@ -168,12 +168,20 @@ void mpi_rshift_limbs(MPI a, unsigned int count); int mpi_lshift_limbs(MPI a, unsigned int count); /*-- mpihelp-add.c --*/ +mpi_limb_t mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_size_t s1_size, mpi_limb_t s2_limb); mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_ptr_t s2_ptr, mpi_size_t size); +mpi_limb_t mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, + mpi_ptr_t s2_ptr, mpi_size_t s2_size); /*-- mpihelp-sub.c --*/ +mpi_limb_t mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_size_t s1_size, mpi_limb_t s2_limb); mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_ptr_t s2_ptr, mpi_size_t size); +mpi_limb_t mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, + mpi_ptr_t s2_ptr, mpi_size_t s2_size); /*-- mpihelp-cmp.c --*/ int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size); diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore deleted file mode 100644 index 0a7e494b2bcda..0000000000000 --- a/lib/raid6/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -mktables -altivec*.c -int*.c -tables.c -neon?.c diff --git a/mm/Makefile b/mm/Makefile index 39ad6aa1d2203..bb6e41845100a 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -17,7 +17,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \ util.o mmzone.o vmstat.o backing-dev.o \ mm_init.o mmu_context.o percpu.o slab_common.o \ compaction.o balloon_compaction.o vmacache.o \ - interval_tree.o list_lru.o prfile.o $(mmu-y) + interval_tree.o list_lru.o $(mmu-y) obj-y += init-mm.o diff --git 
a/mm/filemap.c b/mm/filemap.c index bcdea673955f7..9b6a67fef5322 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2032,7 +2032,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) int ret = VM_FAULT_LOCKED; sb_start_pagefault(inode->i_sb); - vma_file_update_time(vma); + file_update_time(vma->vm_file); lock_page(page); if (page->mapping != inode->i_mapping) { unlock_page(page); diff --git a/mm/fremap.c b/mm/fremap.c index 5397350859357..34feba60a17e6 100644 --- a/mm/fremap.c +++ b/mm/fremap.c @@ -223,28 +223,16 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, */ if (mapping_cap_account_dirty(mapping)) { unsigned long addr; - struct file *file = vma->vm_file, - *prfile = vma->vm_prfile; - + struct file *file = get_file(vma->vm_file); /* mmap_region may free vma; grab the info now */ vm_flags = vma->vm_flags; - vma_get_file(vma); addr = mmap_region(file, start, size, vm_flags, pgoff); - vma_fput(vma); + fput(file); if (IS_ERR_VALUE(addr)) { err = addr; } else { BUG_ON(addr != start); - if (prfile) { - struct vm_area_struct *new_vma; - - new_vma = find_vma(mm, addr); - if (!new_vma->vm_prfile) - new_vma->vm_prfile = prfile; - if (new_vma != vma) - get_file(prfile); - } err = 0; } goto out_freed; diff --git a/mm/memory.c b/mm/memory.c index 2339d85017512..0b985f28b314e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2777,7 +2777,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, set_page_dirty_balance(dirty_page, page_mkwrite); /* file_update_time outside page_lock */ if (vma->vm_file) - vma_file_update_time(vma); + file_update_time(vma->vm_file); } put_page(dirty_page); if (page_mkwrite) { @@ -3496,7 +3496,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, /* file_update_time outside page_lock */ if (vma->vm_file && !page_mkwrite) - vma_file_update_time(vma); + file_update_time(vma->vm_file); } else { unlock_page(vmf.page); if (anon) diff --git a/mm/mmap.c 
b/mm/mmap.c index e4c6b8d306355..c7aeee07eefba 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -262,7 +262,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); if (vma->vm_file) - vma_fput(vma); + fput(vma->vm_file); mpol_put(vma_policy(vma)); kmem_cache_free(vm_area_cachep, vma); return next; @@ -876,7 +876,7 @@ again: remove_next = 1 + (end > next->vm_end); if (remove_next) { if (file) { uprobe_munmap(next, next->vm_start, next->vm_end); - vma_fput(vma); + fput(file); } if (next->anon_vma) anon_vma_merge(vma, next); @@ -1663,8 +1663,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr, unmap_and_free_vma: if (vm_flags & VM_DENYWRITE) allow_write_access(file); - vma_fput(vma); vma->vm_file = NULL; + fput(file); /* Undo any partial mapping done by a device driver. */ unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); @@ -2458,7 +2458,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, goto out_free_mpol; if (new->vm_file) - vma_get_file(new); + get_file(new->vm_file); if (new->vm_ops && new->vm_ops->open) new->vm_ops->open(new); @@ -2477,7 +2477,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, if (new->vm_ops && new->vm_ops->close) new->vm_ops->close(new); if (new->vm_file) - vma_fput(new); + fput(new->vm_file); unlink_anon_vmas(new); out_free_mpol: mpol_put(vma_policy(new)); @@ -2867,7 +2867,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, if (anon_vma_clone(new_vma, vma)) goto out_free_mempol; if (new_vma->vm_file) - vma_get_file(new_vma); + get_file(new_vma->vm_file); if (new_vma->vm_ops && new_vma->vm_ops->open) new_vma->vm_ops->open(new_vma); vma_link(mm, new_vma, prev, rb_link, rb_parent); diff --git a/mm/nommu.c b/mm/nommu.c index a44ef36498bfb..3ee4f74fbfbe3 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -654,7 +654,7 @@ static void __put_nommu_region(struct vm_region *region) 
up_write(&nommu_region_sem); if (region->vm_file) - vmr_fput(region); + fput(region->vm_file); /* IO memory and memory shared directly out of the pagecache * from ramfs/tmpfs mustn't be released here */ @@ -819,7 +819,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); if (vma->vm_file) - vma_fput(vma); + fput(vma->vm_file); put_nommu_region(vma->vm_region); kmem_cache_free(vm_area_cachep, vma); } @@ -1385,7 +1385,7 @@ unsigned long do_mmap_pgoff(struct file *file, goto error_just_free; } } - vmr_fput(region); + fput(region->vm_file); kmem_cache_free(vm_region_jar, region); region = pregion; result = start; @@ -1461,10 +1461,10 @@ unsigned long do_mmap_pgoff(struct file *file, up_write(&nommu_region_sem); error: if (region->vm_file) - vmr_fput(region); + fput(region->vm_file); kmem_cache_free(vm_region_jar, region); if (vma->vm_file) - vma_fput(vma); + fput(vma->vm_file); kmem_cache_free(vm_area_cachep, vma); kleave(" = %d", ret); return ret; diff --git a/mm/prfile.c b/mm/prfile.c deleted file mode 100644 index 532e5187823de..0000000000000 --- a/mm/prfile.c +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Mainly for aufs which mmap(2) diffrent file and wants to print different path - * in /proc/PID/maps. - * Call these functions via macros defined in linux/mm.h. - * - * See Documentation/filesystems/aufs/design/06mmap.txt - * - * Copyright (c) 2014 Junjro R. Okajima - * Copyright (c) 2014 Ian Campbell - */ - -#include -#include -#include - -/* #define PRFILE_TRACE */ -static inline void prfile_trace(struct file *f, struct file *pr, - const char func[], int line, const char func2[]) -{ -#ifdef PRFILE_TRACE - if (pr) - pr_info("%s:%d: %s, %s\n", func, line, func2, - f ? 
(char *)f->f_dentry->d_name.name : "(null)"); -#endif -} - -void vma_do_file_update_time(struct vm_area_struct *vma, const char func[], - int line) -{ - struct file *f = vma->vm_file, *pr = vma->vm_prfile; - - prfile_trace(f, pr, func, line, __func__); - file_update_time(f); - if (f && pr) - file_update_time(pr); -} - -struct file *vma_do_pr_or_file(struct vm_area_struct *vma, const char func[], - int line) -{ - struct file *f = vma->vm_file, *pr = vma->vm_prfile; - - prfile_trace(f, pr, func, line, __func__); - return (f && pr) ? pr : f; -} - -void vma_do_get_file(struct vm_area_struct *vma, const char func[], int line) -{ - struct file *f = vma->vm_file, *pr = vma->vm_prfile; - - prfile_trace(f, pr, func, line, __func__); - get_file(f); - if (f && pr) - get_file(pr); -} - -void vma_do_fput(struct vm_area_struct *vma, const char func[], int line) -{ - struct file *f = vma->vm_file, *pr = vma->vm_prfile; - - prfile_trace(f, pr, func, line, __func__); - fput(f); - if (f && pr) - fput(pr); -} - -#ifndef CONFIG_MMU -struct file *vmr_do_pr_or_file(struct vm_region *region, const char func[], - int line) -{ - struct file *f = region->vm_file, *pr = region->vm_prfile; - - prfile_trace(f, pr, func, line, __func__); - return (f && pr) ? 
pr : f; -} - -void vmr_do_fput(struct vm_region *region, const char func[], int line) -{ - struct file *f = region->vm_file, *pr = region->vm_prfile; - - prfile_trace(f, pr, func, line, __func__); - fput(f); - if (f && pr) - fput(pr); -} -#endif /* !CONFIG_MMU */ diff --git a/net/wireless/.gitignore b/net/wireless/.gitignore deleted file mode 100644 index c33451b896d94..0000000000000 --- a/net/wireless/.gitignore +++ /dev/null @@ -1 +0,0 @@ -regdb.c diff --git a/samples/hidraw/.gitignore b/samples/hidraw/.gitignore deleted file mode 100644 index 05e51a6852422..0000000000000 --- a/samples/hidraw/.gitignore +++ /dev/null @@ -1 +0,0 @@ -hid-example diff --git a/samples/seccomp/.gitignore b/samples/seccomp/.gitignore deleted file mode 100644 index 78fb781842911..0000000000000 --- a/samples/seccomp/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -bpf-direct -bpf-fancy -dropper diff --git a/scripts/.gitignore b/scripts/.gitignore deleted file mode 100644 index fb070fa1038fe..0000000000000 --- a/scripts/.gitignore +++ /dev/null @@ -1,13 +0,0 @@ -# -# Generated files -# -conmakehash -kallsyms -pnmtologo -bin2c -unifdef -ihex2fw -recordmcount -docproc -sortextable -asn1_compiler diff --git a/scripts/basic/.gitignore b/scripts/basic/.gitignore deleted file mode 100644 index a776371a35024..0000000000000 --- a/scripts/basic/.gitignore +++ /dev/null @@ -1 +0,0 @@ -fixdep diff --git a/scripts/dtc/.gitignore b/scripts/dtc/.gitignore deleted file mode 100644 index 095acb49a374b..0000000000000 --- a/scripts/dtc/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -dtc -dtc-lexer.lex.c -dtc-parser.tab.c -dtc-parser.tab.h - diff --git a/scripts/genksyms/.gitignore b/scripts/genksyms/.gitignore deleted file mode 100644 index 86dc07a01b439..0000000000000 --- a/scripts/genksyms/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.hash.c -*.lex.c -*.tab.c -*.tab.h -genksyms diff --git a/scripts/kconfig/.gitignore b/scripts/kconfig/.gitignore deleted file mode 100644 index be603c4fef624..0000000000000 --- 
a/scripts/kconfig/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# -# Generated files -# -config* -*.lex.c -*.tab.c -*.tab.h -zconf.hash.c -*.moc -gconf.glade.h -*.pot -*.mo - -# -# configuration programs -# -conf -mconf -nconf -qconf -gconf -kxgettext diff --git a/scripts/kconfig/lxdialog/.gitignore b/scripts/kconfig/lxdialog/.gitignore deleted file mode 100644 index 90b08ff025a65..0000000000000 --- a/scripts/kconfig/lxdialog/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# -# Generated files -# -lxdialog diff --git a/scripts/mod/.gitignore b/scripts/mod/.gitignore deleted file mode 100644 index 33bae0df4de5b..0000000000000 --- a/scripts/mod/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -elfconfig.h -mk_elfconfig -modpost -devicetable-offsets.h - diff --git a/scripts/selinux/genheaders/.gitignore b/scripts/selinux/genheaders/.gitignore deleted file mode 100644 index 4c0b646ff8d54..0000000000000 --- a/scripts/selinux/genheaders/.gitignore +++ /dev/null @@ -1 +0,0 @@ -genheaders diff --git a/scripts/selinux/mdp/.gitignore b/scripts/selinux/mdp/.gitignore deleted file mode 100644 index 654546d8dffd2..0000000000000 --- a/scripts/selinux/mdp/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Generated file -mdp diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore deleted file mode 100644 index 9cdec70d72b8e..0000000000000 --- a/security/apparmor/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -# -# Generated include files -# -capability_names.h -rlim_names.h diff --git a/security/commoncap.c b/security/commoncap.c index bc50fce77edb3..116050e2dfa9f 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -1002,11 +1002,9 @@ int cap_mmap_addr(unsigned long addr) } return ret; } -EXPORT_SYMBOL(cap_mmap_addr); int cap_mmap_file(struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags) { return 0; } -EXPORT_SYMBOL(cap_mmap_file); diff --git a/security/device_cgroup.c b/security/device_cgroup.c index ffd4259d471cb..6e4e6eb732fec 100644 --- 
a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include @@ -862,7 +861,6 @@ int __devcgroup_inode_permission(struct inode *inode, int mask) return __devcgroup_check_permission(type, imajor(inode), iminor(inode), access); } -EXPORT_SYMBOL(__devcgroup_inode_permission); int devcgroup_inode_mknod(int mode, dev_t dev) { diff --git a/security/security.c b/security/security.c index 17512af389351..322ff712fbbd7 100644 --- a/security/security.c +++ b/security/security.c @@ -427,7 +427,6 @@ int security_path_rmdir(struct path *dir, struct dentry *dentry) return 0; return security_ops->path_rmdir(dir, dentry); } -EXPORT_SYMBOL(security_path_rmdir); int security_path_unlink(struct path *dir, struct dentry *dentry) { @@ -444,7 +443,6 @@ int security_path_symlink(struct path *dir, struct dentry *dentry, return 0; return security_ops->path_symlink(dir, dentry, old_name); } -EXPORT_SYMBOL(security_path_symlink); int security_path_link(struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry) @@ -453,7 +451,6 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir, return 0; return security_ops->path_link(old_dentry, new_dir, new_dentry); } -EXPORT_SYMBOL(security_path_link); int security_path_rename(struct path *old_dir, struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry, @@ -481,7 +478,6 @@ int security_path_truncate(struct path *path) return 0; return security_ops->path_truncate(path); } -EXPORT_SYMBOL(security_path_truncate); int security_path_chmod(struct path *path, umode_t mode) { @@ -489,7 +485,6 @@ int security_path_chmod(struct path *path, umode_t mode) return 0; return security_ops->path_chmod(path, mode); } -EXPORT_SYMBOL(security_path_chmod); int security_path_chown(struct path *path, kuid_t uid, kgid_t gid) { @@ -497,7 +492,6 @@ int security_path_chown(struct path *path, kuid_t uid, kgid_t gid) return 0; return 
security_ops->path_chown(path, uid, gid); } -EXPORT_SYMBOL(security_path_chown); int security_path_chroot(struct path *path) { @@ -583,7 +577,6 @@ int security_inode_readlink(struct dentry *dentry) return 0; return security_ops->inode_readlink(dentry); } -EXPORT_SYMBOL(security_inode_readlink); int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd) { @@ -598,7 +591,6 @@ int security_inode_permission(struct inode *inode, int mask) return 0; return security_ops->inode_permission(inode, mask); } -EXPORT_SYMBOL(security_inode_permission); int security_inode_setattr(struct dentry *dentry, struct iattr *attr) { @@ -721,7 +713,6 @@ int security_file_permission(struct file *file, int mask) return fsnotify_perm(file, mask); } -EXPORT_SYMBOL(security_file_permission); int security_file_alloc(struct file *file) { @@ -782,7 +773,6 @@ int security_mmap_file(struct file *file, unsigned long prot, return ret; return ima_file_mmap(file, prot); } -EXPORT_SYMBOL(security_mmap_file); int security_mmap_addr(unsigned long addr) { diff --git a/security/selinux/.gitignore b/security/selinux/.gitignore deleted file mode 100644 index 2e5040a3d48bc..0000000000000 --- a/security/selinux/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -av_permissions.h -flask.h diff --git a/security/tomoyo/.gitignore b/security/tomoyo/.gitignore deleted file mode 100644 index 5caf1a6f59078..0000000000000 --- a/security/tomoyo/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -builtin-policy.h -policy/ diff --git a/sound/oss/.gitignore b/sound/oss/.gitignore deleted file mode 100644 index 12a3920d6fb63..0000000000000 --- a/sound/oss/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -#Ignore generated files -pss_boot.h -trix_boot.h diff --git a/sound/soc/aml/m8/aml_m8.c b/sound/soc/aml/m8/aml_m8.c index 54cb030176a62..812c2c21a6f1b 100644 --- a/sound/soc/aml/m8/aml_m8.c +++ b/sound/soc/aml/m8/aml_m8.c @@ -748,6 +748,10 @@ static int aml_m8_audio_probe(struct platform_device *pdev) static void 
aml_audio_shutdown(struct platform_device *pdev) { struct pinctrl_state *state; + struct snd_soc_card *card; + + card = platform_get_drvdata(pdev); + aml_suspend_pre(card); if (IS_ERR_OR_NULL(p_audio->pin_ctl)) { pr_info("no audio pin_ctrl to shutdown\n"); diff --git a/tools/cgroup/.gitignore b/tools/cgroup/.gitignore deleted file mode 100644 index 633cd9b874f95..0000000000000 --- a/tools/cgroup/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cgroup_event_listener diff --git a/tools/lguest/.gitignore b/tools/lguest/.gitignore deleted file mode 100644 index 115587fd5f657..0000000000000 --- a/tools/lguest/.gitignore +++ /dev/null @@ -1 +0,0 @@ -lguest diff --git a/tools/lib/traceevent/.gitignore b/tools/lib/traceevent/.gitignore deleted file mode 100644 index 35f56be5a4cdb..0000000000000 --- a/tools/lib/traceevent/.gitignore +++ /dev/null @@ -1 +0,0 @@ -TRACEEVENT-CFLAGS diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore deleted file mode 100644 index 782d86e961b9f..0000000000000 --- a/tools/perf/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -PERF-CFLAGS -PERF-GUI-VARS -PERF-VERSION-FILE -perf -perf-help -perf-record -perf-report -perf-stat -perf-top -perf*.1 -perf*.xml -perf*.html -common-cmds.h -perf.data -perf.data.old -output.svg -perf-archive -tags -TAGS -cscope* -config.mak -config.mak.autogen -*-bison.* -*-flex.* -*.pyc -*.pyo diff --git a/tools/perf/config/feature-checks/.gitignore b/tools/perf/config/feature-checks/.gitignore deleted file mode 100644 index 80f3da0c35155..0000000000000 --- a/tools/perf/config/feature-checks/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.d -*.bin diff --git a/tools/power/cpupower/.gitignore b/tools/power/cpupower/.gitignore deleted file mode 100644 index d42073f12609e..0000000000000 --- a/tools/power/cpupower/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -.libs -libcpupower.so -libcpupower.so.0 -libcpupower.so.0.0.0 -build/ccdv -cpufreq-info -cpufreq-set -cpufreq-aperf -lib/.libs -lib/cpufreq.lo -lib/cpufreq.o -lib/proc.lo -lib/proc.o 
-lib/sysfs.lo -lib/sysfs.o -po/cpupowerutils.pot -po/*.gmo -utils/cpufreq-info.o -utils/cpufreq-set.o -utils/cpufreq-aperf.o -cpupower -bench/cpufreq-bench -debug/kernel/Module.symvers -debug/i386/centrino-decode -debug/i386/dump_psb -debug/i386/intel_gsic -debug/i386/powernow-k8-decode -debug/x86_64/centrino-decode -debug/x86_64/powernow-k8-decode diff --git a/tools/power/x86/turbostat/.gitignore b/tools/power/x86/turbostat/.gitignore deleted file mode 100644 index 7521370d35687..0000000000000 --- a/tools/power/x86/turbostat/.gitignore +++ /dev/null @@ -1 +0,0 @@ -turbostat diff --git a/tools/testing/selftests/kcmp/.gitignore b/tools/testing/selftests/kcmp/.gitignore deleted file mode 100644 index 5a9b3732b2de2..0000000000000 --- a/tools/testing/selftests/kcmp/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -kcmp_test -kcmp-test-file diff --git a/tools/testing/selftests/mqueue/.gitignore b/tools/testing/selftests/mqueue/.gitignore deleted file mode 100644 index d8d42377205ac..0000000000000 --- a/tools/testing/selftests/mqueue/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -mq_open_tests -mq_perf_tests diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore deleted file mode 100644 index 00326629d4af8..0000000000000 --- a/tools/testing/selftests/net/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -socket -psock_fanout -psock_tpacket diff --git a/tools/testing/selftests/rcutorture/.gitignore b/tools/testing/selftests/rcutorture/.gitignore deleted file mode 100644 index 05838f6f2ebe0..0000000000000 --- a/tools/testing/selftests/rcutorture/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -initrd -linux-2.6 -b[0-9]* -rcu-test-image -res -*.swp diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore deleted file mode 100644 index ff1bb16cec4f5..0000000000000 --- a/tools/testing/selftests/vm/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -hugepage-mmap -hugepage-shm -map_hugetlb -thuge-gen diff --git a/tools/virtio/.gitignore 
b/tools/virtio/.gitignore deleted file mode 100644 index 1cfbb0157a46d..0000000000000 --- a/tools/virtio/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.d -virtio_test -vringh_test diff --git a/tools/vm/.gitignore b/tools/vm/.gitignore deleted file mode 100644 index 44f095fa26043..0000000000000 --- a/tools/vm/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -slabinfo -page-types diff --git a/usr/.gitignore b/usr/.gitignore deleted file mode 100644 index 8e48117a3f3dc..0000000000000 --- a/usr/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -# -# Generated files -# -gen_init_cpio -initramfs_data.cpio -initramfs_data.cpio.gz -initramfs_data.cpio.bz2 -initramfs_data.cpio.lzma -initramfs_list -include