/* /lib/modules/3.13.0-74-generic/build/include/linux/mm.h */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/dcache.h>
#include <linux/file.h>
#include <linux/fs.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
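
/*
 * Illustrative sketch (example_pages_spanned() is hypothetical, not part of
 * this header): counting the pages a buffer touches by rounding its end up
 * with PAGE_ALIGN() and its start down with PAGE_MASK.  With 4K pages, a
 * 5000-byte buffer at 0x1010 spans two pages.
 */
static inline unsigned long example_pages_spanned(unsigned long addr,
						  size_t len)
{
	/* round the end up, the start down, then count whole pages */
	return (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
}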

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

/*
 * Special vmas that are non-mergable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];
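
/*
 * Illustrative sketch (example_prot_for() is hypothetical): the low four
 * vm_flags bits index protection_map[] directly; this is essentially what
 * vm_get_page_prot() does, minus any architecture-specific additions.
 */
static inline pgprot_t example_prot_for(unsigned long vm_flags)
{
	return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
}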

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x40	/* second try */
#define FAULT_FLAG_USER		0x80	/* The fault originated in userspace */

/*
 * vm_fault is filled in by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 * is used, one may implement ->remap_pages to get nonlinear mapping support.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs. 
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		const nodemask_t *to, unsigned long flags);
#endif
	/* called by sys_remap_file_pages() to populate non-linear mapping */
	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
			   unsigned long size, pgoff_t pgoff);
};
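
/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * a driver provides a static ops table and points vma->vm_ops at it from
 * its ->mmap() method; hooks it does not implement stay NULL.
 */
static inline void example_vm_open(struct vm_area_struct *area)
{
	/* e.g. take a reference on driver state kept in area->vm_private_data */
}

static inline void example_vm_close(struct vm_area_struct *area)
{
	/* e.g. drop the reference taken in example_vm_open() */
}

static const struct vm_operations_struct example_vm_ops = {
	.open	= example_vm_open,
	.close	= example_vm_close,
};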

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/* Valid only while the page is on the free path or in a free list */
static inline void set_freepage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

/* Valid only while the page is on the free path or in a free list */
static inline int get_freepage_migratetype(struct page *page)
{
	return page->index;
}

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}

/*
 * Try to drop a ref unless the page has a refcount of one, return false if
 * that is the case.
 * This is to make sure that the refcount won't become zero after this drop.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int put_page_unless_one(struct page *page)
{
	return atomic_add_unless(&page->_count, -1, 1);
}
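
/*
 * Illustrative sketch (example_try_pin() is hypothetical): the canonical
 * speculative-reference pattern built on the helper above.  A lockless
 * lookup takes a reference only if the page is still live, then must
 * re-validate its lookup before trusting the page.
 */
static inline int example_try_pin(struct page *page)
{
	if (!get_page_unless_zero(page))
		return 0;	/* page was already on its way to the allocator */
	/* ... re-check that the page is still the one that was looked up ... */
	return 1;		/* caller drops the reference with put_page() */
}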

extern int page_is_ram(unsigned long pfn);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void kvfree(const void *addr);

static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}

static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}

static inline unsigned long compound_lock_irqsave(struct page *page)
{
	unsigned long uninitialized_var(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	local_irq_save(flags);
	compound_lock(page);
#endif
	return flags;
}

static inline void compound_unlock_irqrestore(struct page *page,
					      unsigned long flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	compound_unlock(page);
	local_irq_restore(flags);
#endif
}

static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page))) {
		struct page *head = page->first_page;

		/*
		 * page->first_page may be a dangling pointer to an old
		 * compound page, so recheck that it is still a tail
		 * page before returning.
		 */
		smp_rmb();
		if (likely(PageTail(page)))
			return head;
	}
	return page;
}

/*
 * The atomic page->_mapcount starts from -1, so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline void get_huge_page_tail(struct page *page)
{
	/*
	 * __split_huge_page_refcount() cannot run
	 * from under us.
	 */
	VM_BUG_ON(page_mapcount(page) < 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	atomic_inc(&page->_mapcount);
}

extern bool __get_page_tail(struct page *page);

static inline void get_page(struct page *page)
{
	if (unlikely(PageTail(page)))
		if (likely(__get_page_tail(page)))
			return;
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_count.
	 */
	VM_BUG_ON(atomic_read(&page->_count) <= 0);
	atomic_inc(&page->_count);
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2, but should not be too close to
 * -2, so that an underflow of page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 * efficiently by most CPU architectures.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	atomic_set(&page->_mapcount, -1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a PG_compound page.
 */
typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return (unsigned long)page[1].lru.prev;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
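
/*
 * Illustrative sketch of the usual idiom (cf. the COW fault path in
 * mm/memory.c; example_mk_cow_pte() itself is hypothetical): dirty the new
 * pte, and make it writable only if the vma permits writes.
 */
static inline pte_t example_mk_cow_pte(pte_t entry, struct vm_area_struct *vma)
{
	return maybe_mkwrite(pte_mkdirty(entry), vma);
}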
#endif

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_CACHE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
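
/*
 * Illustrative sketch (example_cpupid_roundtrip() is hypothetical): a cpupid
 * packs the last CPU and PID, each truncated to its field width, into one
 * int, so the helpers above invert each other modulo that truncation.
 */
static inline void example_cpupid_roundtrip(int cpu, int pid)
{
	int cpupid = cpu_pid_to_cpupid(cpu, pid);

	VM_BUG_ON(cpupid_to_cpu(cpupid) != (cpu & LAST__CPU_MASK));
	VM_BUG_ON(cpupid_to_pid(cpupid) != (pid & LAST__PID_MASK));
}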
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;

	page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
	page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page); /* XXX */
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}
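
/*
 * Illustrative sketch (example_relink_page() is hypothetical): a round trip
 * through set_page_links() and the accessors above, here for a page placed
 * in ZONE_NORMAL on node 0.
 */
static inline void example_relink_page(struct page *page, unsigned long pfn)
{
	set_page_links(page, ZONE_NORMAL, 0, pfn);
	VM_BUG_ON(page_zonenum(page) != ZONE_NORMAL);
	VM_BUG_ON(page_to_nid(page) != 0);
}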

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

extern struct address_space *page_mapping(struct page *page);

/* Neutral page->mapping pointer to address_space or anon_vma or other */
static inline void *page_rmapping(struct page *page)
{
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}
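
/*
 * Illustrative sketch (example_page_anon_vma() is hypothetical; it ignores
 * the KSM case described above, where the same bits yield KSM's private
 * structure instead): recovering the anon_vma from an anonymous page.
 */
static inline struct anon_vma *example_page_anon_vma(struct page *page)
{
	if (!PageAnon(page))
		return NULL;
	return (struct anon_vma *)page_rmapping(page);
}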

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the file index of the page. Regular pagecache pages use ->index
 * whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_file_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);

	return page->index;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
#define VM_FAULT_SIGSEGV 0x0040

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, did not return a page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
			 VM_FAULT_FALLBACK)

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
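
/*
 * Illustrative sketch (hypothetical driver code): a minimal ->fault handler
 * using the codes above.  Returning 0 with vmf->page set hands a referenced
 * page to the core fault code; the VM_FAULT_ERROR codes abort the fault.
 */
static inline int example_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return VM_FAULT_OOM;
	vmf->page = page;	/* this reference is consumed by the caller */
	return 0;
}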

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
#define SHOW_MEM_FILTER_PAGE_COUNT	(0x0002u)	/* page type count */

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

int shmem_zero_setup(struct vm_area_struct *);

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  They may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 *		   *Caution*: The caller must hold mmap_sem if @hugetlb_entry
 * 			      is used.
 *
 * (see walk_page_range for more details)
 */
struct mm_walk {
	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pud_entry)(pud_t *pud, unsigned long addr,
	                 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	struct mm_struct *mm;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
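
/*
 * Illustrative sketch (both example functions are hypothetical): counting
 * populated PTEs in a range with walk_page_range().  Only the callbacks of
 * interest are filled in; levels whose hook is NULL are skipped.
 */
static inline int example_count_pte(pte_t *pte, unsigned long addr,
				    unsigned long next, struct mm_walk *walk)
{
	if (!pte_none(*pte))
		++*(unsigned long *)walk->private;
	return 0;
}

static inline unsigned long example_count_present(struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= example_count_pte,
		.mm		= mm,
		.private	= &count,
	};

	/* the caller must hold mm->mmap_sem, at least for reading */
	walk_page_range(start, end, &walk);
	return count;
}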
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
#endif

/*
 * Mainly for aufs, which mmap(2)s a different file and wants to print a
 * different path in /proc/PID/maps.
 */
/* #define AUFS_DEBUG_MMAP */
static inline void aufs_trace(struct file *f, struct file *pr,
			      const char func[], int line, const char func2[])
{
#ifdef AUFS_DEBUG_MMAP
	if (pr)
		pr_info("%s:%d: %s, %p\n", func, line, func2,
			f ? (char *)f->f_dentry->d_name.name : "(null)");
#endif
}

static inline struct file *vmr_do_pr_or_file(struct vm_region *region,
					     const char func[], int line)
{
	struct file *f = region->vm_file, *pr = region->vm_prfile;
	aufs_trace(f, pr, func, line, __func__);
	return (f && pr) ? pr : f;
}

static inline void vmr_do_fput(struct vm_region *region,
			       const char func[], int line)
{
	struct file *f = region->vm_file, *pr = region->vm_prfile;
	aufs_trace(f, pr, func, line, __func__);
	fput(f);
	if (f && pr)
		fput(pr);
}

static inline void vma_do_file_update_time(struct vm_area_struct *vma,
					   const char func[], int line)
{
	struct file *f = vma->vm_file, *pr = vma->vm_prfile;
	aufs_trace(f, pr, func, line, __func__);
	file_update_time(f);
	if (f && pr)
		file_update_time(pr);
}

static inline struct file *vma_do_pr_or_file(struct vm_area_struct *vma,
					     const char func[], int line)
{
	struct file *f = vma->vm_file, *pr = vma->vm_prfile;
	aufs_trace(f, pr, func, line, __func__);
	return (f && pr) ? pr : f;
}

static inline void vma_do_get_file(struct vm_area_struct *vma,
				   const char func[], int line)
{
	struct file *f = vma->vm_file, *pr = vma->vm_prfile;
	aufs_trace(f, pr, func, line, __func__);
	get_file(f);
	if (f && pr)
		get_file(pr);
}

static inline void vma_do_fput(struct vm_area_struct *vma,
			       const char func[], int line)
{
	struct file *f = vma->vm_file, *pr = vma->vm_prfile;
	aufs_trace(f, pr, func, line, __func__);
	fput(f);
	if (f && pr)
		fput(pr);
}

#define vmr_pr_or_file(region)		vmr_do_pr_or_file(region, __func__, \
							  __LINE__)
#define vmr_fput(region)		vmr_do_fput(region, __func__, __LINE__)
#define vma_file_update_time(vma)	vma_do_file_update_time(vma, __func__, \
								__LINE__)
#define vma_pr_or_file(vma)		vma_do_pr_or_file(vma, __func__, \
							  __LINE__)
#define vma_get_file(vma)		vma_do_get_file(vma, __func__, __LINE__)
#define vma_fput(vma)			vma_do_fput(vma, __func__, __LINE__)
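
/*
 * Example (sketch): code that used to operate on vma->vm_file directly
 * should go through the wrappers above, so that the aufs pseudo-file,
 * when present, is handled too:
 *
 *	file = vma_pr_or_file(vma);	(for /proc/PID/maps output)
 *	vma_get_file(vma);		(instead of get_file(vma->vm_file))
 *	vma_fput(vma);			(instead of fput(vma->vm_file))
 */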

extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write);

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
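
/*
 * Example (illustrative sketch): pinning one page of the current task
 * for reading, then releasing it.  "uaddr" is a hypothetical user-space
 * address.
 *
 *	struct page *page;
 *
 *	if (get_user_pages_fast(uaddr, 1, 0, &page) == 1) {
 *		(access the page contents, e.g. via kmap_atomic())
 *		put_page(page);
 *	}
 */
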
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_writeback(struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					     unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
					   unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

extern pid_t
vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * Like get_user_pages_fast(), but does not attempt to fault in missing
 * pages and therefore may return fewer pages than requested.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);
/*
 * Per-process (per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * The counter is updated asynchronously and may temporarily go
	 * negative.  A negative value is never a meaningful number for
	 * users, so report it as zero.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES);
}
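
/*
 * Example (sketch): a fault path that installs a new anonymous page
 * bumps the anon counter, and the total RSS reflects it afterwards:
 *
 *	inc_mm_counter(mm, MM_ANONPAGES);
 *	rss = get_mm_rss(mm);	(MM_FILEPAGES + MM_ANONPAGES)
 */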

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif

int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
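
/*
 * Example (sketch): building the page-table path down to a PTE for
 * "addr", as fault handlers do.  Each allocator returns NULL on
 * allocation failure.
 *
 *	pgd = pgd_offset(mm, addr);
 *	pud = pud_alloc(mm, pgd, addr);
 *	if (!pud)
 *		return VM_FAULT_OOM;
 *	pmd = pmd_alloc(mm, pud, addr);
 *	if (!pmd)
 *		return VM_FAULT_OOM;
 *	pte = pte_alloc_map(mm, vma, pmd, addr);
 */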

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initializes page->private (and therefore
	 * page->ptl) to 0.  Make sure nobody took it into use in the
	 * meantime.
	 *
	 * This can happen if an arch tries to use slab for page-table
	 * allocations: slab code uses page->slab_cache and
	 * page->first_page (for tail pages), which share storage with
	 * page->ptl.
	 */
	VM_BUG_ON(*(unsigned long *)&page->ptl);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

/* Reset page->mapping so free_pages_check won't complain. */
static inline void pte_lock_deinit(struct page *page)
{
	page->mapping = NULL;
	ptlock_free(page);
}

#else	/* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline bool pgtable_page_ctor(struct page *page)
{
	inc_zone_page_state(page, NR_PAGETABLE);
	return ptlock_init(page);
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
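
/*
 * Example (sketch): examining a single PTE under its page-table lock;
 * the mapping cannot change between the map_lock and the unmap_unlock:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte;
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (pte_present(*pte))
 *		(operate on the stable pte here)
 *	pte_unmap_unlock(pte, ptl);
 */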

#define pte_alloc_map(mm, vma, pmd, address)				\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,	\
							pmd, address))?	\
	 NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,	\
							pmd, address))?	\
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))

#if USE_SPLIT_PMD_PTLOCKS

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(virt_to_page(pmd));
}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	return ptlock_init(page);
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(page->pmd_huge_pte);
#endif
	ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (virt_to_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
static inline void pgtable_pmd_page_dtor(struct page *page) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	return ptl;
}
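
/*
 * Example (sketch): huge-pmd code takes the per-pmd lock the same way
 * regardless of whether split PMD ptlocks are in use:
 *
 *	ptl = pmd_lock(mm, pmd);
 *	if (pmd_trans_huge(*pmd))
 *		(operate on the huge pmd)
 *	spin_unlock(ptl);
 */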

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system.  The freed pages are poisoned with the pattern
 * "poison" if it lies within the range [0, UCHAR_MAX].
 * Returns the number of pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(void *start, void *end,
					int poison, char *s);

#ifdef	CONFIG_HIGHMEM
/*
 * Free a highmem page into the buddy system, adjusting totalhigh_pages
 * and totalram_pages.
 */
extern void free_highmem_page(struct page *page);
#endif

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages are poisoned with the pattern "poison" if it lies
 * within the range [0, UCHAR_MAX].
 * Returns the number of pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel");
}
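
/*
 * Example (sketch): an architecture with no special requirements can
 * implement free_initmem() as below; a negative poison value skips the
 * poisoning step.
 *
 *	void free_initmem(void)
 *	{
 *		free_initmem_default(-1);
 *	}
 */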

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in a more
 * architecture-independent manner.  This is a substitute for creating the
 * zone_sizes[] and zholes_size[] arrays and passing them to
 * free_area_init_node().
 *
 * An architecture is expected to register the ranges of page frames backed
 * by physical memory with memblock_add[_node]() before calling
 * free_area_init_nodes(), passing in the PFN each zone ends at.  In basic
 * usage, an architecture is expected to do something like
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * 							 max_highmem_pfn};
 * for_each_valid_physical_page_range()
 * 	memblock_add_node(base, size, nid)
 * free_area_init_nodes(max_zone_pfns);
 *
 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
 * registered physical page range.  Similarly
 * sparse_memory_present_with_active_regions() calls memory_present() for
 * each range when SPARSEMEM is enabled.
 *
 * See mm/page_alloc.c for more information on each function exposed by
 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn);
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);

extern __printf(3, 4)
void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

/* page_alloc.c */
extern int min_free_kbytes;

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
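
/*
 * Example (sketch): rmap-style iteration over every VMA mapping the
 * file page at index "pgoff", with mapping->i_mmap_mutex held:
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff)
 *		(vma maps the page at pgoff)
 */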

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
					struct list_head *list)
{
	list_add_tail(&vma->shared.nonlinear, list);
}

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root *root);
struct anon_vma_chain *anon_vma_interval_tree_iter_first(
	struct rb_root *root, unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);

extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern unsigned long vm_brk(unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long vm_mmap(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size;
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
		return unmapped_area(info);
	else
		return unmapped_area_topdown(info);
}
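
/*
 * Example (sketch): a simplified bottom-up arch_get_unmapped_area()
 * built on vm_unmapped_area(); the limits shown are the common ones.
 *
 *	struct vm_unmapped_area_info info;
 *
 *	info.flags = 0;			(bottom-up search)
 *	info.length = len;
 *	info.low_limit = TASK_UNMAPPED_BASE;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = 0;
 *	info.align_offset = 0;
 *	return vm_unmapped_area(&info);
 */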

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

unsigned long max_sane_readahead(unsigned long nr);
unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping,
			struct file *filp);

/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm,start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
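
/*
 * Example (sketch): rejecting a request when any part of
 * [start, start + len) is already mapped:
 *
 *	if (find_vma_intersection(mm, start, start + len))
 *		return -EEXIST;
 */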

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
#endif

#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);


struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int foll_flags,
			      unsigned int *page_mask);

static inline struct page *follow_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int foll_flags)
{
	unsigned int unused_page_mask;
	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_MLOCK	0x40	/* mark page as mlocked */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
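
/*
 * Example (sketch): looking up and grabbing a reference on the page
 * backing "addr"; the caller holds mmap_sem.  follow_page() may return
 * NULL or an ERR_PTR(), hence IS_ERR_OR_NULL():
 *
 *	page = follow_page(vma, addr, FOLL_GET);
 *	if (IS_ERR_OR_NULL(page))
 *		return -EFAULT;
 *	(use the page)
 *	put_page(page);		(pairs with FOLL_GET)
 */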

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
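
/*
 * Example (sketch): a pte_fn_t callback that points every PTE in a
 * kernel range at a given page.  "set_one_pte" is a hypothetical name.
 *
 *	static int set_one_pte(pte_t *pte, pgtable_t token,
 *			       unsigned long addr, void *data)
 *	{
 *		struct page *page = data;
 *
 *		set_pte_at(&init_mm, addr, pte, mk_pte(page, PAGE_KERNEL));
 *		return 0;
 *	}
 *
 *	apply_to_page_range(&init_mm, start, size, set_one_pte, page);
 */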

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
	mm->total_vm += pages;
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable);
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
#endif

extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
#ifdef	__HAVE_ARCH_GATE_AREA
int in_gate_area_no_mm(unsigned long addr);
int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
int in_gate_area_no_mm(unsigned long addr);
#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
#endif	/* __HAVE_ARCH_GATE_AREA */

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

void sparse_mem_maps_populate_node(struct page **map_map,
				   unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count,
				   int nodeid);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long size);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages;
extern int soft_offline_page(struct page *page, int flags);

extern void dump_page(struct page *page);

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool page_is_guard(struct page *page)
{
	return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
